// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2026 KylinSoft Corporation.
 * Copyright (c) 2026 Kaitao Cheng <[email protected]>
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/string.h>
#include "ufq-iosched.h"

struct ufq_iosched_ops ufq_ops;

static const struct bpf_func_proto *
bpf_ufq_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return bpf_base_func_proto(func_id, prog);
}
| 22 | + |
| 23 | +static bool bpf_ufq_is_valid_access(int off, int size, |
| 24 | + enum bpf_access_type type, |
| 25 | + const struct bpf_prog *prog, |
| 26 | + struct bpf_insn_access_aux *info) |
| 27 | +{ |
| 28 | + if (type != BPF_READ) |
| 29 | + return false; |
| 30 | + if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) |
| 31 | + return false; |
| 32 | + if (off % size != 0) |
| 33 | + return false; |
| 34 | + |
| 35 | + /* |
| 36 | + * merge_req's third argument is int *type. btf_ctx_access() treats |
| 37 | + * pointers that are not "pointer to struct" as scalars (no reg_type), |
| 38 | + * so loading the pointer from ctx leaves a SCALAR and *type stores |
| 39 | + * fail verification. Model it as a read/write buffer of merge_type. |
| 40 | + */ |
| 41 | + if (off == 16 && size == sizeof(__u64) && |
| 42 | + prog->aux->attach_func_name && |
| 43 | + !strcmp(prog->aux->attach_func_name, "merge_req")) { |
| 44 | + if (!btf_ctx_access(off, size, type, prog, info)) |
| 45 | + return false; |
| 46 | + info->reg_type = PTR_TO_BUF; |
| 47 | + return true; |
| 48 | + } |
| 49 | + |
| 50 | + return btf_ctx_access(off, size, type, prog, info); |
| 51 | +} |
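
/*
 * Illustrative only, not compiled as part of this file: a minimal sketch of
 * the BPF-side callback that the PTR_TO_BUF modelling above is meant to
 * enable, assuming a typical libbpf struct_ops skeleton.  The SEC() name and
 * program name below are assumptions, not defined anywhere in this patch.
 *
 *	SEC("struct_ops/merge_req")
 *	struct request *BPF_PROG(ufq_merge_req, struct request_queue *q,
 *				 struct request *rq, int *type)
 *	{
 *		*type = ELEVATOR_NO_MERGE;	// store through the third ctx arg now verifies
 *		return NULL;
 *	}
 */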

static const struct bpf_verifier_ops bpf_ufq_verifier_ops = {
	.get_func_proto = bpf_ufq_get_func_proto,
	.is_valid_access = bpf_ufq_is_valid_access,
};

static int bpf_ufq_init_member(const struct btf_type *t,
			       const struct btf_member *member,
			       void *kdata, const void *udata)
{
	const struct ufq_iosched_ops *uops = udata;
	struct ufq_iosched_ops *ops = kdata;
	u32 moff = __btf_member_bit_offset(t, member) / 8;
	int ret;

	switch (moff) {
	case offsetof(struct ufq_iosched_ops, name):
		ret = bpf_obj_name_cpy(ops->name, uops->name,
				       sizeof(ops->name));
		if (ret < 0)
			return ret;
		if (ret == 0)
			return -EINVAL;
		return 1;
	/* Additional members can be initialized here as they are added. */
	}

	return 0;
}

static int bpf_ufq_check_member(const struct btf_type *t,
				const struct btf_member *member,
				const struct bpf_prog *prog)
{
	return 0;
}

static int bpf_ufq_enable(struct ufq_iosched_ops *ops)
{
	ufq_ops = *ops;
	return 0;
}

static void bpf_ufq_disable(struct ufq_iosched_ops *ops)
{
	memset(&ufq_ops, 0, sizeof(ufq_ops));
}

static int bpf_ufq_reg(void *kdata, struct bpf_link *link)
{
	return bpf_ufq_enable(kdata);
}

static void bpf_ufq_unreg(void *kdata, struct bpf_link *link)
{
	bpf_ufq_disable(kdata);
}

static int bpf_ufq_init(struct btf *btf)
{
	return 0;
}

static int bpf_ufq_update(void *kdata, void *old_kdata, struct bpf_link *link)
{
	/*
	 * UFQ does not support live-updating an already-attached BPF scheduler:
	 * partial failure during callback setup (e.g. init_sched) would be hard
	 * to reason about, and update can race with unregister/teardown.
	 */
	return -EOPNOTSUPP;
}

static int bpf_ufq_validate(void *kdata)
{
	return 0;
}

static int init_sched_stub(struct request_queue *q)
{
	return -EPERM;
}

static int exit_sched_stub(struct request_queue *q)
{
	return -EPERM;
}

static int insert_req_stub(struct request_queue *q, struct request *rq,
			   blk_insert_t flags)
{
	return 0;
}

static struct request *dispatch_req_stub(struct request_queue *q)
{
	return NULL;
}

static bool has_req_stub(struct request_queue *q, int rqs_count)
{
	return rqs_count > 0;
}

static void finish_req_stub(struct request *rq)
{
}

static struct request *former_req_stub(struct request_queue *q, struct request *rq)
{
	return NULL;
}

static struct request *next_req_stub(struct request_queue *q, struct request *rq)
{
	return NULL;
}

static struct request *merge_req_stub(struct request_queue *q, struct request *rq,
				      int *type)
{
	*type = ELEVATOR_NO_MERGE;
	return NULL;
}

static void req_merged_stub(struct request_queue *q, struct request *rq,
			    int type)
{
}

static struct ufq_iosched_ops __bpf_ops_ufq_ops = {
	.init_sched = init_sched_stub,
	.exit_sched = exit_sched_stub,
	.insert_req = insert_req_stub,
	.dispatch_req = dispatch_req_stub,
	.has_req = has_req_stub,
	.former_req = former_req_stub,
	.next_req = next_req_stub,
	.merge_req = merge_req_stub,
	.req_merged = req_merged_stub,
	.finish_req = finish_req_stub,
};

static struct bpf_struct_ops bpf_iosched_ufq_ops = {
	.verifier_ops = &bpf_ufq_verifier_ops,
	.reg = bpf_ufq_reg,
	.unreg = bpf_ufq_unreg,
	.check_member = bpf_ufq_check_member,
	.init_member = bpf_ufq_init_member,
	.init = bpf_ufq_init,
	.update = bpf_ufq_update,
	.validate = bpf_ufq_validate,
	.name = "ufq_iosched_ops",
	.owner = THIS_MODULE,
	.cfi_stubs = &__bpf_ops_ufq_ops,
};

int bpf_ufq_ops_init(void)
{
	return register_bpf_struct_ops(&bpf_iosched_ufq_ops, ufq_iosched_ops);
}
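
/*
 * A minimal sketch, assuming a typical libbpf skeleton, of how a BPF
 * scheduler is expected to publish its callbacks through this struct_ops.
 * The map and program names are illustrative, not defined in this patch:
 *
 *	SEC(".struct_ops.link")
 *	struct ufq_iosched_ops bpf_ufq = {
 *		.name		= "bpf_ufq",
 *		.insert_req	= (void *)ufq_insert_req,
 *		.dispatch_req	= (void *)ufq_dispatch_req,
 *		.merge_req	= (void *)ufq_merge_req,
 *	};
 *
 * Attaching the map, e.g. with bpf_map__attach_struct_ops(), lands in
 * bpf_ufq_reg() above, which copies the verified callbacks into the global
 * ufq_ops; detaching goes through bpf_ufq_unreg()/bpf_ufq_disable().
 */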