// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2023 Meta, Inc */
#include <linux/bpf.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/cpumask.h>

/**
 * struct bpf_cpumask - refcounted BPF cpumask wrapper structure
 * @cpumask: The actual cpumask embedded in the struct.
 * @usage: Object reference counter. When the refcount goes to 0, the
 *         memory is released back to the BPF allocator, which provides
 *         RCU safety.
 *
 * Note that we explicitly embed a cpumask_t rather than a cpumask_var_t. This
 * is done to avoid confusing the verifier due to the typedef of cpumask_var_t
 * changing depending on whether CONFIG_CPUMASK_OFFSTACK is defined or not. See
 * the details in <linux/cpumask.h>. The consequence is that this structure is
 * likely a bit larger than it needs to be when CONFIG_CPUMASK_OFFSTACK is
 * defined due to embedding the whole NR_CPUS-size bitmap, but the extra memory
 * overhead is minimal. For the more typical case of CONFIG_CPUMASK_OFFSTACK
 * not being defined, the structure is the same size regardless.
 */
struct bpf_cpumask {
	cpumask_t cpumask;
	refcount_t usage;
};

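/*
 * A minimal, illustrative sketch of how a BPF program might use these kfuncs
 * once they are registered. This is not code in this file; the direct cast
 * from struct bpf_cpumask * to const struct cpumask * for read-only arguments
 * is an assumption about the BPF-side calling convention, and count is a
 * hypothetical per-program counter:
 *
 *	struct bpf_cpumask *mask = bpf_cpumask_create();
 *
 *	if (!mask)
 *		return -ENOMEM;
 *	bpf_cpumask_set_cpu(1, mask);
 *	if (bpf_cpumask_test_cpu(1, (const struct cpumask *)mask))
 *		count++;
 *	bpf_cpumask_release(mask);
 */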
static struct bpf_mem_alloc bpf_cpumask_ma;

static bool cpu_valid(u32 cpu)
{
	return cpu < nr_cpu_ids;
}

__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global kfuncs as their definitions will be in BTF");

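/**
 * bpf_cpumask_create() - Create a mutable BPF cpumask.
 *
 * Allocates a cpumask that can be queried, mutated, acquired, and released by
 * a BPF program. The cpumask returned by this function must either be embedded
 * in a map as a kptr, or freed with bpf_cpumask_release().
 *
 * bpf_cpumask_create() allocates memory using the BPF memory allocator, and
 * will not block. It may return NULL if no memory is available.
 */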
struct bpf_cpumask *bpf_cpumask_create(void)
{
	struct bpf_cpumask *cpumask;

	cpumask = bpf_mem_alloc(&bpf_cpumask_ma, sizeof(*cpumask));
	if (!cpumask)
		return NULL;

	memset(cpumask, 0, sizeof(*cpumask));
	refcount_set(&cpumask->usage, 1);

	return cpumask;
}

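/**
 * bpf_cpumask_acquire() - Acquire a reference to a BPF cpumask.
 * @cpumask: The BPF cpumask being acquired. The cpumask must be a trusted
 *	     pointer.
 *
 * Acquires a reference to a BPF cpumask. The cpumask returned by this function
 * must either be embedded in a map as a kptr, or freed with
 * bpf_cpumask_release().
 */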
struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
{
	refcount_inc(&cpumask->usage);
	return cpumask;
}

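/**
 * bpf_cpumask_kptr_get() - Attempt to acquire a reference to a BPF cpumask
 *			    stored in a map.
 * @cpumaskp: A pointer to a BPF cpumask map value.
 *
 * Attempts to acquire a reference to a BPF cpumask stored in a map. Returns
 * the acquired cpumask if successful, or NULL if the kptr was either NULL or
 * its refcount had already dropped to zero.
 */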
struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
{
	struct bpf_cpumask *cpumask;

	/* The BPF memory allocator frees memory backing its caches in an RCU
	 * callback. Thus, we can safely use RCU to ensure that the cpumask is
	 * safe to read.
	 */
	rcu_read_lock();

	cpumask = READ_ONCE(*cpumaskp);
	if (cpumask && !refcount_inc_not_zero(&cpumask->usage))
		cpumask = NULL;

	rcu_read_unlock();
	return cpumask;
}

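/**
 * bpf_cpumask_release() - Release a previously acquired BPF cpumask.
 * @cpumask: The cpumask being released.
 *
 * Releases a previously acquired reference to a BPF cpumask. When the final
 * reference of the BPF cpumask has been released, it is subsequently freed in
 * an RCU callback in the BPF memory allocator.
 */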
void bpf_cpumask_release(struct bpf_cpumask *cpumask)
{
	if (!cpumask)
		return;

	if (refcount_dec_and_test(&cpumask->usage)) {
		migrate_disable();
		bpf_mem_free(&bpf_cpumask_ma, cpumask);
		migrate_enable();
	}
}

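/**
 * bpf_cpumask_first() - Get the index of the first nonzero bit in the cpumask.
 * @cpumask: The cpumask being queried.
 *
 * Find the index of the first nonzero bit of the cpumask. A struct bpf_cpumask
 * pointer may be safely passed to this function.
 */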
u32 bpf_cpumask_first(const struct cpumask *cpumask)
{
	return cpumask_first(cpumask);
}

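/**
 * bpf_cpumask_first_zero() - Get the index of the first unset bit in the
 *			      cpumask.
 * @cpumask: The cpumask being queried.
 *
 * Find the index of the first unset bit of the cpumask. A struct bpf_cpumask
 * pointer may be safely passed to this function.
 */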
u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
{
	return cpumask_first_zero(cpumask);
}

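/**
 * bpf_cpumask_set_cpu() - Set a bit for a CPU in a BPF cpumask.
 * @cpu: The CPU to be set in the cpumask.
 * @cpumask: The BPF cpumask in which a bit is being set.
 *
 * Invalid CPU values (>= nr_cpu_ids) are silently ignored.
 */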
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return;

	cpumask_set_cpu(cpu, (struct cpumask *)cpumask);
}

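/**
 * bpf_cpumask_clear_cpu() - Clear a bit for a CPU in a BPF cpumask.
 * @cpu: The CPU to be cleared from the cpumask.
 * @cpumask: The BPF cpumask in which a bit is being cleared.
 *
 * Invalid CPU values (>= nr_cpu_ids) are silently ignored.
 */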
void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return;

	cpumask_clear_cpu(cpu, (struct cpumask *)cpumask);
}

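/**
 * bpf_cpumask_test_cpu() - Test whether a CPU is set in a cpumask.
 * @cpu: The CPU being queried for.
 * @cpumask: The cpumask being queried for containing a CPU.
 *
 * Return:
 * * true  - @cpu is set in the cpumask
 * * false - @cpu was not set in the cpumask, or @cpu is an invalid cpu.
 */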
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return false;

	return cpumask_test_cpu(cpu, (struct cpumask *)cpumask);
}

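/**
 * bpf_cpumask_test_and_set_cpu() - Atomically test and set a CPU in a BPF
 *				    cpumask.
 * @cpu: The CPU to be set and queried for.
 * @cpumask: The BPF cpumask in which a bit is being set and queried.
 *
 * Return:
 * * true  - @cpu was already set in the cpumask (the bit remains set)
 * * false - @cpu was not set in the cpumask, or @cpu is invalid.
 */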
bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return false;

	return cpumask_test_and_set_cpu(cpu, (struct cpumask *)cpumask);
}

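/**
 * bpf_cpumask_test_and_clear_cpu() - Atomically test and clear a CPU in a BPF
 *				      cpumask.
 * @cpu: The CPU to be cleared and queried for.
 * @cpumask: The BPF cpumask in which a bit is being cleared and queried.
 *
 * Return:
 * * true  - @cpu was set in the cpumask before it was cleared
 * * false - @cpu was not set in the cpumask, or @cpu is invalid.
 */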
bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return false;

	return cpumask_test_and_clear_cpu(cpu, (struct cpumask *)cpumask);
}

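/**
 * bpf_cpumask_setall() - Set all of the bits in a BPF cpumask.
 * @cpumask: The BPF cpumask having all of its bits set.
 */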
void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
{
	cpumask_setall((struct cpumask *)cpumask);
}

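/**
 * bpf_cpumask_clear() - Clear all of the bits in a BPF cpumask.
 * @cpumask: The BPF cpumask being cleared.
 */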
void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
{
	cpumask_clear((struct cpumask *)cpumask);
}

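/**
 * bpf_cpumask_and() - AND two cpumasks and store the result.
 * @dst: The BPF cpumask where the result is being stored.
 * @src1: The first input.
 * @src2: The second input.
 *
 * Return:
 * * true  - @dst has at least one bit set following the operation
 * * false - @dst is empty following the operation
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */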
bool bpf_cpumask_and(struct bpf_cpumask *dst,
		     const struct cpumask *src1,
		     const struct cpumask *src2)
{
	return cpumask_and((struct cpumask *)dst, src1, src2);
}

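/**
 * bpf_cpumask_or() - OR two cpumasks and store the result.
 * @dst: The BPF cpumask where the result is being stored.
 * @src1: The first input.
 * @src2: The second input.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */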
void bpf_cpumask_or(struct bpf_cpumask *dst,
		    const struct cpumask *src1,
		    const struct cpumask *src2)
{
	cpumask_or((struct cpumask *)dst, src1, src2);
}

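/**
 * bpf_cpumask_xor() - XOR two cpumasks and store the result.
 * @dst: The BPF cpumask where the result is being stored.
 * @src1: The first input.
 * @src2: The second input.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */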
void bpf_cpumask_xor(struct bpf_cpumask *dst,
		     const struct cpumask *src1,
		     const struct cpumask *src2)
{
	cpumask_xor((struct cpumask *)dst, src1, src2);
}

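/**
 * bpf_cpumask_equal() - Check two cpumasks for equality.
 * @src1: The first input.
 * @src2: The second input.
 *
 * Return:
 * * true  - @src1 and @src2 have the same bits set.
 * * false - @src1 and @src2 differ in at least one bit.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */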
bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
{
	return cpumask_equal(src1, src2);
}

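/**
 * bpf_cpumask_intersects() - Check two cpumasks for overlap.
 * @src1: The first input.
 * @src2: The second input.
 *
 * Return:
 * * true  - @src1 and @src2 have at least one of the same bits set.
 * * false - @src1 and @src2 don't have any of the same bits set.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */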
bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2)
{
	return cpumask_intersects(src1, src2);
}

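/**
 * bpf_cpumask_subset() - Check if a cpumask is a subset of another.
 * @src1: The first cpumask being checked as a subset.
 * @src2: The second cpumask being checked as a superset.
 *
 * Return:
 * * true  - All of the bits of @src1 are set in @src2.
 * * false - At least one bit in @src1 is not set in @src2.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */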
bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
{
	return cpumask_subset(src1, src2);
}

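/**
 * bpf_cpumask_empty() - Check if a cpumask is empty.
 * @cpumask: The cpumask being checked.
 *
 * Return:
 * * true  - None of the bits in @cpumask are set.
 * * false - At least one bit in @cpumask is set.
 *
 * A struct bpf_cpumask pointer may be safely passed to @cpumask.
 */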
bool bpf_cpumask_empty(const struct cpumask *cpumask)
{
	return cpumask_empty(cpumask);
}

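/**
 * bpf_cpumask_full() - Check if a cpumask has all bits set.
 * @cpumask: The cpumask being checked.
 *
 * Return:
 * * true  - All of the bits in @cpumask are set.
 * * false - At least one bit in @cpumask is cleared.
 *
 * A struct bpf_cpumask pointer may be safely passed to @cpumask.
 */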
bool bpf_cpumask_full(const struct cpumask *cpumask)
{
	return cpumask_full(cpumask);
}

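/**
 * bpf_cpumask_copy() - Copy the contents of a cpumask into a BPF cpumask.
 * @dst: The BPF cpumask being copied into.
 * @src: The cpumask being copied.
 *
 * A struct bpf_cpumask pointer may be safely passed to @src.
 */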
void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
{
	cpumask_copy((struct cpumask *)dst, src);
}

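/**
 * bpf_cpumask_any() - Return a random set CPU from a cpumask.
 * @cpumask: The cpumask being queried.
 *
 * Return:
 * * A random set bit within [0, num_cpus) if at least one bit is set.
 * * >= num_cpus if no bit is set.
 *
 * A struct bpf_cpumask pointer may be safely passed to @cpumask.
 */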
u32 bpf_cpumask_any(const struct cpumask *cpumask)
{
	return cpumask_any(cpumask);
}

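/**
 * bpf_cpumask_any_and() - Return a random set CPU from the AND of two
 *			   cpumasks.
 * @src1: The first cpumask.
 * @src2: The second cpumask.
 *
 * Return:
 * * A random set bit within [0, num_cpus) if at least one bit is set.
 * * >= num_cpus if no bit is set.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */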
u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2)
{
	return cpumask_any_and(src1, src2);
}

__diag_pop();

BTF_SET8_START(cpumask_kfunc_btf_ids)
BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cpumask_first, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_first_zero, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_set_cpu, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_clear_cpu, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_test_cpu, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_test_and_set_cpu, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_test_and_clear_cpu, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_setall, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_clear, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_and, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_or, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_xor, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_equal, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_intersects, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_subset, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_empty, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_full, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_any, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_any_and, KF_TRUSTED_ARGS)
BTF_SET8_END(cpumask_kfunc_btf_ids)

static const struct btf_kfunc_id_set cpumask_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &cpumask_kfunc_btf_ids,
};

BTF_ID_LIST(cpumask_dtor_ids)
BTF_ID(struct, bpf_cpumask)
BTF_ID(func, bpf_cpumask_release)

static int __init cpumask_kfunc_init(void)
{
	int ret;
	const struct btf_id_dtor_kfunc cpumask_dtors[] = {
		{
			.btf_id = cpumask_dtor_ids[0],
			.kfunc_btf_id = cpumask_dtor_ids[1]
		},
	};

	ret = bpf_mem_alloc_init(&bpf_cpumask_ma, 0, false);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &cpumask_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &cpumask_kfunc_set);
	return ret ?: register_btf_id_dtor_kfuncs(cpumask_dtors,
						  ARRAY_SIZE(cpumask_dtors),
						  THIS_MODULE);
}

late_initcall(cpumask_kfunc_init);