// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */

#include "macsec.h"
#include <linux/mlx5/macsec.h>

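/*
 * Bookkeeping for a hardware GID table slot whose physical GID was cleared
 * to make room for a MACsec GID carrying the same IP. macsec_index is the
 * index of that MACsec GID, or -1 while the slot is unused; physical_gid
 * holds a reference to the displaced GID so it can be restored later.
 */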
struct mlx5_reserved_gids {
	int macsec_index;
	const struct ib_gid_attr *physical_gid;
};

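/*
 * A MACsec GID (by index) and its IPv4/IPv6 address, saved per MACsec
 * device so SA add/delete events can replay steering rules for each GID.
 */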
struct mlx5_roce_gids {
	struct list_head roce_gid_list_entry;
	u16 gid_idx;
	union {
		struct sockaddr_in sockaddr_in;
		struct sockaddr_in6 sockaddr_in6;
	} addr;
};

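/*
 * Per-MACsec-netdev state: the GIDs configured on the device and the TX/RX
 * steering rules installed for them. Entries live on the per-IB-device
 * macsec_devices_list and are freed once all three lists are empty.
 */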
struct mlx5_macsec_device {
	struct list_head macsec_devices_list_entry;
	void *macdev;
	struct list_head macsec_roce_gids;
	struct list_head tx_rules_list;
	struct list_head rx_rules_list;
};

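/*
 * Release the per-device state, but only once it no longer tracks any GIDs
 * or steering rules; a no-op while any of the lists is still populated.
 */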
static void cleanup_macsec_device(struct mlx5_macsec_device *macsec_device)
{
	if (!list_empty(&macsec_device->tx_rules_list) ||
	    !list_empty(&macsec_device->rx_rules_list) ||
	    !list_empty(&macsec_device->macsec_roce_gids))
		return;

	list_del(&macsec_device->macsec_devices_list_entry);
	kfree(macsec_device);
}

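/*
 * Find the state tracked for @macdev, allocating and linking a fresh entry
 * on first use. Returns NULL on allocation failure. Callers hold the
 * macsec lock.
 */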
static struct mlx5_macsec_device *get_macsec_device(void *macdev,
						    struct list_head *macsec_devices_list)
{
	struct mlx5_macsec_device *iter, *macsec_device = NULL;

	list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) {
		if (iter->macdev == macdev) {
			macsec_device = iter;
			break;
		}
	}

	if (macsec_device)
		return macsec_device;

	macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
	if (!macsec_device)
		return NULL;

	macsec_device->macdev = macdev;
	INIT_LIST_HEAD(&macsec_device->tx_rules_list);
	INIT_LIST_HEAD(&macsec_device->rx_rules_list);
	INIT_LIST_HEAD(&macsec_device->macsec_roce_gids);
	list_add(&macsec_device->macsec_devices_list_entry, macsec_devices_list);

	return macsec_device;
}

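/* Drop and free the saved GID entry matching @gid_idx, if any. */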
static void mlx5_macsec_del_roce_gid(struct mlx5_macsec_device *macsec_device, u16 gid_idx)
{
	struct mlx5_roce_gids *current_gid, *next_gid;

	list_for_each_entry_safe(current_gid, next_gid, &macsec_device->macsec_roce_gids,
				 roce_gid_list_entry)
		if (current_gid->gid_idx == gid_idx) {
			list_del(&current_gid->roce_gid_list_entry);
			kfree(current_gid);
		}
}

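/*
 * Remember @addr/@gid_idx so later SA events can rebuild the steering rules
 * for this GID. Allocation failure is silently tolerated: the GID simply
 * will not have rules replayed for future SAs.
 */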
static void mlx5_macsec_save_roce_gid(struct mlx5_macsec_device *macsec_device,
				      const struct sockaddr *addr, u16 gid_idx)
{
	struct mlx5_roce_gids *roce_gids;

	roce_gids = kzalloc(sizeof(*roce_gids), GFP_KERNEL);
	if (!roce_gids)
		return;

	roce_gids->gid_idx = gid_idx;
	if (addr->sa_family == AF_INET)
		memcpy(&roce_gids->addr.sockaddr_in, addr, sizeof(roce_gids->addr.sockaddr_in));
	else
		memcpy(&roce_gids->addr.sockaddr_in6, addr, sizeof(roce_gids->addr.sockaddr_in6));

	list_add_tail(&roce_gids->roce_gid_list_entry, &macsec_device->macsec_roce_gids);
}

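/*
 * An SA was added: install the RoCE TX/RX steering rules for the new SA
 * (identified by fs_id) for every GID saved on the MACsec device.
 */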
static void handle_macsec_gids(struct list_head *macsec_devices_list,
			       struct mlx5_macsec_event_data *data)
{
	struct mlx5_macsec_device *macsec_device;
	struct mlx5_roce_gids *gid;

	macsec_device = get_macsec_device(data->macdev, macsec_devices_list);
	if (!macsec_device)
		return;

	list_for_each_entry(gid, &macsec_device->macsec_roce_gids, roce_gid_list_entry) {
		mlx5_macsec_add_roce_sa_rules(data->fs_id, (struct sockaddr *)&gid->addr,
					      gid->gid_idx, &macsec_device->tx_rules_list,
					      &macsec_device->rx_rules_list, data->macsec_fs,
					      data->is_tx);
	}
}

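/*
 * An SA was deleted: tear down the steering rules installed for it. The
 * lookup allocates on a miss, so a NULL return can only mean allocation
 * failure; the WARN_ON flags that unexpected case.
 */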
static void del_sa_roce_rule(struct list_head *macsec_devices_list,
			     struct mlx5_macsec_event_data *data)
{
	struct mlx5_macsec_device *macsec_device;

	macsec_device = get_macsec_device(data->macdev, macsec_devices_list);
	WARN_ON(!macsec_device);

	mlx5_macsec_del_roce_sa_rules(data->fs_id, data->macsec_fs,
				      &macsec_device->tx_rules_list,
				      &macsec_device->rx_rules_list, data->is_tx);
}

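/*
 * Blocking-notifier callback: dispatch SA add/delete events coming from the
 * mlx5 core MACsec offload, serialized by the macsec lock.
 */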
static int macsec_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5_macsec *macsec = container_of(nb, struct mlx5_macsec, blocking_events_nb);

	mutex_lock(&macsec->lock);
	switch (event) {
	case MLX5_DRIVER_EVENT_MACSEC_SA_ADDED:
		handle_macsec_gids(&macsec->macsec_devices_list, data);
		break;
	case MLX5_DRIVER_EVENT_MACSEC_SA_DELETED:
		del_sa_roce_rule(&macsec->macsec_devices_list, data);
		break;
	default:
		mutex_unlock(&macsec->lock);
		return NOTIFY_DONE;
	}
	mutex_unlock(&macsec->lock);
	return NOTIFY_OK;
}

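/* Subscribe to MACsec SA events from mlx5 core, if RoCE MACsec is supported. */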
void mlx5r_macsec_event_register(struct mlx5_ib_dev *dev)
{
	if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
		mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
		return;
	}

	dev->macsec.blocking_events_nb.notifier_call = macsec_event;
	blocking_notifier_chain_register(&dev->mdev->macsec_nh,
					 &dev->macsec.blocking_events_nb);
}

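/* Unsubscribe from MACsec SA events; mirrors mlx5r_macsec_event_register(). */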
void mlx5r_macsec_event_unregister(struct mlx5_ib_dev *dev)
{
	if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
		mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
		return;
	}

	blocking_notifier_chain_unregister(&dev->mdev->macsec_nh,
					   &dev->macsec.blocking_events_nb);
}

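/*
 * Allocate one reserved-GID slot per hardware GID table entry on each port
 * (macsec_index == -1 marks a slot unused) and set up the MACsec device
 * list and its lock. Returns 0 when RoCE MACsec is unsupported, since there
 * is nothing to prepare.
 */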
int mlx5r_macsec_init_gids_and_devlist(struct mlx5_ib_dev *dev)
{
	int i, j, max_gids;

	if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
		mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
		return 0;
	}

	max_gids = MLX5_CAP_ROCE(dev->mdev, roce_address_table_size);
	for (i = 0; i < dev->num_ports; i++) {
		dev->port[i].reserved_gids = kcalloc(max_gids,
						     sizeof(*dev->port[i].reserved_gids),
						     GFP_KERNEL);
		if (!dev->port[i].reserved_gids)
			goto err;

		for (j = 0; j < max_gids; j++)
			dev->port[i].reserved_gids[j].macsec_index = -1;
	}

	INIT_LIST_HEAD(&dev->macsec.macsec_devices_list);
	mutex_init(&dev->macsec.lock);

	return 0;
err:
	while (i >= 0) {
		kfree(dev->port[i].reserved_gids);
		i--;
	}
	return -ENOMEM;
}

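/* Free the per-port reserved-GID tables allocated at init time. */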
void mlx5r_macsec_dealloc_gids(struct mlx5_ib_dev *dev)
{
	int i;

	if (!mlx5_is_macsec_roce_supported(dev->mdev))
		mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");

	for (i = 0; i < dev->num_ports; i++)
		kfree(dev->port[i].reserved_gids);

	mutex_destroy(&dev->macsec.lock);
}

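/*
 * Called when a RoCEv2 GID is added. If the GID belongs to an offloaded
 * MACsec netdev: when the parent device already exposes a physical GID with
 * the same IP (GID ambiguity), clear that GID from the hardware table and
 * park it in reserved_gids so traffic resolves to the MACsec GID; then
 * install the MACsec RoCE steering rules for the new GID and save it for
 * replay on future SA events.
 */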
int mlx5r_add_gid_macsec_operations(const struct ib_gid_attr *attr)
{
	struct mlx5_ib_dev *dev = to_mdev(attr->device);
	struct mlx5_macsec_device *macsec_device;
	const struct ib_gid_attr *physical_gid;
	struct mlx5_reserved_gids *mgids;
	struct net_device *ndev;
	int ret = 0;
	union {
		struct sockaddr_in sockaddr_in;
		struct sockaddr_in6 sockaddr_in6;
	} addr;

	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
		mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
		return 0;
	}

	rcu_read_lock();
	ndev = rcu_dereference(attr->ndev);
	if (!ndev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	if (!netif_is_macsec(ndev) || !macsec_netdev_is_offloaded(ndev)) {
		rcu_read_unlock();
		return 0;
	}
	dev_hold(ndev);
	rcu_read_unlock();

	mutex_lock(&dev->macsec.lock);
	macsec_device = get_macsec_device(ndev, &dev->macsec.macsec_devices_list);
	if (!macsec_device) {
		ret = -ENOMEM;
		goto dev_err;
	}

	physical_gid = rdma_find_gid(attr->device, &attr->gid,
				     attr->gid_type, NULL);
	if (!IS_ERR(physical_gid)) {
		ret = set_roce_addr(to_mdev(physical_gid->device),
				    physical_gid->port_num,
				    physical_gid->index, NULL,
				    physical_gid);
		if (ret)
			goto gid_err;

		mgids = &dev->port[attr->port_num - 1].reserved_gids[physical_gid->index];
		mgids->macsec_index = attr->index;
		mgids->physical_gid = physical_gid;
	}

	/* Proceed with adding steering rules, regardless of whether there was gid ambiguity. */
	rdma_gid2ip((struct sockaddr *)&addr, &attr->gid);
	ret = mlx5_macsec_add_roce_rule(ndev, (struct sockaddr *)&addr, attr->index,
					&macsec_device->tx_rules_list,
					&macsec_device->rx_rules_list, dev->mdev->macsec_fs);
	if (ret && !IS_ERR(physical_gid))
		goto rule_err;

	mlx5_macsec_save_roce_gid(macsec_device, (struct sockaddr *)&addr, attr->index);

	dev_put(ndev);
	mutex_unlock(&dev->macsec.lock);
	return ret;

rule_err:
	set_roce_addr(to_mdev(physical_gid->device), physical_gid->port_num,
		      physical_gid->index, &physical_gid->gid, physical_gid);
	mgids->macsec_index = -1;
gid_err:
	rdma_put_gid_attr(physical_gid);
	cleanup_macsec_device(macsec_device);
dev_err:
	dev_put(ndev);
	mutex_unlock(&dev->macsec.lock);
	return ret;
}

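/*
 * Mirror of the add path: release or restore the physical GID that was
 * displaced by (or for) the GID being deleted, remove the MACsec steering
 * rules and the saved GID entry, and free the device state once empty.
 */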
void mlx5r_del_gid_macsec_operations(const struct ib_gid_attr *attr)
{
	struct mlx5_ib_dev *dev = to_mdev(attr->device);
	struct mlx5_macsec_device *macsec_device;
	struct mlx5_reserved_gids *mgids;
	struct net_device *ndev;
	int i, max_gids;

	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return;

	if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
		mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
		return;
	}

	mgids = &dev->port[attr->port_num - 1].reserved_gids[attr->index];
	if (mgids->macsec_index != -1) { /* Checking if physical gid has ambiguous IP */
		rdma_put_gid_attr(mgids->physical_gid);
		mgids->macsec_index = -1;
		return;
	}

	rcu_read_lock();
	ndev = rcu_dereference(attr->ndev);
	if (!ndev) {
		rcu_read_unlock();
		return;
	}

	if (!netif_is_macsec(ndev) || !macsec_netdev_is_offloaded(ndev)) {
		rcu_read_unlock();
		return;
	}
	dev_hold(ndev);
	rcu_read_unlock();

	mutex_lock(&dev->macsec.lock);
	max_gids = MLX5_CAP_ROCE(dev->mdev, roce_address_table_size);
	for (i = 0; i < max_gids; i++) { /* Checking if macsec gid has ambiguous IP */
		mgids = &dev->port[attr->port_num - 1].reserved_gids[i];
		if (mgids->macsec_index == attr->index) {
			const struct ib_gid_attr *physical_gid = mgids->physical_gid;

			set_roce_addr(to_mdev(physical_gid->device),
				      physical_gid->port_num,
				      physical_gid->index,
				      &physical_gid->gid, physical_gid);

			rdma_put_gid_attr(physical_gid);
			mgids->macsec_index = -1;
			break;
		}
	}
	macsec_device = get_macsec_device(ndev, &dev->macsec.macsec_devices_list);
	mlx5_macsec_del_roce_rule(attr->index, dev->mdev->macsec_fs,
				  &macsec_device->tx_rules_list, &macsec_device->rx_rules_list);
	mlx5_macsec_del_roce_gid(macsec_device, attr->index);
	cleanup_macsec_device(macsec_device);

	dev_put(ndev);
	mutex_unlock(&dev->macsec.lock);
}