Skip to content

Commit

Permalink
net: Fix offloading indirect devices dependency on qdisc order creation
Browse files Browse the repository at this point in the history
[ Upstream commit 74fc4f8 ]

Currently, when creating an ingress qdisc on an indirect device before
the driver registered for callbacks, the driver will not have a chance
to register its filter configuration callbacks.

To fix that, modify the code such that it keeps track of all the ingress
qdiscs that call flow_indr_dev_setup_offload(). When a driver calls
flow_indr_dev_register(), go through the list of tracked ingress qdiscs
and call the driver callback entry point so as to give it a chance to
register its callback.

Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Signed-off-by: Eli Cohen <elic@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
  • Loading branch information
elic307i authored and gregkh committed Sep 18, 2021
1 parent 881d24d commit 6628eef
Show file tree
Hide file tree
Showing 5 changed files with 92 additions and 1 deletion.
1 change: 1 addition & 0 deletions include/net/flow_offload.h
Expand Up @@ -451,6 +451,7 @@ struct flow_block_offload {
struct list_head *driver_block_list;
struct netlink_ext_ack *extack;
struct Qdisc *sch;
struct list_head *cb_list_head;
};

enum tc_setup_type;
Expand Down
89 changes: 88 additions & 1 deletion net/core/flow_offload.c
Expand Up @@ -321,6 +321,7 @@ EXPORT_SYMBOL(flow_block_cb_setup_simple);
static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);
static LIST_HEAD(flow_indir_dev_list);

struct flow_indr_dev {
struct list_head list;
Expand All @@ -346,6 +347,33 @@ static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
return indr_dev;
}

/*
 * One tracked ingress-qdisc bind that went through
 * flow_indr_dev_setup_offload().  Entries are kept on flow_indir_dev_list
 * so that a driver registering later (flow_indr_dev_register()) can be
 * replayed the bind via existing_qdiscs_register().
 */
struct flow_indir_dev_info {
void *data;	/* block identity cookie; lookup key in find_indir_dev() */
struct net_device *dev;	/* indirect device the block was created on */
struct Qdisc *sch;	/* qdisc passed back to the driver callback */
enum tc_setup_type type;	/* setup type forwarded to the driver callback */
void (*cleanup)(struct flow_block_cb *block_cb);	/* block_cb cleanup hook */
struct list_head list;	/* linkage on flow_indir_dev_list */
enum flow_block_command command;	/* saved bo->command for replay */
enum flow_block_binder_type binder_type;	/* saved bo->binder_type for replay */
struct list_head *cb_list;	/* owner's flow_block cb_list (bo->cb_list_head) */
};

/*
 * Replay every tracked ingress-qdisc bind to a newly registered driver
 * callback, giving the driver a chance to attach its flow_block_cb even
 * though the qdisc was created before the driver registered.  Any
 * callbacks the driver queues on the temporary bo are spliced onto the
 * owner's cb_list.  Called under flow_indr_block_lock (see
 * flow_indr_dev_register()).
 */
static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indir_dev_info *entry;

	list_for_each_entry(entry, &flow_indir_dev_list, list) {
		struct flow_block_offload bo = {};

		bo.command = entry->command;
		bo.binder_type = entry->binder_type;
		INIT_LIST_HEAD(&bo.cb_list);
		cb(entry->dev, entry->sch, cb_priv, entry->type, &bo,
		   entry->data, entry->cleanup);
		list_splice(&bo.cb_list, entry->cb_list);
	}
}

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
struct flow_indr_dev *indr_dev;
Expand All @@ -367,6 +395,7 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
}

list_add(&indr_dev->list, &flow_block_indr_dev_list);
existing_qdiscs_register(cb, cb_priv);
mutex_unlock(&flow_indr_block_lock);

return 0;
Expand Down Expand Up @@ -463,14 +492,72 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);

int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
static struct flow_indir_dev_info *find_indir_dev(void *data)
{
struct flow_indir_dev_info *cur;

list_for_each_entry(cur, &flow_indir_dev_list, list) {
if (cur->data == data)
return cur;
}
return NULL;
}

/*
 * Record an ingress-qdisc bind so it can later be replayed to drivers
 * that register after the qdisc was created.  The bind parameters and
 * the relevant fields of @bo are snapshotted into a new entry on
 * flow_indir_dev_list.
 *
 * Returns 0 on success, -EEXIST when @data is already tracked, or
 * -ENOMEM on allocation failure.
 */
static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
struct flow_block_offload *bo)
{
	struct flow_indir_dev_info *entry;

	/* Each block identity cookie may be tracked at most once. */
	if (find_indir_dev(data))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->data = data;
	entry->dev = dev;
	entry->sch = sch;
	entry->type = type;
	entry->cleanup = cleanup;
	entry->command = bo->command;
	entry->binder_type = bo->binder_type;
	entry->cb_list = bo->cb_list_head;

	list_add(&entry->list, &flow_indir_dev_list);
	return 0;
}

/*
 * Stop tracking the qdisc bind identified by @data: unlink its entry
 * from flow_indir_dev_list and free it.
 *
 * Returns 0 on success or -ENOENT when @data was never recorded.
 */
static int indir_dev_remove(void *data)
{
	struct flow_indir_dev_info *entry = find_indir_dev(data);

	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);
	return 0;
}

int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
enum tc_setup_type type, void *data,
struct flow_block_offload *bo,
void (*cleanup)(struct flow_block_cb *block_cb))
{
struct flow_indr_dev *this;

mutex_lock(&flow_indr_block_lock);

if (bo->command == FLOW_BLOCK_BIND)
indir_dev_add(data, dev, sch, type, cleanup, bo);
else if (bo->command == FLOW_BLOCK_UNBIND)
indir_dev_remove(data);

list_for_each_entry(this, &flow_block_indr_dev_list, list)
this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);

Expand Down
1 change: 1 addition & 0 deletions net/netfilter/nf_flow_table_offload.c
Expand Up @@ -1097,6 +1097,7 @@ static void nf_flow_table_block_offload_init(struct flow_block_offload *bo,
bo->command = cmd;
bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
bo->extack = extack;
bo->cb_list_head = &flowtable->flow_block.cb_list;
INIT_LIST_HEAD(&bo->cb_list);
}

Expand Down
1 change: 1 addition & 0 deletions net/netfilter/nf_tables_offload.c
Expand Up @@ -353,6 +353,7 @@ static void nft_flow_block_offload_init(struct flow_block_offload *bo,
bo->command = cmd;
bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
bo->extack = extack;
bo->cb_list_head = &basechain->flow_block.cb_list;
INIT_LIST_HEAD(&bo->cb_list);
}

Expand Down
1 change: 1 addition & 0 deletions net/sched/cls_api.c
Expand Up @@ -634,6 +634,7 @@ static void tcf_block_offload_init(struct flow_block_offload *bo,
bo->block_shared = shared;
bo->extack = extack;
bo->sch = sch;
bo->cb_list_head = &flow_block->cb_list;
INIT_LIST_HEAD(&bo->cb_list);
}

Expand Down

0 comments on commit 6628eef

Please sign in to comment.