Skip to content
Permalink
Browse files
flow_offload: allow user to offload tc action to net device
Use flow_indr_dev_register/flow_indr_dev_setup_offload to
offload tc action.

We offload the tc action mainly for ovs meter configuration.
Make some basic changes so that the affected vendor drivers return
EOPNOTSUPP for the new, as-yet-unsupported setup type.

We need to call tc_cleanup_flow_action to clean up tc action entry since
in tc_setup_action, some actions may hold dev refcnt, especially the mirror
action.

As noted in review of the RFC, the kernel test robot reported build
failures, so we guard the action offload with CONFIG_NET_CLS_ACT.

Signed-off-by: Baowen Zheng <baowen.zheng@corigine.com>
Signed-off-by: Louis Peens <louis.peens@corigine.com>
Signed-off-by: Simon Horman <simon.horman@corigine.com>
  • Loading branch information
zhengbaowen authored and intel-lab-lkp committed Jul 22, 2021
1 parent c2255ff commit 9228a8efdbf7a736354b87c0db3260dd7d2c4abd
Show file tree
Hide file tree
Showing 9 changed files with 128 additions and 6 deletions.
@@ -1951,7 +1951,7 @@ static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, v
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
if (!bnxt_is_netdev_indr_offload(netdev))
if (!netdev || !bnxt_is_netdev_indr_offload(netdev))
return -EOPNOTSUPP;

switch (type) {
@@ -486,6 +486,9 @@ int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
if (!netdev)
return -EOPNOTSUPP;

switch (type) {
case TC_SETUP_BLOCK:
return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
@@ -1869,6 +1869,9 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
if (!netdev)
return -EOPNOTSUPP;

if (!nfp_fl_is_netdev_to_offload(netdev))
return -EOPNOTSUPP;

@@ -923,6 +923,7 @@ enum tc_setup_type {
TC_SETUP_QDISC_TBF,
TC_SETUP_QDISC_FIFO,
TC_SETUP_QDISC_HTB,
TC_SETUP_ACT,
};

/* These structures hold the attributes of bpf state that are being passed
@@ -553,6 +553,21 @@ struct flow_cls_offload {
u32 classid;
};

/* Operation requested of a driver for a tc action offload. */
enum flow_act_command {
FLOW_ACT_REPLACE,
FLOW_ACT_DESTROY,
FLOW_ACT_STATS,
};

/* Argument block handed to drivers via the TC_SETUP_ACT setup type. */
struct flow_offload_action {
struct netlink_ext_ack *extack; /* for reporting errors back to user space */
enum flow_act_command command;
struct flow_stats stats; /* NOTE(review): presumably filled by the driver for FLOW_ACT_STATS — confirm */
struct flow_action action; /* actions to offload; entries[] is a flexible array tail */
};

/* Allocate a zeroed flow_offload_action sized for num_actions entries. */
struct flow_offload_action *flow_action_alloc(unsigned int num_actions);

static inline struct flow_rule *
flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
{
@@ -266,6 +266,9 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

/* Iterate over a tc_action pointer array of at most TCA_ACT_MAX_PRIO
 * slots, stopping at the first NULL entry.
 */
#define tcf_act_for_each_action(i, a, actions) \
for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = actions[i]); i++)

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
u64 bytes, u64 packets, u64 drops, u64 lastuse,
@@ -536,8 +539,19 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
return ifindex == skb->skb_iif;
}

#ifdef CONFIG_NET_CLS_ACT
/* Translate the actions attached to @exts into flow_action entries. */
int tc_setup_flow_action(struct flow_action *flow_action,
const struct tcf_exts *exts);
#else
/* Without CONFIG_NET_CLS_ACT there are no actions to translate;
 * succeed trivially so callers need no conditional compilation.
 */
static inline int tc_setup_flow_action(struct flow_action *flow_action,
const struct tcf_exts *exts)
{
return 0;
}
#endif

/* Translate a raw tc_action array into flow_action entries. */
int tc_setup_action(struct flow_action *flow_action,
struct tc_action *actions[]);
/* Release references taken during tc_setup_action/tc_setup_flow_action
 * (some actions, e.g. mirred, hold a dev refcount).
 */
void tc_cleanup_flow_action(struct flow_action *flow_action);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
@@ -558,6 +572,7 @@ int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
enum tc_setup_type type, void *type_data,
void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
unsigned int tcf_act_num_actions(struct tc_action *actions[]);

#ifdef CONFIG_NET_CLS_ACT
int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
@@ -27,6 +27,27 @@ struct flow_rule *flow_rule_alloc(unsigned int num_actions)
}
EXPORT_SYMBOL(flow_rule_alloc);

struct flow_offload_action *flow_action_alloc(unsigned int num_actions)
{
struct flow_offload_action *fl_action;
int i;

fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions),
GFP_KERNEL);
if (!fl_action)
return NULL;

fl_action->action.num_entries = num_actions;
/* Pre-fill each action hw_stats with DONT_CARE.
* Caller can override this if it wants stats for a given action.
*/
for (i = 0; i < num_actions; i++)
fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

return fl_action;
}
EXPORT_SYMBOL(flow_action_alloc);

#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \
const struct flow_match *__m = &(__rule)->match; \
struct flow_dissector *__d = (__m)->dissector; \
@@ -476,6 +497,9 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,

mutex_unlock(&flow_indr_block_lock);

return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
if (bo)
return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
else
return 0;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);
@@ -1060,6 +1060,36 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
return ERR_PTR(err);
}

/* Offload the given tc actions to hardware after they have been
 * inserted in software.  Best-effort: the result of notifying the
 * indirect offload devices is not propagated, since software tc
 * continues to handle the actions regardless.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from translating the actions into flow_action entries.
 */
int tcf_action_offload_cmd(struct tc_action *actions[],
			   struct netlink_ext_ack *extack)
{
	struct flow_offload_action *fl_act;
	int err;

	fl_act = flow_action_alloc(tcf_act_num_actions(actions));
	if (!fl_act)
		return -ENOMEM;

	fl_act->extack = extack;
	err = tc_setup_action(&fl_act->action, actions);
	if (err) {
		/* extack messages must not carry a trailing newline */
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to setup tc actions for offload");
		goto err_out;
	}
	fl_act->command = FLOW_ACT_REPLACE;

	flow_indr_dev_setup_offload(NULL, NULL, TC_SETUP_ACT, fl_act, NULL, NULL);

	/* tc_setup_action may take dev references (e.g. the mirror
	 * action); release them now that the offload call is done.
	 */
	tc_cleanup_flow_action(&fl_act->action);

err_out:
	kfree(fl_act);
	return err;
}
EXPORT_SYMBOL(tcf_action_offload_cmd);

/* Returns numbers of initialized actions or negative error. */

int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
@@ -1514,6 +1544,9 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
return ret;
ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);

/* offload actions to hardware if possible */
tcf_action_offload_cmd(actions, extack);

/* only put existing actions */
for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
if (init_res[i] == ACT_P_CREATED)
@@ -3544,8 +3544,8 @@ static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
return hw_stats;
}

int tc_setup_flow_action(struct flow_action *flow_action,
const struct tcf_exts *exts)
int tc_setup_action(struct flow_action *flow_action,
struct tc_action *actions[])
{
struct tc_action *act;
int i, j, k, err = 0;
@@ -3554,11 +3554,11 @@ int tc_setup_flow_action(struct flow_action *flow_action,
BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);

if (!exts)
if (!actions)
return 0;

j = 0;
tcf_exts_for_each_action(i, act, exts) {
tcf_act_for_each_action(i, act, actions) {
struct flow_action_entry *entry;

entry = &flow_action->entries[j];
@@ -3725,7 +3725,19 @@ int tc_setup_flow_action(struct flow_action *flow_action,
spin_unlock_bh(&act->tcfa_lock);
goto err_out;
}
EXPORT_SYMBOL(tc_setup_action);

#ifdef CONFIG_NET_CLS_ACT
/* Translate the actions attached to @exts into @flow_action entries.
 * A NULL @exts means "no actions" and succeeds trivially.
 */
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts)
{
	return exts ? tc_setup_action(flow_action, exts->actions) : 0;
}
EXPORT_SYMBOL(tc_setup_flow_action);
#endif

unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
@@ -3743,6 +3755,22 @@ unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
}
EXPORT_SYMBOL(tcf_exts_num_actions);

unsigned int tcf_act_num_actions(struct tc_action *actions[])
{
unsigned int num_acts = 0;
struct tc_action *act;
int i;

tcf_act_for_each_action(i, act, actions) {
if (is_tcf_pedit(act))
num_acts += tcf_pedit_nkeys(act);
else
num_acts++;
}
return num_acts;
}
EXPORT_SYMBOL(tcf_act_num_actions);

#ifdef CONFIG_NET_CLS_ACT
static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
u32 *p_block_index,

0 comments on commit 9228a8e

Please sign in to comment.