 #include <linux/kmod.h>
 #include <linux/slab.h>
 #include <linux/idr.h>
+#include <linux/rhashtable.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <net/netlink.h>
@@ -365,6 +366,245 @@ static void tcf_chain_flush(struct tcf_chain *chain)
 	}
 }
 
+static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
+{
+	const struct Qdisc_class_ops *cops;
+	struct Qdisc *qdisc;
+
+	if (!dev_ingress_queue(dev))
+		return NULL;
+
+	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
+	if (!qdisc)
+		return NULL;
+
+	cops = qdisc->ops->cl_ops;
+	if (!cops)
+		return NULL;
+
+	if (!cops->tcf_block)
+		return NULL;
+
+	return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
+}
+
+static struct rhashtable indr_setup_block_ht;
+
+struct tc_indr_block_dev {
+	struct rhash_head ht_node;
+	struct net_device *dev;
+	unsigned int refcnt;
+	struct list_head cb_list;
+	struct tcf_block *block;
+};
+
+struct tc_indr_block_cb {
+	struct list_head list;
+	void *cb_priv;
+	tc_indr_block_bind_cb_t *cb;
+	void *cb_ident;
+};
+
+static const struct rhashtable_params tc_indr_setup_block_ht_params = {
+	.key_offset = offsetof(struct tc_indr_block_dev, dev),
+	.head_offset = offsetof(struct tc_indr_block_dev, ht_node),
+	.key_len = sizeof(struct net_device *),
+};
+
+static struct tc_indr_block_dev *
+tc_indr_block_dev_lookup(struct net_device *dev)
+{
+	return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
+				      tc_indr_setup_block_ht_params);
+}
+
+static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev)
+{
+	struct tc_indr_block_dev *indr_dev;
+
+	indr_dev = tc_indr_block_dev_lookup(dev);
+	if (indr_dev)
+		goto inc_ref;
+
+	indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
+	if (!indr_dev)
+		return NULL;
+
+	INIT_LIST_HEAD(&indr_dev->cb_list);
+	indr_dev->dev = dev;
+	indr_dev->block = tc_dev_ingress_block(dev);
+	if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
+				   tc_indr_setup_block_ht_params)) {
+		kfree(indr_dev);
+		return NULL;
+	}
+
+inc_ref:
+	indr_dev->refcnt++;
+	return indr_dev;
+}
+
+static void tc_indr_block_dev_put(struct tc_indr_block_dev *indr_dev)
+{
+	if (--indr_dev->refcnt)
+		return;
+
+	rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
+			       tc_indr_setup_block_ht_params);
+	kfree(indr_dev);
+}
+
+static struct tc_indr_block_cb *
+tc_indr_block_cb_lookup(struct tc_indr_block_dev *indr_dev,
+			tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	struct tc_indr_block_cb *indr_block_cb;
+
+	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
+		if (indr_block_cb->cb == cb &&
+		    indr_block_cb->cb_ident == cb_ident)
+			return indr_block_cb;
+	return NULL;
+}
+
+static struct tc_indr_block_cb *
+tc_indr_block_cb_add(struct tc_indr_block_dev *indr_dev, void *cb_priv,
+		     tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	struct tc_indr_block_cb *indr_block_cb;
+
+	indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
+	if (indr_block_cb)
+		return ERR_PTR(-EEXIST);
+
+	indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
+	if (!indr_block_cb)
+		return ERR_PTR(-ENOMEM);
+
+	indr_block_cb->cb_priv = cb_priv;
+	indr_block_cb->cb = cb;
+	indr_block_cb->cb_ident = cb_ident;
+	list_add(&indr_block_cb->list, &indr_dev->cb_list);
+
+	return indr_block_cb;
+}
+
+static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
+{
+	list_del(&indr_block_cb->list);
+	kfree(indr_block_cb);
+}
+
+static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
+				  struct tc_indr_block_cb *indr_block_cb,
+				  enum tc_block_command command)
+{
+	struct tc_block_offload bo = {
+		.command = command,
+		.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
+		.block = indr_dev->block,
+	};
+
+	if (!indr_dev->block)
+		return;
+
+	indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
+			  &bo);
+}
+
+int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+				tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	struct tc_indr_block_cb *indr_block_cb;
+	struct tc_indr_block_dev *indr_dev;
+	int err;
+
+	indr_dev = tc_indr_block_dev_get(dev);
+	if (!indr_dev)
+		return -ENOMEM;
+
+	indr_block_cb = tc_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
+	err = PTR_ERR_OR_ZERO(indr_block_cb);
+	if (err)
+		goto err_dev_put;
+
+	tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_BIND);
+	return 0;
+
+err_dev_put:
+	tc_indr_block_dev_put(indr_dev);
+	return err;
+}
+EXPORT_SYMBOL_GPL(__tc_indr_block_cb_register);
+
+int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	int err;
+
+	rtnl_lock();
+	err = __tc_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
+	rtnl_unlock();
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(tc_indr_block_cb_register);
+
+void __tc_indr_block_cb_unregister(struct net_device *dev,
+				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	struct tc_indr_block_cb *indr_block_cb;
+	struct tc_indr_block_dev *indr_dev;
+
+	indr_dev = tc_indr_block_dev_lookup(dev);
+	if (!indr_dev)
+		return;
+
+	indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
+	if (!indr_block_cb)
+		return;
+
+	/* Send unbind message if required to free any block cbs. */
+	tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_UNBIND);
+	tc_indr_block_cb_del(indr_block_cb);
+	tc_indr_block_dev_put(indr_dev);
+}
+EXPORT_SYMBOL_GPL(__tc_indr_block_cb_unregister);
+
+void tc_indr_block_cb_unregister(struct net_device *dev,
+				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	rtnl_lock();
+	__tc_indr_block_cb_unregister(dev, cb, cb_ident);
+	rtnl_unlock();
+}
+EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister);
+
+static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
+			       struct tcf_block_ext_info *ei,
+			       enum tc_block_command command,
+			       struct netlink_ext_ack *extack)
+{
+	struct tc_indr_block_cb *indr_block_cb;
+	struct tc_indr_block_dev *indr_dev;
+	struct tc_block_offload bo = {
+		.command = command,
+		.binder_type = ei->binder_type,
+		.block = block,
+		.extack = extack,
+	};
+
+	indr_dev = tc_indr_block_dev_lookup(dev);
+	if (!indr_dev)
+		return;
+
+	indr_dev->block = command == TC_BLOCK_BIND ? block : NULL;
+
+	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
+		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
+				  &bo);
+}
+
 static bool tcf_block_offload_in_use(struct tcf_block *block)
 {
 	return block->offloadcnt;
@@ -406,12 +646,17 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
 	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack);
 	if (err == -EOPNOTSUPP)
 		goto no_offload_dev_inc;
-	return err;
+	if (err)
+		return err;
+
+	tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
+	return 0;
 
 no_offload_dev_inc:
 	if (tcf_block_offload_in_use(block))
 		return -EOPNOTSUPP;
 	block->nooffloaddevcnt++;
+	tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
 	return 0;
 }
 
@@ -421,6 +666,8 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
 	struct net_device *dev = q->dev_queue->dev;
 	int err;
 
+	tc_indr_block_call(block, dev, ei, TC_BLOCK_UNBIND, NULL);
+
 	if (!dev->netdev_ops->ndo_setup_tc)
 		goto no_offload_dev_dec;
 	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL);
@@ -2355,6 +2602,11 @@ static int __init tc_filter_init(void)
 	if (err)
 		goto err_register_pernet_subsys;
 
+	err = rhashtable_init(&indr_setup_block_ht,
+			      &tc_indr_setup_block_ht_params);
+	if (err)
+		goto err_rhash_setup_block_ht;
+
 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 0);
 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 0);
 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
@@ -2366,6 +2618,8 @@ static int __init tc_filter_init(void)
 
 	return 0;
 
+err_rhash_setup_block_ht:
+	unregister_pernet_subsys(&tcf_net_ops);
 err_register_pernet_subsys:
 	destroy_workqueue(tc_filter_wq);
 	return err;
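
For reference, below is a minimal usage sketch (not part of the patch) of how a driver could consume the indirect block API added above, e.g. to receive ingress block binds on a tunnel netdev it does not own. All my_drv_* names and struct my_drv_priv are hypothetical; only tc_indr_block_cb_register()/tc_indr_block_cb_unregister(), TC_SETUP_BLOCK, struct tc_block_offload and the existing tcf_block_cb_register()/tcf_block_cb_unregister() helpers are real kernel API as of this series.

/* Hypothetical driver-side consumer of the indirect block callbacks. */
#include <linux/netdevice.h>
#include <net/pkt_cls.h>

struct my_drv_priv;	/* hypothetical driver state */

static int my_drv_indr_block_cb(enum tc_setup_type type, void *type_data,
				void *cb_priv)
{
	/* Per-filter offload requests for the indirectly bound block land
	 * here (e.g. TC_SETUP_CLSFLOWER). Hypothetical body.
	 */
	return -EOPNOTSUPP;
}

static int my_drv_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
				   enum tc_setup_type type, void *type_data)
{
	struct tc_block_offload *f = type_data;

	if (type != TC_SETUP_BLOCK)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		/* Attach the driver's per-filter callback to the block that
		 * was just bound on the foreign device.
		 */
		return tcf_block_cb_register(f->block, my_drv_indr_block_cb,
					     cb_priv, cb_priv, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, my_drv_indr_block_cb,
					cb_priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int my_drv_watch_netdev(struct my_drv_priv *priv,
			       struct net_device *netdev)
{
	/* Takes RTNL internally; use __tc_indr_block_cb_register() if the
	 * caller already holds it.
	 */
	return tc_indr_block_cb_register(netdev, priv,
					 my_drv_indr_setup_tc_cb, priv);
}

static void my_drv_unwatch_netdev(struct my_drv_priv *priv,
				  struct net_device *netdev)
{
	tc_indr_block_cb_unregister(netdev, my_drv_indr_setup_tc_cb, priv);
}

If a block is already bound to the device's ingress qdisc when the driver registers, the core replays the bind immediately via tc_indr_block_ing_cmd(), so the driver sees the same TC_BLOCK_BIND path either way.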