From 103f96103f8ca514734cf2c49a25267fc9d603e7 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Mon, 16 Mar 2020 23:49:29 +0100 Subject: [PATCH] bpf: mark all potentially unused parameters with __maybe_unused This allows to enable extra warnings/errors with -Wextra when compiling bpf programs. Signed-off-by: Tobias Klauser --- bpf/Makefile.bpf | 2 +- bpf/bpf_netdev.c | 14 +++++++------ bpf/bpf_sock.c | 30 +++++++++++++++------------- bpf/include/bpf/ctx/skb.h | 2 +- bpf/include/bpf/ctx/xdp.h | 2 +- bpf/lib/common.h | 3 ++- bpf/lib/dbg.h | 22 +++++++++++++-------- bpf/lib/drop.h | 7 ++++--- bpf/lib/encap.h | 3 ++- bpf/lib/l3.h | 3 ++- bpf/lib/lb.h | 40 +++++++++++++++++++++----------------- bpf/lib/lxc.h | 12 ++++++++---- bpf/lib/nat.h | 18 +++++++++++------ bpf/lib/overloadable_skb.h | 6 +++--- bpf/lib/overloadable_xdp.h | 24 ++++++++++++----------- bpf/lib/trace.h | 20 +++++++++++++------ 16 files changed, 124 insertions(+), 84 deletions(-) diff --git a/bpf/Makefile.bpf b/bpf/Makefile.bpf index 371f60e7b108..f19e6fa65e9c 100644 --- a/bpf/Makefile.bpf +++ b/bpf/Makefile.bpf @@ -3,7 +3,7 @@ FLAGS := -I/usr/include/$(shell uname -m)-linux-gnu -I$(ROOT_DIR)/bpf/include -I CLANG_FLAGS := ${FLAGS} -target bpf -emit-llvm # eBPF verifier enforces unaligned access checks where necessary, so don't # let clang complain too early. 
-CLANG_FLAGS += -Wall -Werror -Wno-address-of-packed-member -Wno-unknown-warning-option +CLANG_FLAGS += -Wall -Wextra -Werror -Wno-address-of-packed-member -Wno-unknown-warning-option LLC_FLAGS := -march=bpf -mcpu=probe -mattr=dwarfris LIB := $(shell find $(ROOT_DIR)/bpf -name '*.h') diff --git a/bpf/bpf_netdev.c b/bpf/bpf_netdev.c index c58542f686a2..0befa27f87dd 100644 --- a/bpf/bpf_netdev.c +++ b/bpf/bpf_netdev.c @@ -54,7 +54,8 @@ static __always_inline int rewrite_dmac_to_host(struct __ctx_buff *ctx, #endif #if defined ENABLE_IPV4 || defined ENABLE_IPV6 -static __always_inline __u32 finalize_sec_ctx(__u32 secctx, __u32 src_identity) +static __always_inline +__u32 finalize_sec_ctx(__u32 secctx, __u32 src_identity __maybe_unused) { #ifdef ENABLE_SECCTX_FROM_IPCACHE /* If we could not derive the secctx from the packet itself but @@ -69,7 +70,7 @@ static __always_inline __u32 finalize_sec_ctx(__u32 secctx, __u32 src_identity) #endif #ifdef ENABLE_IPV6 -static __always_inline __u32 derive_sec_ctx(struct __ctx_buff *ctx, +static __always_inline __u32 derive_sec_ctx(struct __ctx_buff *ctx __maybe_unused, const union v6addr *node_ip, struct ipv6hdr *ip6, __u32 *identity) { @@ -261,8 +262,9 @@ int tail_handle_ipv6(struct __ctx_buff *ctx) #endif /* ENABLE_IPV6 */ #ifdef ENABLE_IPV4 -static __always_inline __u32 derive_ipv4_sec_ctx(struct __ctx_buff *ctx, - struct iphdr *ip4) +static __always_inline +__u32 derive_ipv4_sec_ctx(struct __ctx_buff *ctx __maybe_unused, + struct iphdr *ip4 __maybe_unused) { #ifdef FIXED_SRC_SECCTX return FIXED_SRC_SECCTX; @@ -596,7 +598,7 @@ static __always_inline int do_netdev_encrypt_encap(struct __ctx_buff *ctx) return __encap_and_redirect_with_nodeid(ctx, tunnel_endpoint, seclabel, TRACE_PAYLOAD_LEN); } -static __always_inline int do_netdev_encrypt(struct __ctx_buff *ctx, __u16 proto) +static __always_inline int do_netdev_encrypt(struct __ctx_buff *ctx, __u16 proto __maybe_unused) { return do_netdev_encrypt_encap(ctx); } @@ -695,7 +697,7 @@ 
int from_netdev(struct __ctx_buff *ctx) } __section("to-netdev") -int to_netdev(struct __ctx_buff *ctx) +int to_netdev(struct __ctx_buff *ctx __maybe_unused) { /* Cannot compile the section out entriely, test/bpf/verifier-test.sh * workaround. diff --git a/bpf/bpf_sock.c b/bpf/bpf_sock.c index ab11cb31a246..4fa70248aaa9 100644 --- a/bpf/bpf_sock.c +++ b/bpf/bpf_sock.c @@ -167,10 +167,11 @@ static __always_inline int sock4_update_revnat(struct bpf_sock_addr *ctx, &rval, 0); } #else -static __always_inline int sock4_update_revnat(struct bpf_sock_addr *ctx, - struct lb4_backend *backend, - struct lb4_key *lkey, - struct lb4_service *slave_svc) +static __always_inline +int sock4_update_revnat(struct bpf_sock_addr *ctx __maybe_unused, + struct lb4_backend *backend __maybe_unused, + struct lb4_key *lkey __maybe_unused, + struct lb4_service *slave_svc __maybe_unused) { return -1; } @@ -197,8 +198,8 @@ static __always_inline bool sock4_skip_xlate(struct lb4_service *svc, } static __always_inline -struct lb4_service *sock4_nodeport_wildcard_lookup(struct lb4_key *key, - const bool include_remote_hosts) +struct lb4_service *sock4_nodeport_wildcard_lookup(struct lb4_key *key __maybe_unused, + const bool include_remote_hosts __maybe_unused) { #ifdef ENABLE_NODEPORT struct remote_endpoint_info *info; @@ -489,10 +490,11 @@ static __always_inline int sock6_update_revnat(struct bpf_sock_addr *ctx, &rval, 0); } #else -static __always_inline int sock6_update_revnat(struct bpf_sock_addr *ctx, - struct lb6_backend *backend, - struct lb6_key *lkey, - struct lb6_service *slave_svc) +static __always_inline +int sock6_update_revnat(struct bpf_sock_addr *ctx __maybe_unused, + struct lb6_backend *backend __maybe_unused, + struct lb6_key *lkey __maybe_unused, + struct lb6_service *slave_svc __maybe_unused) { return -1; } @@ -510,7 +512,7 @@ static __always_inline void ctx_get_v6_address(struct bpf_sock_addr *ctx, #ifdef ENABLE_NODEPORT static __always_inline void 
ctx_get_v6_src_address(struct bpf_sock *ctx, - union v6addr *addr) + union v6addr *addr) { addr->p1 = ctx->src_ip6[0]; addr->p2 = ctx->src_ip6[1]; @@ -549,7 +551,8 @@ sock6_skip_xlate(struct lb6_service *svc, union v6addr *address) } static __always_inline __maybe_unused struct lb6_service * -sock6_nodeport_wildcard_lookup(struct lb6_key *key, bool include_remote_hosts) +sock6_nodeport_wildcard_lookup(struct lb6_key *key __maybe_unused, + bool include_remote_hosts __maybe_unused) { #ifdef ENABLE_NODEPORT struct remote_endpoint_info *info; @@ -581,7 +584,8 @@ sock6_nodeport_wildcard_lookup(struct lb6_key *key, bool include_remote_hosts) #endif /* ENABLE_NODEPORT */ } -static __always_inline int sock6_xlate_v4_in_v6(struct bpf_sock_addr *ctx) +static __always_inline +int sock6_xlate_v4_in_v6(struct bpf_sock_addr *ctx __maybe_unused) { #ifdef ENABLE_IPV4 struct bpf_sock_addr fake_ctx; diff --git a/bpf/include/bpf/ctx/skb.h b/bpf/include/bpf/ctx/skb.h index d58749d4260d..a172a5d5770c 100644 --- a/bpf/include/bpf/ctx/skb.h +++ b/bpf/include/bpf/ctx/skb.h @@ -47,7 +47,7 @@ #define ctx_adjust_meta ({ -ENOTSUPP; }) static __always_inline __maybe_unused int -ctx_redirect(struct __sk_buff *ctx, int ifindex, __u32 flags) +ctx_redirect(struct __sk_buff *ctx __maybe_unused, int ifindex, __u32 flags) { return redirect(ifindex, flags); } diff --git a/bpf/include/bpf/ctx/xdp.h b/bpf/include/bpf/ctx/xdp.h index 6de8cd5fa644..114ba2b340c5 100644 --- a/bpf/include/bpf/ctx/xdp.h +++ b/bpf/include/bpf/ctx/xdp.h @@ -51,7 +51,7 @@ xdp_load_bytes(struct xdp_md *ctx, __u64 off, void *to, const __u64 len) static __always_inline __maybe_unused int xdp_store_bytes(struct xdp_md *ctx, __u64 off, const void *from, - const __u64 len, __u64 flags) + const __u64 len, __u64 flags __maybe_unused) { void *to; int ret; diff --git a/bpf/lib/common.h b/bpf/lib/common.h index dcaeb1dab099..467c65c07067 100644 --- a/bpf/lib/common.h +++ b/bpf/lib/common.h @@ -625,7 +625,8 @@ struct ct_state { __u16 
backend_id; /* Backend ID in lb4_backends */ }; -static __always_inline int redirect_peer(int ifindex, __u32 flags) +static __always_inline int redirect_peer(int ifindex __maybe_unused, + __u32 flags __maybe_unused) { /* If our datapath has proper redirect support, we make use * of it here, otherwise we terminate tc processing by letting diff --git a/bpf/lib/dbg.h b/bpf/lib/dbg.h index 62acd27e5045..5fa89a9d8896 100644 --- a/bpf/lib/dbg.h +++ b/bpf/lib/dbg.h @@ -216,23 +216,29 @@ static __always_inline void cilium_dbg_capture(struct __ctx_buff *ctx, __u8 type # define printk(fmt, ...) \ do { } while (0) -static __always_inline void cilium_dbg(struct __ctx_buff *ctx, __u8 type, - __u32 arg1, __u32 arg2) +static __always_inline +void cilium_dbg(struct __ctx_buff *ctx __maybe_unused, __u8 type __maybe_unused, + __u32 arg1 __maybe_unused, __u32 arg2 __maybe_unused) { } -static __always_inline void cilium_dbg3(struct __ctx_buff *ctx, __u8 type, - __u32 arg1, __u32 arg2, __u32 arg3) +static __always_inline +void cilium_dbg3(struct __ctx_buff *ctx __maybe_unused, + __u8 type __maybe_unused, __u32 arg1 __maybe_unused, + __u32 arg2 __maybe_unused, __u32 arg3 __maybe_unused) { } -static __always_inline void cilium_dbg_capture(struct __ctx_buff *ctx, - __u8 type, __u32 arg1) +static __always_inline +void cilium_dbg_capture(struct __ctx_buff *ctx __maybe_unused, + __u8 type __maybe_unused, __u32 arg1 __maybe_unused) { } -static __always_inline void cilium_dbg_capture2(struct __ctx_buff *ctx, __u8 type, - __u32 arg1, __u32 arg2) +static __always_inline +void cilium_dbg_capture2(struct __ctx_buff *ctx __maybe_unused, + __u8 type __maybe_unused, __u32 arg1 __maybe_unused, + __u32 arg2 __maybe_unused) { } diff --git a/bpf/lib/drop.h b/bpf/lib/drop.h index a81f72984653..7b65f292c269 100644 --- a/bpf/lib/drop.h +++ b/bpf/lib/drop.h @@ -89,9 +89,10 @@ static __always_inline int send_drop_notify(struct __ctx_buff *ctx, __u32 src, return exitcode; } #else -static __always_inline int 
send_drop_notify(struct __ctx_buff *ctx, __u32 src, - __u32 dst, __u32 dst_id, int reason, - int exitcode, __u8 direction) +static __always_inline +int send_drop_notify(struct __ctx_buff *ctx, __u32 src __maybe_unused, + __u32 dst __maybe_unused, __u32 dst_id __maybe_unused, + int reason, int exitcode, __u8 direction) { update_metrics(ctx_full_len(ctx), direction, -reason); return exitcode; } diff --git a/bpf/lib/encap.h b/bpf/lib/encap.h index cfdcbe333ff6..6f1468b74faa 100644 --- a/bpf/lib/encap.h +++ b/bpf/lib/encap.h @@ -53,7 +53,8 @@ encap_and_redirect_ipsec(struct __ctx_buff *ctx, __u32 tunnel_endpoint, #endif /* ENABLE_IPSEC */ static __always_inline int -encap_remap_v6_host_address(struct __ctx_buff *ctx, const bool egress) +encap_remap_v6_host_address(struct __ctx_buff *ctx __maybe_unused, + const bool egress __maybe_unused) { #ifdef ENABLE_ENCAP_HOST_REMAP struct csum_offset csum = {}; diff --git a/bpf/lib/l3.h b/bpf/lib/l3.h index b0769a9108b8..d371a59b1621 100644 --- a/bpf/lib/l3.h +++ b/bpf/lib/l3.h @@ -97,7 +97,8 @@ static __always_inline int ipv6_local_delivery(struct __ctx_buff *ctx, int l3_of static __always_inline int ipv4_local_delivery(struct __ctx_buff *ctx, int l3_off, __u32 seclabel, struct iphdr *ip4, - struct endpoint_info *ep, __u8 direction) + struct endpoint_info *ep, + __u8 direction __maybe_unused) { int ret; diff --git a/bpf/lib/lb.h b/bpf/lib/lb.h index 99fa5b3264f8..4290138c54d2 100644 --- a/bpf/lib/lb.h +++ b/bpf/lib/lb.h @@ -86,7 +86,8 @@ struct bpf_elf_map __section_maps LB4_BACKEND_MAP = { #define cilium_dbg_lb(a, b, c, d) #endif -static __always_inline bool lb4_svc_is_nodeport(const struct lb4_service *svc) +static __always_inline +bool lb4_svc_is_nodeport(const struct lb4_service *svc __maybe_unused) { #ifdef ENABLE_NODEPORT return svc->nodeport; @@ -95,7 +96,8 @@ static __always_inline bool lb4_svc_is_nodeport(const struct lb4_service *svc) #endif /* ENABLE_NODEPORT */ } -static __always_inline bool lb6_svc_is_nodeport(const
struct lb6_service *svc) +static __always_inline +bool lb6_svc_is_nodeport(const struct lb6_service *svc __maybe_unused) { #ifdef ENABLE_NODEPORT return svc->nodeport; @@ -104,7 +106,8 @@ static __always_inline bool lb6_svc_is_nodeport(const struct lb6_service *svc) #endif /* ENABLE_NODEPORT */ } -static __always_inline bool lb4_svc_is_external_ip(const struct lb4_service *svc) +static __always_inline +bool lb4_svc_is_external_ip(const struct lb4_service *svc __maybe_unused) { #ifdef ENABLE_EXTERNAL_IP return svc->external; @@ -113,7 +116,8 @@ static __always_inline bool lb4_svc_is_external_ip(const struct lb4_service *svc #endif } -static __always_inline bool lb6_svc_is_external_ip(const struct lb6_service *svc) +static __always_inline +bool lb6_svc_is_external_ip(const struct lb6_service *svc __maybe_unused) { #ifdef ENABLE_EXTERNAL_IP return svc->external; @@ -278,9 +282,9 @@ static __always_inline int lb6_rev_nat(struct __ctx_buff *ctx, int l4_off, * - DROP_UNKNOWN_L4 if packet should be ignore (sent to stack) * - Negative error code */ -static __always_inline int lb6_extract_key(struct __ctx_buff *ctx, +static __always_inline int lb6_extract_key(struct __ctx_buff *ctx __maybe_unused, struct ipv6_ct_tuple *tuple, - int l4_off, + int l4_off __maybe_unused, struct lb6_key *key, struct csum_offset *csum_off, int dir) @@ -332,7 +336,7 @@ struct lb6_service *__lb6_lookup_service(struct lb6_key *key) } static __always_inline -struct lb6_service *lb6_lookup_service(struct __ctx_buff *ctx, +struct lb6_service *lb6_lookup_service(struct __ctx_buff *ctx __maybe_unused, struct lb6_key *key) { struct lb6_service *svc = __lb6_lookup_service(key); @@ -350,7 +354,7 @@ static __always_inline struct lb6_backend *__lb6_lookup_backend(__u16 backend_id } static __always_inline struct lb6_backend * -lb6_lookup_backend(struct __ctx_buff *ctx, __u16 backend_id) +lb6_lookup_backend(struct __ctx_buff *ctx __maybe_unused, __u16 backend_id) { struct lb6_backend *backend; @@ -369,7 +373,7 
@@ struct lb6_service *__lb6_lookup_slave(struct lb6_key *key) } static __always_inline -struct lb6_service *lb6_lookup_slave(struct __ctx_buff *ctx, +struct lb6_service *lb6_lookup_slave(struct __ctx_buff *ctx __maybe_unused, struct lb6_key *key, __u16 slave) { struct lb6_service *svc; @@ -524,19 +528,19 @@ static __always_inline int lb6_local(void *map, struct __ctx_buff *ctx, * additional map management. */ static __always_inline -struct lb6_service *__lb6_lookup_service(struct lb6_key *key) +struct lb6_service *__lb6_lookup_service(struct lb6_key *key __maybe_unused) { return NULL; } static __always_inline -struct lb6_service *__lb6_lookup_slave(struct lb6_key *key) +struct lb6_service *__lb6_lookup_slave(struct lb6_key *key __maybe_unused) { return NULL; } static __always_inline struct lb6_backend * -__lb6_lookup_backend(__u16 backend_id) +__lb6_lookup_backend(__u16 backend_id __maybe_unused) { return NULL; } @@ -649,9 +653,9 @@ static __always_inline int lb4_rev_nat(struct __ctx_buff *ctx, int l3_off, int l * - DROP_UNKNOWN_L4 if packet should be ignore (sent to stack) * - Negative error code */ -static __always_inline int lb4_extract_key(struct __ctx_buff *ctx, +static __always_inline int lb4_extract_key(struct __ctx_buff *ctx __maybe_unused, struct ipv4_ct_tuple *tuple, - int l4_off, + int l4_off __maybe_unused, struct lb4_key *key, struct csum_offset *csum_off, int dir) @@ -701,7 +705,7 @@ struct lb4_service *__lb4_lookup_service(struct lb4_key *key) } static __always_inline -struct lb4_service *lb4_lookup_service(struct __ctx_buff *ctx, +struct lb4_service *lb4_lookup_service(struct __ctx_buff *ctx __maybe_unused, struct lb4_key *key) { struct lb4_service *svc = __lb4_lookup_service(key); @@ -718,7 +722,7 @@ static __always_inline struct lb4_backend *__lb4_lookup_backend(__u16 backend_id } static __always_inline struct lb4_backend * -lb4_lookup_backend(struct __ctx_buff *ctx, __u16 backend_id) +lb4_lookup_backend(struct __ctx_buff *ctx __maybe_unused, 
__u16 backend_id) { struct lb4_backend *backend; @@ -737,8 +741,8 @@ struct lb4_service *__lb4_lookup_slave(struct lb4_key *key) } static __always_inline -struct lb4_service *lb4_lookup_slave(struct __ctx_buff *ctx, - struct lb4_key *key, __u16 slave) +struct lb4_service *lb4_lookup_slave(struct __ctx_buff *ctx __maybe_unused, + struct lb4_key *key, __u16 slave) { struct lb4_service *svc; diff --git a/bpf/lib/lxc.h b/bpf/lib/lxc.h index e4f4dc9aa803..d3387525a9f2 100644 --- a/bpf/lib/lxc.h +++ b/bpf/lib/lxc.h @@ -17,7 +17,8 @@ #define TEMPLATE_LXC_ID 0xffff #ifndef DISABLE_SIP_VERIFICATION -static __always_inline int is_valid_lxc_src_ip(struct ipv6hdr *ip6) +static __always_inline +int is_valid_lxc_src_ip(struct ipv6hdr *ip6 __maybe_unused) { #ifdef ENABLE_IPV6 union v6addr valid = {}; @@ -30,7 +31,8 @@ static __always_inline int is_valid_lxc_src_ip(struct ipv6hdr *ip6) #endif } -static __always_inline int is_valid_lxc_src_ipv4(struct iphdr *ip4) +static __always_inline +int is_valid_lxc_src_ipv4(struct iphdr *ip4 __maybe_unused) { #ifdef ENABLE_IPV4 return ip4->saddr == LXC_IPV4; @@ -40,12 +42,14 @@ static __always_inline int is_valid_lxc_src_ipv4(struct iphdr *ip4) #endif } #else -static __always_inline int is_valid_lxc_src_ip(struct ipv6hdr *ip6) +static __always_inline +int is_valid_lxc_src_ip(struct ipv6hdr *ip6 __maybe_unused) { return 1; } -static __always_inline int is_valid_lxc_src_ipv4(struct iphdr *ip4) +static __always_inline +int is_valid_lxc_src_ipv4(struct iphdr *ip4 __maybe_unused) { return 1; } diff --git a/bpf/lib/nat.h b/bpf/lib/nat.h index 6803c49cc1bc..7160f84a170f 100644 --- a/bpf/lib/nat.h +++ b/bpf/lib/nat.h @@ -545,13 +545,16 @@ static __always_inline int snat_v4_process(struct __ctx_buff *ctx, int dir, snat_v4_rewrite_ingress(ctx, &tuple, state, off); } #else -static __always_inline __maybe_unused int snat_v4_process(struct __ctx_buff *ctx, int dir, - const struct ipv4_nat_target *target) +static __always_inline __maybe_unused +int 
snat_v4_process(struct __ctx_buff *ctx __maybe_unused, + int dir __maybe_unused, + const struct ipv4_nat_target *target __maybe_unused) { return CTX_ACT_OK; } -static __always_inline __maybe_unused void snat_v4_delete_tuples(struct ipv4_ct_tuple *tuple) +static __always_inline __maybe_unused +void snat_v4_delete_tuples(struct ipv4_ct_tuple *tuple __maybe_unused) { } #endif @@ -1012,13 +1015,16 @@ static __always_inline int snat_v6_process(struct __ctx_buff *ctx, int dir, snat_v6_rewrite_ingress(ctx, &tuple, state, off); } #else -static __always_inline __maybe_unused int snat_v6_process(struct __ctx_buff *ctx, int dir, - const struct ipv6_nat_target *target) +static __always_inline __maybe_unused +int snat_v6_process(struct __ctx_buff *ctx __maybe_unused, + int dir __maybe_unused, + const struct ipv6_nat_target *target __maybe_unused) { return CTX_ACT_OK; } -static __always_inline void snat_v6_delete_tuples(struct ipv6_ct_tuple *tuple) +static __always_inline +void snat_v6_delete_tuples(struct ipv6_ct_tuple *tuple __maybe_unused) { } #endif diff --git a/bpf/lib/overloadable_skb.h b/bpf/lib/overloadable_skb.h index 9d95749b20f1..8e4dcd9af36d 100644 --- a/bpf/lib/overloadable_skb.h +++ b/bpf/lib/overloadable_skb.h @@ -79,7 +79,7 @@ redirect_self(struct __sk_buff *ctx) } static __always_inline __maybe_unused void -ctx_skip_nodeport_clear(struct __sk_buff *ctx) +ctx_skip_nodeport_clear(struct __sk_buff *ctx __maybe_unused) { #ifdef ENABLE_NODEPORT ctx->tc_index &= ~TC_INDEX_F_SKIP_NODEPORT; @@ -87,7 +87,7 @@ ctx_skip_nodeport_clear(struct __sk_buff *ctx) } static __always_inline __maybe_unused void -ctx_skip_nodeport_set(struct __sk_buff *ctx) +ctx_skip_nodeport_set(struct __sk_buff *ctx __maybe_unused) { #ifdef ENABLE_NODEPORT ctx->tc_index |= TC_INDEX_F_SKIP_NODEPORT; @@ -95,7 +95,7 @@ ctx_skip_nodeport_set(struct __sk_buff *ctx) } static __always_inline __maybe_unused bool -ctx_skip_nodeport(struct __sk_buff *ctx) +ctx_skip_nodeport(struct __sk_buff *ctx 
__maybe_unused) { #ifdef ENABLE_NODEPORT volatile __u32 tc_index = ctx->tc_index; diff --git a/bpf/lib/overloadable_xdp.h b/bpf/lib/overloadable_xdp.h index 7b44f2e2828b..33ae1f5bb078 100644 --- a/bpf/lib/overloadable_xdp.h +++ b/bpf/lib/overloadable_xdp.h @@ -5,43 +5,45 @@ #define __LIB_OVERLOADABLE_XDP_H_ static __always_inline __maybe_unused void -bpf_clear_cb(struct xdp_md *ctx) +bpf_clear_cb(struct xdp_md *ctx __maybe_unused) { } static __always_inline __maybe_unused int -get_identity(struct xdp_md *ctx) +get_identity(struct xdp_md *ctx __maybe_unused) { return 0; } static __always_inline __maybe_unused void -set_encrypt_dip(struct xdp_md *ctx, __u32 ip_endpoint) +set_encrypt_dip(struct xdp_md *ctx __maybe_unused, + __u32 ip_endpoint __maybe_unused) { } static __always_inline __maybe_unused void -set_identity(struct xdp_md *ctx, __u32 identity) +set_identity(struct xdp_md *ctx __maybe_unused, __u32 identity __maybe_unused) { } static __always_inline __maybe_unused void -set_identity_cb(struct xdp_md *ctx, __u32 identity) +set_identity_cb(struct xdp_md *ctx __maybe_unused, + __u32 identity __maybe_unused) { } static __always_inline __maybe_unused void -set_encrypt_key(struct xdp_md *ctx, __u8 key) +set_encrypt_key(struct xdp_md *ctx __maybe_unused, __u8 key __maybe_unused) { } static __always_inline __maybe_unused void -set_encrypt_key_cb(struct xdp_md *ctx, __u8 key) +set_encrypt_key_cb(struct xdp_md *ctx __maybe_unused, __u8 key __maybe_unused) { } static __always_inline __maybe_unused int -redirect_self(struct xdp_md *ctx) +redirect_self(struct xdp_md *ctx __maybe_unused) { #ifdef ENABLE_HOST_REDIRECT return XDP_TX; @@ -53,7 +55,7 @@ redirect_self(struct xdp_md *ctx) #define RECIRC_MARKER 5 static __always_inline __maybe_unused void -ctx_skip_nodeport_clear(struct xdp_md *ctx) +ctx_skip_nodeport_clear(struct xdp_md *ctx __maybe_unused) { #ifdef ENABLE_NODEPORT ctx_store_meta(ctx, RECIRC_MARKER, 0); @@ -61,7 +63,7 @@ ctx_skip_nodeport_clear(struct xdp_md 
*ctx) } static __always_inline __maybe_unused void -ctx_skip_nodeport_set(struct xdp_md *ctx) +ctx_skip_nodeport_set(struct xdp_md *ctx __maybe_unused) { #ifdef ENABLE_NODEPORT ctx_store_meta(ctx, RECIRC_MARKER, 1); @@ -69,7 +71,7 @@ ctx_skip_nodeport_set(struct xdp_md *ctx) } static __always_inline __maybe_unused bool -ctx_skip_nodeport(struct xdp_md *ctx) +ctx_skip_nodeport(struct xdp_md *ctx __maybe_unused) { #ifdef ENABLE_NODEPORT return ctx_load_meta(ctx, RECIRC_MARKER); diff --git a/bpf/lib/trace.h b/bpf/lib/trace.h index 5b648391acd9..9732ad28e214 100644 --- a/bpf/lib/trace.h +++ b/bpf/lib/trace.h @@ -259,22 +259,30 @@ send_trace_notify6(struct __ctx_buff *ctx, __u8 obs_point, __u32 src, __u32 dst, } #else static __always_inline void -send_trace_notify(struct __ctx_buff *ctx, __u8 obs_point, __u32 src, __u32 dst, - __u16 dst_id, __u32 ifindex, __u8 reason, __u32 monitor) +send_trace_notify(struct __ctx_buff *ctx, __u8 obs_point, + __u32 src __maybe_unused, __u32 dst __maybe_unused, + __u16 dst_id __maybe_unused, __u32 ifindex __maybe_unused, + __u8 reason, __u32 monitor __maybe_unused) { update_trace_metrics(ctx, obs_point, reason); } static __always_inline void -send_trace_notify4(struct __ctx_buff *ctx, __u8 obs_point, __u32 src, __u32 dst, __be32 orig_addr, - __u16 dst_id, __u32 ifindex, __u8 reason, __u32 monitor) +send_trace_notify4(struct __ctx_buff *ctx, __u8 obs_point, + __u32 src __maybe_unused, __u32 dst __maybe_unused, + __be32 orig_addr __maybe_unused, __u16 dst_id __maybe_unused, + __u32 ifindex __maybe_unused, __u8 reason, + __u32 monitor __maybe_unused) { update_trace_metrics(ctx, obs_point, reason); } static __always_inline void -send_trace_notify6(struct __ctx_buff *ctx, __u8 obs_point, __u32 src, __u32 dst, union v6addr *orig_addr, - __u16 dst_id, __u32 ifindex, __u8 reason, __u32 monitor) +send_trace_notify6(struct __ctx_buff *ctx, __u8 obs_point, + __u32 src __maybe_unused, __u32 dst __maybe_unused, + union v6addr *orig_addr 
__maybe_unused, + __u16 dst_id __maybe_unused, __u32 ifindex __maybe_unused, + __u8 reason, __u32 monitor __maybe_unused) { update_trace_metrics(ctx, obs_point, reason); }