Skip to content

Commit

Permalink
bpf: rename variables with camel-case names
Browse files Browse the repository at this point in the history
We run checkpatch.pl on the commits touching the bpf/ code, and it
complains when edits contain camel-case variable names -- even if they
were not introduced by the commit. We could disable this report in the
script we use to call checkpatch.pl, but at the same time we _do_ want
it to catch the introduction of new camel-case names.

Let's fix the existing variable names once and for all, so that
contributors do not get surprised by the reports.

The number of variables is rather low: srcID, dstID, localID, remoteID,
and newEntries.

Signed-off-by: Quentin Monnet <quentin@isovalent.com>
  • Loading branch information
qmonnet committed Jul 15, 2021
1 parent c95f6fd commit d5a5b44
Show file tree
Hide file tree
Showing 6 changed files with 78 additions and 78 deletions.
30 changes: 15 additions & 15 deletions bpf/bpf_host.c
Original file line number Diff line number Diff line change
Expand Up @@ -451,7 +451,7 @@ handle_ipv4(struct __ctx_buff *ctx, __u32 secctx,
__u32 ipcache_srcid __maybe_unused, const bool from_host)
{
struct remote_endpoint_info *info = NULL;
__u32 __maybe_unused remoteID = 0;
__u32 __maybe_unused remote_id = 0;
__u32 __maybe_unused monitor = 0;
struct ipv4_ct_tuple tuple = {};
bool skip_redirect = false;
Expand Down Expand Up @@ -508,7 +508,7 @@ handle_ipv4(struct __ctx_buff *ctx, __u32 secctx,
return ret;
} else if (!ctx_skip_host_fw(ctx)) {
/* We're on the ingress path of the native device. */
ret = ipv4_host_policy_ingress(ctx, &remoteID);
ret = ipv4_host_policy_ingress(ctx, &remote_id);
if (IS_ERR(ret))
return ret;
}
Expand Down Expand Up @@ -1084,12 +1084,12 @@ int to_host(struct __ctx_buff *ctx)
__u16 __maybe_unused proto = 0;
int ret = CTX_ACT_OK;
bool traced = false;
__u32 srcID = 0;
__u32 src_id = 0;

if ((magic & MARK_MAGIC_HOST_MASK) == MARK_MAGIC_ENCRYPT) {
ctx->mark = magic; /* CB_ENCRYPT_MAGIC */
srcID = ctx_load_meta(ctx, CB_ENCRYPT_IDENTITY);
set_identity_mark(ctx, srcID);
src_id = ctx_load_meta(ctx, CB_ENCRYPT_IDENTITY);
set_identity_mark(ctx, src_id);
} else if ((magic & 0xFFFF) == MARK_MAGIC_TO_PROXY) {
/* Upper 16 bits may carry proxy port number */
__be16 port = magic >> 16;
Expand All @@ -1114,7 +1114,7 @@ int to_host(struct __ctx_buff *ctx)
#endif

if (!traced)
send_trace_notify(ctx, TRACE_TO_STACK, srcID, 0, 0,
send_trace_notify(ctx, TRACE_TO_STACK, src_id, 0, 0,
CILIUM_IFINDEX, ret, 0);

#ifdef ENABLE_HOST_FIREWALL
Expand All @@ -1133,12 +1133,12 @@ int to_host(struct __ctx_buff *ctx)
# endif
# ifdef ENABLE_IPV6
case bpf_htons(ETH_P_IPV6):
ret = ipv6_host_policy_ingress(ctx, &srcID);
ret = ipv6_host_policy_ingress(ctx, &src_id);
break;
# endif
# ifdef ENABLE_IPV4
case bpf_htons(ETH_P_IP):
ret = ipv4_host_policy_ingress(ctx, &srcID);
ret = ipv4_host_policy_ingress(ctx, &src_id);
break;
# endif
default:
Expand All @@ -1151,7 +1151,7 @@ int to_host(struct __ctx_buff *ctx)

out:
if (IS_ERR(ret))
return send_drop_notify_error(ctx, srcID, ret, CTX_ACT_DROP,
return send_drop_notify_error(ctx, src_id, ret, CTX_ACT_DROP,
METRIC_INGRESS);

return ret;
Expand All @@ -1163,12 +1163,12 @@ declare_tailcall_if(__or(__and(is_defined(ENABLE_IPV4), is_defined(ENABLE_IPV6))
is_defined(DEBUG)), CILIUM_CALL_IPV6_TO_HOST_POLICY_ONLY)
int tail_ipv6_host_policy_ingress(struct __ctx_buff *ctx)
{
__u32 srcID = 0;
__u32 src_id = 0;
int ret;

ret = ipv6_host_policy_ingress(ctx, &srcID);
ret = ipv6_host_policy_ingress(ctx, &src_id);
if (IS_ERR(ret))
return send_drop_notify_error(ctx, srcID, ret, CTX_ACT_DROP,
return send_drop_notify_error(ctx, src_id, ret, CTX_ACT_DROP,
METRIC_INGRESS);
return ret;
}
Expand All @@ -1179,12 +1179,12 @@ declare_tailcall_if(__or(__and(is_defined(ENABLE_IPV4), is_defined(ENABLE_IPV6))
is_defined(DEBUG)), CILIUM_CALL_IPV4_TO_HOST_POLICY_ONLY)
int tail_ipv4_host_policy_ingress(struct __ctx_buff *ctx)
{
__u32 srcID = 0;
__u32 src_id = 0;
int ret;

ret = ipv4_host_policy_ingress(ctx, &srcID);
ret = ipv4_host_policy_ingress(ctx, &src_id);
if (IS_ERR(ret))
return send_drop_notify_error(ctx, srcID, ret, CTX_ACT_DROP,
return send_drop_notify_error(ctx, src_id, ret, CTX_ACT_DROP,
METRIC_INGRESS);
return ret;
}
Expand Down
64 changes: 32 additions & 32 deletions bpf/bpf_lxc.c
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ encode_custom_prog_meta(struct __ctx_buff *ctx, int ret, __u32 identity)
static __always_inline int ipv6_l3_from_lxc(struct __ctx_buff *ctx,
struct ipv6_ct_tuple *tuple,
int l3_off, struct ipv6hdr *ip6,
__u32 *dstID)
__u32 *dst_id)
{
#ifdef ENABLE_ROUTING
union macaddr router_mac = NODE_MAC;
Expand Down Expand Up @@ -190,7 +190,7 @@ static __always_inline int ipv6_l3_from_lxc(struct __ctx_buff *ctx,

info = lookup_ip6_remote_endpoint(&orig_dip);
if (info != NULL && info->sec_label) {
*dstID = info->sec_label;
*dst_id = info->sec_label;
tunnel_endpoint = info->tunnel_endpoint;
encrypt_key = get_min_encrypt_key(info->key);
#ifdef ENABLE_WIREGUARD
Expand All @@ -200,11 +200,11 @@ static __always_inline int ipv6_l3_from_lxc(struct __ctx_buff *ctx,
dst_remote_ep = true;
#endif /* ENABLE_WIREGUARD */
} else {
*dstID = WORLD_ID;
*dst_id = WORLD_ID;
}

cilium_dbg(ctx, info ? DBG_IP_ID_MAP_SUCCEED6 : DBG_IP_ID_MAP_FAILED6,
orig_dip.p4, *dstID);
orig_dip.p4, *dst_id);
}

/* When an endpoint connects to itself via service clusterIP, we need
Expand All @@ -220,11 +220,11 @@ static __always_inline int ipv6_l3_from_lxc(struct __ctx_buff *ctx,
* within the cluster, it must match policy or be dropped. If it's
* bound for the host/outside, perform the CIDR policy check.
*/
verdict = policy_can_egress6(ctx, tuple, SECLABEL, *dstID,
verdict = policy_can_egress6(ctx, tuple, SECLABEL, *dst_id,
&policy_match_type, &audited);

if (ret != CT_REPLY && ret != CT_RELATED && verdict < 0) {
send_policy_verdict_notify(ctx, *dstID, tuple->dport,
send_policy_verdict_notify(ctx, *dst_id, tuple->dport,
tuple->nexthdr, POLICY_EGRESS, 1,
verdict, policy_match_type, audited);
return verdict;
Expand All @@ -234,7 +234,7 @@ static __always_inline int ipv6_l3_from_lxc(struct __ctx_buff *ctx,
switch (ret) {
case CT_NEW:
if (!hairpin_flow)
send_policy_verdict_notify(ctx, *dstID, tuple->dport,
send_policy_verdict_notify(ctx, *dst_id, tuple->dport,
tuple->nexthdr, POLICY_EGRESS, 1,
verdict, policy_match_type, audited);
ct_recreate6:
Expand All @@ -253,7 +253,7 @@ static __always_inline int ipv6_l3_from_lxc(struct __ctx_buff *ctx,

case CT_REOPENED:
if (!hairpin_flow)
send_policy_verdict_notify(ctx, *dstID, tuple->dport,
send_policy_verdict_notify(ctx, *dst_id, tuple->dport,
tuple->nexthdr, POLICY_EGRESS, 1,
verdict, policy_match_type, audited);
case CT_ESTABLISHED:
Expand Down Expand Up @@ -344,7 +344,7 @@ static __always_inline int ipv6_l3_from_lxc(struct __ctx_buff *ctx,
/* If the destination is the local host and per-endpoint routes are
* enabled, jump to the bpf_host program to enforce ingress host policies.
*/
if (*dstID == HOST_ID) {
if (*dst_id == HOST_ID) {
ctx_store_meta(ctx, CB_FROM_HOST, 0);
tail_call_static(ctx, &POLICY_CALL_MAP, HOST_EP_ID);
return DROP_MISSED_TAIL_CALL;
Expand Down Expand Up @@ -397,7 +397,7 @@ static __always_inline int ipv6_l3_from_lxc(struct __ctx_buff *ctx,

#ifdef ENABLE_ROUTING
to_host:
if (is_defined(ENABLE_HOST_FIREWALL) && *dstID == HOST_ID) {
if (is_defined(ENABLE_HOST_FIREWALL) && *dst_id == HOST_ID) {
send_trace_notify(ctx, TRACE_TO_HOST, SECLABEL, HOST_ID, 0,
HOST_IFINDEX, reason, monitor);
return redirect(HOST_IFINDEX, BPF_F_INGRESS);
Expand Down Expand Up @@ -443,15 +443,15 @@ static __always_inline int ipv6_l3_from_lxc(struct __ctx_buff *ctx,
#ifdef TUNNEL_MODE
encrypt_to_stack:
#endif
send_trace_notify(ctx, TRACE_TO_STACK, SECLABEL, *dstID, 0, 0,
send_trace_notify(ctx, TRACE_TO_STACK, SECLABEL, *dst_id, 0, 0,
reason, monitor);

cilium_dbg_capture(ctx, DBG_CAPTURE_DELIVERY, 0);

return CTX_ACT_OK;
}

static __always_inline int handle_ipv6(struct __ctx_buff *ctx, __u32 *dstID)
static __always_inline int handle_ipv6(struct __ctx_buff *ctx, __u32 *dst_id)
{
struct ipv6_ct_tuple tuple = {};
void *data, *data_end;
Expand All @@ -476,22 +476,22 @@ static __always_inline int handle_ipv6(struct __ctx_buff *ctx, __u32 *dstID)

/* Perform L3 action on the frame */
tuple.nexthdr = ip6->nexthdr;
return ipv6_l3_from_lxc(ctx, &tuple, ETH_HLEN, ip6, dstID);
return ipv6_l3_from_lxc(ctx, &tuple, ETH_HLEN, ip6, dst_id);
}

declare_tailcall_if(__or(__and(is_defined(ENABLE_IPV4), is_defined(ENABLE_IPV6)),
is_defined(DEBUG)), CILIUM_CALL_IPV6_FROM_LXC)
int tail_handle_ipv6(struct __ctx_buff *ctx)
{
__u32 dstID = 0;
int ret = handle_ipv6(ctx, &dstID);
__u32 dst_id = 0;
int ret = handle_ipv6(ctx, &dst_id);

if (IS_ERR(ret))
return send_drop_notify(ctx, SECLABEL, dstID, 0, ret,
return send_drop_notify(ctx, SECLABEL, dst_id, 0, ret,
CTX_ACT_DROP, METRIC_EGRESS);

#ifdef ENABLE_CUSTOM_CALLS
if (!encode_custom_prog_meta(ctx, ret, dstID)) {
if (!encode_custom_prog_meta(ctx, ret, dst_id)) {
tail_call_static(ctx, &CUSTOM_CALLS_MAP,
CUSTOM_CALLS_IDX_IPV6_EGRESS);
update_metrics(ctx_full_len(ctx), METRIC_EGRESS,
Expand All @@ -505,7 +505,7 @@ int tail_handle_ipv6(struct __ctx_buff *ctx)

#ifdef ENABLE_IPV4
static __always_inline int handle_ipv4_from_lxc(struct __ctx_buff *ctx,
__u32 *dstID)
__u32 *dst_id)
{
struct ipv4_ct_tuple tuple = {};
#ifdef ENABLE_ROUTING
Expand Down Expand Up @@ -615,7 +615,7 @@ static __always_inline int handle_ipv4_from_lxc(struct __ctx_buff *ctx,

info = lookup_ip4_remote_endpoint(orig_dip);
if (info != NULL && info->sec_label) {
*dstID = info->sec_label;
*dst_id = info->sec_label;
tunnel_endpoint = info->tunnel_endpoint;
encrypt_key = get_min_encrypt_key(info->key);
#ifdef ENABLE_WIREGUARD
Expand All @@ -631,11 +631,11 @@ static __always_inline int handle_ipv4_from_lxc(struct __ctx_buff *ctx,
dst_remote_ep = true;
#endif /* ENABLE_WIREGUARD */
} else {
*dstID = WORLD_ID;
*dst_id = WORLD_ID;
}

cilium_dbg(ctx, info ? DBG_IP_ID_MAP_SUCCEED4 : DBG_IP_ID_MAP_FAILED4,
orig_dip, *dstID);
orig_dip, *dst_id);
}

/* When an endpoint connects to itself via service clusterIP, we need
Expand All @@ -651,11 +651,11 @@ static __always_inline int handle_ipv4_from_lxc(struct __ctx_buff *ctx,
* within the cluster, it must match policy or be dropped. If it's
* bound for the host/outside, perform the CIDR policy check.
*/
verdict = policy_can_egress4(ctx, &tuple, SECLABEL, *dstID,
verdict = policy_can_egress4(ctx, &tuple, SECLABEL, *dst_id,
&policy_match_type, &audited);

if (ret != CT_REPLY && ret != CT_RELATED && verdict < 0) {
send_policy_verdict_notify(ctx, *dstID, tuple.dport,
send_policy_verdict_notify(ctx, *dst_id, tuple.dport,
tuple.nexthdr, POLICY_EGRESS, 0,
verdict, policy_match_type, audited);
return verdict;
Expand All @@ -665,7 +665,7 @@ static __always_inline int handle_ipv4_from_lxc(struct __ctx_buff *ctx,
switch (ret) {
case CT_NEW:
if (!hairpin_flow)
send_policy_verdict_notify(ctx, *dstID, tuple.dport,
send_policy_verdict_notify(ctx, *dst_id, tuple.dport,
tuple.nexthdr, POLICY_EGRESS, 0,
verdict, policy_match_type, audited);
ct_recreate4:
Expand All @@ -686,7 +686,7 @@ static __always_inline int handle_ipv4_from_lxc(struct __ctx_buff *ctx,

case CT_REOPENED:
if (!hairpin_flow)
send_policy_verdict_notify(ctx, *dstID, tuple.dport,
send_policy_verdict_notify(ctx, *dst_id, tuple.dport,
tuple.nexthdr, POLICY_EGRESS, 0,
verdict, policy_match_type, audited);
case CT_ESTABLISHED:
Expand Down Expand Up @@ -781,7 +781,7 @@ static __always_inline int handle_ipv4_from_lxc(struct __ctx_buff *ctx,
/* If the destination is the local host and per-endpoint routes are
* enabled, jump to the bpf_host program to enforce ingress host policies.
*/
if (*dstID == HOST_ID) {
if (*dst_id == HOST_ID) {
ctx_store_meta(ctx, CB_FROM_HOST, 0);
tail_call_static(ctx, &POLICY_CALL_MAP, HOST_EP_ID);
return DROP_MISSED_TAIL_CALL;
Expand Down Expand Up @@ -848,7 +848,7 @@ static __always_inline int handle_ipv4_from_lxc(struct __ctx_buff *ctx,

#ifdef ENABLE_ROUTING
to_host:
if (is_defined(ENABLE_HOST_FIREWALL) && *dstID == HOST_ID) {
if (is_defined(ENABLE_HOST_FIREWALL) && *dst_id == HOST_ID) {
send_trace_notify(ctx, TRACE_TO_HOST, SECLABEL, HOST_ID, 0,
HOST_IFINDEX, reason, monitor);
return redirect(HOST_IFINDEX, BPF_F_INGRESS);
Expand Down Expand Up @@ -891,7 +891,7 @@ static __always_inline int handle_ipv4_from_lxc(struct __ctx_buff *ctx,
#if defined(TUNNEL_MODE) || defined(ENABLE_EGRESS_GATEWAY)
encrypt_to_stack:
#endif
send_trace_notify(ctx, TRACE_TO_STACK, SECLABEL, *dstID, 0, 0,
send_trace_notify(ctx, TRACE_TO_STACK, SECLABEL, *dst_id, 0, 0,
reason, monitor);
cilium_dbg_capture(ctx, DBG_CAPTURE_DELIVERY, 0);
return CTX_ACT_OK;
Expand All @@ -901,15 +901,15 @@ declare_tailcall_if(__or(__and(is_defined(ENABLE_IPV4), is_defined(ENABLE_IPV6))
is_defined(DEBUG)), CILIUM_CALL_IPV4_FROM_LXC)
int tail_handle_ipv4(struct __ctx_buff *ctx)
{
__u32 dstID = 0;
int ret = handle_ipv4_from_lxc(ctx, &dstID);
__u32 dst_id = 0;
int ret = handle_ipv4_from_lxc(ctx, &dst_id);

if (IS_ERR(ret))
return send_drop_notify(ctx, SECLABEL, dstID, 0, ret,
return send_drop_notify(ctx, SECLABEL, dst_id, 0, ret,
CTX_ACT_DROP, METRIC_EGRESS);

#ifdef ENABLE_CUSTOM_CALLS
if (!encode_custom_prog_meta(ctx, ret, dstID)) {
if (!encode_custom_prog_meta(ctx, ret, dst_id)) {
tail_call_static(ctx, &CUSTOM_CALLS_MAP,
CUSTOM_CALLS_IDX_IPV4_EGRESS);
update_metrics(ctx_full_len(ctx), METRIC_EGRESS,
Expand Down
8 changes: 4 additions & 4 deletions bpf/lib/metrics.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
static __always_inline void update_metrics(__u64 bytes, __u8 direction,
__u8 reason)
{
struct metrics_value *entry, newEntry = {};
struct metrics_value *entry, new_entry = {};
struct metrics_key key = {};

key.reason = reason;
Expand All @@ -36,9 +36,9 @@ static __always_inline void update_metrics(__u64 bytes, __u8 direction,
entry->count += 1;
entry->bytes += bytes;
} else {
newEntry.count = 1;
newEntry.bytes = bytes;
map_update_elem(&METRICS_MAP, &key, &newEntry, 0);
new_entry.count = 1;
new_entry.bytes = bytes;
map_update_elem(&METRICS_MAP, &key, &new_entry, 0);
}
}

Expand Down

0 comments on commit d5a5b44

Please sign in to comment.