-
Notifications
You must be signed in to change notification settings - Fork 2.9k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
bpf: Do not pre-allocate BPF maps by default #6357
Changes from 4 commits
097638c
d997014
72290e2
1861fc4
4883977
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,2 +1,2 @@ | ||
GO_BINDATA_SHA1SUM=6c7bb1b20d6162fe92e001fec0d2f573dbfd6409 | ||
GO_BINDATA_SHA1SUM=e690fbe6cd7f96ea3865db6a3935e01f917c9935 | ||
BPF_FILES=../bpf/.gitignore ../bpf/COPYING ../bpf/Makefile ../bpf/Makefile.bpf ../bpf/bpf_features.h ../bpf/bpf_lb.c ../bpf/bpf_lxc.c ../bpf/bpf_netdev.c ../bpf/bpf_overlay.c ../bpf/bpf_xdp.c ../bpf/cilium-map-migrate.c ../bpf/filter_config.h ../bpf/include/bpf/api.h ../bpf/include/elf/elf.h ../bpf/include/elf/gelf.h ../bpf/include/elf/libelf.h ../bpf/include/iproute2/bpf_elf.h ../bpf/include/linux/bpf.h ../bpf/include/linux/bpf_common.h ../bpf/include/linux/byteorder.h ../bpf/include/linux/byteorder/big_endian.h ../bpf/include/linux/byteorder/little_endian.h ../bpf/include/linux/icmp.h ../bpf/include/linux/icmpv6.h ../bpf/include/linux/if_arp.h ../bpf/include/linux/if_ether.h ../bpf/include/linux/in.h ../bpf/include/linux/in6.h ../bpf/include/linux/ioctl.h ../bpf/include/linux/ip.h ../bpf/include/linux/ipv6.h ../bpf/include/linux/perf_event.h ../bpf/include/linux/swab.h ../bpf/include/linux/tcp.h ../bpf/include/linux/type_mapper.h ../bpf/include/linux/udp.h ../bpf/init.sh ../bpf/lib/arp.h ../bpf/lib/common.h ../bpf/lib/conntrack.h ../bpf/lib/csum.h ../bpf/lib/dbg.h ../bpf/lib/drop.h ../bpf/lib/encap.h ../bpf/lib/eps.h ../bpf/lib/eth.h ../bpf/lib/events.h ../bpf/lib/icmp6.h ../bpf/lib/ipv4.h ../bpf/lib/ipv6.h ../bpf/lib/l3.h ../bpf/lib/l4.h ../bpf/lib/lb.h ../bpf/lib/lxc.h ../bpf/lib/maps.h ../bpf/lib/metrics.h ../bpf/lib/nat46.h ../bpf/lib/policy.h ../bpf/lib/trace.h ../bpf/lib/utils.h ../bpf/lib/xdp.h ../bpf/lxc_config.h ../bpf/netdev_config.h ../bpf/node_config.h ../bpf/probes/raw_change_tail.t ../bpf/probes/raw_insn.h ../bpf/probes/raw_invalidate_hash.t ../bpf/probes/raw_lpm_map.t ../bpf/probes/raw_lru_map.t ../bpf/probes/raw_main.c ../bpf/probes/raw_map_val_adj.t ../bpf/probes/raw_mark_map_val.t ../bpf/run_probes.sh ../bpf/sockops/Makefile ../bpf/sockops/bpf_redir.c ../bpf/sockops/bpf_sockops.c ../bpf/sockops/bpf_sockops.h ../bpf/sockops/sockops_config.h ../bpf/spawn_netns.sh |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,4 +1,4 @@ | ||
// Copyright 2016-2017 Authors of Cilium | ||
// Copyright 2016-2019 Authors of Cilium | ||
// | ||
// Licensed under the Apache License, Version 2.0 (the "License"); | ||
// you may not use this file except in compliance with the License. | ||
|
@@ -22,6 +22,7 @@ import ( | |
"github.com/cilium/cilium/pkg/loadbalancer" | ||
"github.com/cilium/cilium/pkg/logging/logfields" | ||
"github.com/cilium/cilium/pkg/maps/lbmap" | ||
"github.com/cilium/cilium/pkg/maps/proxymap" | ||
"github.com/cilium/cilium/pkg/option" | ||
"github.com/cilium/cilium/pkg/service" | ||
|
||
|
@@ -426,6 +427,40 @@ func (d *Daemon) RevNATDump() ([]loadbalancer.L3n4AddrID, error) { | |
return dump, nil | ||
} | ||
|
||
func openServiceMaps() error { | ||
if option.Config.EnableIPv6 { | ||
if _, err := lbmap.Service6Map.OpenOrCreate(); err != nil { | ||
return err | ||
} | ||
if _, err := lbmap.RevNat6Map.OpenOrCreate(); err != nil { | ||
return err | ||
} | ||
if _, err := lbmap.RRSeq6Map.OpenOrCreate(); err != nil { | ||
return err | ||
} | ||
if _, err := proxymap.Proxy6Map.OpenOrCreate(); err != nil { | ||
return err | ||
} | ||
} | ||
|
||
if option.Config.EnableIPv4 { | ||
if _, err := lbmap.Service4Map.OpenOrCreate(); err != nil { | ||
return err | ||
} | ||
if _, err := lbmap.RevNat4Map.OpenOrCreate(); err != nil { | ||
return err | ||
} | ||
if _, err := lbmap.RRSeq4Map.OpenOrCreate(); err != nil { | ||
return err | ||
} | ||
if _, err := proxymap.Proxy4Map.OpenOrCreate(); err != nil { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Only slight nit I have here is that proxy maps are unrelated to the loadbalancer code, but I think having this code in a separate function is better than how it is currently in master. |
||
return err | ||
} | ||
} | ||
|
||
return nil | ||
} | ||
|
||
func restoreServiceIDs() { | ||
svcMap, _, errors := lbmap.DumpServiceMapsToUserspace(true) | ||
for _, err := range errors { | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -70,6 +70,24 @@ data: | |
ct-global-max-entries-tcp: "524288" | ||
ct-global-max-entries-other: "262144" | ||
|
||
# Pre-allocation of map entries allows per-packet latency to be reduced, at | ||
# the expense of up-front memory allocation for the entries in the maps. The | ||
# default value below will minimize memory usage in the default installation; | ||
# users who are sensitive to latency may consider setting this to "true". | ||
# | ||
# This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore | ||
# this option and behave as though it is set to "true". | ||
# | ||
# If this value is modified, then during the next Cilium startup the restore | ||
# of existing endpoints and tracking of ongoing connections may be disrupted. | ||
# This may lead to policy drops or a change in loadbalancing decisions for a | ||
# connection for some time. Endpoints may need to be recreated to restore | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This implies that the state is not able to be recovered over time? Or is it just that because the applications may get into a bad state themselves vs. in a bad state on the Cilium side? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
EDIT: The above is wrong — that result was obtained with a different setting (the inline value was lost in rendering). In local upgrade testing of just the Cilium component, it surprisingly seems to update pretty seamlessly (admittedly without testing services, which I'd expect to break due to loadbalancing decisions being lost). I suspect that in the upgrade test, because the etcd instance is being redeployed, some more serious problems occur during bootstrap which take some time to get back into a good state. |
||
# connectivity. | ||
# | ||
# If this option is set to "false" during an upgrade from 1.3 or earlier to | ||
# 1.4 or later, then it may cause one-time disruptions during the upgrade. | ||
preallocate-bpf-maps: "false" | ||
|
||
# Regular expression matching compatible Istio sidecar istio-proxy | ||
# container image names | ||
sidecar-istio-proxy-image: "cilium/istio_proxy" | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Thanks for putting this into a separate function!