# Example from the podcast Sysoparna, episode 2, http://sysoparna.com
# File: sysctl-webserver.conf, which I usually drop into /etc/sysctl.d on busy
# sites with many short connections.
# Taken from somewhere on the internet, but the values are generally sound,
# although you will need to tweak them for your own needs. Please don't run it
# as-is! Best tuned through load testing with a lot of concurrency.
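#
# To load the file once it is in place (the path below is just an example):
#   sysctl -p /etc/sysctl.d/sysctl-webserver.conf
# or reload everything under /etc/sysctl.d with:
#   sysctl --system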
# Max receive buffer size (8 MB)
net.core.rmem_max=8388608
# Max send buffer size (8 MB)
net.core.wmem_max=8388608
# Default receive buffer size
net.core.rmem_default=65536
# Default send buffer size
net.core.wmem_default=65536
# The first value tells the kernel the minimum receive/send buffer for each TCP connection,
# and this buffer is always allocated to a TCP socket,
# even under high pressure on the system. …
# The second value specified tells the kernel the default receive/send buffer
# allocated for each TCP socket. This value overrides the /proc/sys/net/core/rmem_default
# value used by other protocols. … The third and last value specified
# in this variable specifies the maximum receive/send buffer that can be allocated for a TCP socket.
# Note: the kernel auto-tunes these values within the min-max range.
# If for some reason you want to change this behavior, disable
# net.ipv4.tcp_moderate_rcvbuf (set it to 0).
net.ipv4.tcp_rmem=8192 873800 8388608
net.ipv4.tcp_wmem=4096 655360 8388608
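# To inspect the values currently in effect (handy while load testing):
#   sysctl net.ipv4.tcp_rmem net.ipv4.tcp_wmem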
# Units are pages (the page size is typically 4 KB)
# These are global variables limiting the total pages available to all TCP
# sockets
# 8388608 pages * 4 KB = 32 GB
# The three values are: low, pressure, high
# When the memory allocated by TCP exceeds "pressure" (in pages), the kernel
# starts putting pressure on TCP memory usage
# We set all three values high to basically prevent any memory pressure from
# ever occurring on our TCP sockets
net.ipv4.tcp_mem=8388608 8388608 8388608
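# To see how many pages TCP has actually allocated, check the "mem" field
# (also counted in pages) on the TCP line of /proc/net/sockstat:
#   grep TCP: /proc/net/sockstat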
# Increase max number of sockets allowed in TIME_WAIT
net.ipv4.tcp_max_tw_buckets=6000000
# Increase max half-open connections.
net.ipv4.tcp_max_syn_backlog=65536
# Increase max TCP orphans
# These are sockets which have been closed and no longer have a file handle attached to them
net.ipv4.tcp_max_orphans=262144
# Max listen queue backlog
# make sure to increase nginx backlog as well if changed
net.core.somaxconn = 32768
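# In nginx the backlog is set per listen directive, e.g.:
#   listen 80 backlog=32768;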
# Max number of packets that can be queued on interface input
# If the kernel receives packets faster than it can process them,
# this queue fills up
net.core.netdev_max_backlog = 65536
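# Drops caused by this queue overflowing show up in the second column
# of /proc/net/softnet_stat (one row per CPU):
#   cat /proc/net/softnet_stat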
# Only retransmit SYN-ACKs (server side) and SYNs (client side) twice
# Minimizes the time it takes for a failed connection attempt to give up
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_syn_retries = 2
# Tear down connections stuck in FIN-WAIT-2 after 7 seconds instead of the default 60
net.ipv4.tcp_fin_timeout = 7
# Avoid falling back to slow start after a connection goes idle
# Keeps the congestion window (cwnd) large on keep-alive connections
net.ipv4.tcp_slow_start_after_idle = 0
# Increase the max number of file handles.
# This should match what you have in nginx.conf.
fs.file-max = 100000
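# The nginx counterpart would be something like:
#   worker_rlimit_nofile 100000;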
# Use the full range of ports.
net.ipv4.ip_local_port_range = 1024 65535
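# That is 65535 - 1024 + 1 = 64512 local ports available for outgoing
# connections to any given remote (IP, port) pair.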
# Enables fast recycling of TIME_WAIT sockets.
# (Use with caution according to the kernel documentation! It breaks
# clients behind NAT, and the option was removed entirely in Linux 4.12.)
net.ipv4.tcp_tw_recycle = 1
# Allow reuse of sockets in TIME_WAIT state for new connections
# only when it is safe from the network stack’s perspective.
net.ipv4.tcp_tw_reuse = 1
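# To see how many sockets are currently in TIME_WAIT:
#   ss -s   (look at the "timewait" count)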