add /etc/sysctl.d

odrling 2023-07-30 02:23:16 +02:00
parent 8ac6ae0242
commit 8313a6a714
GPG key ID: A0145F975F9F8B75 (no known key found for this signature in database)
4 changed files with 125 additions and 1 deletion


@@ -1,6 +1,6 @@
.PHONY: all install clean
-conf_files := $(wildcard etc/*/*.d/*)
+conf_files := $(wildcard etc/*/*.d/*) $(wildcard etc/*.d/*)
conf_targets := $(foreach path,$(conf_files),/$(path))
all:
@@ -11,4 +11,5 @@ clean:
	rm $(udev_targets) $(conf_targets)
/etc/%: etc/%
	mkdir -p $(@D)
	cp $< $@
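
For a newly added drop-in such as etc/sysctl.d/99-fs.conf, the /etc/%: etc/% pattern rule expands to, in effect:

    mkdir -p /etc/sysctl.d
    cp etc/sysctl.d/99-fs.conf /etc/sysctl.d/99-fs.conf

($(@D) is the directory part of the target, $< the source file, $@ the target path.)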

etc/sysctl.d/99-fs.conf (new file, 5 lines)

@@ -0,0 +1,5 @@
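# Writeback / cache tuning:
#  - vfs_cache_pressure=50: reclaim dentry/inode caches less aggressively than the default of 100
#  - swappiness=10: prefer dropping page cache over swapping out anonymous memory
#  - dirty_background_bytes: start background writeback once ~512 MB of dirty pages accumulate
#  - dirty_ratio=5: writers are throttled once dirty pages exceed 5% of memory
#  - file-max: system-wide limit on open file handles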
vm.vfs_cache_pressure = 50
vm.swappiness = 10
vm.dirty_background_bytes=511870912
vm.dirty_ratio = 5
fs.file-max = 150000

etc/sysctl.d/99-net.conf (new file, 117 lines)

@@ -0,0 +1,117 @@
net.ipv4.tcp_fastopen = 3
net.ipv4.tcp_slow_start_after_idle = 0
net.ipv4.tcp_mtu_probing = 1
# net.ipv4.tcp_sack = 1
net.core.default_qdisc = cake
net.ipv4.tcp_congestion_control = bbr
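# Notes: tcp_fastopen = 3 enables TCP Fast Open for both outgoing (1) and incoming (2) connections.
# cake and bbr need kernel support (the sch_cake and tcp_bbr modules, unless built in); the available
# congestion control algorithms are listed in /proc/sys/net/ipv4/tcp_available_congestion_control.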
# With its 99- prefix this file is applied after lower-numbered /etc/sysctl.d drop-ins, so its settings take precedence
net.ipv4.conf.default.rp_filter = 2
net.ipv4.conf.all.rp_filter = 2
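# rp_filter = 2 is "loose" reverse path filtering (RFC 3704): the source only has to be reachable
# via some interface, not necessarily the one the packet arrived on.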
# net.ipv6.conf.all.disable_ipv6 = 1
# net.ipv6.conf.default.disable_ipv6 = 1
# net.ipv6.conf.lo.disable_ipv6 = 1
#net.ipv4.tcp_adv_win_scale = 4
## Increase max number of PIDs
#kernel.pid_max = 4194303
## Increase range of ports that can be used
net.ipv4.ip_local_port_range = 1024 65535
## https://tweaked.io/guide/kernel/
## Forking servers, like PostgreSQL or Apache, scale to much higher levels of concurrent connections if this is made larger
#kernel.sched_migration_cost_ns=5000000
## https://tweaked.io/guide/kernel/
## Various PostgreSQL users have reported (on the postgresql performance mailing list) gains up to 30% on highly concurrent workloads on multi-core systems
##kernel.sched_autogroup_enabled = 0
# https://github.com/ton31337/tools/wiki/tcp_slow_start_after_idle---tcp_no_metrics_save-performance
# Avoid falling back to slow start after a connection goes idle
#net.ipv4.tcp_slow_start_after_idle=0
#net.ipv4.tcp_no_metrics_save=0
# https://github.com/ton31337/tools/wiki/Is-net.ipv4.tcp_abort_on_overflow-good-or-not%3F
net.ipv4.tcp_abort_on_overflow=0
## Enable TCP window scaling (enabled by default)
## https://en.wikipedia.org/wiki/TCP_window_scale_option
net.ipv4.tcp_window_scaling=1
# Enables fast recycling of TIME_WAIT sockets.
# (Use with caution according to the kernel documentation!)
#net.ipv4.tcp_tw_recycle = 1
# Allow reuse of sockets in TIME_WAIT state for new connections
# only when it is safe from the network stack's perspective.
#net.ipv4.tcp_tw_reuse = 1
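# Note: tcp_tw_recycle was removed in Linux 4.12; on current kernels only tcp_tw_reuse remains.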
## Turn on SYN-flood protections
net.ipv4.tcp_syncookies=1
## Only retry creating TCP connections twice
## Minimize the time it takes for a connection attempt to fail
net.ipv4.tcp_syn_retries=2
net.ipv4.tcp_synack_retries=2
net.ipv4.tcp_orphan_retries=2
## How many retries TCP makes on data segments (default 15)
## Some guides suggest reducing this value
net.ipv4.tcp_retries2=8
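# With the default initial RTO of 1 second, tcp_syn_retries=2 makes an unanswered connect()
# give up after roughly 1 + 2 + 4 = 7 seconds, instead of ~127 seconds with the default of 6 retries.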
## Optimize connection queues
## https://www.linode.com/docs/web-servers/nginx/configure-nginx-for-optimized-performance
## Increase the number of packets that can be queued
net.core.netdev_max_backlog = 3240000
## Max number of "backlogged sockets" (connection requests that can be queued for any given listening socket)
net.core.somaxconn = 50000
## Increase max number of sockets allowed in TIME_WAIT
net.ipv4.tcp_max_tw_buckets = 1440000
## Maximum number of half-open (SYN_RECV) connection requests queued before the kernel starts dropping new SYNs
net.ipv4.tcp_max_syn_backlog = 3240000
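# The accept queue a listener actually gets is min(backlog passed to listen(), somaxconn);
# tcp_max_syn_backlog bounds the separate queue of half-open connections.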
# TCP memory tuning
# View memory TCP actually uses with: cat /proc/net/sockstat
# *** These values are auto-created based on your server specs ***
# *** Edit these parameters with caution because they will use more RAM ***
# Changes suggested by IBM on https://www.ibm.com/developerworks/community/wikis/home?lang=en#!/wiki/Welcome%20to%20High%20Performance%20Computing%20%28HPC%29%20Central/page/Linux%20System%20Tuning%20Recommendations
# Increase the default socket buffer read size (rmem_default) and write size (wmem_default)
# *** Maybe recommended only for high-RAM servers? ***
net.core.rmem_default=16777216
net.core.wmem_default=16777216
# Increase the max socket buffer sizes: rmem_max (receive), wmem_max (send) and optmem_max (per-socket ancillary/option memory)
# 16 MB per socket sounds like a lot, but a socket will virtually never consume that much
# rmem_max and wmem_max act as upper bounds alongside the tcp_rmem/tcp_wmem settings below
net.core.optmem_max=16777216
net.core.rmem_max=16777216
net.core.wmem_max=16777216
# Configure the min, pressure and max values (tcp_mem is measured in pages; tcp_rmem/tcp_wmem are in bytes)
# Useful mostly for very high-traffic servers that have a lot of RAM
# Since the *_max values above are already set to 16777216, these three lines could also be commented out
net.ipv4.tcp_mem=16777216 16777216 16777216
net.ipv4.tcp_wmem=4096 87380 16777216
net.ipv4.tcp_rmem=4096 87380 16777216
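# With 4 KiB pages, tcp_mem = 16777216 pages corresponds to roughly 64 GiB of TCP memory,
# far more than most machines have; scale this down on smaller hosts.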
## Keepalive optimizations
## By default, the keepalive routines wait for two hours (7200 secs) before sending the first keepalive probe,
## and then resend it every 75 seconds. If 9 consecutive probes go unanswered, the connection is marked as broken.
## The default values are: tcp_keepalive_time = 7200, tcp_keepalive_intvl = 75, tcp_keepalive_probes = 9
## Here we decrease the tcp_keepalive_* defaults as follows:
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 10
net.ipv4.tcp_keepalive_probes = 9
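# With these values a dead peer is detected after about 600 + 9*10 = 690 seconds,
# versus 7200 + 9*75 = 7875 seconds with the kernel defaults.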
## The TCP FIN timeout determines how long a connection must stay inactive (in FIN-WAIT-2) before the port can be reused for another connection.
## The default is often 60 seconds, but it can normally be reduced safely to 30 or even 15 seconds
## https://www.linode.com/docs/web-servers/nginx/configure-nginx-for-optimized-performance
net.ipv4.tcp_fin_timeout = 15
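
To apply everything without rebooting (assuming the repository's install target copies $(conf_targets) as the pattern rule above suggests):

    sudo make install
    sudo sysctl --system                       # reload every /etc/sysctl.d drop-in
    sysctl net.ipv4.tcp_congestion_control     # should report bbr if the tcp_bbr module is available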


@@ -0,0 +1 @@
kernel.panic = 5
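# Reboot automatically 5 seconds after a kernel panic (0, the default, means stay halted).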