# /etc/sysctl.conf
fs.file-max = 6000000        # system-wide, needs headroom over target

# /etc/security/limits.conf
* soft nofile 6000000        # per-process limit that bit us
* hard nofile 6000000
net.core.somaxconn = 4096              # completed handshakes waiting for accept()
net.ipv4.tcp_max_syn_backlog = 8192    # half-open connections during handshake
net.core.netdev_max_backlog = 5000     # NIC queue before kernel processes
net.core.rmem_max = 16777216               # 16MB max recv buffer
net.core.wmem_max = 16777216               # 16MB max send buffer
net.ipv4.tcp_rmem = 4096 87380 16777216    # min, default, max
net.ipv4.tcp_wmem = 4096 65536 16777216
// Works at 10K-50K connections // At millions this kills you func acceptConnections(listener net.Listener) { for { conn, err := listener.Accept() // waits for connection if err != nil { log.Printf("error: %v", err) // log these! continue } go handleClient(conn) // goroutine per connection - doesn't scale } } func handleClient(conn net.Conn) { defer conn.Close() // cleanup buf := make([]byte, 4096) // 4KB × 5M = 20GB in buffers alone for { n, err := conn.Read(buf) // yields to epoll internally if err != nil { return } processMessage(buf[:n]) } }
// Works at 10K-50K connections // At millions this kills you func acceptConnections(listener net.Listener) { for { conn, err := listener.Accept() // waits for connection if err != nil { log.Printf("error: %v", err) // log these! continue } go handleClient(conn) // goroutine per connection - doesn't scale } } func handleClient(conn net.Conn) { defer conn.Close() // cleanup buf := make([]byte, 4096) // 4KB × 5M = 20GB in buffers alone for { n, err := conn.Read(buf) // yields to epoll internally if err != nil { return } processMessage(buf[:n]) } }
// Works at 10K-50K connections // At millions this kills you func acceptConnections(listener net.Listener) { for { conn, err := listener.Accept() // waits for connection if err != nil { log.Printf("error: %v", err) // log these! continue } go handleClient(conn) // goroutine per connection - doesn't scale } } func handleClient(conn net.Conn) { defer conn.Close() // cleanup buf := make([]byte, 4096) // 4KB × 5M = 20GB in buffers alone for { n, err := conn.Read(buf) // yields to epoll internally if err != nil { return } processMessage(buf[:n]) } }
// Multi-core scaling with SO_REUSEPORT async fn start_server() -> std::io::Result<()> { let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?; socket.set_reuse_port(true)?; // multiple processes same port socket.bind(&"0.0.0.0:8080".parse().unwrap())?; socket.listen(4096)?; // matches somaxconn let listener = TcpListener::from_std(socket.into())?; loop { let (stream, _) = listener.accept().await?; tokio::spawn(async move { /* handle */ }); } }
// Multi-core scaling with SO_REUSEPORT async fn start_server() -> std::io::Result<()> { let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?; socket.set_reuse_port(true)?; // multiple processes same port socket.bind(&"0.0.0.0:8080".parse().unwrap())?; socket.listen(4096)?; // matches somaxconn let listener = TcpListener::from_std(socket.into())?; loop { let (stream, _) = listener.accept().await?; tokio::spawn(async move { /* handle */ }); } }
// Multi-core scaling with SO_REUSEPORT async fn start_server() -> std::io::Result<()> { let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?; socket.set_reuse_port(true)?; // multiple processes same port socket.bind(&"0.0.0.0:8080".parse().unwrap())?; socket.listen(4096)?; // matches somaxconn let listener = TcpListener::from_std(socket.into())?; loop { let (stream, _) = listener.accept().await?; tokio::spawn(async move { /* handle */ }); } }
At 2M connections:
  tcp_sock:     ~1.5KB → ~3.0GB
  inet_sock:    ~0.7KB → ~1.4GB
  nf_conntrack: ~0.9KB → ~1.8GB
iptables -t raw -A PREROUTING -p tcp --dport 8080 -j NOTRACK
iptables -t raw -A OUTPUT -p tcp --sport 8080 -j NOTRACK
net.ipv4.tcp_keepalive_time = 300    # 5min not 2 hours
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 3

- 🚀 Follow The Speed Engineer for more Rust, Go and high-performance engineering stories.
- 💡 Like this article? Follow for daily speed-engineering benchmarks and tactics.
- ⚡ Stay ahead in Rust and Go — follow for a fresh article every morning & night.