I am getting two types of errors in my nginx logs.

Error 1: connect() failed (110: Connection timed out) while connecting to upstream

Error 2: recv() failed (104: Connection reset by peer) while reading response header from upstream

My setup is Nginx on the frontend (port 80) proxying to Tomcat 7 as the upstream (port 8080).

I don't understand why I am getting these errors. When I hit Tomcat 7 directly on port 8080 it handles heavy load fine, but the same high load against Nginx on port 80, which proxies the requests to Tomcat 7, fails with these two errors. All the answers I could find were about PHP-FPM, which I am not using, so I have no clue how to fix this.
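For context, the proxy relationship is just a plain HTTP reverse proxy, roughly sketched below (the upstream name here is shortened for illustration; the full configuration follows in the edit):

upstream tomcat7 {
    # Tomcat 7 listening locally on port 8080
    server 127.0.0.1:8080;
}

server {
    # nginx answers on port 80 and forwards everything to Tomcat
    listen 80 default_server;

    location / {
        proxy_pass http://tomcat7;
    }
}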
EDIT: here is my nginx configuration.
user www-data;
worker_processes auto;
worker_rlimit_nofile 10000;
pid /run/nginx.pid;

events {
    worker_connections 2000;
    multi_accept on;
    use epoll;
}

http {
    open_file_cache max=200000 inactive=20s;
    open_file_cache_valid 30s;
    open_file_cache_min_uses 2;
    open_file_cache_errors on;

    reset_timedout_connection on;
    client_body_timeout 200s; # Use 5s for high-traffic sites
    client_header_timeout 200s;

    ##
    # Basic Settings
    ##
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 9000;
    keepalive_requests 100000;
    types_hash_max_size 2048;

    proxy_connect_timeout 16000s;
    proxy_send_timeout 16000s;
    proxy_read_timeout 16000s;
    send_timeout 16000s;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    ##
    # Logging Settings
    ##
    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    ##
    # Gzip Settings
    ##
    gzip on;
    gzip_disable "msie6";

    ##
    # Virtual Host Configs
    ##
    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}
# configuration file /etc/nginx/sites-enabled/default:
upstream _tomcat-stream-beta {
    server 127.0.0.1:8080;
    keepalive 500000;
}

server {
    #recursive_error_pages on;
    listen 80 default_server;
    server_name 127.0.0.1;

    # docshare Root WebSite
    root /usr/share/nginx/www/;

    # error_page 500 502 503 504 =200 /api/testing/errorHandle?headers=$http_attributes;

    error_log /var/log/nginx/stream.error.log;
    access_log /var/log/nginx/stream.access.log;

    client_body_in_file_only on;

    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header Host $http_host;
    large_client_header_buffers 8 64k;

    location ~ /.well-known {
        allow all;
    }

    location @tomcat-stream-beta {
        proxy_pass http://_tomcat-stream-beta;
    }

    location ^~ / {
        proxy_pass http://_tomcat-stream-beta;
        proxy_read_timeout 600000s;
        proxy_connect_timeout 600000s;
        proxy_send_timeout 600000s;
        proxy_ignore_client_abort on;
    }
}
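One thing I am not sure about: according to the nginx documentation, the keepalive directive in the upstream block above only takes effect when the proxied requests use HTTP/1.1 and the Connection header is cleared, i.e. something along these lines in the proxy location (a sketch based on the docs, not verified in this setup):

location ^~ / {
    proxy_pass http://_tomcat-stream-beta;
    # Required for the upstream keepalive connection pool to be used
    proxy_http_version 1.1;
    proxy_set_header Connection "";
}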
I ended up using the configuration below.

We had to speed up our internal code (i.e. its execution time); each request now takes less than 50 ms. The nginx configuration used is:
user www-data;
worker_processes auto;
worker_rlimit_nofile 10000;
pid /run/nginx.pid;

events {
    worker_connections 2000;
    multi_accept on;
    use epoll;
}

http {
    open_file_cache max=200000 inactive=20s;
    open_file_cache_valid 30s;
    open_file_cache_min_uses 2;
    open_file_cache_errors on;

    reset_timedout_connection on;
    client_body_timeout 200s; # Use 5s for high-traffic sites
    client_header_timeout 200s;

    ##
    # Basic Settings
    ##
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 900;
    keepalive_requests 10000;
    types_hash_max_size 2048;

    #proxy_buffering off;
    proxy_connect_timeout 1600;
    proxy_send_timeout 1600;
    proxy_read_timeout 1600;
    send_timeout 1600;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    access_log /var/log/nginx/stream.access.log;
    error_log /var/log/nginx/stream.error.log;

    gzip on;
    gzip_disable "msie6";

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}