Tags: python, nginx, tornado, supervisord

Performance of Tornado behind Nginx is similar to using Tornado alone


I tried to improve the performance of my service by running Tornado (4 processes) behind Supervisor and Nginx. Once everything was configured, I used Siege to load-test the service.

However, the results did not show any advantage from using multiple processes. I expected this combination to handle more requests than a single process, but it did not. I still cannot figure out why, because Nginx did seem to dispatch requests to the different Tornado processes successfully.
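The server.py under test is not shown in the question; purely for reference, a minimal single-process Tornado app consistent with this setup (one --port option per process, as passed by the supervisord config below) might look roughly like the sketch that follows. The handler that echoes its pid and port is hypothetical, added only to make Nginx's dispatching visible.

server.py (hypothetical sketch):

import os

import tornado.ioloop
import tornado.web
from tornado.options import define, options, parse_command_line

define("port", default=8000, type=int, help="port to listen on")

class PingHandler(tornado.web.RequestHandler):
    def get(self):
        # Echo which process answered, so the dispatching across backends is visible.
        self.write("pid=%d port=%d\n" % (os.getpid(), options.port))

if __name__ == "__main__":
    parse_command_line()
    app = tornado.web.Application([(r"/", PingHandler)])
    app.listen(options.port)
    tornado.ioloop.IOLoop.current().start()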

supervisord.conf:

[group:a]
programs=a-server, a-db

[program:a-server]
; --port=80%(process_num)02d expands to ports 8000-8003, matching the nginx upstream below
command=python2.7 /my/path/to/app/server.py --port=80%(process_num)02d
directory=/my/path/to/app
numprocs=4
process_name=%(program_name)s%(process_num)d
autorestart=true
loglevel=info
redirect_stderr=true
stdout_logfile=/my/path/to/app/server.log
stdout_logfile_maxbytes=50MB
stdout_logfile_backups=10

[program:a-db]
command=mongod --dbpath /path/to/db
numprocs=1
process_name=db
autorestart=true
redirect_stderr=true
stdout_logfile=/my/path/to/app/db.log
stdout_logfile_maxbytes=30MB
stdout_logfile_backups=10

[inet_http_server]
port=127.0.0.1:9001

[supervisord]
logfile=/my/path/to/app/supervisord.log
logfile_maxbytes=50MB
logfile_backups=10
pidfile=/my/path/to/app/supervisord.pid


[supervisorctl]
serverurl=http://127.0.0.1:9001


[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
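As a quick sanity check that supervisord really started all four a-server processes, the [inet_http_server] and [rpcinterface:supervisor] sections above expose Supervisor's standard XML-RPC API on port 9001; a minimal (hypothetical) helper using it could look like this:

check_procs.py (hypothetical helper):

import xmlrpclib  # Python 2.7; the module is xmlrpc.client on Python 3

proxy = xmlrpclib.ServerProxy("http://127.0.0.1:9001/RPC2")
# getAllProcessInfo() returns one dict per managed process (name, group, statename, ...).
for info in proxy.supervisor.getAllProcessInfo():
    print("%s:%s -> %s" % (info["group"], info["name"], info["statename"]))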

Nginx.conf:

worker_processes 1;

error_log /my/path/to/app/nginx.log;
pid /my/path/to/app/nginx.pid;

events {
    worker_connections  1024;
    # Using Mac
    use kqueue;
}

http {
    # The four Tornado processes started by supervisord (ports 8000-8003)
    upstream a {
        server 127.0.0.1:8000;
        server 127.0.0.1:8001;
        server 127.0.0.1:8002;
        server 127.0.0.1:8003;
    }

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    access_log /my/path/to/app/access.log;

    gzip on;
    gzip_min_length 20;
    gzip_proxied any;
    gzip_types text/plain text/css text/xml application/javascript
               application/x-javascript application/xml
               application/atom+xml text/javascript;

    server {
        listen 80;
        server_name localhost;

        location /nginx_status {
             stub_status on;
             access_log off;
             allow 127.0.0.1;
             deny all;
        }

        location / {
            proxy_pass_header Server;
            proxy_set_header Host $http_host;
            proxy_redirect off;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Scheme $scheme;
            proxy_pass http://a;
        }
    }
}
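To double-check that Nginx really spreads requests across the four backends (and as a first profiling step), one can tally which process answers each request; this assumes the hypothetical pid/port handler sketched above:

dispatch_check.py (hypothetical helper):

import collections
import urllib2  # Python 2.7

counts = collections.Counter()
for _ in range(400):
    # Each response body identifies the Tornado process that served it.
    counts[urllib2.urlopen("http://127.0.0.1/").read().strip()] += 1

for backend, hits in counts.most_common():
    print("%4d  %s" % (hits, backend))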

Please help me find the problem, or tell me how to profile the service and find the bottleneck. Thank you very much!

Environment: MacBook Pro, 2.4 GHz Intel Core 2 Duo.


Solution

  • Some hints:

    # worker_processes is sometimes calculated as 2 * number of CPUs,
    # sometimes as number of backends + a spare.
    # 1 is not enough - that is effectively a single process again, like Tornado itself.
    worker_processes  4;

    # Each connection needs a file handle (or 2 if you are proxying).
    worker_rlimit_nofile  20000;

    events {
        # The key to high concurrency: have a lot of connections available.
        worker_connections  19000;

        # Accept as many connections as possible once nginx is notified of a new connection.
        # May flood worker_connections if that option is set too low.
        multi_accept on;
    }

    http {
        sendfile on;
        tcp_nopush on;
        tcp_nodelay on;

        # Allow the server to close the connection after a client stops responding.
        reset_timedout_connection on;
    }
    

    Total number of clients you can serve simultaneously = worker_processes * worker_connections (here 4 * 19000 = 76,000).

    Check also the per-process limits of the nginx (and Tornado) processes, in particular the open-file limit (nofile, ulimit -n); a Python sketch for checking it follows these hints.

    Check out also Tuning nginx worker_process to obtain 100k hits per min, from which a larger part of the above is copied.
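    If you prefer to check that limit from the Tornado processes themselves rather than from the shell, the standard-library resource module can report it (and raise the soft limit up to the hard limit); a minimal sketch:

    import resource

    # Current soft/hard limits on open file descriptors (nofile) for this process.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    print("nofile: soft=%d hard=%d" % (soft, hard))

    # The soft limit can be raised up to the hard limit without root privileges.
    if soft < hard and hard != resource.RLIM_INFINITY:
        resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))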