nginx uwsgi websockets 502 Bad Gateway upstream prematurely closed connection while reading response header from upstream

I've been banging my head against this problem for a few days now and have finally hit a wall.

I've been trying to get my stack up and running by following this guide:

http://django-websocket-redis.readthedocs.org/en/latest/running.html#django-with-websockets-for-redis-behind-nginx-using-uwsgi

I've also been looking at some other SO posts like this one:

They seem to be running into problems similar to mine, but their solutions don't work for me.

Basically, whenever I try to start my uWSGI processes I always end up at the nginx 502 Bad Gateway screen. Following the instructions in the documentation, I have two separate uwsgi processes running.

When I run the websocket uwsgi instance, I get the following:

*** running gevent loop engine [addr:0x487690] ***
[2015-05-27 00:45:34,119 wsgi_server] DEBUG: Subscribed to channels: subscribe-broadcast, publish-broadcast

This tells me the uwsgi instance is running fine. I then run my other uwsgi process, and it doesn't log any errors either...

When I navigate to the page in my browser, the page hangs for a few seconds and then I get the 502 Bad Gateway screen.

According to the NGINX log, NGINX says:

2015/05/26 22:46:08 [error] 18044#0: *3855 upstream prematurely closed connection while reading response header from upstream, client: 192.168.59.3, server: , request: "GET /chat/ HTTP/1.1", upstream: "uwsgi://unix:/opt/django/django.sock:", host: "192.168.59.103:32768"

This is the only error log entry I get when I try to access the page in a web browser.
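For reference, the socket that error points at can be checked directly. Something along these lines (paths taken from my config below; the exact commands are just an illustration) confirms the socket file exists and that the www-data user nginx runs as can reach it:

ls -l /opt/django/django.sock
sudo -u www-data test -w /opt/django/django.sock && echo "nginx worker can write to the socket"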

Does anyone have any ideas???

Below are some of my configuration files:


nginx.conf

user www-data;
worker_processes 4;
pid /run/nginx.pid;

events {
    worker_connections 768;
}

http {

    ##
    # Basic Settings
    ##

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # server_tokens off;

    # server_names_hash_bucket_size 64;
    # server_name_in_redirect off;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    ##
    # SSL Settings
    ##

    ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
    ssl_prefer_server_ciphers on;

    ##
    # Logging Settings
    ##

    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    ##
    # Gzip Settings
    ##

    gzip on;
    gzip_disable "msie6";

    # gzip_vary on;
    # gzip_proxied any;
    # gzip_comp_level 6;
    # gzip_buffers 16 8k;
    # gzip_http_version 1.1;
    # gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;

    ##
    # Virtual Host Configs
    ##

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/django.conf;
}

I have the following django.conf file, which extends nginx.conf:

upstream django {
    server unix:/opt/django/django.sock;
}

server {
    listen 80 default_server;
    charset utf-8;
    client_max_body_size 20M;
    sendfile on;
    keepalive_timeout 0;
    large_client_header_buffers 8 32k;

    location /media {
        alias /opt/django/app/media/media;
    }

    location /static {
        alias /opt/django/app/static;
    }

    location / {
        include /opt/django/uwsgi_params;
    }

    location /ws/ {
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_pass http://unix:/opt/django/app.sock;
        proxy_buffers 8 32k;
        proxy_buffer_size 64k;
    }
}

And the two files responsible for my uwsgi processes are as follows:

runserver_uwsgi.ini:

[uwsgi]
ini = :runserver

[default]
userhome = /opt/django
chdir = %dapp/
master = true
module = chatserver.wsgi:application
no-orphans = true
threads = 1
env = DJANGO_SETTINGS_MODULE=myapp.settings
vacuum = true

[runserver]
ini = :default
socket = /opt/django/app.sock
module = wsgi_django
buffer-size = 32768
processes = 4
chmod-socket=666

and wsserver_uwsgi.ini:

[uwsgi]
ini = :wsserver

[default]
userhome = /opt/django
chdir = %dapp/
master = true
module = chatserver.wsgi:application
no-orphans = true
threads = 1
env = DJANGO_SETTINGS_MODULE=chatserver.settings
vacuum = true

[wsserver]
ini = :default
http-socket = /opt/django/django.sock
module = wsgi_websocket
http-websockets = true
processes = 2
gevent = 1000
chmod-socket=666
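For completeness, with the ini files laid out this way (the ini = :runserver / ini = :wsserver lines in the [uwsgi] section just point back into the same file), each process is started by pointing uwsgi at its own file, roughly like this (the exact paths are my assumption):

uwsgi --ini /opt/django/runserver_uwsgi.ini
uwsgi --ini /opt/django/wsserver_uwsgi.ini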

I found the problem.

My [runserver] socket (app.sock) should be the one referenced under upstream django, and my [wsserver] socket (django.sock) should be the one referenced under location /ws/, like so:

upstream django {
    server unix:/opt/django/app.sock;
}

server {
    listen 80 default_server;
    charset utf-8;
    client_max_body_size 20M;
    sendfile on;
    keepalive_timeout 0;
    large_client_header_buffers 8 32k;

    location /media {
        alias /opt/django/app/media/media;
    }

    location /static {
        alias /opt/django/app/static;
    }

    location / {
        include /opt/django/uwsgi_params;
    }

    location /ws/ {
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_pass http://unix:/opt/django/django.sock;
        proxy_buffers 8 32k;
        proxy_buffer_size 64k;
    }
}
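After swapping the two sockets, a quick sanity check along these lines (the commands are illustrative; the reload command varies by distro) shows whether the 502 is gone:

sudo nginx -t && sudo service nginx reload          # validate and reload the corrected django.conf
ls -l /opt/django/app.sock /opt/django/django.sock  # both sockets should exist once the two uwsgi processes are up
tail -f /var/log/nginx/error.log                    # no more "upstream prematurely closed connection" entries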

I had the same problem, but it was not my NGINX configuration. It was my uWSGI process count causing timeout errors whenever I POSTed JSON from the client to the server. I had processes set to 5; I changed it to 1 and that solved the problem. For my application I only need one process running at a time, since there is no need to overload AWS with multiple processes.

Here is the working uWSGI configuration ini file that fixed both the timeout issue and the 502 gateway issue.

autoboot.ini

#!/bin/bash

[uwsgi]
socket          = /tmp/app.sock

master          = true

chmod-socket    = 660
module          = app.wsgi
chdir           = home/app

close-on-exec = true # Allow linux shell via uWSGI

processes = 1
threads = 2
vacuum = true

die-on-term = true
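With an ini file like this, the process is launched by pointing uwsgi at it, roughly:

uwsgi --ini autoboot.ini   # run from the directory containing the file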

And here is my nginx config as well.

nginx.conf

# the upstream component nginx needs to connect to
upstream django {
    server unix:///app/tmp/app.sock; # for a file socket
    # server 127.0.0.1:6000; # for a web port socket (we'll use this first)
}

# configuration of the server
server {
    # the port your site will be served on
    listen      80;

    # the domain name it will serve for
    server_name XXX.XXX.XX.X; # actual IP in here
    charset     utf-8;

    # max upload size
    client_max_body_size 75M;   # adjust to taste

    error_log /var/log/nginx/error.log;
    access_log /var/log/nginx/access.log;

    # Finally, send all non-media requests to the Django server.
    location / {
        uwsgi_pass  django;
        include     uwsgi_params;
    }

    location /static {
        autoindex on;
        alias app/static; # your Django project's static files - amend as required
    }

    error_page 502 /502.html;
    location = /502.html {
        alias app/templates/502autoreload.html;
    }

    client_body_timeout 100s;
    uwsgi_read_timeout 500s;
    keepalive_timeout 300;
}
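To confirm the timeout fix, the JSON POST that used to time out can be replayed with curl, for example (the endpoint path and payload file here are placeholders):

curl -X POST -H "Content-Type: application/json" -d @payload.json http://XXX.XXX.XX.X/some-endpoint/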