Nginx、Varnish 和 Django 的多个域都 return 相同的缓存页面
Multiple domains with Nginx, Varnish and Django all return the same cached page
我正在尝试为以前只使用一个的现有项目配置第二个域。但是 Varnish 总是 returns 来自第一个域的缓存页面。因此,当我访问第二个域时,我看到的是第一个域的内容。我的配置如下:
注:
- 99% 的配置已经存在
- 我更改了域名并删除了一些 SLL 配置,因此 post 更清楚。
- 两个域使用相同的 html 页面,但内容略有不同。
- 我对nginx和varnish一窍不通
NGINX
server_tokens off;
resolver 127.0.0.53 ipv6=off;
upstream django_app_server {
server unix:/home/test/run/gunicorn.sock fail_timeout=0;
}
#http redirect too https.
server {
listen 80 default_server;
server_name _;
return 301 https://$host$request_uri;
}
server {
server_name existingdomain.com newdomain.com;
listen 443 ssl default deferred;
# match with actual application server
client_max_body_size 10M;
keepalive_timeout 60s;
# proxy the request through varnish before sending it to gunicorn.
location / {
proxy_pass http://127.0.0.1:6081;
}
}
server {
listen 8000;
server_name existingdomain.com newdomain.com;
root /home/test/www;
location / {
proxy_pass_header Server;
proxy_redirect off;
proxy_connect_timeout 60;
proxy_read_timeout 60;
proxy_set_header Host existingdomain.com; #changed to $host but results in 127.0.0.1 instead of domains
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_pass http://django_app_server;
}
client_max_body_size 10M;
keepalive_timeout 60s;
}
清漆
vcl 4.0;
import std;
import directors;
acl purgers {
"localhost";
}
backend default {
.host = "127.0.0.1";
.port = "8000";
}
sub vcl_recv {
# uncruft
unset req.http.User-Agent;
unset req.http.Accept;
unset req.http.Accept-Language;
# Normalize the query arguments
set req.url = std.querysort(req.url);
# Fix client IP forwarding
# Note: this expects Varnish to be behind upstream that sets this securely
if (req.http.x-forwarded-for) {
set req.http.X-Forwarded-For = req.http.X-Forwarded-For + ", " + client.ip;
} else {
set req.http.X-Forwarded-For = client.ip;
}
}
# allow purge
if (req.method == "PURGE") {
if (client.ip ~ purgers) {
return(purge);
} else {
return(synth(403, "Access denied."));
}
}
elif (req.method == "BAN") {
if (client.ip ~ purgers) {
# assumes the ``X-Ban`` header is a regex
ban("obj.http.x-url ~ " + req.http.x-ban);
return(synth(200, "Ban added"));
} else {
return(synth(403, "Access denied."));
}
}
# only head/get
if (req.method != "GET" && req.method != "HEAD") {
return(pass);
}
# kill cookies for everything else
unset req.http.Cookie;
return(hash);
}
sub vcl_backend_response {
# keep for lurker bans
set beresp.http.x-url = bereq.url;
# do the gzip dance with nginx
if (beresp.http.content-type ~ "^text/" || beresp.http.content-type ~ "^application/json") {
set beresp.do_gzip = true;
}
if (beresp.ttl <= 0s || beresp.http.Cache-Control ~ "no-cache|no-store|private") {
# mark as "Hit-For-Pass"
if (beresp.ttl <= 0s || beresp.http.Cache-Control ~ "no-cache|no-store|private") {
# mark as "Hit-For-Pass"
set beresp.ttl = 1m;
set beresp.uncacheable = true;
return (deliver);
}
# stop server error hammering
if (beresp.status == 500 || beresp.status == 502 || beresp.status == 503 || beresp.status == 504) {
set beresp.ttl = 5s;
unset beresp.http.Set-Cookie;
return (deliver);
}
# stop 404 hammering
if (beresp.status == 404) {
set beresp.ttl = 10s;
unset beresp.http.Set-Cookie;
return (deliver);
}
# don't cache 40x responses
if (beresp.status == 400 || beresp.status == 401 || beresp.status == 402 || beresp.status == 403) {
set beresp.ttl = 5m;
set beresp.uncacheable = true;
unset beresp.http.Set-Cookie;
return (deliver);
}
unset beresp.http.Set-Cookie;
set beresp.grace = 2m;
set beresp.ttl = 5m;
return (deliver);
}
sub vcl_deliver {
# for internal use only
unset resp.http.x-url;
# debug info
if (obj.hits > 0) {
set resp.http.X-Cache = "hit";
} else {
set resp.http.X-Cache = "miss";
}
# set resp.http.X-Cache-Hits = obj.hits;
# cleanup headers
# cleanup headers
unset resp.http.Server;
unset resp.http.X-Varnish;
unset resp.http.Via;
return (deliver);
}
sub vcl_purge {
# only handle actual PURGE HTTP methods, everything else is discarded
if (req.method != "PURGE") {
# restart request
set req.http.X-Purge = "Yes";
return(restart);
}
}
我试过了:
- 更改了 NGINX:
proxy_set_header Host existingdomain.com;
到 proxy_set_header Host $host;
- 更改了 NGINX:2 个服务器配置列表到两个域的端口 8000
- 已更改 VARNISH:两个域的 2 个后端配置
我想要什么:
- 最终我想要 2 个不同的域和几个子域,它们都需要自己的清漆缓存。
以下 Nginx 设置导致 Varnish 仅提供来自 existingdomain.com
的页面,即使请求了其他主机:
proxy_set_header Host existingdomain.com;
Varnish 使用 URL 和 hostname 来识别缓存中的对象。当您在 Nginx 中将主机名硬编码为 existingdomain.com
时,您将始终得到相同的内容。
请将此设置更改为以下值:
proxy_set header Host $hostname;
以下配置行:
proxy_set_header Host existingdomain.com;
为两个域发送相同的 Host:
header。
应该是:
proxy_set_header Host $host;
另一个答案提到 $hostname
变量是不正确的,因为它代表机器名称。而你想要 $host
因为这等于来自客户端的 Host:
header 的值。
# proxy the request through varnish before sending it to gunicorn.
location / {
proxy_pass http://127.0.0.1:6081;
}
您没有将任何细节传递给清漆。所以 Varnish 无法确定要使用哪个域。也许像
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass http://127.0.0.1:6081;
另外,就像其他人所说的那样,您需要更新要发送到的主机 gunicorn
我正在尝试为以前只使用一个的现有项目配置第二个域。但是 Varnish 总是 returns 来自第一个域的缓存页面。因此,当我访问第二个域时,我看到的是第一个域的内容。我的配置如下:
注:
- 99% 的配置已经存在
- 我更改了域名并删除了一些 SLL 配置,因此 post 更清楚。
- 两个域使用相同的 html 页面,但内容略有不同。
- 我对nginx和varnish一窍不通
NGINX
server_tokens off;
resolver 127.0.0.53 ipv6=off;
upstream django_app_server {
server unix:/home/test/run/gunicorn.sock fail_timeout=0;
}
#http redirect too https.
server {
listen 80 default_server;
server_name _;
return 301 https://$host$request_uri;
}
server {
server_name existingdomain.com newdomain.com;
listen 443 ssl default deferred;
# match with actual application server
client_max_body_size 10M;
keepalive_timeout 60s;
# proxy the request through varnish before sending it to gunicorn.
location / {
proxy_pass http://127.0.0.1:6081;
}
}
server {
listen 8000;
server_name existingdomain.com newdomain.com;
root /home/test/www;
location / {
proxy_pass_header Server;
proxy_redirect off;
proxy_connect_timeout 60;
proxy_read_timeout 60;
proxy_set_header Host existingdomain.com; #changed to $host but results in 127.0.0.1 instead of domains
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_pass http://django_app_server;
}
client_max_body_size 10M;
keepalive_timeout 60s;
}
清漆
vcl 4.0;
import std;
import directors;
acl purgers {
"localhost";
}
backend default {
.host = "127.0.0.1";
.port = "8000";
}
sub vcl_recv {
# uncruft
unset req.http.User-Agent;
unset req.http.Accept;
unset req.http.Accept-Language;
# Normalize the query arguments
set req.url = std.querysort(req.url);
# Fix client IP forwarding
# Note: this expects Varnish to be behind upstream that sets this securely
if (req.http.x-forwarded-for) {
set req.http.X-Forwarded-For = req.http.X-Forwarded-For + ", " + client.ip;
} else {
set req.http.X-Forwarded-For = client.ip;
}
}
# allow purge
if (req.method == "PURGE") {
if (client.ip ~ purgers) {
return(purge);
} else {
return(synth(403, "Access denied."));
}
}
elif (req.method == "BAN") {
if (client.ip ~ purgers) {
# assumes the ``X-Ban`` header is a regex
ban("obj.http.x-url ~ " + req.http.x-ban);
return(synth(200, "Ban added"));
} else {
return(synth(403, "Access denied."));
}
}
# only head/get
if (req.method != "GET" && req.method != "HEAD") {
return(pass);
}
# kill cookies for everything else
unset req.http.Cookie;
return(hash);
}
sub vcl_backend_response {
# keep for lurker bans
set beresp.http.x-url = bereq.url;
# do the gzip dance with nginx
if (beresp.http.content-type ~ "^text/" || beresp.http.content-type ~ "^application/json") {
set beresp.do_gzip = true;
}
if (beresp.ttl <= 0s || beresp.http.Cache-Control ~ "no-cache|no-store|private") {
# mark as "Hit-For-Pass"
if (beresp.ttl <= 0s || beresp.http.Cache-Control ~ "no-cache|no-store|private") {
# mark as "Hit-For-Pass"
set beresp.ttl = 1m;
set beresp.uncacheable = true;
return (deliver);
}
# stop server error hammering
if (beresp.status == 500 || beresp.status == 502 || beresp.status == 503 || beresp.status == 504) {
set beresp.ttl = 5s;
unset beresp.http.Set-Cookie;
return (deliver);
}
# stop 404 hammering
if (beresp.status == 404) {
set beresp.ttl = 10s;
unset beresp.http.Set-Cookie;
return (deliver);
}
# don't cache 40x responses
if (beresp.status == 400 || beresp.status == 401 || beresp.status == 402 || beresp.status == 403) {
set beresp.ttl = 5m;
set beresp.uncacheable = true;
unset beresp.http.Set-Cookie;
return (deliver);
}
unset beresp.http.Set-Cookie;
set beresp.grace = 2m;
set beresp.ttl = 5m;
return (deliver);
}
sub vcl_deliver {
# for internal use only
unset resp.http.x-url;
# debug info
if (obj.hits > 0) {
set resp.http.X-Cache = "hit";
} else {
set resp.http.X-Cache = "miss";
}
# set resp.http.X-Cache-Hits = obj.hits;
# cleanup headers
# cleanup headers
unset resp.http.Server;
unset resp.http.X-Varnish;
unset resp.http.Via;
return (deliver);
}
sub vcl_purge {
# only handle actual PURGE HTTP methods, everything else is discarded
if (req.method != "PURGE") {
# restart request
set req.http.X-Purge = "Yes";
return(restart);
}
}
我试过了:
- 更改了 NGINX:
proxy_set_header Host existingdomain.com;
到proxy_set_header Host $host;
- 更改了 NGINX:2 个服务器配置列表到两个域的端口 8000
- 已更改 VARNISH:两个域的 2 个后端配置
我想要什么:
- 最终我想要 2 个不同的域和几个子域,它们都需要自己的清漆缓存。
以下 Nginx 设置导致 Varnish 仅提供来自 existingdomain.com
的页面,即使请求了其他主机:
proxy_set_header Host existingdomain.com;
Varnish 使用 URL 和 hostname 来识别缓存中的对象。当您在 Nginx 中将主机名硬编码为 existingdomain.com
时,您将始终得到相同的内容。
请将此设置更改为以下值:
proxy_set header Host $hostname;
以下配置行:
proxy_set_header Host existingdomain.com;
为两个域发送相同的 Host:
header。
应该是:
proxy_set_header Host $host;
另一个答案提到 $hostname
变量是不正确的,因为它代表机器名称。而你想要 $host
因为这等于来自客户端的 Host:
header 的值。
# proxy the request through varnish before sending it to gunicorn.
location / {
proxy_pass http://127.0.0.1:6081;
}
您没有将任何细节传递给清漆。所以 Varnish 无法确定要使用哪个域。也许像
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass http://127.0.0.1:6081;
另外,就像其他人所说的那样,您需要更新要发送到的主机 gunicorn