Passing from manual Docker host network to Docker Compose bridge
I have two Docker images, a Modbus server and a client, which I was running manually with docker run --network host server (and the same for the client), and everything worked perfectly. But now I need to put them into a docker-compose file where the network is a bridge, which I did like this:
autoserver:
  image: 19mikel95/pymodmikel:autoserversynchub
  container_name: autoserver
  restart: unless-stopped

clientperf:
  image: 19mikel95/pymodmikel:reloadcomp
  container_name: clientperf
  restart: unless-stopped
  depends_on:
    - autoserver
  links:
    - "autoserver:server"
I read that to refer from one container to another (client to server) I have to use the service name defined in the docker-compose YML (autoserver), and that is what I did. In the Python file executed by the client (performance.py from pymodbus) I changed 'localhost' to:
host = 'autoserver'
client = ModbusTcpClient(host, port=5020)
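(For reference: on a Compose bridge network, Docker's embedded DNS resolves each service name to its container IP, so a quick way to sanity-check that resolution from inside the client container is a snippet like the following, where dns_check.py is only an illustrative name:)

import socket

# Run inside the clientperf container, e.g.:
#   docker-compose exec clientperf python3 dns_check.py
# This should print the IP of the autoserver container on the bridge network.
print(socket.gethostbyname('autoserver'))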
However, I get this error:
[ERROR/MainProcess] failed to run test successfully
Traceback (most recent call last):
  File "performance.py", line 72, in single_client_test
    client.read_holding_registers(10, 1, unit=1)
  File "/usr/lib/python3/dist-packages/pymodbus/client/common.py", line 114, in read_holding_registers
    return self.execute(request)
  File "/usr/lib/python3/dist-packages/pymodbus/client/sync.py", line 107, in execute
    raise ConnectionException("Failed to connect[%s]" % (self.__str__()))
pymodbus.exceptions.ConnectionException: Modbus Error: [Connection] Failed to connect[ModbusTcpClient(autoserver:5020)]
As requested, my full docker-compose YML is this:
version: '2.1'

networks:
  monitor-net:
    driver: bridge

volumes:
  prometheus_data: {}
  grafana_data: {}

services:
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    volumes:
      - ./prometheus:/etc/prometheus
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--storage.tsdb.retention.time=200h'
      - '--web.enable-lifecycle'
    restart: unless-stopped
    expose:
      - 9090
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  alertmanager:
    image: prom/alertmanager:latest
    container_name: alertmanager
    volumes:
      - ./alertmanager:/etc/alertmanager
    command:
      - '--config.file=/etc/alertmanager/config.yml'
      - '--storage.path=/alertmanager'
    restart: unless-stopped
    expose:
      - 9093
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  nodeexporter:
    image: prom/node-exporter:latest
    container_name: nodeexporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - c:\:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.rootfs=/rootfs'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)'
    restart: unless-stopped
    expose:
      - 9100
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  cadvisor:
    image: gcr.io/google-containers/cadvisor:latest
    container_name: cadvisor
    volumes:
      - c:\:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker:/var/lib/docker:ro
      #- /cgroup:/cgroup:ro # doesn't work on MacOS, only on Linux
    restart: unless-stopped
    expose:
      - 8080
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    volumes:
      - grafana_data:/var/lib/grafana
      - ./grafana/provisioning:/etc/grafana/provisioning
    environment:
      - GF_SECURITY_ADMIN_USER=${ADMIN_USER:-admin}
      - GF_SECURITY_ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin}
      - GF_USERS_ALLOW_SIGN_UP=false
    restart: unless-stopped
    expose:
      - 3000
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  pushgateway:
    image: prom/pushgateway:latest
    container_name: pushgateway
    restart: unless-stopped
    expose:
      - 9091
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  caddy:
    image: stefanprodan/caddy
    container_name: caddy
    ports:
      - "3000:3000"
      - "9090:9090"
      - "9093:9093"
      - "9091:9091"
    volumes:
      - ./caddy:/etc/caddy
    environment:
      - ADMIN_USER=${ADMIN_USER:-admin}
      - ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin}
    restart: unless-stopped
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  autoserver:
    image: 19mikel95/pymodmikel:autoserversynchub
    container_name: autoserver
    ports:
      - "5020:5020"
    restart: unless-stopped
    networks:
      - monitor-net

  clientperf:
    image: 19mikel95/pymodmikel:reloadcomp
    container_name: clientperf
    restart: unless-stopped
    networks:
      - monitor-net
    depends_on:
      - autoserver
    links:
      - "autoserver:server"
You could try using an environment variable such as AUTO_SERVER_HOST and referring to it in your code:
version: '2.1'

networks:
  monitor-net:
    driver: bridge

volumes:
  prometheus_data: {}
  grafana_data: {}

services:
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    volumes:
      - ./prometheus:/etc/prometheus
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--storage.tsdb.retention.time=200h'
      - '--web.enable-lifecycle'
    restart: unless-stopped
    expose:
      - 9090
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  alertmanager:
    image: prom/alertmanager:latest
    container_name: alertmanager
    volumes:
      - ./alertmanager:/etc/alertmanager
    command:
      - '--config.file=/etc/alertmanager/config.yml'
      - '--storage.path=/alertmanager'
    restart: unless-stopped
    expose:
      - 9093
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  nodeexporter:
    image: prom/node-exporter:latest
    container_name: nodeexporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - c:\:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.rootfs=/rootfs'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)'
    restart: unless-stopped
    expose:
      - 9100
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  cadvisor:
    image: gcr.io/google-containers/cadvisor:latest
    container_name: cadvisor
    volumes:
      - c:\:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker:/var/lib/docker:ro
      #- /cgroup:/cgroup:ro # doesn't work on MacOS, only on Linux
    restart: unless-stopped
    expose:
      - 8080
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    volumes:
      - grafana_data:/var/lib/grafana
      - ./grafana/provisioning:/etc/grafana/provisioning
    environment:
      - GF_SECURITY_ADMIN_USER=${ADMIN_USER:-admin}
      - GF_SECURITY_ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin}
      - GF_USERS_ALLOW_SIGN_UP=false
    restart: unless-stopped
    expose:
      - 3000
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  pushgateway:
    image: prom/pushgateway:latest
    container_name: pushgateway
    restart: unless-stopped
    expose:
      - 9091
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  caddy:
    image: stefanprodan/caddy
    container_name: caddy
    ports:
      - "3000:3000"
      - "9090:9090"
      - "9093:9093"
      - "9091:9091"
    volumes:
      - ./caddy:/etc/caddy
    environment:
      - ADMIN_USER=${ADMIN_USER:-admin}
      - ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin}
    restart: unless-stopped
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  autoserver:
    image: 19mikel95/pymodmikel:autoserversynchub
    container_name: autoserver
    ports:
      - "5020:5020"
    restart: unless-stopped
    networks:
      - monitor-net

  clientperf:
    image: 19mikel95/pymodmikel:reloadcomp
    container_name: clientperf
    restart: unless-stopped
    networks:
      - monitor-net
    depends_on:
      - autoserver
    environment:
      - AUTO_SERVER_HOST=autoserver
Then read the environment variable like this:
import os

host = os.environ['AUTO_SERVER_HOST']
client = ModbusTcpClient(host, port=5020)
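A slightly more defensive variant falls back to defaults when the variables are unset (a sketch; the AUTO_SERVER_PORT variable and the localhost fallback are assumptions, useful when running the script outside Compose):

import os

from pymodbus.client.sync import ModbusTcpClient

# Fall back to localhost:5020 when the variables are not set,
# so the same script still works outside of docker-compose.
host = os.environ.get('AUTO_SERVER_HOST', 'localhost')
port = int(os.environ.get('AUTO_SERVER_PORT', '5020'))
client = ModbusTcpClient(host, port=port)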
The problem is in the sincserver.py file of the autoserver image, in StartTcpServer(context, identity=identity, address=("localhost", 5020)). Binding to localhost makes the TCP server accept connections only from inside its own container. It should be replaced with 0.0.0.0 so the server accepts external requests on that port, as in the sketch below.
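A sketch of the corrected call in sincserver.py (assuming the pymodbus 2.x synchronous server API that the traceback paths suggest; context and identity are the objects already built in that file):

from pymodbus.server.sync import StartTcpServer

# Bind to all interfaces so other containers on the bridge network can connect.
StartTcpServer(context, identity=identity, address=("0.0.0.0", 5020))

Patching the file with sed at container start, as below, is a quick workaround; rebuilding the image with the fixed address would be the cleaner permanent fix.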
The following Docker Compose demonstrates it (the sed -i 's|localhost|0.0.0.0|g' sincserver.py replaces the hostname):
version: '2.1'

services:
  autoserver:
    image: 19mikel95/pymodmikel:autoserversynchub
    command: sh -c "
      sed -i 's|localhost|0.0.0.0|g' sincserver.py;
      python3 sincserver.py daemon off
      "
    ports:
      - "5020:5020"
    restart: unless-stopped

  clientperf:
    image: 19mikel95/pymodmikel:reloadcomp
    restart: unless-stopped
    depends_on:
      - autoserver
Run:
docker-compose up -d
docker-compose logs -f clientperf
And you will see logs like:
clientperf_1 | [DEBUG/MainProcess] 574 requests/second
clientperf_1 | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.7410697999875993 seconds
clientperf_1 | [DEBUG/MainProcess] 692 requests/second
clientperf_1 | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.4434449000109453 seconds
clientperf_1 | [DEBUG/MainProcess] 708 requests/second
clientperf_1 | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.4116760999895632 seconds
clientperf_1 | [DEBUG/MainProcess] 890 requests/second
clientperf_1 | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.1230684999900404 seconds
clientperf_1 | [DEBUG/MainProcess] 803 requests/second
clientperf_1 | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.2450218999874778 seconds
clientperf_1 | [DEBUG/MainProcess] 753 requests/second
clientperf_1 | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.3274328999978025 seconds
clientperf_1 | [DEBUG/MainProcess] 609 requests/second
clientperf_1 | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.6399398999928962 seconds