Prometheus scrapes particular microservice metrics from several containers in Docker Swarm

I am using Grafana and Prometheus in a Docker Swarm environment. I have set up docker-compose.yml and prometheus.yml to scrape metrics from the node exporters and from my microservices.

My question is how to scrape metrics from a microservice when it runs across multiple containers.

When I look at Grafana, I can only see the output of one container, not of all the containers.

docker-compose.yml
version: '3.8'

networks:
  monitoring:

volumes:
  #prometheus_data: {}
  prometheus_data:
    driver: local
    driver_opts:
      type: nfs
      o: addr=pvevm26,rw
      device: :/srv/nexusshare-mnt/metrics/prometheus_data/
  grafana_data: {}
configs:
  metrics_prometheus_v1.conf:
    external: true
  metrics_grafana_ini_v1.conf:
    external: true

services:
#####################################################
# Prometheus
#####################################################
  prometheus:
    image: prom/prometheus:latest
    configs:
      - source: metrics_prometheus_v1.conf
        target: /etc/prometheus/prometheus.yml
    volumes:
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.no-lockfile'
    ports:
      - 9090:9090
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      placement:
        constraints:
          # Run on any worker node
          - node.role == worker
      resources:
        limits:
          cpus: '2'
          memory: 1G
    logging:
      driver: "json-file"
      options:
        max-size: "10M"
        max-file: "1"
    networks:
      - monitoring
#####################################################
# Node-exporter
# For each node a separate service needs to be added
#####################################################
  docker-s1-exporter:
    image: prom/node-exporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc' 
      - '--path.sysfs=/host/sys'
      - --collector.filesystem.ignored-mount-points
      - "^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)"
    ports:
      - 9101:9100
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      placement:
        constraints:
          # Hostname of the node!
          - node.hostname == Docker-s1
          - node.platform.os == linux
      resources:
        limits:
          cpus: '0.5'
          memory: 128M
    logging:
      driver: "json-file"
      options:
        max-size: "10M"
        max-file: "1"
    networks:
      - monitoring
  
  docker-s2-exporter:
    image: prom/node-exporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc' 
      - '--path.sysfs=/host/sys'
      - --collector.filesystem.ignored-mount-points
      - "^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)"
    ports:
      - 9102:9100
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      placement:
        constraints:
          # Hostname of the node!
          - node.hostname == Docker-s2
      resources:
        limits:
          cpus: '0.5'
          memory: 128M
    logging:
      driver: "json-file"
      options:
        max-size: "10M"
        max-file: "1"
    networks:
      - monitoring
  
  docker-s3-exporter:
    image: prom/node-exporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc' 
      - '--path.sysfs=/host/sys'
      - --collector.filesystem.ignored-mount-points
      - "^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)"
    ports:
      - 9103:9100
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      placement:
        constraints:
          # Hostname of the node!
          - node.hostname == Docker-s3
      resources:
        limits:
          cpus: '0.5'
          memory: 128M
    logging:
      driver: "json-file"
      options:
        max-size: "10M"
        max-file: "1"
    networks:
      - monitoring

  docker-s4-exporter:
    image: prom/node-exporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc' 
      - '--path.sysfs=/host/sys'
      - --collector.filesystem.ignored-mount-points
      - "^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)"
    ports:
      - 9104:9100
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      placement:
        constraints:
          # Hostname of the node!
          - node.hostname == Docker-s4
      resources:
        limits:
          cpus: '0.5'
          memory: 128M
    logging:
      driver: "json-file"
      options:
        max-size: "10M"
        max-file: "1"
    networks:
      - monitoring
  
  docker-s5-exporter:
    image: prom/node-exporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc' 
      - '--path.sysfs=/host/sys'
      - --collector.filesystem.ignored-mount-points
      - "^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)"
    ports:
      - 9105:9100
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      placement:
        constraints:
          # Hostname of the node!
          - node.hostname == Docker-s5
      resources:
        limits:
          cpus: '0.5'
          memory: 128M
    logging:
      driver: "json-file"
      options:
        max-size: "10M"
        max-file: "1"
    networks:
      - monitoring

  docker-s6-exporter:
    image: prom/node-exporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc' 
      - '--path.sysfs=/host/sys'
      - --collector.filesystem.ignored-mount-points
      - "^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)"
    ports:
      - 9106:9100
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      placement:
        constraints:
          # Hostname of the node!
          - node.hostname == Docker-s6
      resources:
        limits:
          cpus: '0.5'
          memory: 128M
    logging:
      driver: "json-file"
      options:
        max-size: "10M"
        max-file: "1"
    networks:
      - monitoring
#####################################################
# Grafana
#####################################################
  grafana:
    image: grafana/grafana:latest
    # image: grafana/grafana:8.2.6
    depends_on:
      - prometheus
    volumes:
      - grafana_data:/var/lib/grafana
    ports:
      - 3000:3000
    configs:
      - source: metrics_grafana_ini_v1.conf
        target: /etc/grafana/grafana.ini
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      resources:
        limits:
          cpus: '2'
          memory: 1G
    logging:
      driver: "json-file"
      options:
        max-size: "10M"
        max-file: "1"
    networks:
      - monitoring
prometheus.yml
global:
- job_name: 'my-service-job'
  dns_sd_configs:
   - names: ['tasks.my-service-name']
     scrape_interval: 15s
     type: 'A'
     port: 80

Update:
I found that I should use dns_sd_config in prometheus.yml, like here, but I get an error.
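For what it's worth, the error is probably structural: the job has to live under scrape_configs rather than global, and scrape_interval belongs at the global or job level, not inside dns_sd_configs. A minimal sketch of that layout, reusing the service name and port from the config above, might look like this:

global:
  scrape_interval: 15s

scrape_configs:
  - job_name: 'my-service-job'
    dns_sd_configs:
      # tasks.<service> resolves to the IPs of all running tasks of the service,
      # so every replica is scraped individually instead of the load-balanced VIP
      - names: ['tasks.my-service-name']
        type: 'A'
        port: 80

Note that for tasks.my-service-name to resolve, the Prometheus service and the microservice have to share an overlay network (the monitoring network in the stack above, assuming the microservice is attached to it).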

What you probably want is for the Prometheus instance in the swarm to scrape the individual services (rather than the load-balanced endpoint), and then federate that data up to the main Prometheus setup.

This blog post has a good explanation of how to do it.
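
As a rough illustration of the federation part, the main Prometheus outside the swarm could pull the already-scraped series from the swarm-internal Prometheus via its /federate endpoint. The target address swarm-node:9090 and the match[] selector below are placeholders for your own setup:

scrape_configs:
  - job_name: 'federate-swarm'
    honor_labels: true
    metrics_path: '/federate'
    params:
      'match[]':
        # pull only the series collected by the swarm-side job
        - '{job="my-service-job"}'
    static_configs:
      # any swarm node should work, since port 9090 is published by the stack above
      - targets: ['swarm-node:9090']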