How to keep all the settings configured even after restarting a machine with confluent kafka docker-compose configured?

This is the docker-compose file I use for my Kafka and ksqlDB setup:

---
version: '2'
services:
  zookeeper:
    image: confluentinc/cp-zookeeper:6.2.0
    hostname: zookeeper
    container_name: zookeeper
    ports:
      - "2181:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000

  broker:
    image: confluentinc/cp-server:6.2.0
    hostname: broker
    container_name: broker
    depends_on:
      - zookeeper
    ports:
      - "9092:9092"
      - "9101:9101"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_JMX_PORT: 9101
      KAFKA_JMX_HOSTNAME: localhost
      KAFKA_CONFLUENT_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: broker:29092
      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
      CONFLUENT_METRICS_ENABLE: 'true'
      CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'      

  schema-registry:
    image: confluentinc/cp-schema-registry:6.2.0
    hostname: schema-registry
    container_name: schema-registry
    depends_on:
      - broker
    ports:
      - "8081:8081"
    environment:
      SCHEMA_REGISTRY_HOST_NAME: schema-registry
      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'broker:29092'
      SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081


  connect:
    image: confluentinc/kafka-connect-datagen:latest
    build:
      context: .
      dockerfile: Dockerfile
      extra_hosts:
        host.docker.internal: host-gateway
    extra_hosts:
      - "host.docker.internal:host-gateway"
    hostname: connect
    container_name: connect
    depends_on:
      - broker
      - schema-registry
    ports:
      - "8083:8083"
    environment:
      CONNECT_BOOTSTRAP_SERVERS: 'broker:29092'
      CONNECT_REST_ADVERTISED_HOST_NAME: connect
      CONNECT_REST_PORT: 8083
      CONNECT_GROUP_ID: compose-connect-group
      CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000
      CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
      CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      # CLASSPATH required due to CC-2422
      CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-6.2.0.jar
      CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
      CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
      CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
      CONNECT_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.I0Itec.zkclient=ERROR,org.reflections=ERROR


  control-center:
    image: confluentinc/cp-enterprise-control-center:6.2.0
    hostname: control-center
    container_name: control-center
    depends_on:
      - broker
      - schema-registry
      - connect
    ports:
      - "9021:9021"
    environment:
      CONTROL_CENTER_BOOTSTRAP_SERVERS: 'broker:29092'
      CONTROL_CENTER_CONNECT_CONNECT-DEFAULT_CLUSTER: 'connect:8083'
      CONTROL_CENTER_KSQL_KSQLDB1_URL: "http://ksqldb-server:8088"
      CONTROL_CENTER_KSQL_KSQLDB1_ADVERTISED_URL: "http://localhost:8088"
      CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
      CONTROL_CENTER_REPLICATION_FACTOR: 1
      CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1
      CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1
      CONFLUENT_METRICS_TOPIC_REPLICATION: 1
      PORT: 9021
      

  ksqldb-server:
    image: confluentinc/cp-ksqldb-server:6.2.0
    hostname: ksqldb-server
    container_name: ksqldb-server
    depends_on:
      - broker
      - connect
    ports:
      - "8088:8088"
    environment:
      KSQL_CONFIG_DIR: "/etc/ksql"
      KSQL_BOOTSTRAP_SERVERS: "broker:29092"
      KSQL_HOST_NAME: ksqldb-server
      KSQL_LISTENERS: "http://0.0.0.0:8088"
      KSQL_CACHE_MAX_BYTES_BUFFERING: 0
      KSQL_KSQL_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
      KSQL_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
      KSQL_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
      KSQL_KSQL_CONNECT_URL: "http://connect:8083"
      KSQL_KSQL_LOGGING_PROCESSING_TOPIC_REPLICATION_FACTOR: 1
      KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: 'true'
      KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: 'true'


  ksqldb-cli:
    image: confluentinc/cp-ksqldb-cli:6.2.0
    container_name: ksqldb-cli
    depends_on:
      - broker
      - connect
      - ksqldb-server
    entrypoint: /bin/sh
    tty: true
    
    
  rest-proxy:
    image: confluentinc/cp-kafka-rest:6.2.0
    depends_on:
      - broker
      - schema-registry
    ports:
      - 8082:8082
    hostname: rest-proxy
    container_name: rest-proxy
    environment:
      KAFKA_REST_HOST_NAME: rest-proxy
      KAFKA_REST_BOOTSTRAP_SERVERS: 'broker:29092'
      KAFKA_REST_LISTENERS: "http://0.0.0.0:8082"
      KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'

This is the Kafka Connect Dockerfile I use for custom SMT operations:

FROM confluentinc/cp-kafka-connect-base:6.2.0

RUN confluent-hub install --no-prompt mongodb/kafka-connect-mongodb:1.5.1 \
   && confluent-hub install --no-prompt debezium/debezium-connector-mysql:1.5.0 \
   && confluent-hub install --no-prompt confluentinc/kafka-connect-jdbc:10.2.0 \
   && confluent-hub install --no-prompt debezium/debezium-connector-mongodb:1.5.0 \
   && confluent-hub install --no-prompt confluentinc/kafka-connect-elasticsearch:11.0.6 \
   && confluent-hub install --no-prompt confluentinc/connect-transforms:latest \
   && confluent-hub install --no-prompt redhatinsights/expandjsonsmt:0.0.7
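
Once the image is built and the connect container is up, a quick sanity check for these installs is the standard Connect REST API plugin listing (port 8083 as mapped in the compose file):

curl -s http://localhost:8083/connector-plugins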

I've added MySQL as my source connector and Elasticsearch as the sink connector, and I'm using ksqlDB to handle all the transformation work. These are the commands I ran in ksqlDB.

After docker-compose up --build succeeds, I run this command to get into ksqlDB:

docker exec -it ksqldb-cli ksql http://ksqldb-server:8088

Once inside ksqlDB, I run the following statements, in the same order as shown here:

CREATE SOURCE CONNECTOR `source_mysql_connector` WITH(<CONFIG>);

SET 'auto.offset.reset' = 'earliest';

CREATE STREAM candidate_input WITH (KAFKA_TOPIC='test.AF.candidate', KEY_FORMAT='JSON', VALUE_FORMAT='AVRO');
              
CREATE STREAM candidate_target WITH (KAFKA_TOPIC='test.AF.candidate.target', KEY_FORMAT='JSON', VALUE_FORMAT='AVRO') AS SELECT * FROM candidate_input;

SET 'auto.offset.reset' = 'earliest';

-- sink connector
CREATE SINK CONNECTOR `sink_elasticsearch` WITH (<CONFIG>);
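
At the same prompt, SHOW CONNECTORS; and SHOW STREAMS; list everything just created, which makes it easy to compare state before and after a restart:

SHOW CONNECTORS;
SHOW STREAMS;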

Assume I've configured all of this on my machine and everything is working: I can see data streaming from my MySQL source through to the Elasticsearch sink.
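
As a quick check, the Connect REST API on port 8083 confirms both connectors (named as in the statements above) and their tasks are RUNNING:

# list registered connectors
curl -s http://localhost:8083/connectors

# per-connector health, including task state
curl -s http://localhost:8083/connectors/source_mysql_connector/status
curl -s http://localhost:8083/connectors/sink_elasticsearch/status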

The problem is that when I shut down my computer and turn it back on, all the containers are down.

So I ran docker-compose up again to bring all the services back.

But now, when I open Control Center to check the topics and ksqlDB streams I created, all the connectors have been deleted.

Is there any way to keep everything intact after each teardown?

Docker volumes are ephemeral by default, so this is expected behavior.

You need to mount host volumes for at least the Kafka and Zookeeper containers.

For example:

# zookeeper
  volumes:
    - /some/host/path/zk:/var/lib/zookeeper/data
# broker
  volumes:
    - /some/host/path/kafka:/var/lib/kafka/data
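
A minimal sketch of how those mounts fit into the services above (the host paths are illustrative; the container paths are the data directories the cp-zookeeper and cp-kafka images write to):

  zookeeper:
    # ... existing config ...
    volumes:
      - ./data/zookeeper/data:/var/lib/zookeeper/data
      - ./data/zookeeper/log:/var/lib/zookeeper/log

  broker:
    # ... existing config ...
    volumes:
      - ./data/kafka/data:/var/lib/kafka/data

Persisting the broker's data directory is what keeps your connectors and streams: Connect stores its configuration in the docker-connect-configs/offsets/status topics, and ksqlDB replays its command topic on startup, so once the underlying topic data survives a restart, both recreate their state automatically. Note also that docker-compose stop / docker-compose start preserves the existing containers, while docker-compose down removes them together with any unmounted data.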