2. Kafka Compose (zookeeper, kafka, kafdrop)

1) docker-compose.yml currently deployed on server 172 at /opt/docker-compose.yml (kafdrop is optional)
   Start it with: docker-compose build && docker-compose up -d

  version: '3.8'

  services:
    zookeeper1:
      image: confluentinc/cp-zookeeper:latest
      restart: unless-stopped
      hostname: zookeeper1
      container_name: zookeeper1
      extra_hosts:
        - "datanode1:118.220.143.172"
        - "datanode2:118.220.143.173"
        - "datanode3:118.220.143.173"
      environment: # reference: https://devocean.sk.com/blog/techBoardDetail.do?ID=164016
        ZOOKEEPER_SERVER_ID: 1
        ZOOKEEPER_CLIENT_PORT: 2181 # TCP port clients use to connect to the ZooKeeper service
        ZOOKEEPER_TICK_TIME: 2000 # base tick time used for synchronization
        ZOOKEEPER_INIT_LIMIT: 5
        ZOOKEEPER_SYNC_LIMIT: 2
        ZOOKEEPER_SERVERS: zookeeper1:2888:3888;datanode2:2888:3888;datanode3:22888:33888
        ZOOKEEPER_MAX_CLIENT_CNXNS: 60
        #ZOOKEEPER_DATA_DIR: /var/lib/zookeeper1
        #LOG_DIR: /var/log/zookeeper1
      volumes:
        - "~/zk-cluster/zookeeper1/data:/data"
      #network_mode: host
      networks:
        - kafka-cluster-net
      ports:
        - "2181:2181"
        - "2888:2888"
        - "3888:3888"

    kafka1:
      image: confluentinc/cp-kafka:latest
      restart: unless-stopped
      hostname: kafka1
      container_name: kafka1
      extra_hosts:
        - "datanode1:118.220.143.172"
        - "datanode2:118.220.143.173"
        - "datanode3:118.220.143.173"
      environment:
        KAFKA_BROKER_ID: 1
        KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9091
        KAFKA_ZOOKEEPER_CONNECT: "zookeeper1:2181,datanode2:2181,datanode3:2182"
        KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://118.220.143.172:9091 # where security matters, use SSL or SASL instead of PLAINTEXT
        KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
        KAFKA_NUM_PARTITIONS: 1 # default partition count for new topics; it determines topic throughput, so pick a value that fits the workload
        KAFKA_LOG4J_LOGGERS: "kafka.authorizer.logger=INFO"
        KAFKA_LOG4J_ROOT_LOGLEVEL: INFO
        #KAFKA_DATA_DIR: /var/lib/kafka
        KAFKA_LOG_RETENTION_HOURS: 168 # retention period, in hours, for all log files Kafka keeps
        #LOG_DIR: /var/log/kafka

      depends_on:
        - zookeeper1
      #network_mode: host
      networks:
        - kafka-cluster-net
      ports:
        - "9091:9091"

    kafdrop:
      image: obsidiandynamics/kafdrop:latest
      hostname: kafdrop
      container_name: kafdrop
      extra_hosts:
        - "datanode1:118.220.143.172"
        - "datanode2:118.220.143.173"
        - "datanode3:118.220.143.173"
      environment:
        - KAFKA_BROKERCONNECT=datanode1:9091,datanode2:9091,datanode3:9092
      depends_on:
        - kafka1
      networks:
        - kafka-cluster-net
      ports:
        - "9000:9000"

  networks:
    kafka-cluster-net:
      name: kafka-cluster-net
      driver: bridge
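   Once the stack on 172 is up, a quick way to confirm the broker is reachable is to run the Kafka CLI from inside the kafka1 container. This is a minimal smoke test, not part of the original file: the topic name "smoke-test" is only an example, and the replicated-topic step assumes all three brokers (9091 on both hosts plus 9092 on 173) are already running so a replication factor of 3 can be satisfied.

     # list existing topics through the local listener
     docker exec kafka1 kafka-topics --bootstrap-server localhost:9091 --list

     # create a replicated test topic and describe it to check leader/replica placement
     docker exec kafka1 kafka-topics --bootstrap-server localhost:9091 \
       --create --topic smoke-test --partitions 3 --replication-factor 3
     docker exec kafka1 kafka-topics --bootstrap-server localhost:9091 --describe --topic smoke-test

   If kafdrop is enabled, the same topic should appear in its web UI at http://118.220.143.172:9000.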
2) docker-compose.yml currently deployed on server 173 at /opt/docker-compose.yml

  version: '3.8'

  services:
    zookeeper2:
      image: confluentinc/cp-zookeeper:latest
      restart: unless-stopped
      hostname: zookeeper2
      container_name: zookeeper2
      extra_hosts:
        - "datanode1:118.220.143.172"
        - "datanode2:118.220.143.173"
        - "datanode3:118.220.143.173"
      environment:
        ZOOKEEPER_SERVER_ID: 2
        ZOOKEEPER_CLIENT_PORT: 2181
        ZOOKEEPER_TICK_TIME: 2000
        ZOOKEEPER_INIT_LIMIT: 5
        ZOOKEEPER_SYNC_LIMIT: 2
        ZOOKEEPER_SERVERS: datanode1:2888:3888;zookeeper2:2888:3888;zookeeper3:22888:33888
        ZOOKEEPER_MAX_CLIENT_CNXNS: 60
        #ZOOKEEPER_DATA_DIR: /var/lib/zookeeper2
        #LOG_DIR: /var/log/zookeeper2
      volumes:
        - "~/zk-cluster/zookeeper2/data:/data"
      #network_mode: host
      networks:
        - kafka-cluster-net
      ports:
        - "2181:2181"
        - "2888:2888"
        - "3888:3888"

    zookeeper3:
      image: confluentinc/cp-zookeeper:latest
      restart: unless-stopped
      hostname: zookeeper3
      container_name: zookeeper3
      extra_hosts:
        - "datanode1:118.220.143.172"
        - "datanode2:118.220.143.173"
        - "datanode3:118.220.143.173"
      environment:
        ZOOKEEPER_SERVER_ID: 3
        ZOOKEEPER_CLIENT_PORT: 2182
        ZOOKEEPER_TICK_TIME: 2000
        ZOOKEEPER_INIT_LIMIT: 5
        ZOOKEEPER_SYNC_LIMIT: 2
        ZOOKEEPER_SERVERS: datanode1:2888:3888;zookeeper2:2888:3888;zookeeper3:22888:33888
        ZOOKEEPER_MAX_CLIENT_CNXNS: 60
        #ZOOKEEPER_DATA_DIR: /var/lib/zookeeper3
        #LOG_DIR: /var/log/zookeeper3
      volumes:
        - "~/zk-cluster/zookeeper3/data:/data"
      #network_mode: host
      networks:
        - kafka-cluster-net
      ports:
        - "2182:2182"
        - "22888:22888"
        - "33888:33888"

    kafka2:
      image: confluentinc/cp-kafka:latest
      restart: unless-stopped
      hostname: kafka2
      container_name: kafka2
      extra_hosts:
        - "datanode1:118.220.143.172"
        - "datanode2:118.220.143.173"
        - "datanode3:118.220.143.173"
      environment:
        KAFKA_BROKER_ID: 2
        KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9091
        KAFKA_ZOOKEEPER_CONNECT: "datanode1:2181,zookeeper2:2181,zookeeper3:2182"
        KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://118.220.143.173:9091
        KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
        KAFKA_NUM_PARTITIONS: 1
        KAFKA_LOG4J_LOGGERS: "kafka.authorizer.logger=INFO"
        KAFKA_LOG4J_ROOT_LOGLEVEL: INFO
        KAFKA_DATA_DIR: ~/kafka2
        #KAFKA_LOG_RETENTION_HOURS: 168
        #LOG_DIR: /var/log/kafka

      depends_on:
        - zookeeper2
        - zookeeper3
      #network_mode: host
      networks:
        - kafka-cluster-net
      ports:
        - "9091:9091"

    kafka3:
      image: confluentinc/cp-kafka:latest
      restart: unless-stopped
      hostname: kafka3
      container_name: kafka3
      extra_hosts:
        - "datanode1:118.220.143.172"
        - "datanode2:118.220.143.173"
        - "datanode3:118.220.143.173"
      environment:
        KAFKA_BROKER_ID: 3
        KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
        KAFKA_ZOOKEEPER_CONNECT: "datanode1:2181,zookeeper2:2181,zookeeper3:2182"
        KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://118.220.143.173:9092
        KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
        KAFKA_NUM_PARTITIONS: 1
        KAFKA_LOG4J_LOGGERS: "kafka.authorizer.logger=INFO"
        KAFKA_LOG4J_ROOT_LOGLEVEL: INFO
        #KAFKA_DATA_DIR: /var/lib/kafka
        #KAFKA_LOG_RETENTION_HOURS: 168
        #LOG_DIR: /var/log/kafka

      depends_on:
        - zookeeper2
        - zookeeper3
      #network_mode: host
      networks:
        - kafka-cluster-net
      ports:
        - "9092:9092"

  networks:
    kafka-cluster-net:
      name: kafka-cluster-net
      driver: bridge
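   After both servers are up, it is worth checking that all three brokers registered with the ZooKeeper ensemble and that messages flow end to end. The commands below are a suggested check, not part of the original runbook; they run on server 173, reuse the container names above, and assume the "smoke-test" topic from the earlier check exists (any existing topic works).

     # broker IDs 1, 2 and 3 should be listed once every broker has joined the ensemble
     docker exec kafka2 zookeeper-shell zookeeper2:2181 ls /brokers/ids

     # produce one record via kafka2 and read it back through kafka3
     echo "hello" | docker exec -i kafka2 kafka-console-producer --bootstrap-server localhost:9091 --topic smoke-test
     docker exec kafka3 kafka-console-consumer --bootstrap-server localhost:9092 \
       --topic smoke-test --from-beginning --max-messages 1

   Note that the clients bootstrap against the local listener but then follow the advertised listeners (118.220.143.172/173), so those addresses must be reachable from inside the containers.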