1. Proceed after installing Docker and Docker Compose
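
A quick sanity check that both tools are installed and the daemon is reachable (a minimal sketch):

# Check client and daemon versions (the Server section only appears when the daemon is reachable)
docker version
docker-compose --version
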
The ports below are already in use by the current development servers and must be opened/enabled (a firewall example follows the list).

Server 172 (118.220.143.172)
- 9000 - kafdrop
- 9091 - kafka1
- 2181 - zookeeper1
- 2888 - zookeeper1
- 3888 - zookeeper1

- 9200 - elasticsearch1
- 9300 - elasticsearch1
- 5044 - logstash
- 5601 - kibana

Server 173 (118.220.143.173)
- 9091 - kafka2
- 9092 - kafka3
- 2181 - zookeeper2
- 2888 - zookeeper2
- 3888 - zookeeper2
- 2182 - zookeeper3
- 22888 - zookeeper3
- 33888 - zookeeper3

- 9200 - elasticsearch2
- 9301 - elasticsearch2
- 9201 - elasticsearch3
- 9302 - elasticsearch3
- 5044 - logstash
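
If the development servers use firewalld, the server 172 ports can be opened as below (a sketch only; the actual firewall in use may differ, and the port list must be adjusted for server 173):

# The shell expands the braces into one --add-port option per port
sudo firewall-cmd --permanent --add-port={9000,9091,2181,2888,3888,9200,9300,5044,5601}/tcp
sudo firewall-cmd --reload

# Confirm the rules took effect
sudo firewall-cmd --list-ports
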
2. Kafka Compose (zookeeper, kafka, kafdrop)

1) docker-compose.yml currently in place on server 172: /opt/docker-compose.yml (kafdrop is optional)
Run the command: docker-compose build && docker-compose up -d

version: '3.8'

services:
  zookeeper1:
    image: confluentinc/cp-zookeeper:latest
    restart: unless-stopped
    hostname: zookeeper1
    container_name: zookeeper1
    extra_hosts:
      - "datanode1:118.220.143.172"
      - "datanode2:118.220.143.173"
      - "datanode3:118.220.143.173"
    environment: # reference: https://devocean.sk.com/blog/techBoardDetail.do?ID=164016
      ZOOKEEPER_SERVER_ID: 1
      ZOOKEEPER_CLIENT_PORT: 2181 # TCP port clients use to connect to the ZooKeeper service
      ZOOKEEPER_TICK_TIME: 2000 # base tick time used for synchronization
      ZOOKEEPER_INIT_LIMIT: 5
      ZOOKEEPER_SYNC_LIMIT: 2
      ZOOKEEPER_SERVERS: zookeeper1:2888:3888;datanode2:2888:3888;datanode3:22888:33888
      ZOOKEEPER_MAX_CLIENT_CNXNS: 60
      #ZOOKEEPER_DATA_DIR: /var/lib/zookeeper1
      #LOG_DIR: /var/log/zookeeper1
    volumes:
      - "~/zk-cluster/zookeeper1/data:/data"

    #network_mode: host
    networks:
      - kafka-cluster-net
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"

  kafka1:
    image: confluentinc/cp-kafka:latest
    restart: unless-stopped
    hostname: kafka1
    container_name: kafka1
    extra_hosts:
      - "datanode1:118.220.143.172"
      - "datanode2:118.220.143.173"
      - "datanode3:118.220.143.173"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9091
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper1:2181,datanode2:2181,datanode3:2182"
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://118.220.143.172:9091 # where security matters, use SSL or SASL instead of PLAINTEXT
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
      KAFKA_NUM_PARTITIONS: 1 # default partition count for new topics; this determines topic throughput, so choose a value that fits the workload
      KAFKA_LOG4J_LOGGERS: "kafka.authorizer.logger=INFO"
      KAFKA_LOG4J_ROOT_LOGLEVEL: INFO
      #KAFKA_DATA_DIR: /var/lib/kafka
      KAFKA_LOG_RETENTION_HOURS: 168 # retention period, in hours, for all log files Kafka keeps
      #LOG_DIR: /var/log/kafka

    depends_on:
      - zookeeper1
    #network_mode: host
    networks:
      - kafka-cluster-net
    ports:
      - "9091:9091"

  kafdrop:
    image: obsidiandynamics/kafdrop:latest
    hostname: kafdrop
    container_name: kafdrop
    extra_hosts:
      - "datanode1:118.220.143.172"
      - "datanode2:118.220.143.173"
      - "datanode3:118.220.143.173"
    environment:
      - KAFKA_BROKERCONNECT=datanode1:9091,datanode2:9091,datanode3:9092
    depends_on:
      - kafka1
    networks:
      - kafka-cluster-net
    ports:
      - "9000:9000"

networks:
  kafka-cluster-net:
    name: kafka-cluster-net
    driver: bridge
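
After bringing the stack up on server 172, a quick check that the broker answers (a sketch; kafka-topics ships inside the confluentinc/cp-kafka image):

# All containers should show as "Up"
docker-compose ps

# List topics through the kafka1 container (its listener is on 9091)
docker exec -it kafka1 kafka-topics --bootstrap-server localhost:9091 --list

# Kafdrop UI, if enabled: http://118.220.143.172:9000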

2) docker-compose.yml currently in place on server 173: /opt/docker-compose.yml

version: '3.8'

services:
  zookeeper2:
    image: confluentinc/cp-zookeeper:latest
    restart: unless-stopped
    hostname: zookeeper2
    container_name: zookeeper2
    extra_hosts:
      - "datanode1:118.220.143.172"
      - "datanode2:118.220.143.173"
      - "datanode3:118.220.143.173"
    environment:
      ZOOKEEPER_SERVER_ID: 2
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
      ZOOKEEPER_INIT_LIMIT: 5
      ZOOKEEPER_SYNC_LIMIT: 2
      ZOOKEEPER_SERVERS: datanode1:2888:3888;zookeeper2:2888:3888;zookeeper3:22888:33888
      ZOOKEEPER_MAX_CLIENT_CNXNS: 60
      #ZOOKEEPER_DATA_DIR: /var/lib/zookeeper2
      #LOG_DIR: /var/log/zookeeper2
    volumes:
      - "~/zk-cluster/zookeeper2/data:/data"
    #network_mode: host
    networks:
      - kafka-cluster-net
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"

  zookeeper3:
    image: confluentinc/cp-zookeeper:latest
    restart: unless-stopped
    hostname: zookeeper3
    container_name: zookeeper3
    extra_hosts:
      - "datanode1:118.220.143.172"
      - "datanode2:118.220.143.173"
      - "datanode3:118.220.143.173"
    environment:
      ZOOKEEPER_SERVER_ID: 3
      ZOOKEEPER_CLIENT_PORT: 2182
      ZOOKEEPER_TICK_TIME: 2000
      ZOOKEEPER_INIT_LIMIT: 5
      ZOOKEEPER_SYNC_LIMIT: 2
      ZOOKEEPER_SERVERS: datanode1:2888:3888;zookeeper2:2888:3888;zookeeper3:22888:33888
      ZOOKEEPER_MAX_CLIENT_CNXNS: 60
      #ZOOKEEPER_DATA_DIR: /var/lib/zookeeper2
      #LOG_DIR: /var/log/zookeeper2
    volumes:
      - "~/zk-cluster/zookeeper3/data:/data"
    #network_mode: host
    networks:
      - kafka-cluster-net
    ports:
      - "2182:2182"
      - "22888:22888"
      - "33888:33888"

  kafka2:
    image: confluentinc/cp-kafka:latest
    restart: unless-stopped
    hostname: kafka2
    container_name: kafka2
    extra_hosts:
      - "datanode1:118.220.143.172"
      - "datanode2:118.220.143.173"
      - "datanode3:118.220.143.173"
    environment:
      KAFKA_BROKER_ID: 2
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9091
      KAFKA_ZOOKEEPER_CONNECT: "datanode1:2181,zookeeper2:2181,zookeeper3:2182"
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://118.220.143.173:9091
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
      KAFKA_NUM_PARTITIONS: 1
      KAFKA_LOG4J_LOGGERS: "kafka.authorizer.logger=INFO"
      KAFKA_LOG4J_ROOT_LOGLEVEL: INFO
      KAFKA_DATA_DIR: ~/kafka2
      #KAFKA_LOG_RETENTION_HOURS: 168
      #LOG_DIR: /var/log/kafka

    depends_on:
      - zookeeper2
      - zookeeper3
    #network_mode: host
    networks:
      - kafka-cluster-net
    ports:
      - "9091:9091"

  kafka3:
    image: confluentinc/cp-kafka:latest
    restart: unless-stopped
    hostname: kafka3
    container_name: kafka3
    extra_hosts:
      - "datanode1:118.220.143.172"
      - "datanode2:118.220.143.173"
      - "datanode3:118.220.143.173"
    environment:
      KAFKA_BROKER_ID: 3
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      KAFKA_ZOOKEEPER_CONNECT: "datanode1:2181,zookeeper2:2181,zookeeper3:2182"
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://118.220.143.173:9092
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
      KAFKA_NUM_PARTITIONS: 1
      KAFKA_LOG4J_LOGGERS: "kafka.authorizer.logger=INFO"
      KAFKA_LOG4J_ROOT_LOGLEVEL: INFO
      #KAFKA_DATA_DIR: /var/lib/kafka
      #KAFKA_LOG_RETENTION_HOURS: 168
      #LOG_DIR: /var/log/kafka

    depends_on:
      - zookeeper2
      - zookeeper3
    #network_mode: host
    networks:
      - kafka-cluster-net
    ports:
      - "9092:9092"

networks:
  kafka-cluster-net:
    name: kafka-cluster-net
    driver: bridge
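
Once both servers are running, creating a topic with replication factor 3 confirms that all three brokers joined the same cluster (a sketch; the topic name cluster-check is only an example):

# Any broker address works for bootstrap; run from either server
docker exec -it kafka2 kafka-topics --bootstrap-server 118.220.143.172:9091 \
  --create --topic cluster-check --partitions 3 --replication-factor 3

# Broker ids 1, 2 and 3 should all appear in the replica lists
docker exec -it kafka2 kafka-topics --bootstrap-server 118.220.143.172:9091 \
  --describe --topic cluster-check
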
3. ELK Compose

1) docker-compose.yml on development server 172: /opt/docker-test/Docker-compose.yml (ELK version 7.17.7 is installed on the development servers)
- Example of issuing the Elastic X-Pack certificates: https://llnote.tistory.com/681 (a sketch follows this list)
- Run the command: docker-compose build && docker-compose up -d
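
A minimal sketch of generating the elastic-certificates.p12 keystore used below with elasticsearch-certutil (the throwaway container and output paths are illustrative; the keystore password matches the compose files, and the mounted directory must be writable by the container user; see the linked article for the full procedure):

# Generate a CA and a node certificate inside a temporary 7.17.7 container
docker run --rm -it -v "$PWD:/certs" docker.elastic.co/elasticsearch/elasticsearch:7.17.7 \
  bash -c "bin/elasticsearch-certutil ca --out /certs/elastic-stack-ca.p12 --pass '' && \
           bin/elasticsearch-certutil cert --ca /certs/elastic-stack-ca.p12 --ca-pass '' \
             --out /certs/elastic-certificates.p12 --pass 'todnRkd1%'"

# Copy elastic-certificates.p12 next to the compose file on each server and
# uncomment the ./elastic-certificates.p12 bind mount in the services below.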

version: '3.7'
services:
  elastic01:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.17.7
    container_name: es01
    environment:
      - node.name=es01-node
      - cluster.name=es-docker-cluster
      - cluster.initial_master_nodes=es01-node,es02-node,es03-node
      - discovery.seed_hosts=118.220.143.173:9301,118.220.143.173:9302
      - bootstrap.memory_lock=true
      #- 'ES_JAVA_OPTS=-Xms4g -Xmx4g'
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - network.host=0.0.0.0
      - node.master=true
      - node.data=true
      # - discovery.seed_hosts: ["es01-node:9300", "es02-node:9301", "es03-node:9302"]
      - http.port=9200
      - transport.tcp.port=9300
      - network.publish_host=118.220.143.172
      - xpack.security.enabled=true
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.security.transport.ssl.client_authentication=required
      - xpack.security.transport.ssl.keystore.path=elastic-certificates.p12
      - xpack.security.transport.ssl.truststore.path=elastic-certificates.p12
      - xpack.security.transport.ssl.keystore.password=todnRkd1%
      - xpack.security.transport.ssl.truststore.password=todnRkd1%
      - ELASTIC_PASSWORD=changeme
      - path.repo=/usr/share/elasticsearch/backup

    ulimits: # enable memory locking
      memlock:
        soft: -1
        hard: -1
    volumes:
      - "~/elasticsearch/data01/data:/data"
      # - ./elastic-certificates.p12:/usr/share/elasticsearch/config/elastic-certificates.p12:Z # when the certificate is available
      # - backup:/usr/share/elasticsearch/backup
    ports:
      - 9200:9200
      - 9300:9300
    networks:
      - elastic
    restart: always

  logstash: # Logstash settings
    image: docker.elastic.co/logstash/logstash:7.17.7
    container_name: logstash7
    volumes: # configuration file paths
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z # host config file : path inside the Logstash container
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z
    ports:
      - 5044:5044
      - 50000:50000/tcp
      - 50000:50000/udp
      - 9600:9600
    environment:
      LS_JAVA_OPTS: -Xms256m -Xmx256m
      LOGSTASH_INTERNAL_PASSWORD: changeme
    networks:
      - elastic
    depends_on:
      - elastic01

  kibana:
    image: docker.elastic.co/kibana/kibana:7.17.7
    container_name: kibana7
    volumes:
      - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
    ports:
      - 5601:5601
    restart: always

volumes:
  data01:
    driver: local
  data02:
    driver: local
  data03:
    driver: local
  backup:
    driver: local

networks:
  elastic:
    name: elastic
    driver: bridge
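
With X-Pack security enabled, a quick health check against es01 using the bootstrap credentials from the compose file (the cluster usually stays yellow/red until the nodes on server 173 join):

curl -u elastic:changeme 'http://118.220.143.172:9200/_cluster/health?pretty'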

2) docker-compose.yml on development server 173: /opt/docker-elk/docker-compose.yml

version: '3.7'
services:
  elastic02:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.17.7
    container_name: es02
    environment:
      - node.name=es02-node
      - cluster.name=es-docker-cluster
      - cluster.initial_master_nodes=es02-node,es03-node,es01-node
      - discovery.seed_hosts=118.220.143.172:9300,118.220.143.173:9302
      - bootstrap.memory_lock=true
      #- 'ES_JAVA_OPTS=-Xms4g -Xmx4g'
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - network.host=0.0.0.0
      - node.master=true
      - node.data=true
      - http.port=9200
      #- transport.publish_port=9201
      - transport.tcp.port=9301
      - network.publish_host=118.220.143.173
      - xpack.security.enabled=true
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.security.transport.ssl.client_authentication=required
      - xpack.security.transport.ssl.keystore.path=elastic-certificates.p12
      - xpack.security.transport.ssl.truststore.path=elastic-certificates.p12
      - xpack.security.transport.ssl.keystore.password=todnRkd1%
      - xpack.security.transport.ssl.truststore.password=todnRkd1%
      - ELASTIC_PASSWORD=changeme
      - path.repo=/usr/share/elasticsearch/backup

    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - "~/elasticsearch/data02/data:/data"
      # - ./elastic-certificates.p12:/usr/share/elasticsearch/config/elastic-certificates.p12:Z
      # - backup:/usr/share/elasticsearch/backup
    extra_hosts:
      - "es01:118.220.143.172"
      - "es02:118.220.143.173"
      - "es03:118.220.143.173"
    ports:
      - 9200:9200
      - 9301:9301
    networks:
      - elastic
    restart: always

  elastic03:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.17.7
    container_name: es03
    environment:
      - node.name=es03-node
      - cluster.name=es-docker-cluster
      - cluster.initial_master_nodes=es03-node,es01-node,es02-node
      - discovery.seed_hosts=118.220.143.172:9300,118.220.143.173:9301
      - bootstrap.memory_lock=true
      #- 'ES_JAVA_OPTS=-Xms4g -Xmx4g'
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - network.host=0.0.0.0
      - transport.tcp.port=9302
      - node.master=true
      - node.data=true
      - http.port=9201
      #- transport.publish_port=9202
      - network.publish_host=118.220.143.173
      - xpack.security.enabled=true
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.security.transport.ssl.client_authentication=required
      - xpack.security.transport.ssl.keystore.path=elastic-certificates.p12
      - xpack.security.transport.ssl.truststore.path=elastic-certificates.p12
      - xpack.security.transport.ssl.keystore.password=todnRkd1%
      - xpack.security.transport.ssl.truststore.password=todnRkd1%
      - ELASTIC_PASSWORD=changeme
      - path.repo=/usr/share/elasticsearch/backup

    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - "~/elasticsearch/data03/data:/data"
      # - ./elastic-certificates.p12:/usr/share/elasticsearch/config/elastic-certificates.p12:Z
      # - backup:/usr/share/elasticsearch/backup
    extra_hosts:
      - "es01:118.220.143.172"
      - "es02:118.220.143.173"
      - "es03:118.220.143.173"
    ports:
      - 9201:9201
      - 9302:9302
    networks:
      - elastic
    restart: always

  logstash:
    image: docker.elastic.co/logstash/logstash:7.17.7
    container_name: logstash7
    volumes:
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z
    ports:
      - 5044:5044
      - 50000:50000/tcp
      - 50000:50000/udp
      - 9600:9600
    environment:
      LS_JAVA_OPTS: -Xms256m -Xmx256m
      LOGSTASH_INTERNAL_PASSWORD: changeme
    networks:
      - elastic
    depends_on:
      - elastic02
      - elastic03

volumes:
  data02:
    driver: local
  data03:
    driver: local
  backup:
    driver: local

networks:
  elastic:
    name: elastic
    driver: bridge
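
After both ELK compose files are up, all three Elasticsearch nodes should be visible from any host (a sketch):

# Expect es01-node, es02-node and es03-node in the output
curl -u elastic:changeme 'http://118.220.143.172:9200/_cat/nodes?v'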

4. Logstash configuration

1) logstash.yml

http.host: 0.0.0.0

node.name: logstash

2) logstash.conf
Write the Logstash pipeline configuration as follows:

input {
  kafka {
    bootstrap_servers => "118.220.143.172:9091,118.220.143.173:9091,118.220.143.173:9092"
    topics => ["sacp1","sacp2","sacp3","sacp4","sacp5","sacp6","sacp7", "sacp8", "asde"]
    consumer_threads => 3
    # consumer_threads sets how many consumer threads read from the topics; too many threads
    # with too little memory causes load problems, so 1-4 threads is typical
    decorate_events => true # adds Kafka metadata, used by the filter/output sections
  }

  # the file input section still needs further verification
  file {
    path => "/home/gmt/ASDE/logs/sendlog/*.log"
    type => "logs"
    start_position => "beginning"
  }

  #beats {
  #  port => 5044
  #}

  tcp {
    port => 50000
  }
}

filter {
  if [type] == "logs" {
    grok {
      match => { "path" => "/home/gmt/ASDE/logs/sendlog/%{GREEDYDATA:filename}\.log" }
    }
    mutate {
      add_field => { "file_name" => "%{filename}" }
    }
  }
  else {
    # the data serialization part still needs revision (parse as JSON, transform the
    # data with ruby, then remove the original fields)

    json { source => "message" }
    ruby {
      code => "
        event.set('newfield', event.get('trgt_id')+','+event.get('cat_ty')+',')
      "
    }

    mutate {
      remove_field => [
        "trgt_id", "cat_ty",
        "@version", "@timestamp", "host", "path", "message" ]
    }
  }
}

output {
  # Logstash does not allow conditionals inside a plugin block, so the index choice
  # is made by wrapping the elasticsearch output in an if/else on the file_name field.
  if [file_name] {
    elasticsearch {
      index => "spring-%{[file_name]}-2023-03-06"
      user => "elastic"
      password => "changeme"
      hosts => ["118.220.143.172:9200","118.220.143.173:9201","118.220.143.173:9202"]
    }
  } else {
    elasticsearch {
      index => "spring-%{[@metadata][kafka][topic]}-2023-03-06"
      user => "elastic"
      password => "changeme"
      hosts => ["118.220.143.172:9200","118.220.143.173:9201","118.220.143.173:9202"]
    }
  }
}
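
An end-to-end smoke test (a sketch): produce one JSON message to a subscribed topic and check that the matching index appears. The trgt_id and cat_ty fields are included because the ruby filter above reads them.

# Produce a test message from the kafka1 container on server 172
docker exec -it kafka1 bash -c \
  "echo '{\"trgt_id\":\"T1\",\"cat_ty\":\"A\"}' | \
   kafka-console-producer --bootstrap-server localhost:9091 --topic sacp1"

# A spring-sacp1-* index should appear shortly afterwards
curl -u elastic:changeme 'http://118.220.143.172:9200/_cat/indices/spring-*?v'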

5. Kibana configuration (kibana.yml)

server.name: kibana
server.port: 5601
server.host: '0.0.0.0'
elasticsearch.username: elastic
elasticsearch.password: changeme

elasticsearch.hosts: ['http://118.220.143.172:9200','http://118.220.143.173:9200','http://118.220.143.173:9201']
xpack.security.enabled: true

server.cors: true
server.cors.origin: ['*']
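
Once the container is up, the status API gives a quick liveness check before logging in through the browser (a sketch using the same elastic/changeme credentials):

curl -u elastic:changeme 'http://118.220.143.172:5601/api/status'
# Or open http://118.220.143.172:5601 and log in as elastic / changeme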