# Docker Compose-based Elasticsearch, Logstash, Kibana Installation (Cluster + X-Pack Authentication + Curator)
## 0. Prerequisites
### Create the ELK directory layout
Create the following directory tree (a helper sketch follows the list):
- docker-elk (root)
  - kibana
    - config
  - logstash
    - config
    - pipeline
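
A minimal sketch for creating this layout, assuming the root lives at `/opt/SACP-INSTALL/docker-elk` as used by the commands later in this guide:
```bash
# Create the docker-elk directory skeleton
mkdir -p /opt/SACP-INSTALL/docker-elk/kibana/config \
         /opt/SACP-INSTALL/docker-elk/logstash/config \
         /opt/SACP-INSTALL/docker-elk/logstash/pipeline
```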
## 1. Write docker-compose.yml (before X-Pack authentication)
```yml
version: '3.7'
services:
  elastic01:
    user: elasticsearch
    image: docker.elastic.co/elasticsearch/elasticsearch:7.17.7
    container_name: es01
    environment:
      - node.name=es01-node
      - cluster.name=es-docker-cluster
      - cluster.initial_master_nodes=es01-node,es02-node,es03-node
      - discovery.seed_hosts=118.220.143.176:9301,118.220.143.176:9302
      - bootstrap.memory_lock=true
      #- 'ES_JAVA_OPTS=-Xms4g -Xmx4g'
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - network.host=0.0.0.0
      - node.master=true
      - node.data=true
      # - discovery.seed_hosts: ["es01-node:9300", "es02-node:9301", "es03-node:9302"]
      - http.port=9200
      - transport.tcp.port=9300
      - network.publish_host=118.220.143.175
      - ELASTIC_PASSWORD=changeme
      - path.repo=/usr/share/elasticsearch/backup
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - "~/elasticsearch/data01/data:/data"
      #- ./elastic-certificates.p12:/usr/share/elasticsearch/config/elastic-certificates.p12:Z
    ports:
      - 9200:9200
      - 9300:9300
    networks:
      - elastic
    restart: always
  logstash: # Logstash settings
    image: docker.elastic.co/logstash/logstash:7.17.7
    container_name: logstash7
    volumes: # configuration file paths
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z
    ports:
      - 5044:5044
      - 50000:50000/tcp
      - 50000:50000/udp
      - 9600:9600
    environment:
      LS_JAVA_OPTS: -Xms256m -Xmx256m
      LOGSTASH_INTERNAL_PASSWORD: changeme
    networks:
      - elastic
    depends_on:
      - elastic01
  kibana:
    image: docker.elastic.co/kibana/kibana:7.17.7
    container_name: kibana7
    volumes:
      - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
    ports:
      - 5601:5601
    restart: always
volumes:
  data01:
    driver: local
  data02:
    driver: local
  data03:
    driver: local
  backup:
    driver: local
networks:
  elastic:
    name: elastic
    driver: bridge
```
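Before moving on, the compose file can be sanity-checked without starting anything; `docker-compose config` prints the resolved configuration or reports a syntax error:
```bash
# Validate and print the resolved compose configuration
cd /opt/SACP-INSTALL/docker-elk/ && docker-compose config
```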
## 2. Write the remaining configuration files
### 1. logstash.yml
```yml
http.host: 0.0.0.0
node.name: logstash
```
### 2. kibana.yml
```yml
server.name: kibana
server.port: 5601
server.host: '0.0.0.0'
elasticsearch.username: elastic
elasticsearch.password: changeme
elasticsearch.hosts: ['http://118.220.143.175:9200','http://118.220.143.176:9201','http://118.220.143.176:9202']
xpack.security.enabled: true
#server.cors: true
#server.cors.origin: ['*']
```
### 3. logstash.conf
```conf
input {
  kafka {
    bootstrap_servers => "118.220.143.175:9091,118.220.143.176:9091,118.220.143.176:9092"
    topics => ["sacp2","sacp3","sacp4","sacp6"]
    consumer_threads => 1
    decorate_events => true
  }
  beats {
    port => 5044
  }
  tcp {
    port => 50000
  }
}
output {
  elasticsearch {
    index => "spring-%{[@metadata][kafka][topic]}-%{+YYYY-MM-dd}"
    user => "elastic"
    password => "changeme"
    hosts => ["118.220.143.175:9200","118.220.143.176:9201","118.220.143.176:9202"]
  }
}
```
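Once the containers from step 3 are running, the pipeline file can be syntax-checked without restarting anything. A sketch, assuming the container name `logstash7` and the mount path from the compose file above; `--path.data` points at a scratch directory so the test run does not collide with the running instance:
```bash
# Parse the pipeline configuration and exit without processing any events
docker exec logstash7 /usr/share/logstash/bin/logstash \
  --config.test_and_exit \
  -f /usr/share/logstash/pipeline/logstash.conf \
  --path.data /tmp/logstash-config-test
```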
## 3. Start the containers
```bash
# Move to the compose directory, then build and start the containers
cd /opt/SACP-INSTALL/docker-elk/ && docker-compose build && docker-compose up -d
# Check the container logs
docker logs es01

### Errors that may appear in the log
# Message: vm.max_map_count is too low, access denied "modifyThread" ..
# Fix max_map_count on the host (65530 -> 262144)
cat /proc/sys/vm/max_map_count
sysctl -w vm.max_map_count=262144
```
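The value written with `sysctl -w` does not survive a reboot; a sketch for making it persistent, assuming a host that reads drop-in files from `/etc/sysctl.d/`:
```bash
# Persist vm.max_map_count across reboots
echo 'vm.max_map_count=262144' > /etc/sysctl.d/99-elasticsearch.conf
# Reload all sysctl configuration files
sysctl --system
```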
## 4. Generate a CA for X-Pack
```bash
# Enter the container
docker exec -it es01 bash
# Generate the CA; enter the keystore password when prompted (e.g. todnRkd1%)
bin/elasticsearch-certutil ca
# Generate the node certificate; enter the truststore password when prompted (e.g. todnRkd1%)
bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12
# Check where the files were created
find / -name "elastic-stack-ca.p12"
--> /usr/share/elasticsearch/elastic-stack-ca.p12
find / -name "elastic-certificates.p12"
--> /usr/share/elasticsearch/elastic-certificates.p12
# Leave the container
exit
```
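If the interactive prompts get in the way of scripted installs, `elasticsearch-certutil` also accepts the output path and passwords as options. A non-interactive sketch using the same example password; verify the exact flags against `bin/elasticsearch-certutil --help` for your version:
```bash
# Non-interactive variant: generate the CA and the node certificate in one go
docker exec es01 bash -c '
  bin/elasticsearch-certutil ca --out elastic-stack-ca.p12 --pass "todnRkd1%" &&
  bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12 --ca-pass "todnRkd1%" \
    --out elastic-certificates.p12 --pass "todnRkd1%"
'
```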
## 5. Copy elastic-certificates.p12 out of the container and change its permissions
```bash
# Copy elastic-certificates.p12 out of the Elasticsearch container
docker cp es01:/usr/share/elasticsearch/elastic-certificates.p12 /opt/SACP-INSTALL/docker-elk/elastic-certificates.p12
# Change the permissions on elastic-certificates.p12
chmod 777 elastic-certificates.p12
```
## 6. Update docker-compose.yml (final version)
> **- The `user: elasticsearch` line matters (Elasticsearch must not run as root)**
> **- The `xpack.security.*` settings have been added**
```yml
version: '3.7'
services:
  elastic01:
    user: elasticsearch
    image: docker.elastic.co/elasticsearch/elasticsearch:7.17.7
    container_name: es01
    environment:
      - node.name=es01-node
      - cluster.name=es-docker-cluster
      - cluster.initial_master_nodes=es01-node,es02-node,es03-node
      - discovery.seed_hosts=118.220.143.176:9301,118.220.143.176:9302
      - bootstrap.memory_lock=true
      #- 'ES_JAVA_OPTS=-Xms4g -Xmx4g'
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - network.host=0.0.0.0
      - node.master=true
      - node.data=true
      # - discovery.seed_hosts: ["es01-node:9300", "es02-node:9301", "es03-node:9302"]
      - http.port=9200
      - transport.tcp.port=9300
      - network.publish_host=118.220.143.175
      - xpack.security.enabled=true
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.security.transport.ssl.client_authentication=required
      - xpack.security.transport.ssl.keystore.path=elastic-certificates.p12
      - xpack.security.transport.ssl.truststore.path=elastic-certificates.p12
      - xpack.security.transport.ssl.keystore.password=todnRkd1%
      - xpack.security.transport.ssl.truststore.password=todnRkd1%
      - ELASTIC_PASSWORD=changeme
      - path.repo=/usr/share/elasticsearch/backup
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - "~/elasticsearch/data01/data:/data"
      - ./elastic-certificates.p12:/usr/share/elasticsearch/config/elastic-certificates.p12:Z
      # - backup:/usr/share/elasticsearch/backup
    ports:
      - 9200:9200
      - 9300:9300
    networks:
      - elastic
    restart: always
  logstash: # Logstash settings
    image: docker.elastic.co/logstash/logstash:7.17.7
    container_name: logstash7
    volumes: # configuration file paths
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z
    ports:
      - 5044:5044
      - 50000:50000/tcp
      - 50000:50000/udp
      - 9600:9600
    environment:
      LS_JAVA_OPTS: -Xms256m -Xmx256m
      LOGSTASH_INTERNAL_PASSWORD: changeme
    networks:
      - elastic
    depends_on:
      - elastic01
  kibana:
    image: docker.elastic.co/kibana/kibana:7.17.7
    container_name: kibana7
    volumes:
      - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
    ports:
      - 5601:5601
    restart: always
volumes:
  data01:
    driver: local
  data02:
    driver: local
  data03:
    driver: local
  backup:
    driver: local
networks:
  elastic:
    name: elastic
    driver: bridge
```
## 7. Restart the containers
```bash
# Move to the compose directory, then build and start the containers
cd /opt/SACP-INSTALL/docker-elk/ && docker-compose build && docker-compose up -d
# Check the container logs
docker logs es01
```
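With X-Pack security now enabled, the HTTP API requires credentials. A quick check using the `elastic`/`changeme` credentials from the compose file; cluster health may stay unavailable until the remaining nodes from step 8 join, since three master-eligible nodes are declared:
```bash
# Unauthenticated requests should now be rejected with HTTP 401
curl -i http://118.220.143.175:9200
# Authenticated request using the built-in elastic user
curl -u elastic:changeme 'http://118.220.143.175:9200/_cluster/health?pretty'
```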
## 8. Work on the remaining server
### 1. Write the configuration files
#### 1. docker-compose.yml
```yml
version: '3.7'
services:
  elastic02:
    user: elasticsearch
    image: docker.elastic.co/elasticsearch/elasticsearch:7.17.7
    container_name: es02
    environment:
      - node.name=es02-node
      - cluster.name=es-docker-cluster
      - cluster.initial_master_nodes=es02-node,es03-node,es01-node
      - discovery.seed_hosts=118.220.143.175:9300,118.220.143.176:9302
      - bootstrap.memory_lock=true
      #- 'ES_JAVA_OPTS=-Xms4g -Xmx4g'
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - network.host=0.0.0.0
      - node.master=true
      - node.data=true
      # - discovery.zen.minimum_master_nodes=1
      - http.port=9200
      #- transport.publish_port=9201
      - transport.tcp.port=9301
      - network.publish_host=118.220.143.176
      - xpack.security.enabled=true
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.security.transport.ssl.client_authentication=required
      - xpack.security.transport.ssl.keystore.path=elastic-certificates.p12
      - xpack.security.transport.ssl.truststore.path=elastic-certificates.p12
      - xpack.security.transport.ssl.keystore.password=todnRkd1%
      - xpack.security.transport.ssl.truststore.password=todnRkd1%
      - ELASTIC_PASSWORD=changeme
      - path.repo=/usr/share/elasticsearch/backup
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - "~/elasticsearch/data02/data:/data"
      - ./elastic-certificates.p12:/usr/share/elasticsearch/config/elastic-certificates.p12:Z
      #- backup:/usr/share/elasticsearch/backup
    extra_hosts:
      - "es01:118.220.143.175"
      - "es02:118.220.143.176"
      - "es03:118.220.143.176"
    ports:
      - 9200:9200
      - 9301:9301
    networks:
      - elastic
    restart: always
  elastic03:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.17.7
    container_name: es03
    environment:
      - node.name=es03-node
      - cluster.name=es-docker-cluster
      - cluster.initial_master_nodes=es03-node,es01-node,es02-node
      - discovery.seed_hosts=118.220.143.175:9300,118.220.143.176:9301
      - bootstrap.memory_lock=true
      #- 'ES_JAVA_OPTS=-Xms4g -Xmx4g'
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - network.host=0.0.0.0
      - transport.tcp.port=9302
      - node.master=true
      - node.data=true
      # - discovery.zen.minimum_master_nodes=1
      - http.port=9201
      #- transport.publish_port=9202
      - network.publish_host=118.220.143.176
      - xpack.security.enabled=true
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.security.transport.ssl.client_authentication=required
      - xpack.security.transport.ssl.keystore.path=elastic-certificates.p12
      - xpack.security.transport.ssl.truststore.path=elastic-certificates.p12
      - xpack.security.transport.ssl.keystore.password=todnRkd1%
      - xpack.security.transport.ssl.truststore.password=todnRkd1%
      - ELASTIC_PASSWORD=changeme
      - path.repo=/usr/share/elasticsearch/backup
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - "~/elasticsearch/data03/data:/data"
      - ./elastic-certificates.p12:/usr/share/elasticsearch/config/elastic-certificates.p12:Z
      #- backup:/usr/share/elasticsearch/backup
    extra_hosts:
      - "es01:118.220.143.175"
      - "es02:118.220.143.176"
      - "es03:118.220.143.176"
    ports:
      - 9201:9201
      - 9302:9302
    networks:
      - elastic
    restart: always
  logstash:
    image: docker.elastic.co/logstash/logstash:7.17.7
    container_name: logstash7
    volumes:
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z
    ports:
      - 5044:5044
      - 50000:50000/tcp
      - 50000:50000/udp
      - 9600:9600
    environment:
      LS_JAVA_OPTS: -Xms256m -Xmx256m
      LOGSTASH_INTERNAL_PASSWORD: changeme
    networks:
      - elastic
    depends_on:
      - elastic02
      - elastic03
  # kibana:
  #   image: docker.elastic.co/kibana/kibana:8.5.2
  #   container_name: kibana7
  #   volumes:
  #     - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
  #   ports:
  #     - 5601:5601
  #   restart: always
volumes:
  data02:
    driver: local
  data03:
    driver: local
  backup:
    driver: local
networks:
  elastic:
    name: elastic
    driver: bridge
```
#### 2. logstash.yml
```yml
http.host: 0.0.0.0
node.name: logstash
```
#### 3. logstash.conf
```conf
input {
  kafka {
    bootstrap_servers => "118.220.143.175:9091,118.220.143.176:9091,118.220.143.176:9092"
    topics => ["sacp1","sacp2","sacp3","sacp4","sacp5","sacp6","sacp7", "sacp8", "asde"]
    consumer_threads => 3
    decorate_events => true
  }
  file {
    path => "/home/gmt/ASDE/logs/sendlog/*.log"
    type => "logs"
    start_position => "beginning"
  }
  beats {
    port => 5044
  }
  tcp {
    port => 50000
  }
}
filter {
  if [type] == "logs" {
    grok {
      match => { "path" => "/home/gmt/ASDE/logs/sendlog/%{GREEDYDATA:filename}\.log" }
    }
    mutate {
      add_field => { "[@metadata][field][file_name]" => "%{filename}" }
    }
  } else {
    json { source => "message" }
    ruby {
      code => "
        event.set('newfield', event.get('trgt_id')+','+event.get('cat_ty')+',')
      "
    }
    mutate {
      remove_field => [
        "trgt_id", "cat_ty",
        "@version", "@timestamp", "host", "path", "message" ]
    }
  }
}
output {
  if [@metadata][field] and [@metadata][field][file_name] {
    elasticsearch {
      index => "spring-%{[@metadata][field][file_name]}-2023-03-06"
      user => "elastic"
      password => "changeme"
      hosts => ["http://118.220.143.175:9200","http://118.220.143.176:9201","http://118.220.143.176:9200"]
    }
  } else {
    elasticsearch {
      index => "spring-%{[@metadata][kafka][topic]}-2023-03-06"
      user => "elastic"
      password => "changeme"
      hosts => ["http://118.220.143.175:9200","http://118.220.143.176:9201","http://118.220.143.176:9200"]
    }
  }
}
```
#### 4. kibana.yml
```yml
server.name: kibana
server.port: 5601
server.host: '0.0.0.0'
elasticsearch.username: elastic
elasticsearch.password: changeme
elasticsearch.hosts: ['http://118.220.143.175:9200','http://118.220.143.176:9201','http://118.220.143.176:9202']
xpack.security.enabled: true
server.cors: true
server.cors.origin: ['*']
```
### 2. Copy elastic-certificates.p12 from the main server (see the sketch below)
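A sketch of the copy, assuming SSH access from the second server to the main server; the `root` user and the install path are placeholders to adapt:
```bash
# Run on the second server (118.220.143.176): pull the certificate from the main server
scp root@118.220.143.175:/opt/SACP-INSTALL/docker-elk/elastic-certificates.p12 \
    /opt/SACP-INSTALL/docker-elk/elastic-certificates.p12
# Apply the same permission change as on the main server
chmod 777 /opt/SACP-INSTALL/docker-elk/elastic-certificates.p12
```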
### 3. Start the containers
```bash
# Move to the compose directory, then build and start the containers
cd /opt/SACP-INSTALL/docker-elk/ && docker-compose build && docker-compose up -d
# Check the container logs
docker logs es02
```
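Once es02 and es03 are up, all three nodes should join the cluster. A quick check against the main server's HTTP endpoint, using the `elastic`/`changeme` credentials:
```bash
# List the nodes that have joined (expect es01-node, es02-node and es03-node)
curl -u elastic:changeme 'http://118.220.143.175:9200/_cat/nodes?v'
# Cluster health should eventually report "green"
curl -u elastic:changeme 'http://118.220.143.175:9200/_cluster/health?pretty'
```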
## 9. Access Kibana
> http://118.220.143.175:5601/
> http://118.220.143.175:5601/app/dev_tools#/console
## 10. Install Curator
- [[python-pip 설치]]
- Install Curator (temporary; it should eventually be included in the Docker image)
```bash
# Download Curator (curator-7.0.zip)
https://github.com/elastic/curator/releases
# Check that the required Python modules are installed
pip list
# Expected modules: voluptuous, cx_Freeze, click, elasticsearch7
# Move into the extracted Curator directory
# Install Curator
python3 setup.py install
# Check the Curator version
curator --version
```
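Curator is driven by a client config (`curator.yml`) and an action file. Below is a minimal sketch of a retention job for the `spring-*` indices created by Logstash above; the file paths, the 30-day cutoff, and the assumption that a matching `curator.yml` already points at the cluster with the elastic credentials are all placeholders to adapt (the client-config schema differs between Curator versions, so follow the examples bundled with the release you downloaded):
```bash
# Hypothetical action file: delete spring-* indices older than 30 days
cat > /opt/curator/delete_old_spring_indices.yml <<'EOF'
actions:
  1:
    action: delete_indices
    description: Delete spring-* indices older than 30 days
    options:
      ignore_empty_list: True
    filters:
      - filtertype: pattern
        kind: prefix
        value: spring-
      - filtertype: age
        source: name
        direction: older
        timestring: '%Y-%m-%d'
        unit: days
        unit_count: 30
EOF
# Dry run first, then run for real
curator --config /opt/curator/curator.yml --dry-run /opt/curator/delete_old_spring_indices.yml
curator --config /opt/curator/curator.yml /opt/curator/delete_old_spring_indices.yml
```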