ELK/ElasticSearch

Building an ELK + H2 stack with docker-compose

by 앵과장 2023. 12. 27. 09:38

 

Installing Elasticsearch, Logstash, Kibana, H2, and the Nori plugin on a local Mac

 

Components installed locally
Elasticsearch 8.x.x
Logstash 8.x.x
Kibana 8.x.x

Install Rancher Desktop

1. Install Rancher Desktop

https://rancherdesktop.io/

 

Rancher Desktop provides container management to build, push, and pull images and run containers, using the same container runtime as Kubernetes; built images are immediately available to your local workloads without any pushing, pulling, or copying. (rancherdesktop.io)

Download the installer for your OS.

2. Install the Docker and Docker Compose CLIs

$ brew install docker
$ brew install docker-compose
$ brew install docker-credential-helper
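
You can quickly confirm the CLIs are on your PATH by printing their versions (the exact output depends on your install):

$ docker --version
$ docker-compose --version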

Install docker-elk

 

A repository that can be installed with docker-compose:

 

Components installed locally
Elasticsearch 8.x.x
Logstash 8.x.x
Kibana 8.x.x
H2 2.1.214
Nori analysis Elasticsearch plugin (analysis-nori)

https://github.com/lswteen/docker-elk

 

GitHub - lswteen/docker-elk: The Elastic stack (ELK) powered by Docker and Compose.
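
Clone the repository and move into it (assuming git is installed):

$ git clone https://github.com/lswteen/docker-elk.git
$ cd docker-elk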

 

version: '3.7'

services:

  # The 'setup' service runs a one-off script which initializes users inside
  # Elasticsearch — such as 'logstash_internal' and 'kibana_system' — with the
  # values of the passwords defined in the '.env' file. It also creates the
  # roles required by some of these users.
  #
  # This task only needs to be performed once, during the *initial* startup of
  # the stack. Any subsequent run will reset the passwords of existing users to
  # the values defined inside the '.env' file, and the built-in roles to their
  # default permissions.
  #
  # By default, it is excluded from the services started by 'docker compose up'
  # due to the non-default profile it belongs to. To run it, either provide the
  # '--profile=setup' CLI flag to Compose commands, or "up" the service by name
  # such as 'docker compose up setup'.
  setup:
    profiles:
      - setup
    build:
      context: setup/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    init: true
    volumes:
      - ./setup/entrypoint.sh:/entrypoint.sh:ro,Z
      - ./setup/lib.sh:/lib.sh:ro,Z
      - ./setup/roles:/roles:ro,Z
    environment:
      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
      LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
      KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
      METRICBEAT_INTERNAL_PASSWORD: ${METRICBEAT_INTERNAL_PASSWORD:-}
      FILEBEAT_INTERNAL_PASSWORD: ${FILEBEAT_INTERNAL_PASSWORD:-}
      HEARTBEAT_INTERNAL_PASSWORD: ${HEARTBEAT_INTERNAL_PASSWORD:-}
      MONITORING_INTERNAL_PASSWORD: ${MONITORING_INTERNAL_PASSWORD:-}
      BEATS_SYSTEM_PASSWORD: ${BEATS_SYSTEM_PASSWORD:-}
    networks:
      - elk
    depends_on:
      - elasticsearch
      - kibana
      - h2
      - logstash

  elasticsearch:
    build:
      context: elasticsearch/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro,Z
      - elasticsearch:/usr/share/elasticsearch/data:Z
    ports:
      - 9200:9200
      - 9300:9300
    environment:
      node.name: elasticsearch
      ES_JAVA_OPTS: -Xms512m -Xmx512m
      # Bootstrap password.
      # Used to initialize the keystore during the initial startup of
      # Elasticsearch. Ignored on subsequent runs.
      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
      # Use single node discovery in order to disable production mode and avoid bootstrap checks.
      # see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
      discovery.type: single-node
    networks:
      - elk
    restart: unless-stopped

  logstash:
    build:
      context: logstash/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z
      - ./logstash/jdbc/h2-2.1.214.jar:/usr/share/logstash/jdbc/h2-2.1.214.jar:ro,Z  # Added volume for the H2 JDBC driver
    ports:
      - 5044:5044
      - 50000:50000/tcp
      - 50000:50000/udp
      - 9600:9600
    environment:
      LS_JAVA_OPTS: -Xms256m -Xmx256m
      LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
    networks:
      - elk
    depends_on:
      - elasticsearch
    restart: unless-stopped

  kibana:
    build:
      context: kibana/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro,Z
    ports:
      - 5601:5601
    environment:
      KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
    networks:
      - elk
    depends_on:
      - elasticsearch
    restart: unless-stopped

  h2:
    container_name: h2
    image: oscarfonts/h2:latest
    ports:
      - 1521:1521
      - 8081:81
    environment:
      H2_OPTIONS: -ifNotExists
    volumes:
      - ./h2/opt/h2-data/:/opt/h2-data
    networks:
      - elk
    restart: unless-stopped

networks:
  elk:
    driver: bridge

volumes:
  elasticsearch:

.env

ELASTIC_VERSION=8.6.2
H2_VERSION=2.1.214
 
## Passwords for stack users
#
 
# User 'elastic' (built-in)
#
# Superuser role, full access to cluster management and data indices.
# https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html
ELASTIC_PASSWORD='jk1234'
 
# User 'logstash_internal' (custom)
#
# The user Logstash uses to connect and send data to Elasticsearch.
# https://www.elastic.co/guide/en/logstash/current/ls-security.html
LOGSTASH_INTERNAL_PASSWORD='jk1234'
 
# User 'kibana_system' (built-in)
#
# The user Kibana uses to connect and communicate with Elasticsearch.
# https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html
KIBANA_SYSTEM_PASSWORD='jk1234'
 
# Users 'metricbeat_internal', 'filebeat_internal' and 'heartbeat_internal' (custom)
#
# The users Beats use to connect and send data to Elasticsearch.
# https://www.elastic.co/guide/en/beats/metricbeat/current/feature-roles.html
METRICBEAT_INTERNAL_PASSWORD='jk1234'
FILEBEAT_INTERNAL_PASSWORD='jk1234'
HEARTBEAT_INTERNAL_PASSWORD='jk1234'
 
# User 'monitoring_internal' (custom)
#
# The user Metricbeat uses to collect monitoring data from stack components.
# https://www.elastic.co/guide/en/elasticsearch/reference/current/how-monitoring-works.html
MONITORING_INTERNAL_PASSWORD='jk1234'
 
# User 'beats_system' (built-in)
#
# The user the Beats use when storing monitoring information in Elasticsearch.
# https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html
BEATS_SYSTEM_PASSWORD='jk1234'

 

Elasticsearch

 

volumes:

Each "-" entry maps one path, and multiple mappings can be listed.
A path in the docker-elk project directory is mapped to a path inside the Docker container, with the two sides separated by ":".

 

Two volume mappings are configured:
the elasticsearch.yml configuration file and the data directory.


volumes:

- ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro,Z
- elasticsearch:/usr/share/elasticsearch/data:Z

ports:
Defines which ports are published, in host:container format.
The container-side port is used for server-to-server communication on the internal Docker network,
while the host-side port is the segment that external clients connect through.

environment:
Elasticsearch settings are defined here:
node name, JVM memory, password, discovery type, and any other settings you need.

restart:
Defines the restart policy applied when the Docker container bootstraps.
no: never restart.
always: always restart, regardless of exit code.
on-failure: restart only when the exit code is non-zero.
unless-stopped: keep restarting until the container is manually stopped.
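
The elasticsearch service is built from the elasticsearch/ context, which is also where the Nori analyzer from the install list would be added. A minimal Dockerfile sketch, assuming the stock docker-elk layout (the actual file in the repository may differ):

ARG ELASTIC_VERSION
# Official image as the base, pinned to the version from .env
FROM docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION}
# Install the Korean morphological analyzer plugin (Nori)
RUN elasticsearch-plugin install --batch analysis-nori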

 

Logstash

 

Logstash Pipeline

This is an RDBMS (H2) → Elasticsearch pipeline, so the JDBC driver jar for the RDBMS is required.
For databases such as MySQL, MSSQL, and MariaDB that need a JDBC driver library,
you must make the driver available for the pipeline to reference.


volumes:

- ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z
- ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z
- ./logstash/jdbc/h2-2.1.214.jar:/usr/share/logstash/jdbc/h2-2.1.214.jar:ro,Z # Added volume for the H2 JDBC driver

For Logstash, pipeline definitions are .conf files placed under ./logstash/pipeline;
they are registered automatically while Logstash initializes, and each pipeline is processed according to the input, filter, and output sections defined in its .conf file.
Internally, Logstash uses a queue to process events.
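
A rough sketch of such a pipeline, reading product rows from H2 with the jdbc input and indexing them into Elasticsearch. The database name, credentials, table, and index name are assumptions for illustration; the driver path matches the volume mounted above:

input {
  jdbc {
    # Driver jar mounted by docker-compose
    jdbc_driver_library => "/usr/share/logstash/jdbc/h2-2.1.214.jar"
    jdbc_driver_class => "org.h2.Driver"
    # Hypothetical H2 connection over the 'elk' network
    jdbc_connection_string => "jdbc:h2:tcp://h2:1521/test"
    jdbc_user => "sa"
    jdbc_password => ""
    # Hypothetical table producing the productId/name/price documents shown later
    statement => "SELECT product_id AS productId, name, price FROM product"
    schedule => "* * * * *"
  }
}

output {
  elasticsearch {
    hosts => "elasticsearch:9200"
    user => "logstash_internal"
    password => "${LOGSTASH_INTERNAL_PASSWORD}"
    index => "products"
  }
}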

 

Kibana

 

Kibana, configured against Elasticsearch and optionally extended with plugins,
provides the statistics, aggregations, graphs, logs, node monitoring, Dev Tools, and other features needed around the search engine.

ports:
5601 is the default port.

depends_on:
If you run multiple Elasticsearch containers, list the names of the Elasticsearch containers here.
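
The mounted kibana.yml is what points Kibana at Elasticsearch. Roughly, assuming the stock docker-elk configuration, it looks like this:

server.name: kibana
server.host: 0.0.0.0
elasticsearch.hosts: [ http://elasticsearch:9200 ]
elasticsearch.username: kibana_system
elasticsearch.password: ${KIBANA_SYSTEM_PASSWORD}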

 

H2

 

To demonstrate the RDBMS-to-Logstash flow with a sample, the image's built-in settings are used as-is.

As of 2023-11-13, the oscarfonts/h2 Docker image tagged latest is version 2.1.214.

ports:
1521 is the H2 TCP server port (used for JDBC connections, e.g. from Logstash).
Host port 8081 is mapped to the container's web console port 81.

volumes:
./h2/opt/h2-data is mapped to /opt/h2-data inside the container,
which is where the H2 data files are created.

 

Resetting the Docker-Compose ELK + H2 stack

 

Resetting Docker (if a previous installation exists)

Elasticsearch + Logstash + Kibana + H2

The reset steps are covered first; if this is your first installation, you can skip straight to the setup.

Resetting the docker-compose containers

If docker-elk was installed previously, or the installation went wrong, you can remove it as follows.


> docker-compose down -v
Run the command from the directory containing docker-compose.yml.
It removes the Docker containers as well as the volumes.

If the volume is not removed, a reinstall will not work correctly, so be sure to include the "-v" option.
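
To double-check that the data volume is gone, you can list the remaining volumes (the volume is normally named after the Compose project directory, e.g. docker-elk_elasticsearch):

> docker volume ls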

 

Checking processes

> docker ps
You can confirm that no Elasticsearch, Logstash, Kibana, or H2 containers are running.

 

Checking Docker images

> docker images
If you want to change the version of a previously installed ELK stack, you must delete the images as well.
List them with docker images and delete the ones you no longer need.

 

Deleting Docker images

Delete the four docker-elk-* images.

> docker rmi <IMAGE ID or REPOSITORY>
You can delete by image ID or by repository name.

On a successful deletion, the output looks like this:
(base) renzo@MI-1-renzo1980 docker-elk % docker rmi docker-elk-kibana
Untagged: docker-elk-kibana:latest

If a dependent container still exists:

 

"rm" 으로 해당 ID를 삭제 후 image를 삭제요청하면 처리됩니다.

(base) renzo@MI-1-renzo1980 docker-elk % docker rmi docker-elk-setup
Error response from daemon: conflict: unable to remove repository reference "docker-elk-setup" (must force) - container 78b1541c5773 is using its referenced image 6ac55eb84553

 

> docker rm 78b1541c5773
> docker rmi 6ac55eb84553

Once everything is removed, you can confirm that the images defined in docker-compose.yml will be built from a clean slate.

 

 

ELK Docker Setup (Elasticsearch + Logstash + Kibana + H2)

 

docker-compose up setup

 

> docker-compose up setup

> docker images
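
After the one-off setup service has finished, the rest of the stack can be started in the background and checked with standard Compose commands (exact output depends on your environment):

> docker-compose up -d
> docker-compose ps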

 

 

Elasticsearch : localhost:9200
{
  "name" : "elasticsearch",
  "cluster_name" : "docker-cluster",
  "cluster_uuid" : "u_6uudUSRbimQvqZvCN8QA",
  "version" : {
    "number" : "8.6.2",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "2d58d0f136141f03239816a4e360a8d17b6d8f29",
    "build_date" : "2023-02-13T09:35:20.314882762Z",
    "build_snapshot" : false,
    "lucene_version" : "9.4.2",
    "minimum_wire_compatibility_version" : "7.17.0",
    "minimum_index_compatibility_version" : "7.0.0"
  },
  "tagline" : "You Know, for Search"
}
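
The same response can be fetched from the command line with basic auth, assuming the elastic password from the .env above:

> curl -u elastic:'jk1234' http://localhost:9200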

 

Kibana : localhost:5601

The Kibana console is displayed.

 

Confirming that the Logstash pipeline is processing correctly
{
    "@timestamp" => 2023-11-22T08:14:00.626475261Z,
         "price" => 19800.0,
      "@version" => "1",
     "productId" => 611019,
          "name" => "플루크 new 피그먼트 오버핏 반팔티셔츠 FST701 / 7color M"
}
{
    "@timestamp" => 2023-11-22T08:14:00.626515886Z,
         "price" => 12900.0,
      "@version" => "1",
     "productId" => 628066,
          "name" => "무설탕 프로틴 초콜릿 틴볼스"
}
{
    "@timestamp" => 2023-11-22T08:14:00.626522386Z,
         "price" => 238000.0,
      "@version" => "1",
     "productId" => 648418,
          "name" => "BS 02-2A DAYPACK 26 (BLACK)"
}
{
    "@timestamp" => 2023-11-22T08:14:00.626526511Z,
         "price" => 38000.0,
      "@version" => "1",
     "productId" => 706803,
          "name" => "ZEROVITY™ Flip Flop Cream 2.0 (Z-FF-CRAJ-)"
}
{
    "@timestamp" => 2023-11-22T08:14:00.626530011Z,
         "price" => 28000.0,
      "@version" => "1",
     "productId" => 744775,
          "name" => "SHUT UP [TK00112]"

 

H2 Console

 

H2 Web Console : localhost:8081

h2:
  container_name: h2
  image: oscarfonts/h2:latest
  ports:
    - 1521:1521
    - 8081:81
  environment:
    H2_OPTIONS: -ifNotExists
  volumes:
    - ./h2/opt/h2-data/:/opt/h2-data
  networks:
    - elk
  restart: unless-stopped
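
In the web console login form, the connection settings would look roughly like this. The database name, user, and password are assumptions (oscarfonts/h2 is commonly used with user sa and an empty password, and -ifNotExists lets the database be created on first connect); adjust them to your setup:

Driver Class: org.h2.Driver
JDBC URL:     jdbc:h2:tcp://localhost:1521/test
User Name:    sa
Password:     (empty)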