Skip to end of metadata
Go to start of metadata

You are viewing an old version of this page. View the current version.

Compare with Current View Page History

« Previous Version 5 Next »

EFK (Elasticsearch, FluentD, and Kibana) is a logging solution stack that helps you view application logs. This section guides you through installing EFK.

Prerequisites

  • Kubernetes 1.16+
  • Helm 3+

Elasticsearch

Prerequisites 

  • Minimum cluster requirements include the following to run this chart with default settings. All of these settings are configurable.
    • Three Kubernetes nodes to respect the default "hard" affinity settings
    • 1GB of RAM for the JVM heap

Installation

To deploy Elasticsearch, you need to follow the steps as given below.

  • Run the following command to add the Elasticsearch helm chart repository.
    • helm repo add elastic https://helm.elastic.co
  • Update the Helm repository by running the following command.
    • helm repo update
  • Run the helm install command as shown below to deploy  Elasticsearch.
    • helm install elasticsearch elastic/elasticsearch  -n <NAMESPACE>

FluentD

Fluentd is an open source data collector for unified logging layer. Fluentd allows you to unify data collection and consumption for a better use and understanding of data.

Installation

To deploy FluentD, you need to follow the steps as given below.

  • Run the following command to add the FluentD helm chart repository.
    • helm repo add fluent https://fluent.github.io/helm-charts
  • Update the Helm repository by running the following command.
    • helm repo update
  • Run the helm install command as shown below to deploy FluentD.
    • helm install fluentd fluent/fluentd  -n <NAMESPACE>

Once you have installed FluentD, update your FluentD ConfigMap using the configuration shown below.

# ConfigMap consumed by the FluentD forwarder. Each key under `data` is a
# FluentD config fragment; fluentd.conf is the entry point and pulls the
# fragments in via @include.
apiVersion: v1
kind: ConfigMap
metadata:
  name: fluentd-forwarder-cm
  namespace: fluentd
data:
  # Entry point: drop fluentd's own events, then wire in the tail inputs
  # (source.conf) and the Elasticsearch outputs (elastic-output.conf).
  fluentd.conf: |-
    # Ignore fluentd own events
    <match fluent.**>
      @type null
    </match>


    @include source.conf
    @include elastic-output.conf

  # Aggregates all inputs. The HTTP source exists only so the container's
  # liveness/readiness probes have an endpoint to hit (port 9880).
  source.conf: |-
    # HTTP input for the liveness and readiness probes
    <source>
      @type http
      port 9880
    </source>

    @include webapp-gateway-input.conf
    @include webrunner-input.conf
    @include portal-input.conf
    @include event-input.conf
    @include runtime-input.conf
   
  # Tails webapp-gateway container logs. The detect_exceptions match strips
  # the "service" tag prefix (service.webapp-gateway -> webapp-gateway), so
  # the filters below and the elastic output match on the bare name.
  webapp-gateway-input.conf: |-
    <source>
      @type tail
      tag service.webapp-gateway
      path /var/log/containers/*webapp-gateway*.log
      pos_file /var/log/webapp-gateway-containers.log.pos
      read_from_head true
      @include source-parser.conf
    </source>
    <match service.webapp-gateway.**>
      @id service.webapp-gateway
      @include exception-detector.conf
    </match>
    @include concat-filter.conf
    <filter webapp-gateway.**>
      @id webapp-gateway_kubernetes_metadata-filter
      @include kubernetes_metadata-filter.conf
    </filter>
    <filter webapp-gateway.**>
      @id webapp-gateway_log-field-parser
      @include log-field-parser.conf
    </filter>
  
  # Tails portal container logs; same pipeline shape as the other inputs
  # (tail -> detect_exceptions retag -> concat -> k8s metadata -> parse).
  portal-input.conf: |-
    <source>
      @type tail
      tag service.portal
      path /var/log/containers/*portal*.log
      pos_file /var/log/portal-containers.log.pos
      read_from_head true
      @include source-parser.conf
    </source>
    <match service.portal.**>
      @id service.portal
      @include exception-detector.conf
    </match>
    @include concat-filter.conf
    <filter portal.**>
      @id portal_kubernetes_metadata-filter
      @include kubernetes_metadata-filter.conf
    </filter>
    <filter portal.**>
      @id portal_log-field-parser
      @include log-field-parser.conf
    </filter>
  
  # Tails web-runner container logs. The detect_exceptions match is required:
  # it strips the "service" tag prefix (service.webrunner -> webrunner) so the
  # webrunner.** filters below and the elastic output actually match.
  # Without it, events kept the service.webrunner tag and were never indexed.
  webrunner-input.conf: |-
    <source>
      @type tail
      tag service.webrunner
      path /var/log/containers/*web-runner*.log
      pos_file /var/log/webrunner-containers.log.pos
      read_from_head true
      @include source-parser.conf
    </source>
    <match service.webrunner.**>
      @id service.webrunner
      @include exception-detector.conf
    </match>
    @include concat-filter.conf
    <filter webrunner.**>
      @id webrunner_kubernetes_metadata-filter
      @include kubernetes_metadata-filter.conf
    </filter>
    <filter webrunner.**>
      @id webrunner_log-field-parser
      @include log-field-parser.conf
    </filter>

  # Tails event container logs; same pipeline shape as the other inputs
  # (tail -> detect_exceptions retag -> concat -> k8s metadata -> parse).
  event-input.conf: |-
    <source>
      @type tail
      tag service.event
      path /var/log/containers/*event*.log
      pos_file /var/log/event-containers.log.pos
      read_from_head true
      @include source-parser.conf
    </source>
    <match service.event.**>
      @id service.event
      @include exception-detector.conf
    </match>
    @include concat-filter.conf
    <filter event.**>
      @id event_kubernetes_metadata-filter
      @include kubernetes_metadata-filter.conf
    </filter>
    <filter event.**>
      @id event_log-field-parser
      @include log-field-parser.conf
    </filter>

  # Tails runtime container logs; same pipeline shape as the other inputs
  # (tail -> detect_exceptions retag -> concat -> k8s metadata -> parse).
  runtime-input.conf: |-
    <source>
      @type tail
      tag service.runtime
      path /var/log/containers/*runtime*.log
      pos_file /var/log/runtime-containers.log.pos
      read_from_head true
      @include source-parser.conf
    </source>
    <match service.runtime.**>
      @id service.runtime
      @include exception-detector.conf
    </match>
    @include concat-filter.conf
    <filter runtime.**>
      @id runtime_kubernetes_metadata-filter
      @include kubernetes_metadata-filter.conf
    </filter>
    <filter runtime.**>
      @id runtime_log-field-parser
      @include log-field-parser.conf
    </filter>
    
  
  # Shared body for the service.* match blocks: folds multi-line stack traces
  # into single events and strips the "service" tag prefix so downstream
  # <filter>/<match> directives on the bare service name apply.
  exception-detector.conf: |-
      # Detect exceptions in the log output and forward them as one log entry.
      @type detect_exceptions
      remove_tag_prefix service
      message log
      stream stream
      multiline_flush_interval 5
      max_bytes 500000
      max_lines 1000

  # Re-joins container log lines that the runtime split: lines are buffered
  # until one ends with a newline, then concatenated into a single record.
  concat-filter.conf: |-
    <filter **>
      # @id filter_concat
      @type concat
      key log
      use_first_timestamp true
      multiline_end_regexp /\n$/
      separator ""
    </filter>

  # Shared filter body: enriches records with Kubernetes metadata.
  # Optional skip_* switches are left commented out as documentation.
  # Indentation normalized to a uniform 4 spaces: the original first line sat
  # at 3 spaces, which set the block-scalar base indent and left stray leading
  # whitespace on every other rendered line.
  kubernetes_metadata-filter.conf: |-
    # Enriches records with Kubernetes metadata
    @type kubernetes_metadata
    # skip_namespace_metadata true
    # skip_master_url true
    # skip_labels false
    # skip_container_metadata false

  # Shared filter body: parses the "log" field as JSON when possible,
  # otherwise leaves it as plain text (multi_format falls through in order).
  # reserve_* keep the original time and other record fields intact.
  log-field-parser.conf: |-
    @type parser
    key_name log
    reserve_time true
    reserve_data true
    remove_key_name_field true
    <parse>
      @type multi_format
      <pattern>
        format json
      </pattern>
      <pattern>
        format none
      </pattern>
    </parse>
    
  # Shared <parse> block for the tail sources: tries Docker JSON log format
  # first, then falls back to the CRI/containerd plain-text line format
  # ("<time> <stream> <flags> <log>").
  source-parser.conf: |-
    <parse>
      @type multi_format
      <pattern>
        format json
        time_key time
        #time_format %Y-%m-%dT%H:%M:%S.%NZ
      </pattern>
      <pattern>
        format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
        time_format %Y-%m-%dT%H:%M:%S.%N%:z
      </pattern>
    </parse>
  # Routes each service's stream to its own Elasticsearch index.
  # Index names must be lowercase and must not contain spaces (Elasticsearch
  # rejects names like "Webapp Gateway"), so plain service names are used.
  elastic-output.conf: |-
    <match webapp-gateway.**>
      @include elastic-search.conf
      index_name webapp-gateway
    </match>
    <match portal.**>
      @include elastic-search.conf
      index_name portal
    </match>
    <match webrunner.**>
      @include elastic-search.conf
      index_name webrunner
    </match>
    <match event.**>
      @include elastic-search.conf
      index_name event
    </match>
    <match runtime.**>
      @include elastic-search.conf
      index_name runtime
    </match>
    
  # Shared Elasticsearch output body; host/port come from env vars with
  # in-cluster defaults (elasticsearch-master service, port 9200).
  elastic-search.conf: |-
      @type elasticsearch
      host "#{ENV['FLUENT_ELASTICSEARCH_HOST'] || 'elasticsearch-master.logs.svc.cluster.local'}"
      port "#{ENV['FLUENT_ELASTICSEARCH_PORT'] || '9200'}"
      include_tag_key true

Kibana

Installation

To deploy Kibana, you need to follow the steps as given below.

  • Run the following command to add the Kibana helm chart repository.
    • helm repo add elastic https://helm.elastic.co
  • Update the Helm repository by running the following command.
    • helm repo update
  • Run the helm install command as shown below to deploy   Kibana.
    • helm install kibana elastic/kibana  -n <NAMESPACE>
  • No labels