k8sにelkを構成する(esクラスタは事前にインストールされている必要があります。バージョン6.8.2)


#前提:k8sクラスタのインフラストラクチャが配備されていることを確認します。注意:ミラーアドレスは実際のミラーウェアハウスのアドレスを記入します!(docker images | grep ミラー名)。esクラスタアドレスは実際のアドレスを記入します!
kibanaを導入し、uiインタフェースを示すストレージログを提供
svcを導入し、NodePortを使用してポートを外部に露出し、外部からアクセスできます.vim hqs-kibana-svc.yaml(kibana svc yamlファイルの作成)
# Service exposing the Kibana UI outside the cluster via NodePort 32115.
# FIX: removed the hard-coded clusterIP (10.4.146.238) that was copied from
# `kubectl get -o yaml` output — pinning it breaks creation on any cluster
# whose service CIDR differs or where the address is taken. Also removed the
# server-populated read-only fields (creationTimestamp, selfLink, status),
# which should never be part of a manifest (selfLink was removed in k8s 1.24).
apiVersion: v1
kind: Service
metadata:
  labels:
    app: hqs-kibana
  name: hqs-kibana
  namespace: hqs
spec:
  externalTrafficPolicy: Cluster
  ports:
  - name: http
    nodePort: 32115
    port: 5601
    protocol: TCP
    targetPort: 5601
  selector:
    # Must match the pod-template labels of the hqs-kibana Deployment.
    apps.deployment: hqs-kibana
  sessionAffinity: None
  type: NodePort


kubectl create -f hqs-kibana-svc.yaml(外部露出ポートの作成サービス) kubectl get svc -n hqs(hqs-kibanaというsvcが正常に作成されたかどうかを確認)
kibanaを配備し、タイプdeployment:
vim hqs-kibana-deployment.yaml
# Deployment running a single Kibana 6.8.2 replica, pointed at the external
# Elasticsearch cluster and exposed by the hqs-kibana NodePort Service.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: hqs-kibana
  namespace: hqs
  labels:
    name: hqs-kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      apps.deployment: hqs-kibana
  template:
    metadata:
      labels:
        apps.deployment: hqs-kibana
    spec:
      containers:
        - name: hqs-kibana
          image: elastic/kibana:6.8.2
          env:
          - name: "ELASTICSEARCH_URL"
            value: "http://10.66.0.126:9200"
          - name: "ELASTICSEARCH_USERNAME"
            value: "elastic"
          - name: "ELASTICSEARCH_PASSWORD"
            # FIX: the original value was an unterminated string (`"7ujm` with
            # no closing quote), which makes the whole manifest unparseable.
            # Fill in the real elastic password here — or better, reference a
            # Kubernetes Secret instead of an inline plaintext password.
            value: "7ujm"
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
          volumeMounts:
          # Sync container time with the node by mounting the host's localtime.
          - name: host-time
            mountPath: /etc/localtime
      volumes:
      - name: host-time
        hostPath:
          path: /etc/localtime
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      schedulerName: default-scheduler
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%
      maxSurge: 25%
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600


kubectl create -f hqs-kibana-deployment.yaml(配置pod) kubectl get pod -n hqs(hqs-kibana-xxxx pod状態を表示し、Runningが正常であることを示す)
ログ収集、集中、変換、格納のためのlogstashの導入
vim hqs-logstash-deployment.yaml(デプロイコピーが2でpodが2個作成され、クラスタにランダムにデプロイされます)
# ConfigMap holding the logstash pipeline: receive from filebeat (beats/5044),
# parse log lines with grok, and ship to the external Elasticsearch cluster.
# FIX: the grok match pattern and the elasticsearch `index` value were broken
# mid-`%{...}` across several lines (copy/extraction garbling), so the
# pipeline as written could never be parsed by logstash; the patterns have
# been rejoined onto single lines.
kind: ConfigMap
apiVersion: v1
metadata:
  name: hqs-logstash-config
  namespace: hqs
data:
  logstash.conf: |-
    input {
      beats {
        port => 5044
      }
    }

    filter {
      # Parse application log lines of the form:
      # [timestamp][file:line][pid,tid] LEVEL message
      grok {
        match => { "message" => "\[%{TIMESTAMP_ISO8601:logTime}\]\[%{DATA:fileName}:%{NUMBER:line}\]\[%{NUMBER:process}\,%{NUMBER:thread}\] %{LOGLEVEL:loglevel}%{GREEDYDATA:message}" }
      }
    }

    output {
      # Echo parsed events to stdout for debugging via `kubectl logs`.
      stdout { codec => rubydebug }
      elasticsearch {
        hosts => ["http://10.66.0.126:9200","http://10.66.0.27:9200","http://10.66.0.32:9200"]
        # Index per application, taken from the filebeat-set fields.indexname.
        index => "%{[fields][indexname]}-%{+YYYY.MM.dd}"
        user => "elastic"
        password => "changeme"
      }
    }
---
# Deployment running two logstash 6.8.2 replicas; the pipeline definition is
# mounted from the hqs-logstash-config ConfigMap and passed via `-f`.
# FIX: removed the `envFrom`/`configMapRef` on hqs-logstash-config — its only
# key is `logstash.conf`, and dots are invalid in environment-variable names,
# so Kubernetes silently skips the key; the config is already delivered via
# the volume mount below, making the envFrom dead configuration.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: hqs-logstash
  namespace: hqs
  labels:
    name: hqs-logstash
spec:
  replicas: 2
  selector:
    matchLabels:
      apps.deployment: hqs-logstash
  template:
    metadata:
      labels:
        apps.deployment: hqs-logstash
    spec:
      volumes:
        - name: hqs-logstash-config
          configMap:
            name: hqs-logstash-config
            items:
              - key: logstash.conf
                path: logstash.conf
            defaultMode: 420
      containers:
        - name: hqs-logstash
          image: elastic/logstash:6.8.2
          # Run logstash against the mounted pipeline file.
          command:
          - logstash
          - '-f'
          - '/etc/logstash.conf'
          resources:
            limits:
              cpu: 1000m
              memory: 2048Mi
            requests:
              cpu: 100m
              memory: 512Mi
          volumeMounts:
            # subPath-mount only the pipeline file, read-only.
            - name: hqs-logstash-config
              readOnly: true
              mountPath: /etc/logstash.conf
              subPath: logstash.conf
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      schedulerName: default-scheduler
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%
      maxSurge: 25%
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600


kubectl create -f hqs-logstash-deployment.yaml kubectl get pod -n hqs | grep hqs-logstash(podの導入に成功したかどうかを確認し、正常なら2つのpodがある) vim hqs-logstash-svc.yaml
# ClusterIP Service fronting the logstash pods; filebeat ships to it as
# hqs-logstash:5044 (beats protocol over TCP).
# FIX: removed server-populated read-only fields (creationTimestamp, selfLink,
# status) that were copied from `kubectl get -o yaml` output and do not belong
# in a manifest.
apiVersion: v1
kind: Service
metadata:
  labels:
    app: hqs-logstash
  name: hqs-logstash
  namespace: hqs
spec:
  ports:
  - name: tcp
    port: 5044
    protocol: TCP
    targetPort: 5044
  selector:
    # Must match the pod-template labels of the hqs-logstash Deployment.
    apps.deployment: hqs-logstash
  sessionAffinity: None
  type: ClusterIP


kubectl create -f hqs-logstash-svc.yaml
filebeatを配備し、各アプリケーションノードでログを収集します.(DaemonSet採用)
vim hqs-filebeat-deployment.yaml
---
# ConfigMap holding filebeat.yml: tail the per-application log files mounted
# from the host and ship every line to the in-cluster logstash on port 5044.
# `fields.indexname` is consumed by logstash to build the Elasticsearch index.
# FIX: `${path.config}` was broken across two lines (extraction garbling) and
# the `filebeat.config.modules:` key was mis-indented under the third input's
# mapping, which made the whole filebeat.yml invalid; both are repaired here.
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: hqs
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.inputs:
    - type: log
      enabled: true
      paths:
        - /data/release/hqs-toc/hqs-toc*.log
      fields:
        indexname: hqs-toc
    - type: log
      enabled: true
      paths:
        - /data/release/hqs-eureka/hqs-eureka*.log
      fields:
        indexname: hqs-eureka
    - type: log
      enabled: true
      paths:
        - /data/release/hqs-rights/hqs-rights*.log
      fields:
        indexname: hqs-rights
    filebeat.config.modules:
      path: ${path.config}/modules.d/*.yml
      reload.enabled: false
    setup.template.settings:
      index.number_of_shards: 3
    setup.kibana:
    output.logstash:
      hosts: ["hqs-logstash:5044"]
---
# DaemonSet running one filebeat 6.8.2 pod per node, tailing the application
# log directories mounted from the host filesystem.
# FIX: removed the resources block that set every request/limit to 0 — zero
# quantities are at best treated as "no limit" by the runtime and at worst
# misleading to readers; presumably the author meant "no constraints", so the
# block is omitted (set real values once actual usage is known — TODO confirm).
# Also removed the read-only `selfLink` metadata field.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: hqs-filebeat
  namespace: hqs
  labels:
    name: hqs-filebeat
spec:
  selector:
    matchLabels:
      apps.daemonset: hqs-filebeat
  template:
    metadata:
      labels:
        apps.daemonset: hqs-filebeat
    spec:
      containers:
      - name: hqs-filebeat
        image: elastic/filebeat:6.8.2
        # -c: use the mounted config file; -e: log to stderr instead of files.
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        securityContext:
          # Run as root so filebeat can read the hostPath log files.
          runAsUser: 0
        volumeMounts:
        - name: eureka-logs
          mountPath: /data/release/hqs-eureka/
        - name: toc-logs
          mountPath: /data/release/hqs-toc/
        - name: rights-logs
          mountPath: /data/release/hqs-rights/
        # subPath-mount only filebeat.yml from the ConfigMap, read-only.
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
      volumes:
      - name: eureka-logs
        hostPath:
          path: /data/release/hqs-eureka/
      - name: toc-logs
        hostPath:
          path: /data/release/hqs-toc/
      - name: rights-logs
        hostPath:
          path: /data/release/hqs-rights/
      - name: config
        configMap:
          name: filebeat-config


kubectl create -f hqs-filebeat-deployment.yaml kubectl get pod -n hqs | grep hqs-filebeat(クラスタ内のnodeノードごとに配置される) アクセステスト http://nodeip:32115