Minikube: Problem starting Kibana

#elasticsearch #kubernetes #kibana #fluentd

Question:

I'm currently learning Kubernetes, using Minikube on macOS via Docker Desktop, and I'm running into problems starting Kibana: it never seems to come up, and I also can't expose it through my nginx ingress controller.

As for Kibana, it never reaches the ready state; it appears to be stuck and has restarted several times. Everything is in the default namespace except fluentd, where I use a PersistentVolume and PersistentVolumeClaim to access the shared /data/logs folder.

I've added my fluentd, kibana, es, and ingress YAML configs, as well as the Kibana logs, below.

Fluentd

 ---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd
  namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: fluentd
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - namespaces
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd
roleRef:
  kind: ClusterRole
  name: fluentd
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: fluentd
  namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd
  namespace: kube-system
  labels:
    k8s-app: fluentd-logging
    version: v1
spec:
  selector:
    matchLabels:
      k8s-app: fluentd-logging
      version: v1
  template:
    metadata:
      labels:
        k8s-app: fluentd-logging
        version: v1
    spec:
      containers:
      - name: fluentd
        image: fluent/fluentd-kubernetes-daemonset:v1-debian-elasticsearch
        env:
          - name:  FLUENT_ELASTICSEARCH_HOST
            value: "elasticsearch"
          - name:  FLUENT_ELASTICSEARCH_PORT
            value: "9200"
          - name: FLUENT_ELASTICSEARCH_SCHEME
            value: "http"
          - name: FLUENT_UID
            value: "0"
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
          - mountPath: /var/logs
            name: logs
      terminationGracePeriodSeconds: 30
      volumes:
      - name: logs
        persistentVolumeClaim:
          claimName: chi-kube-pvc
 

Kibana.yaml

 apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
spec:
  selector:
    matchLabels:
      run: kibana
  template:
    metadata:
      labels:
        run: kibana
    spec:
      containers:
        - name: kibana
          image: docker.elastic.co/kibana/kibana:7.14.2
          readinessProbe:
            httpGet:
              path: /kibana
              port: 5601
            initialDelaySeconds: 5
            periodSeconds: 10
          livenessProbe:
            httpGet:
              path: /kibana
              port: 5601
            initialDelaySeconds: 15
            periodSeconds: 20
          env:
            - name: XPACK_SECURITY_ENABLED
              value: "true"
            - name: SERVER_BASEPATH
              value: "/kibana"
          ports:
            - containerPort: 5601
          volumeMounts:
            - mountPath: /var/logs
              name: logs
      volumes:
      - name: logs
        persistentVolumeClaim:
          claimName: chi-pvc      
---
apiVersion: v1
kind: Service
metadata:
  name: kibana
  labels:
    service: kibana
spec:
  type: NodePort
  selector:
    run: kibana
  ports:
    - port: 5601
      targetPort: 5601
 

Kibana logs:

 {"type":"log","@timestamp":"2021-09-22T09:54:47 00:00","tags":["info","plugins-service"],"pid":1216,"message":"Plugin "metricsEntities" is disabled."}
{"type":"log","@timestamp":"2021-09-22T09:54:47 00:00","tags":["warning","config","deprecation"],"pid":1216,"message":"You should set server.basePath along with server.rewriteBasePath. Starting in 7.0, Kibana will expect that all requests start with server.basePath rather than expecting you to rewrite the requests in your reverse proxy. Set server.rewriteBasePath to false to preserve the current behavior and silence this warning."}
{"type":"log","@timestamp":"2021-09-22T09:54:47 00:00","tags":["warning","config","deprecation"],"pid":1216,"message":"Support for setting server.host to "0" in kibana.yml is deprecated and will be removed in Kibana version 8.0.0. Instead use "0.0.0.0" to bind to all interfaces."}
{"type":"log","@timestamp":"2021-09-22T09:54:47 00:00","tags":["warning","config","deprecation"],"pid":1216,"message":"plugins.scanDirs is deprecated and is no longer used"}
{"type":"log","@timestamp":"2021-09-22T09:54:47 00:00","tags":["warning","config","deprecation"],"pid":1216,"message":"Config key [monitoring.cluster_alerts.email_notifications.email_address] will be required for email notifications to work in 8.0.""}
{"type":"log","@timestamp":"2021-09-22T09:54:47 00:00","tags":["warning","config","deprecation"],"pid":1216,"message":""xpack.reporting.roles" is deprecated. Granting reporting privilege through a "reporting_user" role will not be supported starting in 8.0. Please set "xpack.reporting.roles.enabled" to "false" and grant reporting privileges to users using Kibana application privileges **Management > Security > Roles**."}
{"type":"log","@timestamp":"2021-09-22T09:54:48 00:00","tags":["info","http","server","NotReady"],"pid":1216,"message":"http server running at http://0.0.0.0:5601"}
{"type":"log","@timestamp":"2021-09-22T09:54:48 00:00","tags":["info","plugins-system"],"pid":1216,"message":"Setting up [106] plugins: [translations,taskManager,licensing,globalSearch,globalSearchProviders,banners,licenseApiGuard,code,usageCollection,xpackLegacy,telemetryCollectionManager,telemetryCollectionXpack,kibanaUsageCollection,securityOss,share,screenshotMode,telemetry,newsfeed,mapsEms,mapsLegacy,legacyExport,kibanaLegacy,embeddable,uiActionsEnhanced,expressions,charts,esUiShared,bfetch,data,savedObjects,visualizations,visTypeXy,visTypeVislib,visTypeTimelion,features,visTypeTagcloud,visTypeTable,visTypePie,visTypeMetric,visTypeMarkdown,tileMap,regionMap,presentationUtil,timelion,home,searchprofiler,painlessLab,grokdebugger,graph,visTypeVega,management,watcher,licenseManagement,indexPatternManagement,advancedSettings,discover,discoverEnhanced,dashboard,dashboardEnhanced,visualize,visTypeTimeseries,savedObjectsManagement,spaces,security,transform,savedObjectsTagging,lens,reporting,canvas,lists,ingestPipelines,fileUpload,maps,dataVisualizer,encryptedSavedObjects,dataEnhanced,timelines,dashboardMode,cloud,upgradeAssistant,snapshotRestore,fleet,indexManagement,rollup,remoteClusters,crossClusterReplication,indexLifecycleManagement,enterpriseSearch,eventLog,actions,alerting,triggersActionsUi,stackAlerts,ruleRegistry,osquery,ml,cases,securitySolution,observability,uptime,infra,monitoring,logstash,console,apmOss,apm]"}
{"type":"log","@timestamp":"2021-09-22T09:54:48 00:00","tags":["info","plugins","taskManager"],"pid":1216,"message":"TaskManager is identified by the Kibana UUID: 4f523c36-da1f-46e2-a071-84ee400bb9e7"}
{"type":"log","@timestamp":"2021-09-22T09:54:48 00:00","tags":["warning","plugins","security","config"],"pid":1216,"message":"Generating a random key for xpack.security.encryptionKey. To prevent sessions from being invalidated on restart, please set xpack.security.encryptionKey in the kibana.yml or use the bin/kibana-encryption-keys command."}
{"type":"log","@timestamp":"2021-09-22T09:54:48 00:00","tags":["warning","plugins","security","config"],"pid":1216,"message":"Session cookies will be transmitted over insecure connections. This is not recommended."}
{"type":"log","@timestamp":"2021-09-22T09:54:48 00:00","tags":["warning","plugins","reporting","config"],"pid":1216,"message":"Generating a random key for xpack.reporting.encryptionKey. To prevent sessions from being invalidated on restart, please set xpack.reporting.encryptionKey in the kibana.yml or use the bin/kibana-encryption-keys command."}
{"type":"log","@timestamp":"2021-09-22T09:54:48 00:00","tags":["warning","plugins","reporting","config"],"pid":1216,"message":"Chromium sandbox provides an additional layer of protection, but is not supported for Linux CentOS 8.4.2105n OS. Automatically setting 'xpack.reporting.capture.browser.chromium.disableSandbox: true'."}
{"type":"log","@timestamp":"2021-09-22T09:54:48 00:00","tags":["warning","plugins","encryptedSavedObjects"],"pid":1216,"message":"Saved objects encryption key is not set. This will severely limit Kibana functionality. Please set xpack.encryptedSavedObjects.encryptionKey in the kibana.yml or use the bin/kibana-encryption-keys command."}
{"type":"log","@timestamp":"2021-09-22T09:54:48 00:00","tags":["warning","plugins","actions","actions"],"pid":1216,"message":"APIs are disabled because the Encrypted Saved Objects plugin is missing encryption key. Please set xpack.encryptedSavedObjects.encryptionKey in the kibana.yml or use the bin/kibana-encryption-keys command."}
{"type":"log","@timestamp":"2021-09-22T09:54:48 00:00","tags":["warning","plugins","alerting","plugins","alerting"],"pid":1216,"message":"APIs are disabled because the Encrypted Saved Objects plugin is missing encryption key. Please set xpack.encryptedSavedObjects.encryptionKey in the kibana.yml or use the bin/kibana-encryption-keys command."}
{"type":"log","@timestamp":"2021-09-22T09:54:48 00:00","tags":["info","plugins","ruleRegistry"],"pid":1216,"message":"Write is disabled, not installing assets"}
{"type":"log","@timestamp":"2021-09-22T09:54:49 00:00","tags":["info","savedobjects-service"],"pid":1216,"message":"Waiting until all Elasticsearch nodes are compatible with Kibana before starting saved objects migrations..."}
{"type":"log","@timestamp":"2021-09-22T09:54:49 00:00","tags":["info","savedobjects-service"],"pid":1216,"message":"Starting saved objects migrations"}
{"type":"log","@timestamp":"2021-09-22T09:54:49 00:00","tags":["info","savedobjects-service"],"pid":1216,"message":"[.kibana] INIT -> OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT. took: 226ms."}
{"type":"log","@timestamp":"2021-09-22T09:54:49 00:00","tags":["info","savedobjects-service"],"pid":1216,"message":"[.kibana] OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT -> OUTDATED_DOCUMENTS_SEARCH_READ. took: 192ms."}
{"type":"log","@timestamp":"2021-09-22T09:54:49 00:00","tags":["info","savedobjects-service"],"pid":1216,"message":"[.kibana] OUTDATED_DOCUMENTS_SEARCH_READ -> OUTDATED_DOCUMENTS_SEARCH_CLOSE_PIT. took: 118ms."}
{"type":"log","@timestamp":"2021-09-22T09:54:49 00:00","tags":["info","savedobjects-service"],"pid":1216,"message":"[.kibana_task_manager] INIT -> OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT. took: 536ms."}
{"type":"log","@timestamp":"2021-09-22T09:54:49 00:00","tags":["info","savedobjects-service"],"pid":1216,"message":"[.kibana] OUTDATED_DOCUMENTS_SEARCH_CLOSE_PIT -> UPDATE_TARGET_MAPPINGS. took: 86ms."}
{"type":"log","@timestamp":"2021-09-22T09:54:49 00:00","tags":["info","savedobjects-service"],"pid":1216,"message":"[.kibana_task_manager] OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT -> OUTDATED_DOCUMENTS_SEARCH_READ. took: 86ms."}
{"type":"log","@timestamp":"2021-09-22T09:54:50 00:00","tags":["info","savedobjects-service"],"pid":1216,"message":"[.kibana_task_manager] OUTDATED_DOCUMENTS_SEARCH_READ -> OUTDATED_DOCUMENTS_SEARCH_CLOSE_PIT. took: 64ms."}
{"type":"log","@timestamp":"2021-09-22T09:54:50 00:00","tags":["info","savedobjects-service"],"pid":1216,"message":"[.kibana] UPDATE_TARGET_MAPPINGS -> UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK. took: 112ms."}
{"type":"log","@timestamp":"2021-09-22T09:54:50 00:00","tags":["info","savedobjects-service"],"pid":1216,"message":"[.kibana_task_manager] OUTDATED_DOCUMENTS_SEARCH_CLOSE_PIT -> UPDATE_TARGET_MAPPINGS. took: 49ms."}
{"type":"log","@timestamp":"2021-09-22T09:54:50 00:00","tags":["info","savedobjects-service"],"pid":1216,"message":"[.kibana_task_manager] UPDATE_TARGET_MAPPINGS -> UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK. took: 29ms."}
{"type":"log","@timestamp":"2021-09-22T09:54:50 00:00","tags":["info","savedobjects-service"],"pid":1216,"message":"[.kibana] UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK -> DONE. took: 106ms."}
{"type":"log","@timestamp":"2021-09-22T09:54:50 00:00","tags":["info","savedobjects-service"],"pid":1216,"message":"[.kibana] Migration completed after 840ms"}
{"type":"log","@timestamp":"2021-09-22T09:54:50 00:00","tags":["info","savedobjects-service"],"pid":1216,"message":"[.kibana_task_manager] UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK -> DONE. took: 104ms."}
{"type":"log","@timestamp":"2021-09-22T09:54:50 00:00","tags":["info","savedobjects-service"],"pid":1216,"message":"[.kibana_task_manager] Migration completed after 869ms"}
{"type":"log","@timestamp":"2021-09-22T09:54:50 00:00","tags":["info","plugins-system"],"pid":1216,"message":"Starting [106] plugins: [translations,taskManager,licensing,globalSearch,globalSearchProviders,banners,licenseApiGuard,code,usageCollection,xpackLegacy,telemetryCollectionManager,telemetryCollectionXpack,kibanaUsageCollection,securityOss,share,screenshotMode,telemetry,newsfeed,mapsEms,mapsLegacy,legacyExport,kibanaLegacy,embeddable,uiActionsEnhanced,expressions,charts,esUiShared,bfetch,data,savedObjects,visualizations,visTypeXy,visTypeVislib,visTypeTimelion,features,visTypeTagcloud,visTypeTable,visTypePie,visTypeMetric,visTypeMarkdown,tileMap,regionMap,presentationUtil,timelion,home,searchprofiler,painlessLab,grokdebugger,graph,visTypeVega,management,watcher,licenseManagement,indexPatternManagement,advancedSettings,discover,discoverEnhanced,dashboard,dashboardEnhanced,visualize,visTypeTimeseries,savedObjectsManagement,spaces,security,transform,savedObjectsTagging,lens,reporting,canvas,lists,ingestPipelines,fileUpload,maps,dataVisualizer,encryptedSavedObjects,dataEnhanced,timelines,dashboardMode,cloud,upgradeAssistant,snapshotRestore,fleet,indexManagement,rollup,remoteClusters,crossClusterReplication,indexLifecycleManagement,enterpriseSearch,eventLog,actions,alerting,triggersActionsUi,stackAlerts,ruleRegistry,osquery,ml,cases,securitySolution,observability,uptime,infra,monitoring,logstash,console,apmOss,apm]"}
{"type":"log","@timestamp":"2021-09-22T09:54:50 00:00","tags":["info","plugins","monitoring","monitoring"],"pid":1216,"message":"config sourced from: production cluster"}
{"type":"log","@timestamp":"2021-09-22T09:54:51 00:00","tags":["info","http","server","Kibana"],"pid":1216,"message":"http server running at http://0.0.0.0:5601"}
{"type":"log","@timestamp":"2021-09-22T09:54:52 00:00","tags":["info","plugins","monitoring","monitoring","kibana-monitoring"],"pid":1216,"message":"Starting monitoring stats collection"}
{"type":"log","@timestamp":"2021-09-22T09:54:52 00:00","tags":["info","plugins","securitySolution"],"pid":1216,"message":"Dependent plugin setup complete - Starting ManifestTask"}
{"type":"log","@timestamp":"2021-09-22T09:54:52 00:00","tags":["info","status"],"pid":1216,"message":"Kibana is now degraded"}
{"type":"log","@timestamp":"2021-09-22T09:54:52 00:00","tags":["info","plugins","reporting"],"pid":1216,"message":"Browser executable: /usr/share/kibana/x-pack/plugins/reporting/chromium/headless_shell-linux_x64/headless_shell"}
{"type":"log","@timestamp":"2021-09-22T09:54:52 00:00","tags":["warning","plugins","reporting"],"pid":1216,"message":"Enabling the Chromium sandbox provides an additional layer of protection."}
{"type":"log","@timestamp":"2021-09-22T09:54:55 00:00","tags":["info","status"],"pid":1216,"message":"Kibana is now available (was degraded)"}
{"type":"response","@timestamp":"2021-09-22T09:54:58 00:00","tags":[],"pid":1216,"method":"get","statusCode":404,"req":{"url":"/kibana","method":"get","headers":{"host":"172.17.0.3:5601","user-agent":"kube-probe/1.22","accept":"*/*","connection":"close"},"remoteAddress":"172.17.0.1","userAgent":"kube-probe/1.22"},"res":{"statusCode":404,"responseTime":36,"contentLength":60},"message":"GET /kibana 404 36ms - 60.0B"}
{"type":"response","@timestamp":"2021-09-22T09:55:08 00:00","tags":[],"pid":1216,"method":"get","statusCode":404,"req":{"url":"/kibana","method":"get","headers":{"host":"172.17.0.3:5601","user-agent":"kube-probe/1.22","accept":"*/*","connection":"close"},"remoteAddress":"172.17.0.1","userAgent":"kube-probe/1.22"},"res":{"statusCode":404,"responseTime":25,"contentLength":60},"message":"GET /kibana 404 25ms - 60.0B"}
{"type":"response","@timestamp":"2021-09-22T09:55:08 00:00","tags":[],"pid":1216,"method":"get","statusCode":404,"req":{"url":"/kibana","method":"get","headers":{"host":"172.17.0.3:5601","user-agent":"kube-probe/1.22","accept":"*/*","connection":"close"},"remoteAddress":"172.17.0.1","userAgent":"kube-probe/1.22"},"res":{"statusCode":404,"responseTime":22,"contentLength":60},"message":"GET /kibana 404 22ms - 60.0B"}
{"type":"response","@timestamp":"2021-09-22T09:55:18 00:00","tags":[],"pid":1216,"method":"get","statusCode":404,"req":{"url":"/kibana","method":"get","headers":{"host":"172.17.0.3:5601","user-agent":"kube-probe/1.22","accept":"*/*","connection":"close"},"remoteAddress":"172.17.0.1","userAgent":"kube-probe/1.22"},"res":{"statusCode":404,"responseTime":26,"contentLength":60},"message":"GET /kibana 404 26ms - 60.0B"}
{"type":"response","@timestamp":"2021-09-22T09:55:28 00:00","tags":[],"pid":1216,"method":"get","statusCode":404,"req":{"url":"/kibana","method":"get","headers":{"host":"172.17.0.3:5601","user-agent":"kube-probe/1.22","accept":"*/*","connection":"close"},"remoteAddress":"172.17.0.1","userAgent":"kube-probe/1.22"},"res":{"statusCode":404,"responseTime":27,"contentLength":60},"message":"GET /kibana 404 27ms - 60.0B"}
{"type":"response","@timestamp":"2021-09-22T09:55:28 00:00","tags":[],"pid":1216,"method":"get","statusCode":404,"req":{"url":"/kibana","method":"get","headers":{"host":"172.17.0.3:5601","user-agent":"kube-probe/1.22","accept":"*/*","connection":"close"},"remoteAddress":"172.17.0.1","userAgent":"kube-probe/1.22"},"res":{"statusCode":404,"responseTime":22,"contentLength":60},"message":"GET /kibana 404 22ms - 60.0B"}
{"type":"response","@timestamp":"2021-09-22T09:55:38 00:00","tags":[],"pid":1216,"method":"get","statusCode":404,"req":{"url":"/kibana","method":"get","headers":{"host":"172.17.0.3:5601","user-agent":"kube-probe/1.22","accept":"*/*","connection":"close"},"remoteAddress":"172.17.0.1","userAgent":"kube-probe/1.22"},"res":{"statusCode":404,"responseTime":25,"contentLength":60},"message":"GET /kibana 404 25ms - 60.0B"}
 
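
What the tail of the log shows: Kibana itself comes up ("Kibana is now available"), but every kube-probe GET to /kibana returns 404, so the readiness probe never passes and the failing liveness probe keeps restarting the pod. The deprecation warning near the top of the log is the hint: SERVER_BASEPATH is set, but server.rewriteBasePath is left at its default, so Kibana still serves everything at / rather than under /kibana. A minimal probe sketch that would pass against the manifest above as-is (it targets Kibana's standard /api/status endpoint at the root; this is an illustration, not the fix the answer below settles on):

readinessProbe:
  httpGet:
    path: /api/status   # served at the root because rewriteBasePath is off
    port: 5601
  initialDelaySeconds: 5
  periodSeconds: 10
livenessProbe:
  httpGet:
    path: /api/status
    port: 5601
  initialDelaySeconds: 15
  periodSeconds: 20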

Elasticsearch.yaml

 apiVersion: apps/v1
kind: Deployment
metadata:
  name: elasticsearch
spec:
  selector:
    matchLabels:
      component: elasticsearch
  template:
    metadata:
      labels:
        component: elasticsearch
    spec:
      containers:
        - name: elasticsearch
          image: docker.elastic.co/elasticsearch/elasticsearch:7.14.2
          env:
            - name: discovery.type
              value: single-node
          ports:
            - containerPort: 9200
              protocol: TCP
          resources:
            limits:
              cpu: 2
              memory: 4Gi
            requests:
              cpu: 500m
              memory: 4Gi
          volumeMounts:
            - mountPath: /var/logs
              name: logs
      volumes:
      - name: logs
        persistentVolumeClaim:
          claimName: chi-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  labels:
    service: elasticsearch
spec:
  type: NodePort
  selector:
    component: elasticsearch
  ports:
    - port: 9200
      targetPort: 9200
 

Ingress-resource.yaml

 apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: chi-ingress
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - http:
      paths:
      - path: /healthz
        pathType: Prefix
        backend:
          service:
            name: chi-svc
            port:
              number: 3000
      - path: /kibana
        pathType: Prefix
        backend:
          service:
            name: kibana
            port:
              number: 5601
      - path: /elasticsearch
        pathType: Prefix
        backend:
          service:
            name: elasticsearch
            port:
              number: 9200
 

Comments:

1. Sorry, I don't know Kubernetes, but Kibana is definitely running — {"type":"log","@timestamp":"2021-09-22T09:54:55+00:00","tags":["info","status"],"pid":1216,"message":"Kibana is now available (was degraded)"}

2. @MarkWalkom, thanks a lot for your comment. I'm about to post my solution.

Answer #1:

In the end I solved the problem by defining separate Ingresses and removing the nginx rewrite annotation. I went one step further and created a dedicated namespace for the logging infrastructure.
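
The decisive Kibana-side change, excerpted from the full Kibana.yaml further down: SERVER_REWRITEBASEPATH makes Kibana strip the /kibana prefix itself, so the probes can target the status API under that prefix:

env:
- name: SERVER_BASEPATH
  value: /kibana
- name: SERVER_REWRITEBASEPATH
  value: "true"          # Kibana strips /kibana itself; no nginx rewrite needed
readinessProbe:
  httpGet:
    path: /kibana/api/status
    port: 5601

With that in place, the rewrite-target annotation on the ingress becomes unnecessary, and in fact harmful: nginx would strip the /kibana prefix before Kibana even sees it, while Kibana now expects every request to carry it.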

Namespace.yaml

 apiVersion: v1
kind: Namespace
metadata:
  name: kube-logging
 

PersistentVolume.yaml

 apiVersion: v1
kind: PersistentVolume
metadata:
  name: chi-pv
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteMany
  hostPath:
    path: /data/logs/
    type: DirectoryOrCreate
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: chi-pvc
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
  selector:
    matchLabels:
      type: local
 

Elasticsearch.yaml

 apiVersion: apps/v1
kind: Deployment
metadata:
  name: elasticsearch
  namespace: kube-logging
  labels:
    k8s-app: elasticsearch
    version: v1
spec:
  selector:
    matchLabels:
      k8s-app: elasticsearch
      version: v1
  template:
    metadata:
      labels:
        k8s-app: elasticsearch
        version: v1
    spec:
      containers:
      - name: elasticsearch
        image: docker.elastic.co/elasticsearch/elasticsearch:7.15.0
        env:
        - name: discovery.type
          value: single-node
        - name: ES_JAVA_OPTS
          value: "-Xms512m -Xmx512m"
        ports:
          - containerPort: 9200
        resources:
          limits:
            cpu: 500m
            memory: 4Gi
          requests:
            cpu: 500m
            memory: 4Gi
---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  namespace: kube-logging
  labels:
    k8s-app: elasticsearch
    version: v1
spec:
  type: NodePort
  selector:
    k8s-app: elasticsearch
  ports:
    - port: 9200
 
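
One Minikube-specific caveat: this Deployment requests (and limits) 4Gi of memory while ES_JAVA_OPTS caps the JVM heap at 512m. If the Minikube node was started with less memory than the request, the pod will sit in Pending because it can't be scheduled. A smaller sketch that still leaves headroom for the 512m heap (the 1Gi figure is an assumption, not from the original answer):

resources:
  requests:
    cpu: 500m
    memory: 1Gi   # assumed: roughly 2x the 512m JVM heap is usually enough for dev
  limits:
    cpu: 500m
    memory: 1Gi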

Fluentd.yaml

 ---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd
  namespace: kube-logging
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: fluentd
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - namespaces
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd
roleRef:
  kind: ClusterRole
  name: fluentd
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: fluentd
  namespace: kube-logging
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd
  namespace: kube-logging
  labels:
    k8s-app: fluentd-logging
    version: v1
spec:
  selector:
    matchLabels:
      k8s-app: fluentd-logging
      version: v1
  template:
    metadata:
      labels:
        k8s-app: fluentd-logging
        version: v1
    spec:
      serviceAccount: fluentd
      serviceAccountName: fluentd
      # Not needed on Minikube
      # tolerations:
      # - key: node-role.kubernetes.io/master
      #   effect: NoSchedule
      containers:
      - name: fluentd
        image: fluent/fluentd-kubernetes-daemonset:v1-debian-elasticsearch
        env:
          - name:  FLUENT_ELASTICSEARCH_HOST
            # <hostname>.<namespace>.svc.cluster.local
            value: "elasticsearch.kube-logging.svc.cluster.local"
          - name:  FLUENT_ELASTICSEARCH_PORT
            value: "9200"
          - name: FLUENT_ELASTICSEARCH_SCHEME
            value: "http"
          - name: FLUENTD_SYSTEMD_CONF
            value: 'disable'
          - name: FLUENT_LOGSTASH_FORMAT
            value: "true"
          # # X-Pack Authentication
          # # =====================
          # - name: FLUENT_ELASTICSEARCH_USER
          #   value: "elastic"
          # - name: FLUENT_ELASTICSEARCH_PASSWORD
          #   value: "changeme"
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        # When actual pod logs in /var/lib/docker/containers, the following lines should be used.
        - name: dockercontainerlogdirectory
          mountPath: /var/lib/docker/containers
          readOnly: true
        # When actual pod logs in /var/log/pods, the following lines should be used.
        # - name: dockercontainerlogdirectory
        #   mountPath: /var/log/pods
        #   readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      # When actual pod logs in /var/lib/docker/containers, the following lines should be used.
      - name: dockercontainerlogdirectory
        hostPath:
          path: /var/lib/docker/containers
      # When actual pod logs in /var/log/pods, the following lines should be used.
      # - name: dockercontainerlogdirectory
      #   hostPath:
      #     path: /var/log/pods
 

Kibana.yaml

 apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: kube-logging
  labels:
    k8s-app: kibana
    version: v1
spec:
  selector:
    matchLabels:
      k8s-app: kibana
      version: v1
  template:
    metadata:
      labels:
        k8s-app: kibana
        version: v1
    spec:
      containers:
      - name: kibana
        image: docker.elastic.co/kibana/kibana:7.15.0
        env:
        - name: SERVER_NAME
          value: kibana
        - name: SERVER_BASEPATH
          value: /kibana
        - name: SERVER_REWRITEBASEPATH
          value: "true"
        # - name: XPACK_SECURITY_ENABLED
        #   value: "true"
        readinessProbe:
          httpGet:
            path: /kibana/api/status
            port: 5601
          initialDelaySeconds: 5
          periodSeconds: 10
        livenessProbe:
          httpGet:
            path: /kibana/api/status
            port: 5601
          initialDelaySeconds: 15
          periodSeconds: 20
        ports:
          - containerPort: 5601
---
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: kube-logging
  labels:
    k8s-app: kibana
    version: v1
spec:
  type: NodePort
  selector:
    k8s-app: kibana
  ports:
    - port: 5601
      targetPort: 5601
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana-ingress
  namespace: kube-logging
spec:
  rules:
  - host: logging.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kibana
            port:
              number: 5601
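
Note that, unlike the ingress in the question, kibana-ingress declares no ingress class, so it relies on the nginx controller being the cluster default (which the Minikube ingress addon usually makes it). If the ingress is never picked up, the class can be declared explicitly; a sketch using the standard networking.k8s.io/v1 field and the same nginx class the question's annotation referred to:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana-ingress
  namespace: kube-logging
spec:
  ingressClassName: nginx  # explicit class instead of the deprecated annotation
  rules:
  - host: logging.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kibana
            port:
              number: 5601

Since the rule is host-based, logging.com also has to resolve to the Minikube IP (for example via an /etc/hosts entry) before the dashboard is reachable from a browser.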