ELK Log Collection for K8s (containerd Runtime), Part 1

Log collection pipeline: filebeat (collect) -> kafka (buffer) -> logstash (process) -> elasticsearch (store) -> kibana (display)

Create the working directory and the corresponding namespace

mkdir -p /data/yaml/k8s-logging
cd /data/yaml/k8s-logging

cat ns.yaml 
kind: Namespace
apiVersion: v1
metadata:
  name: k8s-logging

kubectl apply -f ns.yaml 
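
Before moving on, confirm the namespace was created:

kubectl get ns k8s-logging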

The backend storage is Ceph; create the corresponding StorageClass. Note that a StorageClass is cluster-scoped, so it takes no namespace.

cat es-rbd-sc.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
   name: es-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
   clusterID: 24c6d785-eec0-44f7-8ae1-738d05a1c89d
   pool: kubernetes
   csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
   csi.storage.k8s.io/provisioner-secret-namespace: default
   csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
   csi.storage.k8s.io/node-stage-secret-namespace: default
   imageFormat: "2"
   imageFeatures: "layering"
reclaimPolicy: Delete
mountOptions:
   - discard

kubectl apply -f es-rbd-sc.yaml
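
The clusterID, pool, and secret names above come from this environment's ceph-csi deployment; substitute the values from your own cluster. Verify that the StorageClass registered:

kubectl get sc es-rbd-sc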

Deploy the ZooKeeper cluster

mkdir -p /data/yaml/k8s-logging/zookeeper
cd /data/yaml/k8s-logging/zookeeper

cat sts.yaml 
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
  namespace: k8s-logging
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
  namespace: k8s-logging
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: zk
    spec:
      containers:
      - name: kubernetes-zookeeper
        imagePullPolicy: IfNotPresent
        image: fastop/zookeeper:3.4.10
        resources:
          requests:
            memory: "1G"
            cpu: "0.5"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper \
          --servers=3 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2181 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
      labels:
        app: zk
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: es-rbd-sc
      resources:
        requests:
          storage: 3Gi


kubectl apply -f sts.yaml
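
Because podManagementPolicy is Parallel, the three replicas start at the same time. Wait for all of them to become Ready and for their PVCs to bind:

kubectl get pods -n k8s-logging -l app=zk -w
kubectl get pvc -n k8s-logging -l app=zk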

cat svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  namespace: k8s-logging
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  namespace: k8s-logging
  labels:
    app: zk
spec:
  ports:
  - port: 2181
    name: client
  selector:
    app: zk

kubectl apply -f svc.yaml 
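
zk-hs is a headless service that gives each pod a stable DNS name (zk-0.zk-hs.k8s-logging.svc.cluster.local and so on) for the server and leader-election ports, while zk-cs exposes a single ClusterIP for clients. A quick check:

kubectl get svc -n k8s-logging -l app=zk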

Test ZooKeeper

Write a znode on one member and read it back from another to confirm the ensemble replicates:

kubectl exec zk-0 -n k8s-logging -- zkCli.sh create /hello world

kubectl exec zk-1 -n k8s-logging -- zkCli.sh get /hello
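
To see which member was elected leader, each pod can also be asked for its role; a minimal sketch, assuming zkServer.sh on the image's PATH can locate the generated config under /opt/zookeeper/conf:

for i in 0 1 2; do
  kubectl exec zk-$i -n k8s-logging -- zkServer.sh status
done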

Deploy the Kafka cluster

mkdir -p /data/yaml/k8s-logging/kafka
cd /data/yaml/k8s-logging/kafka

cat sts.yaml 
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: kafka-pdb
  namespace: k8s-logging
spec:
  selector:
    matchLabels:
      app: kafka
  minAvailable: 2
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
  namespace: k8s-logging
spec:
  selector:
    matchLabels:
      app: kafka
  serviceName: kafka-svc
  replicas: 3
  template:
    metadata:
      labels:
        app: kafka
    spec:
      terminationGracePeriodSeconds: 300
      containers:
      - name: k8s-kafka
        imagePullPolicy: IfNotPresent
        image: fastop/kafka:2.2.0
        resources:
          requests:
            memory: "1G"
            cpu: 500m
        ports:
        - containerPort: 9092
          name: server
        command:
        - sh
        - -c
        - "exec kafka-server-start.sh /opt/kafka/config/server.properties --override broker.id=${HOSTNAME##*-} \
          --override listeners=PLAINTEXT://:9092 \
          --override zookeeper.connect=zk-0.zk-hs.k8s-logging.svc.cluster.local:2181,zk-1.zk-hs.k8s-logging.svc.cluster.local:2181,zk-2.zk-hs.k8s-logging.svc.cluster.local:2181 \
          --override log.dir=/var/lib/kafka \
          --override auto.create.topics.enable=true \
          --override auto.leader.rebalance.enable=true \
          --override background.threads=10 \
          --override compression.type=producer \
          --override delete.topic.enable=false \
          --override leader.imbalance.check.interval.seconds=300 \
          --override leader.imbalance.per.broker.percentage=10 \
          --override log.flush.interval.messages=9223372036854775807 \
          --override log.flush.offset.checkpoint.interval.ms=60000 \
          --override log.flush.scheduler.interval.ms=9223372036854775807 \
          --override log.retention.bytes=-1 \
          --override log.retention.hours=168 \
          --override log.roll.hours=168 \
          --override log.roll.jitter.hours=0 \
          --override log.segment.bytes=1073741824 \
          --override log.segment.delete.delay.ms=60000 \
          --override message.max.bytes=1000012 \
          --override min.insync.replicas=1 \
          --override num.io.threads=8 \
          --override num.network.threads=3 \
          --override num.recovery.threads.per.data.dir=1 \
          --override num.replica.fetchers=1 \
          --override offset.metadata.max.bytes=4096 \
          --override offsets.commit.required.acks=-1 \
          --override offsets.commit.timeout.ms=5000 \
          --override offsets.load.buffer.size=5242880 \
          --override offsets.retention.check.interval.ms=600000 \
          --override offsets.retention.minutes=1440 \
          --override offsets.topic.compression.codec=0 \
          --override offsets.topic.num.partitions=50 \
          --override offsets.topic.replication.factor=3 \
          --override offsets.topic.segment.bytes=104857600 \
          --override queued.max.requests=500 \
          --override quota.consumer.default=9223372036854775807 \
          --override quota.producer.default=9223372036854775807 \
          --override replica.fetch.min.bytes=1 \
          --override replica.fetch.wait.max.ms=500 \
          --override replica.high.watermark.checkpoint.interval.ms=5000 \
          --override replica.lag.time.max.ms=10000 \
          --override replica.socket.receive.buffer.bytes=65536 \
          --override replica.socket.timeout.ms=30000 \
          --override request.timeout.ms=30000 \
          --override socket.receive.buffer.bytes=102400 \
          --override socket.request.max.bytes=104857600 \
          --override socket.send.buffer.bytes=102400 \
          --override unclean.leader.election.enable=true \
          --override zookeeper.session.timeout.ms=6000 \
          --override zookeeper.set.acl=false \
          --override broker.id.generation.enable=true \
          --override connections.max.idle.ms=600000 \
          --override controlled.shutdown.enable=true \
          --override controlled.shutdown.max.retries=3 \
          --override controlled.shutdown.retry.backoff.ms=5000 \
          --override controller.socket.timeout.ms=30000 \
          --override default.replication.factor=1 \
          --override fetch.purgatory.purge.interval.requests=1000 \
          --override group.max.session.timeout.ms=300000 \
          --override group.min.session.timeout.ms=6000 \
          --override inter.broker.protocol.version=2.2.0 \
          --override log.cleaner.backoff.ms=15000 \
          --override log.cleaner.dedupe.buffer.size=134217728 \
          --override log.cleaner.delete.retention.ms=86400000 \
          --override log.cleaner.enable=true \
          --override log.cleaner.io.buffer.load.factor=0.9 \
          --override log.cleaner.io.buffer.size=524288 \
          --override log.cleaner.io.max.bytes.per.second=1.7976931348623157E308 \
          --override log.cleaner.min.cleanable.ratio=0.5 \
          --override log.cleaner.min.compaction.lag.ms=0 \
          --override log.cleaner.threads=1 \
          --override log.cleanup.policy=delete \
          --override log.index.interval.bytes=4096 \
          --override log.index.size.max.bytes=10485760 \
          --override log.message.timestamp.difference.max.ms=9223372036854775807 \
          --override log.message.timestamp.type=CreateTime \
          --override log.preallocate=false \
          --override log.retention.check.interval.ms=300000 \
          --override max.connections.per.ip=2147483647 \
          --override num.partitions=4 \
          --override producer.purgatory.purge.interval.requests=1000 \
          --override replica.fetch.backoff.ms=1000 \
          --override replica.fetch.max.bytes=1048576 \
          --override replica.fetch.response.max.bytes=10485760 \
          --override reserved.broker.max.id=1000 "
        env:
        - name: KAFKA_HEAP_OPTS
          value: "-Xmx512M -Xms512M"
        - name: KAFKA_OPTS
          value: "-Dlogging.level=INFO"
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/kafka
        readinessProbe:
          tcpSocket:
            port: 9092
          timeoutSeconds: 1
          initialDelaySeconds: 5
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: es-rbd-sc 
      resources:
        requests:
          storage: 1Gi

kubectl apply -f sts.yaml
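
Again, wait until all three brokers are Running and Ready before testing:

kubectl get pods -n k8s-logging -l app=kafka -w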

cat svc.yaml 
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-svc
  namespace: k8s-logging
  labels:
    app: kafka
spec:
  ports:
  - port: 9092
    name: server
  clusterIP: None
  selector:
    app: kafka

kubectl apply -f svc.yaml
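
kafka-svc is headless as well, so each broker gets a stable DNS name such as kafka-0.kafka-svc.k8s-logging.svc.cluster.local:9092; these are the addresses that filebeat and logstash will point at later in this series. A quick check:

kubectl get svc kafka-svc -n k8s-logging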

Test Kafka

Open a shell on one of the brokers:

kubectl exec -it kafka-0 -n k8s-logging -- bash

# Create a test topic
kafka-topics.sh --create \
--topic test \
--zookeeper zk-0.zk-hs.k8s-logging.svc.cluster.local:2181,zk-1.zk-hs.k8s-logging.svc.cluster.local:2181,zk-2.zk-hs.k8s-logging.svc.cluster.local:2181 \
--partitions 3 \
--replication-factor 2

# List topics to confirm it was created
kafka-topics.sh --list --zookeeper zk-0.zk-hs.k8s-logging.svc.cluster.local:2181,zk-1.zk-hs.k8s-logging.svc.cluster.local:2181,zk-2.zk-hs.k8s-logging.svc.cluster.local:2181
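
Still inside the kafka-0 shell, a produce/consume round trip can be tested with the console tools (Kafka 2.2 syntax, where the producer still takes --broker-list):

# Type a few messages, then press Ctrl-C
kafka-console-producer.sh --topic test --broker-list localhost:9092

# Read them back from the beginning, then press Ctrl-C
kafka-console-consumer.sh --topic test --bootstrap-server localhost:9092 --from-beginning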

Source: https://www.cnblogs.com/klvchen/p/15715325.html