CaaSP4 (SUSE CaaS Platform 4)

worker01:~ # tree /var/lib/containers/storage/ -L 1
/var/lib/containers/storage/
├── mounts
├── overlay
├── overlay-containers
├── overlay-images
├── overlay-layers
├── storage.lock
└── tmp
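
CaaSP4 uses CRI-O as its container runtime, so images, layers, and container root filesystems live under /var/lib/containers/storage (the overlay* directories above) rather than /var/lib/docker. The same storage can also be inspected through the CRI-O socket with crictl; a minimal sketch, assuming crictl is present on the node and CRI-O listens on the default socket shown in the node annotation further down:

# crictl --runtime-endpoint unix:///var/run/crio/crio.sock images    # images backing overlay-images
# crictl --runtime-endpoint unix:///var/run/crio/crio.sock ps -a     # containers backing overlay-containers
# crictl --runtime-endpoint unix:///var/run/crio/crio.sock info      # RuntimeReady / NetworkReady status reported by CRI-O
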
# kubectl describe nodes master01
Name:               master01
Roles:              master
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=master01
                    kubernetes.io/os=linux
                    node-role.kubernetes.io/master=
Annotations:        io.cilium.network.ipv4-cilium-host: 10.244.0.1
                    io.cilium.network.ipv4-health-ip: 10.244.0.144
                    io.cilium.network.ipv4-pod-cidr: 10.244.0.0/24
                    io.cilium.network.ipv6-cilium-host: f00d::af4:0:0:1
                    io.cilium.network.ipv6-health-ip: f00d::af4:0:0:8ce1
                    io.cilium.network.ipv6-pod-cidr: f00d::af4:0:0:0/96
                    kubeadm.alpha.kubernetes.io/cri-socket: /var/run/crio/crio.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Sun, 06 Oct 2019 16:03:08 +0800
Taints:             node-role.kubernetes.io/master:NoSchedule
Unschedulable:      false
Conditions:
  Type                 Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----                 ------  -----------------                 ------------------                ------                       -------
  NetworkUnavailable   False   Wed, 09 Oct 2019 09:48:15 +0800   Wed, 09 Oct 2019 09:48:15 +0800   CiliumIsUp                   Cilium is running on this node
  MemoryPressure       False   Wed, 09 Oct 2019 10:41:58 +0800   Sun, 06 Oct 2019 16:03:00 +0800   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure         False   Wed, 09 Oct 2019 10:41:58 +0800   Sun, 06 Oct 2019 16:03:00 +0800   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure          False   Wed, 09 Oct 2019 10:41:58 +0800   Sun, 06 Oct 2019 16:03:00 +0800   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready                True    Wed, 09 Oct 2019 10:41:58 +0800   Sun, 06 Oct 2019 16:08:28 +0800   KubeletReady                 kubelet is posting ready status. AppArmor enabled
Addresses:
  InternalIP:  172.200.50.70
  Hostname:    master01
Capacity:
 cpu:                4
 ephemeral-storage:  17394Mi
 hugepages-1Gi:      0
 hugepages-2Mi:      0
 memory:             3746524Ki
 pods:               110
Allocatable:
 cpu:                4
 ephemeral-storage:  16415037823
 hugepages-1Gi:      0
 hugepages-2Mi:      0
 memory:             3644124Ki
 pods:               110
System Info:
 Machine ID:                 21c43aad5388499bba20966cf4aad01a
 System UUID:                e5744d56-fe06-ebb8-779a-ed0dfc4f6e07
 Boot ID:                    bff71ca7-f8d5-40ec-823a-017cda604703
 Kernel Version:             4.12.14-197.18-default
 OS Image:                   SUSE Linux Enterprise Server 15 SP1
 Operating System:           linux
 Architecture:               amd64
 Container Runtime Version:  cri-o://1.15.0
 Kubelet Version:            v1.15.2
 Kube-Proxy Version:         v1.15.2
PodCIDR:                     10.244.0.0/24
Non-terminated Pods:         (9 in total)
  Namespace                  Name                                CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
  ---------                  ----                                ------------  ----------  ---------------  -------------  ---
  kube-system                cilium-d6krt                        0 (0%)        0 (0%)      0 (0%)           0 (0%)         2d18h
  kube-system                coredns-69c4947958-jq5fj            100m (2%)     0 (0%)      70Mi (1%)        170Mi (4%)     2d18h
  kube-system                coredns-69c4947958-m29fz            100m (2%)     0 (0%)      70Mi (1%)        170Mi (4%)     2d18h
  kube-system                etcd-master01                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         2d18h
  kube-system                kube-apiserver-master01             250m (6%)     0 (0%)      0 (0%)           0 (0%)         2d18h
  kube-system                kube-controller-manager-master01    200m (5%)     0 (0%)      0 (0%)           0 (0%)         2d18h
  kube-system                kube-proxy-2f57p                    0 (0%)        0 (0%)      0 (0%)           0 (0%)         2d18h
  kube-system                kube-scheduler-master01             100m (2%)     0 (0%)      0 (0%)           0 (0%)         2d18h
  kube-system                kured-5vhdw                         0 (0%)        0 (0%)      0 (0%)           0 (0%)         2d18h
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                750m (18%)  0 (0%)
  memory             140Mi (3%)  340Mi (9%)
  ephemeral-storage  0 (0%)      0 (0%)
Events:
  Type     Reason                   Age                From                  Message
  ----     ------                   ----               ----                  -------
  Normal   Starting                 54m                kubelet, master01     Starting kubelet.
  Normal   NodeHasSufficientMemory  54m (x8 over 54m)  kubelet, master01     Node master01 status is now: NodeHasSufficientMemory
  Normal   NodeHasNoDiskPressure    54m (x8 over 54m)  kubelet, master01     Node master01 status is now: NodeHasNoDiskPressure
  Normal   NodeHasSufficientPID     54m (x7 over 54m)  kubelet, master01     Node master01 status is now: NodeHasSufficientPID
  Normal   NodeAllocatableEnforced  54m                kubelet, master01     Updated Node Allocatable limit across pods
  Warning  readOnlySysFS            54m                kube-proxy, master01  CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)
  Normal   Starting                 54m                kube-proxy, master01  Starting kube-proxy.
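
Individual fields from the describe output can also be pulled out with kubectl's jsonpath output format, which is convenient for checking several nodes in a loop. A small sketch, assuming the same kubeconfig and admin access:

# kubectl get node master01 -o jsonpath='{.spec.taints}'                               # the NoSchedule master taint shown above
# kubectl get node master01 -o jsonpath='{.status.nodeInfo.containerRuntimeVersion}'   # cri-o://1.15.0
# kubectl get node master01 -o jsonpath='{.spec.podCIDR}'                              # 10.244.0.0/24, matching the Cilium pod-cidr annotation
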
# cat /etc/crio/crio.conf | grep -A 10 crio.network
# The crio.network table containers settings pertaining to the management of
# CNI plugins.
[crio.network]

# Path to the directory where CNI configuration files are located.
network_dir = "/etc/cni/net.d/"
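
network_dir is where CRI-O looks for CNI configuration; on this cluster those files are provided by Cilium (see the io.cilium.network.* annotations on the node). A quick cross-check, as a sketch that assumes the stock layout with plugin binaries under /opt/cni/bin (the usual plugin_dir default) and the standard Cilium manifest labels:

# ls /etc/cni/net.d/                                          # CNI config dropped in by the Cilium agent
# ls /opt/cni/bin/                                            # plugin binaries; path assumed, verify plugin_dir in crio.conf
# kubectl -n kube-system get pods -o wide -l k8s-app=cilium   # one cilium pod per node (label assumed from upstream manifests)
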
Original article: https://www.cnblogs.com/alfiesuse/p/11640348.html