day02 K8S

1. Review of the previous session

Deploying k8s from binaries

GitLab is an open-source counterpart of GitHub
Harbor plays a similar role (a self-hosted image registry)
// swap must be disabled on the compute nodes
By default the apiserver listens on 6443 (TLS) and on 8080 (plain HTTP)
controller-manager and scheduler are deployed on the same physical machine as the apiserver so that these two services talk to the apiserver over the local 127.0.0.1:8080 endpoint, which is somewhat more efficient than going through TLS certificates
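As a rough sketch of that listener layout (flag names as in the 1.15-era binaries; adjust to the actual start scripts used on these hosts):

./kube-apiserver \
  --secure-port=6443 \
  --insecure-bind-address=127.0.0.1 \
  --insecure-port=8080
# kube-scheduler / kube-controller-manager on the same host then point at the plain HTTP port:
./kube-scheduler --master=http://127.0.0.1:8080
./kube-controller-manager --master=http://127.0.0.1:8080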

1.1 Responsibilities of the key components

etcd (2379 serves client requests, 2380 is for peer traffic between cluster members); where it runs does not matter much, since the apiserver talks to etcd over a plain TCP socket
etcd must be deployed with an odd number of members; it has its own high-availability mechanism
apiserver -> accepts API requests; stores what needs storing in etcd, hands scheduling work to the scheduler, and goes to the controller-manager when that is what is needed
kubelet -> does the hardest work: it is the component that actually starts the containers
kube-proxy -> connects the pod network and the service network in k8s (from 1.12 on it uses the ipvs traffic-scheduling algorithm for cluster traffic; the userspace proxy used in 1.3 wasted a huge amount of resources; many public-cloud vendors still use iptables rules)
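To check which mode a node's kube-proxy is actually running in, something like the following should work (the /proxyMode endpoint is served on kube-proxy's metrics port 10249; ipvsadm comes from the ipvsadm package):

curl -s 127.0.0.1:10249/proxyMode    # prints "ipvs" or "iptables"
ipvsadm -Ln                          # in ipvs mode, lists the virtual servers behind each service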

2. The cfssl toolset

cfssl: the main tool for issuing certificates
cfssl-json: turns the certificate that cfssl emits (in JSON) into file-based certificates
cfssl-certinfo: inspects a certificate's details, including its validity period. Something you should definitely check when you join a new company.
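A typical signing pipeline, assuming the ca-config.json, the "server" profile and apiserver-csr.json from the previous session:

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server apiserver-csr.json | cfssl-json -bare apiserver
cfssl-certinfo -cert apiserver.pem   # inspect what was just issued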

[root@jdss7-200 certs]# cfssl-certinfo -cert apiserver.pem 
{
  "subject": {
    "common_name": "k8s-apiserver",
    "country": "CN",
    "organization": "od",
    "organizational_unit": "ops",
    "locality": "beijing",
    "province": "beijing",
    "names": [
      "CN",
      "beijing",
      "beijing",
      "od",
      "ops",
      "k8s-apiserver"
    ]
  },
  "issuer": {
    "common_name": "OldboyEdu",
    "country": "CN",
    "organization": "od",
    "organizational_unit": "ops",
    "locality": "beijing",
    "province": "beijing",
    "names": [
      "CN",
      "beijing",
      "beijing",
      "od",
      "ops",
      "OldboyEdu"
    ]
  },
  "serial_number": "623283726608787615526450732742237323307482519058",
  "sans": [
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local",
    "127.0.0.1",
    "192.168.0.1",
    "10.4.7.10",
    "10.4.7.21",
    "10.4.7.22",
    "10.4.7.23"
  ],
  "not_before": "2021-12-20T07:01:00Z",
  "not_after": "2041-12-15T07:01:00Z",
  "sigalg": "SHA256WithRSA",
  "authority_key_id": "41:78:51:4F:F0:70:FC:4D:4E:55:4E:AD:97:FD:4C:49:11:14:FE:1C",
  "subject_key_id": "84:31:2D:E8:51:7A:EC:6:E5:90:4C:2E:CD:75:C6:64:F4:5:BA:C9",
  "pem": "-----BEGIN CERTIFICATE-----\nMIIEbzCCA1egAwIBAgIUbS0CSPhhJRCd4sycnqc4IZ7ClhIwDQYJKoZIhvcNAQEL\nBQAwYDELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB2JlaWppbmcxEDAOBgNVBAcTB2Jl\naWppbmcxCzAJBgNVBAoTAm9kMQwwCgYDVQQLEwNvcHMxEjAQBgNVBAMTCU9sZGJv\neUVkdTAeFw0yMTEyMjAwNzAxMDBaFw00MTEyMTUwNzAxMDBaMGQxCzAJBgNVBAYT\nAkNOMRAwDgYDVQQIEwdiZWlqaW5nMRAwDgYDVQQHEwdiZWlqaW5nMQswCQYDVQQK\nEwJvZDEMMAoGA1UECxMDb3BzMRYwFAYDVQQDEw1rOHMtYXBpc2VydmVyMIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuWxN2q0u068b1vgmGecRlPLfsTjo\nDssLvNRxF3ePufsxScPzxMrWi8AyaBD8RLM5SR2wh/4tosjeyLl09cgeKuKAksC5\n4yIqXJUmYqT6PfxCeGnjRhrvOL1pWSYUqaFEYto7nfFZrpXq32SPDvO8/3TnucGi\nNTC3rBTdpeZN51GaQegIYJBdIfkF0/eQB3XFF3KDtujvPd5UGx0rU0wE0/cPFm+S\nWIl7AlMPSU6OPXp+6n/CGOHaCrmWJz62lRTvDG+mHiTHO90XfTEcyl7T42fFZBeF\n3EdFsIlvACsFU8k7bjA8IoxiI/uZv5PhSgJC8WDFfFxasibUz3pLwVq2mQIDAQAB\no4IBGzCCARcwDgYDVR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwG\nA1UdEwEB/wQCMAAwHQYDVR0OBBYEFIQxLehReuwG5ZBMLs11xmT0BbrJMB8GA1Ud\nIwQYMBaAFEF4UU/wcPxNTlVOrZf9TEkRFP4cMIGhBgNVHREEgZkwgZaCEmt1YmVy\nbmV0ZXMuZGVmYXVsdIIWa3ViZXJuZXRlcy5kZWZhdWx0LnN2Y4Iea3ViZXJuZXRl\ncy5kZWZhdWx0LnN2Yy5jbHVzdGVygiRrdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNs\ndXN0ZXIubG9jYWyHBH8AAAGHBMCoAAGHBAoEBwqHBAoEBxWHBAoEBxaHBAoEBxcw\nDQYJKoZIhvcNAQELBQADggEBAJ9Q0tRsM1tF5uYl6GrPYvzu89mwECqEyQF2P05J\nHomzFt6iGPZhE10e7pkdfEBQrtgk0xptd3loyLzYOlSXUiRfA1e9BshkeDpB43uM\nx+3dE9YpmQrRE4HedM/IfSLg8Jgv3Wcat6D3luRqCOu0GB9i2cIwplxORj1SAacj\ncBwSzOHlUk6g+u6HkuiTo3wnSZDstE98qnNCrqvyohlcHYozth+yFvtydN/O4S1i\nxcpNga8CG4iFtl97rWnVHnssFkgNh0EYpb+8bJBjv3Tm5tmdXHLsAn8DU4rY+eQs\nM05d3rNklfLWhThQv8It3bNiIO22a/pSm1mcsVVCd4QGjlo=\n-----END CERTIFICATE-----\n"
}

cfssl-certinfo -domain www.baidu.com    # inspect Baidu's certificate information

{
  "subject": {
    "common_name": "baidu.com",
    "country": "CN",
    "organization": "Beijing Baidu Netcom Science Technology Co., Ltd",
    "organizational_unit": "service operation department",
    "locality": "beijing",
    "province": "beijing",
    "names": [
      "CN",
      "beijing",
      "beijing",
      "service operation department",
      "Beijing Baidu Netcom Science Technology Co., Ltd",
      "baidu.com"
    ]
  },
  "issuer": {
    "common_name": "GlobalSign Organization Validation CA - SHA256 - G2",
    "country": "BE",
    "organization": "GlobalSign nv-sa",
    "names": [
      "BE",
      "GlobalSign nv-sa",
      "GlobalSign Organization Validation CA - SHA256 - G2"
    ]
  },
  "serial_number": "35351242533515273557482149369",
  "sans": [
    "baidu.com",
    "baifubao.com",
    "www.baidu.cn",
    "www.baidu.com.cn",
    "mct.y.nuomi.com",
    "apollo.auto",
    "dwz.cn",
    "*.baidu.com",
    "*.baifubao.com",
    "*.baidustatic.com",
    "*.bdstatic.com",
    "*.bdimg.com",
    "*.hao123.com",
    "*.nuomi.com",
    "*.chuanke.com",
    "*.trustgo.com",
    "*.bce.baidu.com",
    "*.eyun.baidu.com",
    "*.map.baidu.com",
    "*.mbd.baidu.com",
    "*.fanyi.baidu.com",
    "*.baidubce.com",
    "*.mipcdn.com",
    "*.news.baidu.com",
    "*.baidupcs.com",
    "*.aipage.com",
    "*.aipage.cn",
    "*.bcehost.com",
    "*.safe.baidu.com",
    "*.im.baidu.com",
    "*.baiducontent.com",
    "*.dlnel.com",
    "*.dlnel.org",
    "*.dueros.baidu.com",
    "*.su.baidu.com",
    "*.91.com",
    "*.hao123.baidu.com",
    "*.apollo.auto",
    "*.xueshu.baidu.com",
    "*.bj.baidubce.com",
    "*.gz.baidubce.com",
    "*.smartapps.cn",
    "*.bdtjrcv.com",
    "*.hao222.com",
    "*.haokan.com",
    "*.pae.baidu.com",
    "*.vd.bdstatic.com",
    "*.cloud.baidu.com",
    "click.hm.baidu.com",
    "log.hm.baidu.com",
    "cm.pos.baidu.com",
    "wn.pos.baidu.com",
    "update.pan.baidu.com"
  ],
  "not_before": "2021-07-01T01:16:03Z",
  "not_after": "2022-08-02T01:16:03Z",
  "sigalg": "SHA256WithRSA",
  "authority_key_id": "96:DE:61:F1:BD:1C:16:29:53:1C:C0:CC:7D:3B:83:0:40:E6:1A:7C",
  "subject_key_id": "34:92:9A:2F:C:71:62:BC:3D:DB:23:6D:6D:3E:B3:D1:1D:11:9D:ED",
  "pem": "-----BEGIN CERTIFICATE-----\nMIIKQDCCCSigAwIBAgIMcjncyb61yc15VBX5MA0GCSqGSIb3DQEBCwUAMGYxCzAJ\nBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMTwwOgYDVQQDEzNH\nbG9iYWxTaWduIE9yZ2FuaXphdGlvbiBWYWxpZGF0aW9uIENBIC0gU0hBMjU2IC0g\nRzIwHhcNMjEwNzAxMDExNjAzWhcNMjIwODAyMDExNjAzWjCBpzELMAkGA1UEBhMC\nQ04xEDAOBgNVBAgTB2JlaWppbmcxEDAOBgNVBAcTB2JlaWppbmcxJTAjBgNVBAsT\nHHNlcnZpY2Ugb3BlcmF0aW9uIGRlcGFydG1lbnQxOTA3BgNVBAoTMEJlaWppbmcg\nQmFpZHUgTmV0Y29tIFNjaWVuY2UgVGVjaG5vbG9neSBDby4sIEx0ZDESMBAGA1UE\nAxMJYmFpZHUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAm1HB\nm0ZQIHnU05khvgJXhkUKZn2K4iK1E4Kavx+DGar7z3MELQdMQ7ZbhVg37haeoI+n\nbwWDpMhbF3PNgNaTLjiHsGrdl0s3eLVh0zrTkjtH0Q0UBddlilbpPExNPFWq4Wed\n22Y5AfKpuo/LUjCzmKc+aEDv2WoTrPjXTENYqyFj8ugGgNL5lHurgVFWdcMssVoO\n66Mo/q7+1jLr00+OCUO/gdcYxULEtPaoH5w8d6+Fx2ebBcO/GS5sh/dJ4Xbdl5KV\nBmJ4kVW2WeI57eR2ps8WGoDQFxd1Q4b7pOf0MGgGzut6hQQsJC/FZq22H9rQ7gZH\nDljQqEm14sQvfaj1YQIDAQABo4IGqjCCBqYwDgYDVR0PAQH/BAQDAgWgMIGgBggr\nBgEFBQcBAQSBkzCBkDBNBggrBgEFBQcwAoZBaHR0cDovL3NlY3VyZS5nbG9iYWxz\naWduLmNvbS9jYWNlcnQvZ3Nvcmdhbml6YXRpb252YWxzaGEyZzJyMS5jcnQwPwYI\nKwYBBQUHMAGGM2h0dHA6Ly9vY3NwMi5nbG9iYWxzaWduLmNvbS9nc29yZ2FuaXph\ndGlvbnZhbHNoYTJnMjBWBgNVHSAETzBNMEEGCSsGAQQBoDIBFDA0MDIGCCsGAQUF\nBwIBFiZodHRwczovL3d3dy5nbG9iYWxzaWduLmNvbS9yZXBvc2l0b3J5LzAIBgZn\ngQwBAgIwCQYDVR0TBAIwADBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmds\nb2JhbHNpZ24uY29tL2dzL2dzb3JnYW5pemF0aW9udmFsc2hhMmcyLmNybDCCA2EG\nA1UdEQSCA1gwggNUggliYWlkdS5jb22CDGJhaWZ1YmFvLmNvbYIMd3d3LmJhaWR1\nLmNughB3d3cuYmFpZHUuY29tLmNugg9tY3QueS5udW9taS5jb22CC2Fwb2xsby5h\ndXRvggZkd3ouY26CCyouYmFpZHUuY29tgg4qLmJhaWZ1YmFvLmNvbYIRKi5iYWlk\ndXN0YXRpYy5jb22CDiouYmRzdGF0aWMuY29tggsqLmJkaW1nLmNvbYIMKi5oYW8x\nMjMuY29tggsqLm51b21pLmNvbYINKi5jaHVhbmtlLmNvbYINKi50cnVzdGdvLmNv\nbYIPKi5iY2UuYmFpZHUuY29tghAqLmV5dW4uYmFpZHUuY29tgg8qLm1hcC5iYWlk\ndS5jb22CDyoubWJkLmJhaWR1LmNvbYIRKi5mYW55aS5iYWlkdS5jb22CDiouYmFp\nZHViY2UuY29tggwqLm1pcGNkbi5jb22CECoubmV3cy5iYWlkdS5jb22CDiouYmFp\nZHVwY3MuY29tggwqLmFpcGFnZS5jb22CCyouYWlwYWdlLmNugg0qLmJjZWhvc3Qu\nY29tghAqLnNhZmUuYmFpZHUuY29tgg4qLmltLmJhaWR1LmNvbYISKi5iYWlkdWNv\nbnRlbnQuY29tggsqLmRsbmVsLmNvbYILKi5kbG5lbC5vcmeCEiouZHVlcm9zLmJh\naWR1LmNvbYIOKi5zdS5iYWlkdS5jb22CCCouOTEuY29tghIqLmhhbzEyMy5iYWlk\ndS5jb22CDSouYXBvbGxvLmF1dG+CEioueHVlc2h1LmJhaWR1LmNvbYIRKi5iai5i\nYWlkdWJjZS5jb22CESouZ3ouYmFpZHViY2UuY29tgg4qLnNtYXJ0YXBwcy5jboIN\nKi5iZHRqcmN2LmNvbYIMKi5oYW8yMjIuY29tggwqLmhhb2thbi5jb22CDyoucGFl\nLmJhaWR1LmNvbYIRKi52ZC5iZHN0YXRpYy5jb22CESouY2xvdWQuYmFpZHUuY29t\nghJjbGljay5obS5iYWlkdS5jb22CEGxvZy5obS5iYWlkdS5jb22CEGNtLnBvcy5i\nYWlkdS5jb22CEHduLnBvcy5iYWlkdS5jb22CFHVwZGF0ZS5wYW4uYmFpZHUuY29t\nMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAfBgNVHSMEGDAWgBSW3mHx\nvRwWKVMcwMx9O4MAQOYafDAdBgNVHQ4EFgQUNJKaLwxxYrw92yNtbT6z0R0Rne0w\nggF9BgorBgEEAdZ5AgQCBIIBbQSCAWkBZwB1ACJFRQdZVSRWlj+hL/H3bYbgIyZj\nrcBLf13Gg1xu4g8CAAABel+jJjkAAAQDAEYwRAIgIPt5kWXsm47PrqSljzkXx3xD\nt0xLC/fIIWbRTrvyJFUCIDxgcy89XYHRxW/WLY/pBDAv1fnK5MpocUYZi7c4uvDl\nAHYAKXm+8J45OSHwVnOfY6V35b5XfZxgCvj5TV0mXCVdx4QAAAF6X6MmKgAABAMA\nRzBFAiEAzl3C9AQOsbfgoBe61Dnc72Fa+8X3MmImCrsG6kb2f8oCIGeDQqgTEHzx\nbjQzGKr4nnjBDPkVpljrV4SUc3n5ysgvAHYAVYHUwhaQNgFK6gubVzxT8MDkOHhw\nJQgXL6OqHQcT0wwAAAF6X6MmSwAABAMARzBFAiB5KnN89d/LeQheoojaviS16dad\n95CR2Wr8pZWVamxDfgIhAL+3MqWq+E+8mtOIWDyebnH2nS+mm91pmO1mA5CSyiKR\nMA0GCSqGSIb3DQEBCwUAA4IBAQA5igBJkkgWWN4+nM6DORuxrJqV+Vb/fC2C06g2\nW+bPff1KmHJI8rf2UtQLCSyiXDNH4pqbPpe92LoACcmuHrO83uge4d7ZBfipsD3t\nuXqyqTyTTgeM8F7Mi/N1M25VguWZQp+cgVT7rc4oDDhCYJVo4U1fgy2kMnbYURwd\nZrecrR8Z+UDkfHRN2yq76vMkTek4dyFSPP0egR6QAISuyGb844F4kdBDeJkqpIUx\nPJ9r70ieHjlNUQe3U0
3/4hOr48ptfCH24voic/RlcXV32giO9y1b5gHJ95YMXy2o\n1z5MXsKSeOQbTpsoNp8Yd/K79WpkcXgP6tVofxFXtP8PsORz\n-----END CERTIFICATE-----\n"
}

The certificates can be decoded back out of a kubeconfig

[root@jdss7-200 certs]# echo "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR0RENDQXB5Z0F3SUJBZ0lVRlpUZ2tmc3NDNWdHQ2w5am1vS0trL0xDNnFnd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1lERUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjJKbGFXcHBibWN4RURBT0JnTlZCQWNUQjJKbAphV3BwYm1jeEN6QUpCZ05WQkFvVEFtOWtNUXd3Q2dZRFZRUUxFd052Y0hNeEVqQVFCZ05WQkFNVENVOXNaR0p2CmVVVmtkVEFlRncweU1URXlNakF3TlRBeU1EQmFGdzAwTVRFeU1UVXdOVEF5TURCYU1HQXhDekFKQmdOVkJBWVQKQWtOT01SQXdEZ1lEVlFRSUV3ZGlaV2xxYVc1bk1SQXdEZ1lEVlFRSEV3ZGlaV2xxYVc1bk1Rc3dDUVlEVlFRSwpFd0p2WkRFTU1Bb0dBMVVFQ3hNRGIzQnpNUkl3RUFZRFZRUURFd2xQYkdSaWIzbEZaSFV3Z2dFaU1BMEdDU3FHClNJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUMvek45aUdqR2xwOXM1VnlBM0Rvb0RENUpvKzNwZWphVjAKckRrNjVnaldnVUdWd3ZxQTFPalNEZWdEbE5QSTV1N285OTc4NnRqMDl3UHdzUFZxK2t5ZkR3SFkrUXdoVVlMKwpITFBXWlhmcVZhRml5RDQzMW9vWkdKd052eUVHVUtuWDhaUzdjVkJIWHZEWXBhUjh4MXdiZnQ1ckovY29TblVaClF6b0wreTFRaEI2aUx3cmhjNklLQjVjYy9Bb1hWTThlQjJIbnUxMExuaC92SGtkV2xwTUNpWVFJd1hkM2l3cVQKa05GZ2FHSE81T2d4TnRMbHVXRlI4OEY4MGFSWjBCTld6UU56LzErc0hCVjNPdVFEWHR1cUl6NGxQTXZrTUJpMwpFbHBERUl3dG1vMDFrSERnOWFHL1BHd3JQckFzblNsWWcyaGZrSDhrKzU4a1JUbXVsRDhaQWdNQkFBR2paakJrCk1BNEdBMVVkRHdFQi93UUVBd0lCQmpBU0JnTlZIUk1CQWY4RUNEQUdBUUgvQWdFQ01CMEdBMVVkRGdRV0JCUkIKZUZGUDhIRDhUVTVWVHEyWC9VeEpFUlQrSERBZkJnTlZIU01FR0RBV2dCUkJlRkZQOEhEOFRVNVZUcTJYL1V4SgpFUlQrSERBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQVNWbCsrRTNyZFlFMmIxUmJVVHBnYkdtSERxRmVvL2VYCmNUVUhHVHJSM2RzNUpOdUUxMnBUS21Ed0pJTzBtMlJTYmVsSWw5akFMQnBxSndZU216d2RSTjVRN1ZqWkVKSTcKMFdTcTFsajgrR3dNRDZYdUpzOVY2N2JreDZvMXVHV1dFWFFXa3doTEp2NWV2bjQzUkhoNXh2TGtFdVVEd3RXMgpPck54aDkyQXBmK3ZGMzNFVUoweGl1RWdWOUFxVE5zU1UrUnU4eXU4UVgydHZmOGY1UTNOSUR6djdzZjBpSjRpCmhDYXJ3SkhWTTVIMW5oa2d6cjRzQVBwekNZNlU2bE81RmtubHpKbmVXbnZLVFg4K3dHMk1HZklxTG9WdnlRQjQKRnJxTTNHb0xwUmw0NnVKRkpJaVJlL1IyQnBWeHg1TFZCdklKdW50MjhXUDZISkI4NUw5SXVnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" | base64 -d
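Instead of copying the base64 blob by hand, the field can be pulled straight out of the kubeconfig and decoded; a minimal sketch (field name and file path as used on the compute nodes, openssl being just one convenient PEM parser):

grep 'client-certificate-data' /opt/kubernetes/server/bin/conf/kubelet.kubeconfig | awk '{print $2}' | base64 -d | openssl x509 -noout -subject -dates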

You have to know the certificates in k8s inside out.

2.1 The kubeconfig file

  • It is essentially the configuration file of a k8s user
  • It contains the certificate material
  • When a certificate expires or is replaced, this file has to be replaced along with it
    (if a certificate expires and is re-issued, the kubeconfig file has to be regenerated; see the sketch below)
    e.g. kubelet.kubeconfig and kube-proxy.kubeconfig
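Regeneration is the same sequence of kubectl config commands used when the file was first built; a sketch (the cluster, user and context names are assumptions carried over from that earlier setup; the apiserver VIP is the one hard-coded below in kubelet.kubeconfig):

kubectl config set-cluster myk8s --certificate-authority=ca.pem --embed-certs=true --server=https://10.4.7.10:7443 --kubeconfig=kubelet.kubeconfig
kubectl config set-credentials k8s-node --client-certificate=client.pem --client-key=client-key.pem --embed-certs=true --kubeconfig=kubelet.kubeconfig
kubectl config set-context myk8s-context --cluster=myk8s --user=k8s-node --kubeconfig=kubelet.kubeconfig
kubectl config use-context myk8s-context --kubeconfig=kubelet.kubeconfig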

3. Using the kubectl command-line tool

CNI network plugin: flannel, which solves the problem that pods cannot talk to each other across hosts
Service-discovery plugin: CoreDNS
Service-exposure plugin: Traefik
GUI management tool: dashboard (the version used here will not run on k8s 1.16 and above)
RBAC: role-based access control
Heapster: a small monitoring add-on that makes the dashboard look nicer; barely used any more
Production experience of operating Kubernetes

4. Three basic ways to manage the core k8s resources (pods, pod controllers, services, ingresses)

  • Imperative management - relies mainly on the command-line (CLI) tool
  • Declarative management - relies mainly on unified resource manifests
  • GUI management - relies mainly on a graphical interface (web pages)

5. Imperative resource management

The command-line tool is used to communicate with the apiserver: it takes what the user types, organizes and translates it into something the apiserver can understand, and exchanges it with the apiserver on the local 127.0.0.1, which makes it an effective way to manage all kinds of k8s resources.

5.1 Namespaces

  • List namespaces
[root@jdss7-21 ~]# kubectl get namespace
NAME              STATUS   AGE
default           Active   14d
kube-node-lease   Active   14d
kube-public       Active   14d
kube-system       Active   14d
[root@jdss7-21 ~]# kubectl get ns
NAME              STATUS   AGE
default           Active   14d
kube-node-lease   Active   14d
kube-public       Active   14d
kube-system       Active   14d

Resources in different namespaces may share the same name.

  • List all resources in the default namespace
[root@jdss7-21 ~]# kubectl get all -n default
NAME                 READY   STATUS    RESTARTS   AGE
pod/nginx-ds-8gbdr   1/1     Running   1          14d
pod/nginx-ds-twfkj   1/1     Running   2          14d


NAME                 TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
service/kubernetes   ClusterIP   192.168.0.1   <none>        443/TCP   14d

NAME                      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/nginx-ds   2         2         2       2            2           <none>          14d

A DaemonSet is a kind of pod controller.

  • Create a namespace
Create a namespace named app
[root@jdss7-21 ~]# kubectl create namespace app
namespace/app created
  • Delete a namespace
[root@jdss7-21 ~]# kubectl delete namespace app
namespace "app" deleted

5.2 Pod controllers

  • Create a pod controller: it guarantees that pods get pulled up for you and keeps driving the actual state toward the desired value you set in advance, so the resources keep converging on what you asked for
Create a pod controller named nginx-dp
[root@jdss7-21 ~]# kubectl create deployment nginx-dp --image=harbor.od.com/public/nginx:v1.7.9 -n kube-public
deployment.apps/nginx-dp created

Check the newly created pod controller

[root@jdss7-21 ~]# kubectl get deploy -n kube-public
NAME       READY   UP-TO-DATE   AVAILABLE   AGE
nginx-dp   1/1     1            1           42s
[root@jdss7-21 ~]# kubectl get pods -n kube-public
NAME                        READY   STATUS    RESTARTS   AGE
nginx-dp-5dfc689474-jnznx   1/1     Running   0          52s

Show the resources in wide (extended) format

[root@jdss7-21 ~]# kubectl get pods -n kube-public -o wide
NAME                        READY   STATUS    RESTARTS   AGE     IP           NODE                NOMINATED NODE   READINESS GATES
nginx-dp-5dfc689474-jnznx   1/1     Running   0          2m42s   172.7.22.3   jdss7-22.host.com   <none>           <none>

You can see the pod's IP is 172.7.22.3, which tells you at a glance that it lives on the 7.22 compute node.
On the 7.22 compute node, run docker ps -a and look for the containers whose names contain 5dfc689474.

The pause image runs as a sidecar of your business image: before your business container is pulled up, it first claims the IPC namespace, the network namespace, the UTS namespace and so on.
View the resource details, i.e. describe the pod and the pod controller; a rough sketch follows.
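A quick way to see both of these from the node (the container id below is a hypothetical placeholder; only the pattern matters):

kubectl describe pod nginx-dp-5dfc689474-jnznx -n kube-public
docker ps -a | grep 5dfc689474                 # shows the business container plus a k8s_POD_... pause container
docker inspect <business-container-id> --format '{{.HostConfig.NetworkMode}}'   # prints container:<pause-id>, i.e. the shared network namespace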

The file /opt/kubernetes/server/bin/conf/kubelet.kubeconfig has the apiserver address hard-coded as

server: https://10.4.7.10:7443
  • List pod resources
[root@jdss7-21 ~]# kubectl get pods -n kube-public
NAME                        READY   STATUS    RESTARTS   AGE
nginx-dp-5dfc689474-jnznx   1/1     Running   0          37m

  • Exec into a pod
[root@jdss7-21 ~]# kubectl exec -ti nginx-dp-5dfc689474-jnznx /bin/bash -n kube-public

  • Delete a pod (in effect this restarts it: even after the pod is deleted, the pod controller makes sure reality still matches the desired state and pulls up a new pod, whose IP may change)
[root@jdss7-22 ~]# kubectl delete pods nginx-dp-5dfc689474-jnznx -n kube-public
pod "nginx-dp-5dfc689474-jnznx" deleted

Flags to force-delete a pod: (--force --grace-period=0)
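Spelled out against the pod from the example above:

kubectl delete pods nginx-dp-5dfc689474-jnznx -n kube-public --force --grace-period=0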

  • Delete the deployment
[root@jdss7-21 ~]# kubectl delete deployment nginx-dp -n kube-public
deployment.extensions "nginx-dp" deleted

Check:

[root@jdss7-21 ~]# kubectl get all -n kube-public
No resources found.

5.3 Service resources

  • Create a service
    A service gives the pods a stable access point
[root@jdss7-21 ~]# kubectl expose deployment nginx-dp --port=80 -n kube-public
service/nginx-dp exposed

The ClusterIP is the fixed access point for your pods: however the pod IPs change, this ClusterIP does not.


Scale it to 2 replicas

[root@jdss7-21 ~]# kubectl scale deployment nginx-dp --replicas=2 -n kube-public
deployment.extensions/nginx-dp scaled

  • Inspect the service
[root@jdss7-21 ~]# kubectl describe svc nginx-dp -n kube-public
Name:              nginx-dp
Namespace:         kube-public
Labels:            app=nginx-dp
Annotations:       <none>
Selector:          app=nginx-dp
Type:              ClusterIP
IP:                192.168.5.14
Port:              <unset>  80/TCP
TargetPort:        80/TCP
Endpoints:         172.7.21.3:80
Session Affinity:  None
Events:            <none>

How does a service find its pods? Through the label selector.
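You can see that wiring directly: the pods carry the label the selector asks for, and the endpoints object lists the pod IPs currently behind the service (the output shapes are illustrative; the second IP only appears once scaled to 2 replicas):

kubectl get pods -n kube-public --show-labels     # pods labelled app=nginx-dp
kubectl get endpoints nginx-dp -n kube-public     # e.g. 172.7.21.3:80,172.7.22.3:80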

6. Kubernetes Chinese community

docs.kubernetes.org.cn/683.html

7. Drawbacks of the command line

Commands are long, complex and hard to remember
In some scenarios they simply cannot express the management requirement
Creating, deleting and querying resources is easy enough; modifying them is painful

8. The declarative resource management method

Relies on resource manifests (YAML/JSON)

  • View a pod's manifest
[root@jdss7-21 ~]# kubectl get pods nginx-dp-5dfc689474-t98fx -o yaml -n kube-public
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: "2022-01-04T07:49:48Z"
  generateName: nginx-dp-5dfc689474-
  labels:
    app: nginx-dp
    pod-template-hash: 5dfc689474
  name: nginx-dp-5dfc689474-t98fx
  namespace: kube-public
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: nginx-dp-5dfc689474
    uid: e6029552-05d5-40f3-8640-af3c915a0b6c
  resourceVersion: "73732"
  selfLink: /api/v1/namespaces/kube-public/pods/nginx-dp-5dfc689474-t98fx
  uid: 8fbb5af0-f41a-4229-ada5-c2e61747cbbd
spec:
  containers:
  - image: harbor.od.com/public/nginx:v1.7.9
    imagePullPolicy: IfNotPresent
    name: nginx
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: default-token-f7th5
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: jdss7-21.host.com
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: default
  serviceAccountName: default
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - name: default-token-f7th5
    secret:
      defaultMode: 420
      secretName: default-token-f7th5
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2022-01-04T07:49:48Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2022-01-04T07:49:49Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2022-01-04T07:49:49Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2022-01-04T07:49:48Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: docker://03a9fa9a975386441944201489c936fab0e6bd8562c201ef2c5cb82fa35a8e46
    image: harbor.od.com/public/nginx:v1.7.9
    imageID: docker-pullable://harbor.od.com/public/nginx@sha256:b1f5935eb2e9e2ae89c0b3e2e148c19068d91ca502e857052f14db230443e4c2
    lastState: {}
    name: nginx
    ready: true
    restartCount: 0
    state:
      running:
        startedAt: "2022-01-04T07:49:49Z"
  hostIP: 10.4.7.21
  phase: Running
  podIP: 172.7.21.3
  qosClass: BestEffort
  startTime: "2022-01-04T07:49:48Z"

  • View a service's manifest
[root@jdss7-21 ~]# kubectl get svc nginx-dp -o yaml -n kube-public
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: "2022-01-04T07:57:19Z"
  labels:
    app: nginx-dp
  name: nginx-dp
  namespace: kube-public
  resourceVersion: "74379"
  selfLink: /api/v1/namespaces/kube-public/services/nginx-dp
  uid: 2cc6d303-ba90-4580-91b9-82af20840ee3
spec:
  clusterIP: 192.168.5.14
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx-dp
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
  • Use explain to look up the reference documentation
[root@jdss7-21 ~]# kubectl explain service.metadata
  • Create a manifest, nginx-ds-svc.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-ds
  name: nginx-ds
  namespace: default
spec:
  
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx-ds
  sessionAffinity: None
  type: ClusterIP

Apply the manifest in the declarative way

  • Delete the existing svc named nginx-ds
[root@jdss7-21 ~]# kubectl delete svc nginx-ds
service "nginx-ds" deleted

  • Modify the manifest and apply it
    Offline edit:
    kubectl apply -f nginx-ds-svc.yaml
    Online edit:
    kubectl edit svc nginx-ds

  • Delete via the manifest

[root@jdss7-21 ~]# kubectl delete svc nginx-ds
service "nginx-ds" deleted
[root@jdss7-21 ~]# kubectl delete -f nginx-ds-svc.yaml 

9. Summary of declarative resource management

Resources are managed through YAML manifest files
Management works by defining the resources in advance inside a manifest and then applying it to the K8S cluster with an imperative command

General syntax:

kubectl create/apply/delete -f /path/to/yaml

How to learn resource manifests:
tip 1: read plenty of manifests written by others (and the official ones) until you can understand them
tip 2: learn to take an existing file and adapt it
tip 3: when something is unclear, look it up with kubectl explain
tip 4: as a beginner, never try to conjure a manifest out of thin air

10. flannel (flanneld, one of the CNI plugins)

In the k8s network model so far, the Docker containers on the two compute nodes of the cluster still cannot talk to each other across hosts.

Its single most important job: let pod resources communicate across hosts.

When you run several k8s clusters for different environments, isolate them with physical networking; do not use the network plugin as a network-isolation mechanism.

10.1 Deploying flannel

Download the binary package and unpack it
Do the necessary configuration: issue certificates, write the start script
Create the directories and hand the process over to supervisord

Install flannel on both the 7.21 and 7.22 compute nodes
https://github.com/flannel-io/flannel/releases
Download version 0.11.0

cd /opt/src
# put the binary tarball here
mkdir -p /opt/flannel-v0.11.0
tar xf flannel-v0.11.0-linux-amd64.tar.gz  -C /opt/flannel-v0.11.0/
 ln -s /opt/flannel-v0.11.0/ /opt/flannel
mkdir -p /opt/flannel/cert
scp jdss7-200:/opt/certs/ca.pem .
scp jdss7-200:/opt/certs/client.pem .
scp jdss7-200:/opt/certs/client-key.pem .
// flannel uses etcd by default for some of its storage and configuration
  • Create the config file

/opt/flannel/subnet.env (the values below are for the 7.21 node; on 7.22 FLANNEL_SUBNET becomes 172.7.22.1/24)

FLANNEL_NETWORK=172.7.0.0/16
FLANNEL_SUBNET=172.7.21.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false
  • Create the start script
    /opt/flannel/flanneld.sh (on 7.22 change --public-ip to 10.4.7.22)
#!/bin/sh
./flanneld \
  --public-ip=10.4.7.21 \
  --etcd-endpoints=https://10.4.7.12:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 \
  --etcd-keyfile=./cert/client-key.pem \
  --etcd-certfile=./cert/client.pem \
  --etcd-cafile=./cert/ca.pem \
  --iface=enp0s3 \
  --subnet-file=./subnet.env \
  --healthz-port=2401

mkdir -p /data/logs/flanneld

  • Add the base configuration in etcd
    Set flanneld's network configuration in etcd (7.21 already runs an etcd member)
./etcdctl set /coreos.com/network/config '{"Network":"172.7.0.0/16","Backend":{"Type":"host-gw"}}'

  • Add the supervisor config
    /etc/supervisord.d/flanneld.ini
[program:flanneld-7-21]
command=/opt/flannel/flanneld.sh                        ;
numprocs=1                                              ;
directory=/opt/flannel                                  ;
autostart=true                                          ;
autorestart=true                                        ;
startsecs=30                                            ;
startretries=3                                          ;
exitcodes=0,2                                           ;
stopsignal=QUIT                                         ;
stopwaitsecs=10                                         ;
user=root                                               ;
redirect_stderr=true                                    ;
stdout_logfile=/data/logs/flanneld/flanneld.stdout.log  ;
stdout_logfile_maxbytes=64MB                            ;
stdout_logfile_backups=4                                ;
stdout_capture_maxbytes=1MB                             ;
stdout_events_enabled=false                             ;
  • supervisorctl update
    Make sure the flanneld service is running on both the 21 and 22 machines; the pods on the two hosts can then reach each other

10.2 How flannel's host-gw model works

Limitation: the hosts must share one gateway, i.e. sit in the same network segment.
flannel simply adds a static route on each host pointing at the other host's pod subnet (the host-gw model is the most efficient one: plain kernel forwarding). The routes amount to:

route add -net 172.7.22.0/24 gw 10.4.7.22 dev eth0    # added on 10.4.7.21
route add -net 172.7.21.0/24 gw 10.4.7.21 dev eth0    # added on 10.4.7.22
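On 7.21 the result is easy to verify, because in host-gw mode the route and the cross-host ping are the whole story (172.7.22.2 is an example pod IP on the other node):

ip route | grep 172.7.22        # should show something like: 172.7.22.0/24 via 10.4.7.22 dev eth0
ping -c 1 172.7.22.2            # reachable without any tunnel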

10.3 How flannel's VxLAN model works (network tunnelling)


Each of the two hosts gets a virtual device called flannel.1.
Packets are encapsulated and de-encapsulated, much like a VPN.

How do you use VxLAN?
Only the configuration differs:

{"Network":"172.7.0.0/16","Backend":{"Type":"VxLAN"}}

Check which flannel backend is currently in use

etcdctl get /coreos.com/network/config
etcdctl rm /coreos.com/network/config
etcdctl set /coreos.com/network/config '{"Network":"172.7.0.0/16","Backend":{"Type":"VxLAN"}}'
Restart flanneld; the switch takes roughly 30 seconds.
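With the supervisor setup above, the restart plus a quick sanity check looks roughly like this (the program name comes from flanneld.ini):

supervisorctl restart flanneld-7-21
ip -d link show flannel.1       # in VxLAN mode a flannel.1 device with vxlan attributes shows up on each host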


The tunnel between the two virtual devices is what stitches the pod networks together.

10.4 Direct-routing model (a hybrid of VxLAN and direct routing)

When the two hosts are detected to be on the same layer-2 network, traffic takes the host-gw path instead of the tunnel.

{"Network":"172.7.0.0/16","Backend":{"Type":"VxLAN","Directrouting":true}}

10.5 Optimizing flannel's SNAT rules

Edit nginx-ds.yaml in the home directory on the 7.21 machine

apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: harbor.od.com/public/nginx:curl
        ports:
        - containerPort: 80

kubectl apply -f nginx-ds.yaml
kubectl delete pod nginx-ds-twfkj    # restart the pod

From 172.7.21.3, access http://172.7.22.2/ ; the nginx on the far end logs the host's IP instead of the pod's.

The culprit: SNAT translation.


[root@jdss7-21 ~]# iptables-save | grep -i postrouting
:POSTROUTING ACCEPT [4:240]
:KUBE-POSTROUTING - [0:0]
-A POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE    # traffic sourced from 172.7.21.0/24 that does not leave via docker0 gets masqueraded
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -m mark --mark 0x4000/0x4000 -j MASQUERADE
-A KUBE-POSTROUTING -m comment --comment "Kubernetes endpoints dst ip:port, source ip for solving hairpin purpose" -m set --match-set KUBE-LOOP-BACK dst,dst,src -j MASQUERADE

Modify the rules
yum install iptables-services -y
systemctl enable iptables
systemctl start iptables

Delete the rule: -A POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE
iptables -t nat -D POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE
Add the rule: -A POSTROUTING -s 172.7.21.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE    # only traffic that comes from the docker IPs in 172.7.21.0/24, is NOT destined for 172.7.0.0/16, and does not leave via the docker0 device gets SNAT'd
iptables -t nat -I POSTROUTING -s 172.7.21.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE
Save the rules
iptables-save > /etc/sysconfig/iptables
Delete the useless filter rules
iptables -t filter -D INPUT -j REJECT --reject-with icmp-host-prohibited
iptables -t filter -D FORWARD -j REJECT --reject-with icmp-host-prohibited

Without this, every container-to-container exchange goes through an iptables SNAT, even though the containers sit on the same layer-2 pod network.
Containers should show each other their real addresses: do the same on the 22 node and pod-to-pod traffic no longer needs SNAT in either direction.
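A quick check after changing both nodes (the pod names are hypothetical placeholders; use whatever kubectl get pods shows):

kubectl exec -ti <pod-on-7.21> -- curl -s 172.7.22.2
kubectl logs <pod-on-7.22> --tail=1    # the access log should now record the pod IP 172.7.21.x, not 10.4.7.21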

11. CoreDNS

The core component that service discovery depends on.

  • Service discovery is simply the process by which services (applications) locate one another
  • Service discovery is not unique to the cloud-computing era; traditional monoliths use it too, but the following scenarios need it far more:
    • highly dynamic services (the service is on 10.4.7.21 today and runs on 10.4.7.22 tomorrow)
    • frequent releases (small, fast iterations: make it work first, optimize later)
    • automatic scaling of services

In a k8s cluster pod IPs change all the time; how do you meet constant change with something constant?

  • The service concept is the abstraction: a label selector ties it to a group of pods
  • The cluster network is abstracted as well: a relatively fixed cluster IP makes the service's access point fixed

How do we automatically associate a service's name with its cluster IP, so that services are discovered automatically inside the cluster?

  • Traditional DNS model: jdss7-21.host.com -> 10.4.7.21
  • Can we build the same kind of model in k8s: nginx-ds -> 192.168.0.5

11.1 Installing CoreDNS (the service is delivered as a container)

11.1.1 Deploying an HTTP service for the cluster's resource manifests

On the ops host 7-200, configure an nginx virtual host that serves as the unified access point for the cluster's resource manifests

/etc/nginx/conf.d/k8s-yaml.od.com.conf
server {
	listen	80;
	server_name	k8s-yaml.od.com;
	location / {
		autoindex on;
		default_type text/plain;
		root /data/k8s-yaml;
	}

}

nginx -t
nginx -s reload
Then go to the DNS server, the 7-11 machine, and update the DNS configuration so that the k8s-yaml.od.com domain resolves

vim /var/named/od.com.zone
Add the record: k8s-yaml        A       10.4.7.200
and bump the zone's serial number by one.
Restart the DNS service: systemctl restart named
Check that resolution works: dig -t A k8s-yaml.od.com @10.4.7.11 +short
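The edit itself is tiny; the zone ends up looking roughly like this (the serial value is illustrative, the other records stay whatever was already there):

$ORIGIN od.com.
...                         ; existing SOA/NS/A records, with the serial bumped by one
k8s-yaml           A    10.4.7.200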

11.1.2 Deploying CoreDNS

This time we do not download a binary package; CoreDNS is delivered as a container into k8s.
Version 1.6.1 is used.

On the jdss7-200 machine
cd /data/k8s-yaml/coredns

docker pull docker.io/coredns/coredns:1.6.1
docker images | grep coredns
Then tag the image and push it to your own private Harbor registry, roughly as follows.
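A sketch, assuming docker login harbor.od.com has already been done; the target repository matches the image referenced in dp.yaml below:

docker tag coredns/coredns:1.6.1 harbor.od.com/public/coredns:v1.6.1
docker push harbor.od.com/public/coredns:v1.6.1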


Prepare 4 yaml files on the jdss7-200 machine
rbac.yaml // permissions

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system

cm.yaml //ConfigMap

apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        log
        health
        ready
        kubernetes cluster.local 192.168.0.0/16
        forward . 10.4.7.11
        cache 30
        loop
        reload
        loadbalance
         }

dp.yaml // the pod controller (a Deployment)

apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      containers:
      - name: coredns
        image: harbor.od.com/public/coredns:v1.6.1
        args:
        - -conf
        - /etc/coredns/Corefile
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile

svc.yaml // service

apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 192.168.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP

kubectl apply -f http://k8s-yaml.od.com/coredns/rbac.yaml
kubectl apply -f http://k8s-yaml.od.com/coredns/cm.yaml
kubectl apply -f http://k8s-yaml.od.com/coredns/dp.yaml
kubectl apply -f http://k8s-yaml.od.com/coredns/svc.yaml
kubectl get all -n kube-system

11.2 Verifying CoreDNS

[root@jdss7-21 ~]# dig -t A jdss7-21.host.com @192.168.0.2 +short
10.4.7.21

Create a service for the nginx-dp pod controller
kubectl expose deployment nginx-dp --port=80 -n kube-public
kubectl get service -n kube-public

Use the FQDN (fully qualified domain name); CoreDNS resolves the service name to the concrete cluster IP.

// the service name is nginx-dp
// kube-public is the namespace
// svc is short for service
// cluster means the cluster
// local is the local suffix
[root@jdss7-21 ~]# dig -t A nginx-dp.kube-public.svc.cluster.local @192.168.0.2 +short
192.168.5.14
[root@jdss7-21 ~]# dig -t A nginx-dp.kube-public.svc.cluster.local. @192.168.0.2 +short
192.168.5.14

Only from inside a pod in the cluster can you reach the far end with the short name nginx-dp.kube-public;
that works because the pod's DNS configuration uses search domains for short-name matching, roughly as below.
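Inside a pod, /etc/resolv.conf looks roughly like this (cluster DNS IP and domains as configured above; the exact search list depends on the pod's namespace and the node's own resolv.conf):

nameserver 192.168.0.2
search kube-public.svc.cluster.local svc.cluster.local cluster.local host.com
options ndots:5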

12. Exposing services

Inside the cluster you find things by service name.
How do you find them from outside? nginx-dp.kube-public.svc.cluster.local does not resolve out there
(CoreDNS only works inside the cluster, not outside).

12.1 NodePort mode

12.1.1 Switch kube-proxy from ipvs mode to iptables mode, then demo a NodePort service

  • On the 7.21 and 7.22 compute nodes, switch kube-proxy to iptables mode
    vim /opt/kubernetes/server/bin/kube-proxy.sh

    supervisorctl restart kube-proxy
  • On the 7.21 and 7.22 compute nodes, clear the leftover ipvs rules (see the sketch below)
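A minimal sketch of both steps (the flag name is the standard kube-proxy one; adjust to whatever the start script actually contains):

# in /opt/kubernetes/server/bin/kube-proxy.sh change:
#   --proxy-mode=ipvs   ->   --proxy-mode=iptables
supervisorctl restart kube-proxy
ipvsadm -C      # clears all leftover ipvs virtual-server rules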

12.1.2 Expose the nginx-ds service externally with NodePort (port 80 in the pod mapped to port 8000 on the host)
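A minimal manifest for that mapping might look like the following (nodePort 8000 assumes the apiserver's --service-node-port-range was widened to include it; the default range is 30000-32767):

apiVersion: v1
kind: Service
metadata:
  name: nginx-ds
  namespace: default
spec:
  type: NodePort
  selector:
    app: nginx-ds
  ports:
  - port: 80          # service port
    targetPort: 80    # container port
    nodePort: 8000    # port opened on every node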

12.1.3 Under the hood this just writes iptables rules
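You can see those rules on any node; with the port from the example above, something like:

iptables-save -t nat | grep 8000    # DNAT entries (typically in the KUBE-NODEPORTS chain) for the service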

12.2 Ingress mode

Specifically for exposing layer-7 applications, meaning HTTP and HTTPS (HTTPS is more of a hassle; exposing plain HTTP is usually enough).

www.od.com/abc -> tomcat
www.od.com/edf -> nodejs
www.od.com/qwe -> php

An Ingress rule can declare support for the www.od.com domain and dispatch to different services depending on the URL.
The actual traffic flow is: user request from outside the cluster -> ingress -> matched by domain and path rules and handed to the right service;
the service then finds its pods through the label selector, and the pods are what actually serve the traffic.
kube-proxy is one implementation of the service layer.

An ingress controller is essentially a simplified nginx plus a Go program.
Commonly used ingress-controller implementations:

  • Ingress-nginx
  • HAProxy
  • Traefik

12.2.1 Deploying Traefik

Deploy Traefik through resource manifests, i.e. started as pods.

  • Prepare the image
On the 7.200 machine, under /data/k8s-yaml:
# mkdir -p /data/k8s-yaml/traefik
# cd /data/k8s-yaml/traefik
// version 1.7.2 is recommended
# docker pull traefik:v1.7.2-alpine
# docker images | grep traefik
# docker tag add5fac61ae5 harbor.od.com/public/traefik:v1.7.2
# docker push harbor.od.com/public/traefik:v1.7.2
  • Prepare the resource manifests
    rbac.yaml (authorization)
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system

ds.yaml (a DaemonSet controller: one pod must run on every host)

apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: traefik-ingress
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress
spec:
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress
        name: traefik-ingress
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: harbor.od.com/public/traefik:v1.7.2
        name: traefik-ingress
        ports:
        - name: controller
          containerPort: 80
          hostPort: 81   # mapped to port 81 on the host
        - name: admin-web
          containerPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
        - --insecureskipverify=true
        - --kubernetes.endpoint=https://10.4.7.10:7443
        - --accesslog
        - --accesslog.filepath=/var/log/traefik_access.log
        - --traefiklog
        - --traefiklog.filepath=/var/log/traefik.log
        - --metrics.prometheus

service.yaml (the service)

apiVersion: v1
kind: Service
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress
  ports:
    - protocol: TCP
      port: 80
      name: controller
    - protocol: TCP
      port: 8080
      name: admin-web 

ingress.yaml

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-web-ui
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: traefik.od.com
    http:
      paths:
      - path: /
        backend:
          serviceName: traefik-ingress-service
          servicePort: 8080   

kubectl get pods -n kube-system
kubectl describe pod traefik-ingress-qjp62 -n kube-system
(restart kubelet if the pod does not come up)

  • Configure the reverse proxy (od.com.conf on the 10.4.7.11 and 10.4.7.12 machines)
upstream default_backend_traefik {
    server 10.4.7.21:81 max_fails=3 fail_timeout=10s;
    server 10.4.7.22:81 max_fails=3 fail_timeout=10s;
}
server {
    server_name *.od.com;
    location / {
      proxy_pass http://default_backend_traefik;
      proxy_set_header Host  $http_host;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

}
  • Add a DNS A record for
    traefik.od.com
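In od.com.zone that is one more A record plus another serial bump; 10.4.7.10 here is an assumption, namely the VIP in front of the 7.11/7.12 nginx proxies, so adjust it to your actual entry point:

traefik            A    10.4.7.10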

  • Show the result (browse to traefik.od.com)

Original author: 做时间的朋友
Source: https://www.cnblogs.com/PythonOrg/p/15714658.html