cp ~/TLS/etcd/ca*pem ~/TLS/etcd/server*pem /opt/etcd/ssl/
scp -r /opt/etcd/ root@192.168.217.101:/opt/
scp /usr/lib/systemd/system/etcd.service root@192.168.217.101:/usr/lib/systemd/system/
scp -r /opt/etcd/ root@192.168.217.102:/opt/
scp /usr/lib/systemd/system/etcd.service root@192.168.217.102:/usr/lib/systemd/system/
- Modify the node name and the current server's IP in the etcd.conf configuration file on each node machine:
vim /opt/etcd/cfg/etcd.conf

# On 192.168.217.101 (etcd-2):
#[Member]
ETCD_NAME="etcd-2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.217.101:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.217.101:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.217.101:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.217.101:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.217.100:2380,etcd-2=https://192.168.217.101:2380,etcd-3=https://192.168.217.102:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"

# On 192.168.217.102 (etcd-3):
#[Member]
ETCD_NAME="etcd-3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.217.102:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.217.102:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.217.102:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.217.102:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.217.100:2380,etcd-2=https://192.168.217.101:2380,etcd-3=https://192.168.217.102:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
systemctl daemon-reload
systemctl start etcd
systemctl enable etcd
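- Once etcd is running on all three machines, cluster health can also be checked with etcdctl; a sketch, assuming etcdctl is installed at /opt/etcd/bin/etcdctl and the certificates are under /opt/etcd/ssl:
ETCDCTL_API=3 /opt/etcd/bin/etcdctl \
  --cacert=/opt/etcd/ssl/ca.pem \
  --cert=/opt/etcd/ssl/server.pem \
  --key=/opt/etcd/ssl/server-key.pem \
  --endpoints="https://192.168.217.100:2379,https://192.168.217.101:2379,https://192.168.217.102:2379" \
  endpoint health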
systemctl status etcd.service
5.4 Self-Sign Certificates for the API Server
- 1️⃣ Create the self-signed certificate authority (CA):
- Enter the working directory:
cd ~/TLS/k8s/
- Self-sign the CA (the JSON bodies are elided here; see the sketch after this block):
cat > ca-config.json << EOF
...
EOF
cat > ca-csr.json << EOF
...
EOF
- Generate the certificate:
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
- View the generated certificates:
ll *pem
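- A sketch of typical ca-config.json and ca-csr.json contents for this kind of setup (the 87600h expiry is an assumption; the profile name kubernetes must match the -profile flag used when signing below, and the names block mirrors the CSRs in the next step):
cat > ca-config.json << EOF
{
  "signing": {
    "default": { "expiry": "87600h" },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": ["signing", "key encipherment", "server auth", "client auth"]
      }
    }
  }
}
EOF
cat > ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "L": "BeiJing", "ST": "BeiJing", "O": "k8s", "OU": "System" }
  ]
}
EOF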
- 2️⃣ Use the self-signed CA to issue the HTTPS certificates for kube-apiserver and kube-proxy:
cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cat > server-csr.json << EOF
{
  "CN": "kubernetes",
  "hosts": [
    "10.0.0.1",
    "127.0.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local",
    "192.168.217.100",
    "192.168.217.101",
    "192.168.217.102",
    "192.168.217.1",
    "192.168.217.2",
    "192.168.31.198"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
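- Optionally verify that the signed apiserver certificate contains the expected SANs (a quick check, not part of the original steps):
openssl x509 -in server.pem -noout -text | grep -A 1 "Subject Alternative Name"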
5.5 Deploy the Master Components
5.5.1 Check the Download Address on GitHub
5.5.2 Download and Extract the Binary Package
mkdir -pv /opt/kubernetes/{bin,cfg,ssl,logs}
wget "https://dl.k8s.io/v1.18.10/kubernetes-server-linux-amd64.tar.gz"
tar -zxvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin
cp kube-apiserver kube-scheduler kube-controller-manager /opt/kubernetes/bin
cp kubectl /usr/bin/
5.5.3 Deploy kube-apiserver
cat > /opt/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--etcd-servers=https://192.168.217.100:2379,https://192.168.217.101:2379,https://192.168.217.102:2379 \\
--bind-address=192.168.217.100 \\
--secure-port=6443 \\
--advertise-address=192.168.217.100 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-32767 \\
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \\
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
EOF
- --logtostderr: logging switch; set to false so logs go to the log directory instead of stderr.
- --v: log level.
- --log-dir: log directory.
- --etcd-servers: etcd cluster addresses.
- --bind-address: listen address.
- --secure-port: HTTPS secure port.
- --advertise-address: address advertised to the cluster.
- --allow-privileged: allow privileged containers.
- --service-cluster-ip-range: virtual IP range for Services.
- --enable-admission-plugins: admission control plugins.
- --authorization-mode: authorization mode; enables RBAC authorization and Node self-management.
- --enable-bootstrap-token-auth: enable the TLS bootstrap mechanism.
- --token-auth-file: bootstrap token file.
- --service-node-port-range: default port range assigned to NodePort Services.
- --kubelet-client-xxx: client certificate the apiserver uses to access kubelets.
- --tls-xxx-file: apiserver HTTPS certificates.
- --etcd-xxxfile: certificates for connecting to the etcd cluster.
- --audit-log-xxx: audit logging.
- Copy the certificates generated earlier into the path referenced by the configuration file:
cp ~/TLS/k8s/ca*pem ~/TLS/k8s/server*pem ~/TLS/k8s/kube-proxy*pem /opt/kubernetes/ssl/
5.5.4 Enable the TLS Bootstrapping Mechanism
- After TLS authentication is enabled on the Master's apiserver, the kubelet and kube-proxy on the Node machines must present valid certificates issued by the CA in order to communicate with kube-apiserver. When there are many Nodes, issuing these client certificates by hand is a lot of work and also makes scaling the cluster more complex. To simplify the workflow, Kubernetes introduces the TLS Bootstrapping mechanism to issue client certificates automatically: the kubelet requests a certificate from the apiserver as a low-privilege user, and the kubelet's certificate is signed dynamically by the apiserver (see the sketch below for how the resulting certificate request is approved).
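- For reference, once a kubelet has started with its bootstrap kubeconfig, its certificate signing request shows up on the Master and can be approved (the node-csr-... name is node-specific):
kubectl get csr
kubectl certificate approve node-csr-<name>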
- Generate a token:
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
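- If a fresh token is generated, it must also be used as BOOTSTRAP_TOKEN in the configure.sh script of section 5.7.3. A sketch that writes it straight into token.csv (the TOKEN variable is illustrative; the file format matches the one shown next):
TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /opt/kubernetes/cfg/token.csv << EOF
${TOKEN},kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF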
cat > /opt/kubernetes/cfg/token.csv << EOF
6cd622a46af13091337d98f0ac9da4d0,kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF
5.5.5 Manage the apiserver with systemd
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
5.5.6 Start the apiserver and Enable It at Boot
systemctl daemon-reload
systemctl start kube-apiserver
systemctl enable kube-apiserver
5.5.7 Deploy kube-controller-manager
cat > /opt/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect=true \\
--master=127.0.0.1:8080 \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/16 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s"
EOF
- --master: connect to the apiserver through the local insecure port 8080.
- --leader-elect: when multiple instances of this component are running, a leader is elected automatically.
- --cluster-signing-cert-file / --cluster-signing-key-file: the CA that automatically issues certificates for kubelets; must be the same CA the apiserver uses.
5.5.8 Manage controller-manager with systemd
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
5.5.9 Start controller-manager and Enable It at Boot
systemctl daemon-reload
systemctl start kube-controller-manager
systemctl enable kube-controller-manager
5.5.10 Deploy kube-scheduler
cat > /opt/kubernetes/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect=true \\
--master=127.0.0.1:8080 \\
--bind-address=127.0.0.1"
EOF
- --master: connect to the apiserver through the local insecure port 8080.
- --leader-elect: when multiple instances of this component are running, a leader is elected automatically (HA).
5.5.11 Manage scheduler with systemd
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
5.5.12 Start scheduler and Enable It at Boot
systemctl daemon-reload
systemctl start kube-scheduler
systemctl enable kube-scheduler
5.5.13 Check the Cluster Status
- With all components started successfully, use the kubectl tool to check the status of the cluster components:
kubectl get cs
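- On a healthy cluster every entry reports Healthy; illustrative output (exact columns vary with the kubectl version):
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}
etcd-1               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}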
5.6 Install Docker
- Same as the Docker installation steps in 4.4.2 (Docker must be installed on every node).
5.7 Deploy the Node Components
5.7.1 Create the Working Directory on All Node Machines
mkdir -pv /opt/kubernetes/{bin,cfg,ssl,logs}
5.7.2 Copy the Binaries from the Master Node
cd ~/TLS/k8s/kubernetes/server/bin
scp kubelet kube-proxy root@192.168.217.101:/opt/kubernetes/bin
scp kubelet kube-proxy root@192.168.217.102:/opt/kubernetes/bin
scp -r /opt/kubernetes/ssl/ root@192.168.217.101:/opt/kubernetes/ssl
scp -r /opt/kubernetes/ssl/ root@192.168.217.102:/opt/kubernetes/ssl
5.7.3 Generate the bootstrap.kubeconfig and kube-proxy.kubeconfig Files on the Master Node
cat > ~/configure.sh << EOF
#! /bin/bash
# create TLS Bootstrapping Token
#----------------
# Create the kubelet bootstrapping kubeconfig
export PATH=$PATH:/opt/kubernetes/bin
export KUBE_APISERVER="https://192.168.217.100:6443"
export BOOTSTRAP_TOKEN="6cd622a46af13091337d98f0ac9da4d0"
# Create the role binding
kubectl create clusterrolebinding kubelet-bootstrap \
  --clusterrole=system:node-bootstrapper \
  --user=kubelet-bootstrap
# Set the cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=\${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig
# Set the client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
  --token=\${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig
# Set the context
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
#-------------
# Create the kube-proxy kubeconfig
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=\${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
  --client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
  --client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
EOF
- Run the script, then copy the bootstrap.kubeconfig and kube-proxy.kubeconfig files to all Node machines:
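- For example (the kubeconfig files are written to the directory the script is run from, which is also where the scp commands below expect them):
bash ~/configure.sh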
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.217.101:/opt/kubernetes/cfg
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.217.102:/opt/kubernetes/cfg
5.7.4 Deploy kubelet on All Node Machines
- On the 192.168.217.101 node, create the following file (a sketch of the systemd unit that consumes it follows the flag descriptions below):
cat > /opt/kubernetes/cfg/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--hostname-override=k8s-node1 \\
--network-plugin=cni \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF
- On the 192.168.217.102 node, create the following file:
cat > /opt/kubernetes/cfg/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--hostname-override=k8s-node2 \\
--network-plugin=cni \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF
- --hostname-override: display name; must be unique within the cluster.
- --network-plugin: enable the CNI network plugin.
- --kubeconfig: used to connect to the apiserver.
- --cert-dir: directory where kubelet certificates are generated.
- --pod-infra-container-image: image of the container that manages the Pod network.
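- As with the Master components, kubelet is typically managed through a systemd unit that loads this configuration file; a sketch, assuming the paths and conventions used earlier in this guide:
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet.conf
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl start kubelet
systemctl enable kubelet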