兜兜    2021-09-22 10:20:24    2022-01-25 09:20:25   

kubernetes k8s nginx ingress
#### Environment

```sh
Kubernetes version: 1.18.20
ingress-nginx: 3.4.0
```

#### Install ingress-nginx

##### Download the Helm chart package

```sh
$ wget https://github.com/kubernetes/ingress-nginx/releases/download/ingress-nginx-3.4.0/ingress-nginx-3.4.0.tgz
$ tar xvf ingress-nginx-3.4.0.tgz
$ cd ingress-nginx
```
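Before overwriting `values.yaml` in the next step, the chart's full default values can be dumped for reference; a small optional sketch (the output file name is arbitrary):

```sh
# Dump the chart's default values as a starting point for edits.
$ helm show values . > values-default.yaml
```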
#### Configure the values

```sh
$ cat > values.yaml <<EOF
controller:
  image:
    repository: registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller # use the Aliyun mirror image
    tag: "v0.40.1"
    digest: sha256:abffcf2d25e3e7c7b67a315a7c664ec79a1588c9c945d3c7a75637c2f55caec6
    pullPolicy: IfNotPresent
    runAsUser: 101
    allowPrivilegeEscalation: true
  containerPort:
    http: 80
    https: 443
  config: {}
  configAnnotations: {}
  proxySetHeaders: {}
  addHeaders: {}
  dnsConfig: {}
  dnsPolicy: ClusterFirst
  reportNodeInternalIp: false
  hostNetwork: true # use the host network
  hostPort:
    enabled: true # bind the host ports
    ports:
      http: 80
      https: 443
  electionID: ingress-controller-leader
  ingressClass: nginx
  podLabels: {}
  podSecurityContext: {}
  sysctls: {}
  publishService:
    enabled: true
    pathOverride: ""
  scope:
    enabled: false
  tcp:
    annotations: {}
  udp:
    annotations: {}
  extraArgs: {}
  extraEnvs: []
  kind: DaemonSet # run as a DaemonSet
  annotations: {}
  labels: {}
  updateStrategy: {}
  minReadySeconds: 0
  tolerations: []
  affinity: {}
  topologySpreadConstraints: []
  terminationGracePeriodSeconds: 300
  nodeSelector:
    ingress: nginx # schedule only onto nodes labelled ingress=nginx
  livenessProbe:
    failureThreshold: 5
    initialDelaySeconds: 10
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 1
    port: 10254
  readinessProbe:
    failureThreshold: 3
    initialDelaySeconds: 10
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 1
    port: 10254
  healthCheckPath: "/healthz"
  podAnnotations: {}
  #replicaCount: 1 # disabled (a DaemonSet is used instead)
  minAvailable: 1
  resources:
    requests:
      cpu: 100m
      memory: 90Mi
  autoscaling:
    enabled: false
    minReplicas: 1
    maxReplicas: 11
    targetCPUUtilizationPercentage: 50
    targetMemoryUtilizationPercentage: 50
  autoscalingTemplate: []
  enableMimalloc: true
  customTemplate:
    configMapName: ""
    configMapKey: ""
  service:
    enabled: true
    annotations: {}
    labels: {}
    externalIPs: []
    loadBalancerSourceRanges: []
    enableHttp: true
    enableHttps: true
    ports:
      http: 80
      https: 443
    targetPorts:
      http: http
      https: https
    type: LoadBalancer
    nodePorts:
      http: ""
      https: ""
      tcp: {}
      udp: {}
    internal:
      enabled: false
      annotations: {}
  extraContainers: []
  extraVolumeMounts: []
  extraVolumes: []
  extraInitContainers: []
  admissionWebhooks:
    enabled: false # disabled
    failurePolicy: Fail
    port: 8443
    service:
      annotations: {}
      externalIPs: []
      loadBalancerSourceRanges: []
      servicePort: 443
      type: ClusterIP
    patch:
      enabled: true
      image:
        repository: docker.io/jettech/kube-webhook-certgen
        tag: v1.3.0
        pullPolicy: IfNotPresent
      priorityClassName: ""
      podAnnotations: {}
      nodeSelector: {}
      tolerations: []
      runAsUser: 2000
  metrics:
    port: 10254
    enabled: false
    service:
      annotations: {}
      externalIPs: []
      loadBalancerSourceRanges: []
      servicePort: 9913
      type: ClusterIP
    serviceMonitor:
      enabled: false
      additionalLabels: {}
      namespace: ""
      namespaceSelector: {}
      scrapeInterval: 30s
      targetLabels: []
      metricRelabelings: []
    prometheusRule:
      enabled: false
      additionalLabels: {}
      rules: []
  lifecycle:
    preStop:
      exec:
        command:
          - /wait-shutdown
  priorityClassName: ""
revisionHistoryLimit: 10
maxmindLicenseKey: ""
defaultBackend:
  enabled: false
  image:
    repository: k8s.gcr.io/defaultbackend-amd64
    tag: "1.5"
    pullPolicy: IfNotPresent
    runAsUser: 65534
  extraArgs: {}
  serviceAccount:
    create: true
    name:
  extraEnvs: []
  port: 8080
  livenessProbe:
    failureThreshold: 3
    initialDelaySeconds: 30
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 5
  readinessProbe:
    failureThreshold: 6
    initialDelaySeconds: 0
    periodSeconds: 5
    successThreshold: 1
    timeoutSeconds: 5
  tolerations: []
  affinity: {}
  podSecurityContext: {}
  podLabels: {}
  nodeSelector: {}
  podAnnotations: {}
  replicaCount: 1
  minAvailable: 1
  resources: {}
  service:
    annotations: {}
    externalIPs: []
    loadBalancerSourceRanges: []
    servicePort: 80
    type: ClusterIP
  priorityClassName: ""
rbac:
  create: true
  scope: false
podSecurityPolicy:
  enabled: false
serviceAccount:
  create: true
  name:
imagePullSecrets: []
tcp: {}
udp: {}
EOF
```

#### Create the namespace

```sh
$ kubectl create namespace ingress-nginx
```

#### Label the nodes

```sh
$ kubectl label nodes k8s-master1 ingress=nginx
$ kubectl label nodes k8s-master2 ingress=nginx
$ kubectl label nodes k8s-node1 ingress=nginx
```

#### Install nginx-ingress

```sh
$ helm -n ingress-nginx upgrade -i ingress-nginx .
```

#### Uninstall ingress-nginx

```sh
$ helm -n ingress-nginx uninstall ingress-nginx
```

#### Test nginx-ingress

#### Deploy a test nginx service

```sh
$ cat > nginx-deployment.yml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deploy
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  selector:
    app: nginx
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
  type: ClusterIP
EOF
```

Configure the Ingress object.

Create the TLS secret:

```sh
$ kubectl create secret tls shudoon-com-tls --cert=5024509__example.com.pem --key=5024509__example.com.key
```

#### Create the Ingress rule

```sh
$ cat >tnginx-ingress.yaml <<EOF
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/ingress.class: nginx
  name: tnginx-ingress
spec:
  rules:
  - host: tnginx.example.com
    http:
      paths:
      - path: /
        backend:
          serviceName: nginx-service
          servicePort: 80
  # This section is only required if TLS is to be enabled for the Ingress
  tls:
  - hosts:
    - tnginx.example.com
    secretName: shudoon-com-tls
EOF
```

Test: open https://tnginx.example.com/

#### Troubleshooting

`Problem: creating a custom Ingress fails with: Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io"`

List the validating webhook configurations:

```sh
$ kubectl get validatingwebhookconfigurations
```

Delete the webhook configuration:

```sh
$ kubectl delete -A ValidatingWebhookConfiguration ingress-nginx-admission
```
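To confirm the controller actually landed on the labelled nodes and serves the test Ingress, something like the following can be used; a sketch, where the `--resolve` address is a placeholder for the IP of any node running the controller:

```sh
# Controller pods should be scheduled on every node labelled ingress=nginx.
$ kubectl -n ingress-nginx get pods -o wide

# Hit the Ingress through one of those nodes without touching DNS;
# replace 172.16.13.80 with the IP of a node running the controller.
$ curl -k --resolve tnginx.example.com:443:172.16.13.80 https://tnginx.example.com/
```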

兜兜    2021-09-22 10:18:21    2021-09-24 09:41:49   

k8s cephfs
#### Environment

```sh
Ceph cluster nodes: 172.16.100.1:6789,172.16.100.2:6789,172.16.100.11:6789
ceph version 15.2.13 (c44bc49e7a57a87d84dfff2a077a2058aa2172e2) octopus (stable)
ceph client: 15.2.14 (pod image: "elementalnet/cephfs-provisioner:0.8.0")
```

`Note: the official quay.io/external_storage/cephfs-provisioner:latest image ships a 13.x Ceph client, which does not match the cluster's version 15 and leaves PVs stuck in Pending. The third-party image elementalnet/cephfs-provisioner:0.8.0 is used instead.`

#### Install the Ceph client

Install the client on the Kubernetes nodes:

```sh
$ yum install ceph-common -y
```

Copy the Ceph key from a Ceph node to the Kubernetes node:

```sh
$ scp /etc/ceph/ceph.client.admin.keyring 172.16.100.100:/etc/ceph
```

Create the secret used to access Ceph:

```sh
$ ceph auth get-key client.admin | base64
QVFEa0RFTmhYQ1UzQUJBQXFmSWptMFJkSVpGaC9VR0V4M0RNc3c9PQ==
```

```sh
$ cat >cephfs-secret.yaml<<EOF
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-admin
  namespace: cephfs
type: "kubernetes.io/rbd"
data:
  key: QVFEa0RFTmhYQ1UzQUJBQXFmSWptMFJkSVpGaC9VR0V4M0RNc3c9PQ== # replace with the base64 output above
EOF
```

```sh
$ kubectl create -f cephfs-secret.yaml
```

#### Install the cephfs provisioner

Reference: https://github.com/kubernetes-retired/external-storage/tree/master/ceph/cephfs/deploy

Create the RBAC objects:

```sh
$ cat >cephfs-rbac.yaml <<EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cephfs-provisioner
  namespace: cephfs
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cephfs-provisioner
  namespace: cephfs
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cephfs-provisioner
  namespace: cephfs
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cephfs-provisioner
subjects:
  - kind: ServiceAccount
    name: cephfs-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
  namespace: cephfs
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns","coredns"]
    verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
subjects:
  - kind: ServiceAccount
    name: cephfs-provisioner
    namespace: cephfs
roleRef:
  kind: ClusterRole
  name: cephfs-provisioner
  apiGroup: rbac.authorization.k8s.io
EOF
```

```sh
$ kubectl create -f cephfs-rbac.yaml
```

Create the cephfs-provisioner deployment:

```sh
$ cat > cephfs-provisioner.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cephfs-provisioner
  namespace: cephfs
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cephfs-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: cephfs-provisioner
    spec:
      containers:
      - name: cephfs-provisioner
        #image: "quay.io/external_storage/cephfs-provisioner:latest" # its Ceph client is too old for a 15.2 cluster; use the image below instead
        image: "elementalnet/cephfs-provisioner:0.8.0"
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/cephfs
        - name: PROVISIONER_SECRET_NAMESPACE
          value: cephfs
        command:
        - "/usr/local/bin/cephfs-provisioner"
        args:
        - "-id=cephfs-provisioner-1"
      serviceAccount: cephfs-provisioner
EOF
```

```sh
$ kubectl create -f cephfs-provisioner.yaml
```
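Before creating the StorageClass it is worth checking that the provisioner actually came up and registered itself; a quick sketch using the deployment name above:

```sh
# The pod should be Running, and the log should show the ceph.com/cephfs provisioner starting.
$ kubectl -n cephfs get pods -l app=cephfs-provisioner
$ kubectl -n cephfs logs deploy/cephfs-provisioner
```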
#### Create the StorageClass

```sh
$ cat > cephfs-storageclass.yaml <<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: cephfs
  namespace: cephfs
provisioner: ceph.com/cephfs
parameters:
  monitors: 172.16.100.1:6789,172.16.100.2:6789,172.16.100.11:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: cephfs
  claimRoot: /pvc-volumes
EOF
```

```sh
$ kubectl create -f cephfs-storageclass.yaml
```

#### Create a test PVC

```sh
$ cat > cephfs-test-pvc.yaml <<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: cephfs-test-pvc-1
  annotations:
    volume.beta.kubernetes.io/storage-class: "cephfs"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 500Mi
EOF
```

```sh
$ kubectl create -f cephfs-test-pvc.yaml
```

Get the PVC:

```sh
$ kubectl get pvc
NAME                STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
cephfs-test-pvc-1   Bound    pvc-571ae252-b080-4a67-8f3d-40bda1304fe3   500Mi      RWX            cephfs         2m1s
```

Get the PV:

```sh
$ kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                       STORAGECLASS   REASON   AGE
pvc-571ae252-b080-4a67-8f3d-40bda1304fe3   500Mi      RWX            Delete           Bound    default/cephfs-test-pvc-1   cephfs                  2m3s
```

#### Create a test deployment that mounts the PVC

```sh
$ cat > cephfs-test-busybox-deployment.yml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cephfs-test-deploy-busybox
spec:
  replicas: 3
  selector:
    matchLabels:
      app: cephfs-test-busybox
  template:
    metadata:
      labels:
        app: cephfs-test-busybox
    spec:
      containers:
      - name: busybox
        image: busybox
        command: ["sleep", "60000"]
        volumeMounts:
        - mountPath: "/mnt/cephfs"
          name: cephfs-test-pvc
      volumes:
      - name: cephfs-test-pvc
        persistentVolumeClaim:
          claimName: cephfs-test-pvc-1
EOF
```

```sh
$ kubectl create -f cephfs-test-busybox-deployment.yml
```

```sh
$ kubectl get pods
NAME                                          READY   STATUS    RESTARTS   AGE
cephfs-test-deploy-busybox-56556d86ff-4dmzn   1/1     Running   0          4m28s
cephfs-test-deploy-busybox-56556d86ff-6dr6v   1/1     Running   0          4m28s
cephfs-test-deploy-busybox-56556d86ff-b75mw   1/1     Running   0          4m28s
```

Check that files written through the CephFS mount are visible across pods:

```sh
$ kubectl exec -ti cephfs-test-deploy-busybox-56556d86ff-4dmzn sh
/ # cd /mnt/cephfs/
/mnt/cephfs # ls
/mnt/cephfs # touch cephfs-test-deploy-busybox-56556d86ff-4dmzn
/mnt/cephfs # exit
```

```sh
$ kubectl exec -ti cephfs-test-deploy-busybox-56556d86ff-6dr6v sh
/ # cd /mnt/cephfs/
/mnt/cephfs # ls
cephfs-test-deploy-busybox-56556d86ff-4dmzn   # the file written from the other pod is visible, so the mount is shared
```
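If `cephfs` is meant to be the cluster's default class, so new PVCs no longer need the storage-class annotation used above, it can be marked as such; a sketch using the standard default-class annotation:

```sh
$ kubectl patch storageclass cephfs \
    -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'
```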

兜兜    2021-09-03 11:27:17    2022-04-01 17:09:23   

k8s
#### Software versions

```bash
docker 19.03
kubernetes 1.20.0
flannel
```

### Preparation

#### Update system packages

```bash
$ yum update -y
```

#### Configure hostnames

```bash
172.16.13.80 k8s-master
172.16.13.81 k8s-node1
172.16.13.82 k8s-node2
```

#### Disable swap

```bash
$ swapoff -a
$ cat /etc/fstab    # edit and comment out the swap entry
```

```sh
...
#/dev/mapper/centos-swap swap                    swap    defaults        0 0
```

#### Disable SELinux

```bash
$ setenforce 0
$ cat /etc/selinux/config    # disable SELinux
```

```sh
...
SELINUX=disabled
...
```

#### Configure the firewall and bridge sysctls

```sh
$ systemctl stop firewalld && systemctl disable firewalld
$ systemctl stop iptables && systemctl disable iptables
$ cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
$ sysctl --system
```
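The two bridge sysctls above only take effect when the br_netfilter module is loaded; a small sketch that loads it now and at every boot (assuming it is not already loaded on this CentOS 7 image):

```sh
$ modprobe br_netfilter
$ echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
$ lsmod | grep br_netfilter    # confirm the module is present
```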
#### Enable time synchronization

##### Install chrony

```sh
$ yum install -y chrony
```

##### Comment out the default NTP servers

```sh
$ sed -i 's/^server/#&/' /etc/chrony.conf
```

##### Point to a public upstream NTP server and allow other nodes to sync from this one

```sh
$ cat >> /etc/chrony.conf << EOF
server ntp.aliyun.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow all
local stratum 10
logdir /var/log/chrony
EOF
```

##### Restart chronyd and enable it at boot

```sh
$ systemctl enable chronyd && systemctl restart chronyd
```

##### List the time sources

```bash
$ chronyc sources -v
```

##### Check the sync source statistics

```bash
$ chronyc sourcestats -v
```

##### Force an immediate time sync

```sh
$ chronyc -a 'burst 4/4'
$ chronyc -a makestep
```

### Install Docker

Install the repository tooling:

```bash
$ yum install -y yum-utils
$ yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
```

List the available Docker versions:

```bash
$ yum list docker-ce --showduplicates
```

```sh
Installed Packages
docker-ce.x86_64  3:19.03.0-3.el7   @docker-ce-stable
Available Packages
docker-ce.x86_64  3:18.09.0-3.el7   docker-ce-stable
docker-ce.x86_64  3:18.09.1-3.el7   docker-ce-stable
docker-ce.x86_64  3:18.09.2-3.el7   docker-ce-stable
docker-ce.x86_64  3:18.09.3-3.el7   docker-ce-stable
docker-ce.x86_64  3:18.09.4-3.el7   docker-ce-stable
docker-ce.x86_64  3:18.09.5-3.el7   docker-ce-stable
docker-ce.x86_64  3:18.09.6-3.el7   docker-ce-stable
docker-ce.x86_64  3:18.09.7-3.el7   docker-ce-stable
docker-ce.x86_64  3:18.09.8-3.el7   docker-ce-stable
docker-ce.x86_64  3:18.09.9-3.el7   docker-ce-stable
docker-ce.x86_64  3:19.03.0-3.el7   docker-ce-stable
docker-ce.x86_64  3:19.03.1-3.el7   docker-ce-stable
docker-ce.x86_64  3:19.03.2-3.el7   docker-ce-stable
docker-ce.x86_64  3:19.03.3-3.el7   docker-ce-stable
docker-ce.x86_64  3:19.03.4-3.el7   docker-ce-stable
docker-ce.x86_64  3:19.03.5-3.el7   docker-ce-stable
docker-ce.x86_64  3:19.03.6-3.el7   docker-ce-stable
docker-ce.x86_64  3:19.03.7-3.el7   docker-ce-stable
docker-ce.x86_64  3:19.03.8-3.el7   docker-ce-stable
docker-ce.x86_64  3:19.03.9-3.el7   docker-ce-stable
docker-ce.x86_64  3:19.03.10-3.el7  docker-ce-stable
docker-ce.x86_64  3:19.03.11-3.el7  docker-ce-stable
docker-ce.x86_64  3:19.03.12-3.el7  docker-ce-stable
docker-ce.x86_64  3:19.03.13-3.el7  docker-ce-stable
docker-ce.x86_64  3:19.03.14-3.el7  docker-ce-stable
docker-ce.x86_64  3:19.03.15-3.el7  docker-ce-stable
docker-ce.x86_64  3:20.10.0-3.el7   docker-ce-stable
docker-ce.x86_64  3:20.10.1-3.el7   docker-ce-stable
docker-ce.x86_64  3:20.10.2-3.el7   docker-ce-stable
docker-ce.x86_64  3:20.10.3-3.el7   docker-ce-stable
docker-ce.x86_64  3:20.10.4-3.el7   docker-ce-stable
docker-ce.x86_64  3:20.10.5-3.el7   docker-ce-stable
docker-ce.x86_64  3:20.10.6-3.el7   docker-ce-stable
docker-ce.x86_64  3:20.10.7-3.el7   docker-ce-stable
```

Install Docker:

```bash
$ yum install -y docker-ce-19.03.0 docker-ce-cli-19.03.0 containerd.io
```

Change the cgroup driver to systemd:

```bash
$ cat /usr/lib/systemd/system/docker.service
...
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd   # append --exec-opt native.cgroupdriver=systemd
...
```

```sh
$ systemctl daemon-reload
$ docker info | grep Cgroup
 Cgroup Driver: systemd
```

Start Docker:

```bash
$ systemctl start docker
$ systemctl enable docker
```

Check the Docker version:

```bash
$ docker version
```

```sh
Client: Docker Engine - Community
 Version:           19.03.0
 API version:       1.40
 Go version:        go1.12.5
 Git commit:        aeac9490dc
 Built:             Wed Jul 17 18:15:40 2019
 OS/Arch:           linux/amd64
 Experimental:      false

Server: Docker Engine - Community
 Engine:
  Version:          19.03.0
  API version:      1.40 (minimum version 1.12)
  Go version:       go1.12.5
  Git commit:       aeac9490dc
  Built:            Wed Jul 17 18:14:16 2019
  OS/Arch:          linux/amd64
  Experimental:     false
 containerd:
  Version:          1.4.9
  GitCommit:        e25210fe30a0a703442421b0f60afac609f950a3
 runc:
  Version:          1.0.1
  GitCommit:        v1.0.1-0-g4144b63
 docker-init:
  Version:          0.18.0
  GitCommit:        fec3683
```

### Install Kubernetes

```bash
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
```

```bash
$ yum check-update
$ yum list kubelet --showduplicates
```

```sh
Installed Packages
kubelet.x86_64  1.20.0-0   @kubernetes
Available Packages
kubelet.x86_64  1.18.0-0   kubernetes
kubelet.x86_64  1.18.1-0   kubernetes
kubelet.x86_64  1.18.2-0   kubernetes
kubelet.x86_64  1.18.3-0   kubernetes
kubelet.x86_64  1.18.4-0   kubernetes
kubelet.x86_64  1.18.4-1   kubernetes
kubelet.x86_64  1.18.5-0   kubernetes
kubelet.x86_64  1.18.6-0   kubernetes
kubelet.x86_64  1.18.8-0   kubernetes
kubelet.x86_64  1.18.9-0   kubernetes
kubelet.x86_64  1.18.10-0  kubernetes
kubelet.x86_64  1.18.12-0  kubernetes
kubelet.x86_64  1.18.13-0  kubernetes
kubelet.x86_64  1.18.14-0  kubernetes
kubelet.x86_64  1.18.15-0  kubernetes
kubelet.x86_64  1.18.16-0  kubernetes
kubelet.x86_64  1.18.17-0  kubernetes
kubelet.x86_64  1.18.18-0  kubernetes
kubelet.x86_64  1.18.19-0  kubernetes
kubelet.x86_64  1.18.20-0  kubernetes
kubelet.x86_64  1.19.0-0   kubernetes
kubelet.x86_64  1.19.1-0   kubernetes
kubelet.x86_64  1.19.2-0   kubernetes
kubelet.x86_64  1.19.3-0   kubernetes
kubelet.x86_64  1.19.4-0   kubernetes
kubelet.x86_64  1.19.5-0   kubernetes
kubelet.x86_64  1.19.6-0   kubernetes
kubelet.x86_64  1.19.7-0   kubernetes
kubelet.x86_64  1.19.8-0   kubernetes
kubelet.x86_64  1.19.9-0   kubernetes
kubelet.x86_64  1.19.10-0  kubernetes
kubelet.x86_64  1.19.11-0  kubernetes
kubelet.x86_64  1.19.12-0  kubernetes
kubelet.x86_64  1.19.13-0  kubernetes
kubelet.x86_64  1.19.14-0  kubernetes
kubelet.x86_64  1.20.0-0   kubernetes
kubelet.x86_64  1.20.1-0   kubernetes
kubelet.x86_64  1.20.2-0   kubernetes
kubelet.x86_64  1.20.4-0   kubernetes
kubelet.x86_64  1.20.5-0   kubernetes
kubelet.x86_64  1.20.6-0   kubernetes
kubelet.x86_64  1.20.7-0   kubernetes
```

```sh
$ yum install -y kubelet-1.20.0 kubeadm-1.20.0 kubectl-1.20.0
```

```bash
$ systemctl enable kubelet
```

Initialize the cluster (on the k8s-master node):

```bash
$ kubeadm init \
  --kubernetes-version=v1.20.10 \
  --pod-network-cidr=10.244.0.0/16 \
  --image-repository registry.aliyuncs.com/google_containers \
  --apiserver-advertise-address 172.16.13.80 \
  --v=6
```

```sh
I0904 10:39:55.512878   18003 initconfiguration.go:104] detected and using CRI socket: /var/run/dockershim.sock
[init] Using Kubernetes version: v1.20.10
[preflight] Running pre-flight checks
I0904 10:39:55.609411   18003 checks.go:577] validating Kubernetes and
kubeadm version I0904 10:39:55.609436 18003 checks.go:166] validating if the firewall is enabled and active I0904 10:39:55.615977 18003 checks.go:201] validating availability of port 6443 I0904 10:39:55.616145 18003 checks.go:201] validating availability of port 10259 I0904 10:39:55.616175 18003 checks.go:201] validating availability of port 10257 I0904 10:39:55.616202 18003 checks.go:286] validating the existence of file /etc/kubernetes/manifests/kube-apiserver.yaml I0904 10:39:55.616218 18003 checks.go:286] validating the existence of file /etc/kubernetes/manifests/kube-controller-manager.yaml I0904 10:39:55.616225 18003 checks.go:286] validating the existence of file /etc/kubernetes/manifests/kube-scheduler.yaml I0904 10:39:55.616231 18003 checks.go:286] validating the existence of file /etc/kubernetes/manifests/etcd.yaml I0904 10:39:55.616243 18003 checks.go:432] validating if the connectivity type is via proxy or direct I0904 10:39:55.616278 18003 checks.go:471] validating http connectivity to first IP address in the CIDR I0904 10:39:55.616300 18003 checks.go:471] validating http connectivity to first IP address in the CIDR I0904 10:39:55.616311 18003 checks.go:102] validating the container runtime I0904 10:39:55.710933 18003 checks.go:128] validating if the "docker" service is enabled and active I0904 10:39:55.812851 18003 checks.go:335] validating the contents of file /proc/sys/net/bridge/bridge-nf-call-iptables I0904 10:39:55.812907 18003 checks.go:335] validating the contents of file /proc/sys/net/ipv4/ip_forward I0904 10:39:55.812930 18003 checks.go:649] validating whether swap is enabled or not I0904 10:39:55.812975 18003 checks.go:376] validating the presence of executable conntrack I0904 10:39:55.812999 18003 checks.go:376] validating the presence of executable ip I0904 10:39:55.813017 18003 checks.go:376] validating the presence of executable iptables I0904 10:39:55.813037 18003 checks.go:376] validating the presence of executable mount I0904 10:39:55.813051 18003 checks.go:376] validating the presence of executable nsenter I0904 10:39:55.813063 18003 checks.go:376] validating the presence of executable ebtables I0904 10:39:55.813074 18003 checks.go:376] validating the presence of executable ethtool I0904 10:39:55.813085 18003 checks.go:376] validating the presence of executable socat I0904 10:39:55.813099 18003 checks.go:376] validating the presence of executable tc I0904 10:39:55.813109 18003 checks.go:376] validating the presence of executable touch I0904 10:39:55.813123 18003 checks.go:520] running all checks I0904 10:39:55.915575 18003 checks.go:406] checking whether the given node name is reachable using net.LookupHost I0904 10:39:55.915792 18003 checks.go:618] validating kubelet version I0904 10:39:55.985451 18003 checks.go:128] validating if the "kubelet" service is enabled and active I0904 10:39:55.994819 18003 checks.go:201] validating availability of port 10250 I0904 10:39:55.994889 18003 checks.go:201] validating availability of port 2379 I0904 10:39:55.994913 18003 checks.go:201] validating availability of port 2380 I0904 10:39:55.994936 18003 checks.go:249] validating the existence and emptiness of directory /var/lib/etcd [preflight] Pulling images required for setting up a Kubernetes cluster [preflight] This might take a minute or two, depending on the speed of your internet connection [preflight] You can also perform this action in beforehand using 'kubeadm config images pull' I0904 10:39:56.043119 18003 checks.go:839] image exists: 
registry.aliyuncs.com/google_containers/kube-apiserver:v1.20.10 I0904 10:39:56.095120 18003 checks.go:839] image exists: registry.aliyuncs.com/google_containers/kube-controller-manager:v1.20.10 I0904 10:39:56.159069 18003 checks.go:839] image exists: registry.aliyuncs.com/google_containers/kube-scheduler:v1.20.10 I0904 10:39:56.212530 18003 checks.go:839] image exists: registry.aliyuncs.com/google_containers/kube-proxy:v1.20.10 I0904 10:39:56.265125 18003 checks.go:839] image exists: registry.aliyuncs.com/google_containers/pause:3.2 I0904 10:39:56.320004 18003 checks.go:839] image exists: registry.aliyuncs.com/google_containers/etcd:3.4.13-0 I0904 10:39:56.371299 18003 checks.go:839] image exists: registry.aliyuncs.com/google_containers/coredns:1.7.0 [certs] Using certificateDir folder "/etc/kubernetes/pki" I0904 10:39:56.371382 18003 certs.go:110] creating a new certificate authority for ca [certs] Generating "ca" certificate and key I0904 10:39:56.729903 18003 certs.go:474] validating certificate period for ca certificate [certs] Generating "apiserver" certificate and key [certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local shudoon101] and IPs [10.96.0.1 172.16.13.80] [certs] Generating "apiserver-kubelet-client" certificate and key I0904 10:39:57.334553 18003 certs.go:110] creating a new certificate authority for front-proxy-ca [certs] Generating "front-proxy-ca" certificate and key I0904 10:39:57.486574 18003 certs.go:474] validating certificate period for front-proxy-ca certificate [certs] Generating "front-proxy-client" certificate and key I0904 10:39:57.694560 18003 certs.go:110] creating a new certificate authority for etcd-ca [certs] Generating "etcd/ca" certificate and key I0904 10:39:57.821367 18003 certs.go:474] validating certificate period for etcd/ca certificate [certs] Generating "etcd/server" certificate and key [certs] etcd/server serving cert is signed for DNS names [localhost shudoon101] and IPs [172.16.13.80 127.0.0.1 ::1] [certs] Generating "etcd/peer" certificate and key [certs] etcd/peer serving cert is signed for DNS names [localhost shudoon101] and IPs [172.16.13.80 127.0.0.1 ::1] [certs] Generating "etcd/healthcheck-client" certificate and key [certs] Generating "apiserver-etcd-client" certificate and key I0904 10:39:58.861298 18003 certs.go:76] creating new public/private key files for signing service account users [certs] Generating "sa" key and public key [kubeconfig] Using kubeconfig folder "/etc/kubernetes" I0904 10:39:59.035771 18003 kubeconfig.go:101] creating kubeconfig file for admin.conf [kubeconfig] Writing "admin.conf" kubeconfig file I0904 10:39:59.330053 18003 kubeconfig.go:101] creating kubeconfig file for kubelet.conf [kubeconfig] Writing "kubelet.conf" kubeconfig file I0904 10:39:59.481405 18003 kubeconfig.go:101] creating kubeconfig file for controller-manager.conf [kubeconfig] Writing "controller-manager.conf" kubeconfig file I0904 10:39:59.645125 18003 kubeconfig.go:101] creating kubeconfig file for scheduler.conf [kubeconfig] Writing "scheduler.conf" kubeconfig file I0904 10:40:00.334922 18003 kubelet.go:63] Stopping the kubelet [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" [kubelet-start] Starting the kubelet [control-plane] Using manifest folder "/etc/kubernetes/manifests" [control-plane] Creating static Pod 
manifest for "kube-apiserver" I0904 10:40:00.420357 18003 manifests.go:96] [control-plane] getting StaticPodSpecs I0904 10:40:00.420779 18003 certs.go:474] validating certificate period for CA certificate I0904 10:40:00.420895 18003 manifests.go:109] [control-plane] adding volume "ca-certs" for component "kube-apiserver" I0904 10:40:00.420908 18003 manifests.go:109] [control-plane] adding volume "etc-pki" for component "kube-apiserver" I0904 10:40:00.420916 18003 manifests.go:109] [control-plane] adding volume "k8s-certs" for component "kube-apiserver" I0904 10:40:00.428795 18003 manifests.go:126] [control-plane] wrote static Pod manifest for component "kube-apiserver" to "/etc/kubernetes/manifests/kube-apiserver.yaml" [control-plane] Creating static Pod manifest for "kube-controller-manager" I0904 10:40:00.428822 18003 manifests.go:96] [control-plane] getting StaticPodSpecs I0904 10:40:00.429308 18003 manifests.go:109] [control-plane] adding volume "ca-certs" for component "kube-controller-manager" I0904 10:40:00.429323 18003 manifests.go:109] [control-plane] adding volume "etc-pki" for component "kube-controller-manager" I0904 10:40:00.429331 18003 manifests.go:109] [control-plane] adding volume "flexvolume-dir" for component "kube-controller-manager" I0904 10:40:00.429337 18003 manifests.go:109] [control-plane] adding volume "k8s-certs" for component "kube-controller-manager" I0904 10:40:00.429341 18003 manifests.go:109] [control-plane] adding volume "kubeconfig" for component "kube-controller-manager" I0904 10:40:00.431212 18003 manifests.go:126] [control-plane] wrote static Pod manifest for component "kube-controller-manager" to "/etc/kubernetes/manifests/kube-controller-manager.yaml" [control-plane] Creating static Pod manifest for "kube-scheduler" I0904 10:40:00.431233 18003 manifests.go:96] [control-plane] getting StaticPodSpecs I0904 10:40:00.431917 18003 manifests.go:109] [control-plane] adding volume "kubeconfig" for component "kube-scheduler" I0904 10:40:00.432442 18003 manifests.go:126] [control-plane] wrote static Pod manifest for component "kube-scheduler" to "/etc/kubernetes/manifests/kube-scheduler.yaml" [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" I0904 10:40:00.433542 18003 local.go:74] [etcd] wrote Static Pod manifest for a local etcd member to "/etc/kubernetes/manifests/etcd.yaml" I0904 10:40:00.433568 18003 waitcontrolplane.go:87] [wait-control-plane] Waiting for the API server to be healthy I0904 10:40:00.435037 18003 loader.go:379] Config loaded from file: /etc/kubernetes/admin.conf [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". 
This can take up to 4m0s I0904 10:40:00.436681 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s in 0 milliseconds I0904 10:40:00.938722 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s in 1 milliseconds I0904 10:40:01.437273 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s in 0 milliseconds I0904 10:40:01.937162 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s in 0 milliseconds I0904 10:40:02.437215 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s in 0 milliseconds I0904 10:40:02.937090 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s in 0 milliseconds I0904 10:40:03.437168 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s in 0 milliseconds I0904 10:40:03.937151 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s in 0 milliseconds I0904 10:40:04.437369 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s in 0 milliseconds I0904 10:40:04.937187 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s in 0 milliseconds I0904 10:40:05.437078 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s in 0 milliseconds I0904 10:40:05.937120 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s in 0 milliseconds I0904 10:40:06.437218 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s in 0 milliseconds I0904 10:40:06.937134 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s in 0 milliseconds I0904 10:40:07.437199 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s in 0 milliseconds I0904 10:40:07.937158 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s in 0 milliseconds I0904 10:40:08.437692 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s in 0 milliseconds I0904 10:40:12.453695 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s 500 Internal Server Error in 3516 milliseconds I0904 10:40:12.938805 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s 500 Internal Server Error in 1 milliseconds I0904 10:40:13.438240 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s 500 Internal Server Error in 1 milliseconds I0904 10:40:13.938725 18003 round_trippers.go:445] GET https://172.16.13.80:6443/healthz?timeout=10s 200 OK in 1 milliseconds [apiclient] All control plane components are healthy after 13.502539 seconds I0904 10:40:13.938847 18003 uploadconfig.go:108] [upload-config] Uploading the kubeadm ClusterConfiguration to a ConfigMap [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace I0904 10:40:13.943583 18003 round_trippers.go:445] POST https://172.16.13.80:6443/api/v1/namespaces/kube-system/configmaps?timeout=10s 201 Created in 2 milliseconds I0904 10:40:13.946914 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles?timeout=10s 201 Created in 2 milliseconds I0904 10:40:13.949757 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings?timeout=10s 201 Created in 2 milliseconds I0904 10:40:13.950284 18003 uploadconfig.go:122] [upload-config] Uploading the kubelet 
component config to a ConfigMap [kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster I0904 10:40:13.952552 18003 round_trippers.go:445] POST https://172.16.13.80:6443/api/v1/namespaces/kube-system/configmaps?timeout=10s 201 Created in 1 milliseconds I0904 10:40:13.954630 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles?timeout=10s 201 Created in 1 milliseconds I0904 10:40:13.956733 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings?timeout=10s 201 Created in 1 milliseconds I0904 10:40:13.956848 18003 uploadconfig.go:127] [upload-config] Preserving the CRISocket information for the control-plane node I0904 10:40:13.956861 18003 patchnode.go:30] [patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "shudoon101" as an annotation I0904 10:40:14.460485 18003 round_trippers.go:445] GET https://172.16.13.80:6443/api/v1/nodes/shudoon101?timeout=10s 200 OK in 3 milliseconds I0904 10:40:14.467558 18003 round_trippers.go:445] PATCH https://172.16.13.80:6443/api/v1/nodes/shudoon101?timeout=10s 200 OK in 4 milliseconds [upload-certs] Skipping phase. Please see --upload-certs [mark-control-plane] Marking the node shudoon101 as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)" [mark-control-plane] Marking the node shudoon101 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] I0904 10:40:14.969784 18003 round_trippers.go:445] GET https://172.16.13.80:6443/api/v1/nodes/shudoon101?timeout=10s 200 OK in 1 milliseconds I0904 10:40:14.976503 18003 round_trippers.go:445] PATCH https://172.16.13.80:6443/api/v1/nodes/shudoon101?timeout=10s 200 OK in 4 milliseconds [bootstrap-token] Using token: vqlfov.pkv1r7fsucnvijix [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles I0904 10:40:14.979889 18003 round_trippers.go:445] GET https://172.16.13.80:6443/api/v1/namespaces/kube-system/secrets/bootstrap-token-vqlfov?timeout=10s 404 Not Found in 2 milliseconds I0904 10:40:14.983266 18003 round_trippers.go:445] POST https://172.16.13.80:6443/api/v1/namespaces/kube-system/secrets?timeout=10s 201 Created in 2 milliseconds [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes I0904 10:40:14.986413 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/rbac.authorization.k8s.io/v1/clusterroles?timeout=10s 201 Created in 2 milliseconds I0904 10:40:14.989537 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 2 milliseconds [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials I0904 10:40:14.991819 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 1 milliseconds [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token I0904 10:40:14.993446 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 1 milliseconds [bootstrap-token] configured RBAC rules to 
allow certificate rotation for all node client certificates in the cluster I0904 10:40:14.995203 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 1 milliseconds [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace I0904 10:40:14.995311 18003 clusterinfo.go:45] [bootstrap-token] loading admin kubeconfig I0904 10:40:14.995908 18003 loader.go:379] Config loaded from file: /etc/kubernetes/admin.conf I0904 10:40:14.995924 18003 clusterinfo.go:53] [bootstrap-token] copying the cluster from admin.conf to the bootstrap kubeconfig I0904 10:40:14.996407 18003 clusterinfo.go:65] [bootstrap-token] creating/updating ConfigMap in kube-public namespace I0904 10:40:14.999040 18003 round_trippers.go:445] POST https://172.16.13.80:6443/api/v1/namespaces/kube-public/configmaps?timeout=10s 201 Created in 2 milliseconds I0904 10:40:14.999190 18003 clusterinfo.go:79] creating the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace I0904 10:40:15.001641 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles?timeout=10s 201 Created in 2 milliseconds I0904 10:40:15.003854 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings?timeout=10s 201 Created in 1 milliseconds I0904 10:40:15.004058 18003 kubeletfinalize.go:88] [kubelet-finalize] Assuming that kubelet client certificate rotation is enabled: found "/var/lib/kubelet/pki/kubelet-client-current.pem" [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key I0904 10:40:15.004638 18003 loader.go:379] Config loaded from file: /etc/kubernetes/kubelet.conf I0904 10:40:15.005181 18003 kubeletfinalize.go:132] [kubelet-finalize] Restarting the kubelet to enable client certificate rotation I0904 10:40:15.086465 18003 round_trippers.go:445] GET https://172.16.13.80:6443/apis/apps/v1/namespaces/kube-system/deployments?labelSelector=k8s-app%3Dkube-dns 200 OK in 5 milliseconds I0904 10:40:15.092852 18003 round_trippers.go:445] GET https://172.16.13.80:6443/api/v1/namespaces/kube-system/configmaps/kube-dns?timeout=10s 404 Not Found in 1 milliseconds I0904 10:40:15.094538 18003 round_trippers.go:445] GET https://172.16.13.80:6443/api/v1/namespaces/kube-system/configmaps/coredns?timeout=10s 404 Not Found in 1 milliseconds I0904 10:40:15.097004 18003 round_trippers.go:445] POST https://172.16.13.80:6443/api/v1/namespaces/kube-system/configmaps?timeout=10s 201 Created in 2 milliseconds I0904 10:40:15.099782 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/rbac.authorization.k8s.io/v1/clusterroles?timeout=10s 201 Created in 2 milliseconds I0904 10:40:15.104903 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 4 milliseconds I0904 10:40:15.109540 18003 round_trippers.go:445] POST https://172.16.13.80:6443/api/v1/namespaces/kube-system/serviceaccounts?timeout=10s 201 Created in 3 milliseconds I0904 10:40:15.132165 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/apps/v1/namespaces/kube-system/deployments?timeout=10s 201 Created in 11 milliseconds I0904 10:40:15.143679 18003 round_trippers.go:445] POST https://172.16.13.80:6443/api/v1/namespaces/kube-system/services?timeout=10s 201 Created in 9 milliseconds 
[addons] Applied essential addon: CoreDNS I0904 10:40:15.170722 18003 round_trippers.go:445] POST https://172.16.13.80:6443/api/v1/namespaces/kube-system/serviceaccounts?timeout=10s 201 Created in 2 milliseconds I0904 10:40:15.368235 18003 request.go:591] Throttling request took 195.771994ms, request: POST:https://172.16.13.80:6443/api/v1/namespaces/kube-system/configmaps?timeout=10s I0904 10:40:15.371885 18003 round_trippers.go:445] POST https://172.16.13.80:6443/api/v1/namespaces/kube-system/configmaps?timeout=10s 201 Created in 3 milliseconds I0904 10:40:15.389809 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/apps/v1/namespaces/kube-system/daemonsets?timeout=10s 201 Created in 10 milliseconds I0904 10:40:15.392633 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s 201 Created in 2 milliseconds I0904 10:40:15.395548 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles?timeout=10s 201 Created in 2 milliseconds I0904 10:40:15.398242 18003 round_trippers.go:445] POST https://172.16.13.80:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings?timeout=10s 201 Created in 2 milliseconds [addons] Applied essential addon: kube-proxy I0904 10:40:15.399040 18003 loader.go:379] Config loaded from file: /etc/kubernetes/admin.conf I0904 10:40:15.399615 18003 loader.go:379] Config loaded from file: /etc/kubernetes/admin.conf Your Kubernetes control-plane has initialized successfully! To start using your cluster, you need to run the following as a regular user: mkdir -p $HOME/.kube sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config Alternatively, if you are the root user, you can run: export KUBECONFIG=/etc/kubernetes/admin.conf You should now deploy a pod network to the cluster. 
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.16.13.80:6443 --token vqlfov.pkv1r7fsucnvijix \
    --discovery-token-ca-cert-hash sha256:93f10f90ee14d64eaa3f5d6f7086673a7264ac9d00674853d39bf34fce4a5622
```

View the nodes:

```bash
$ kubectl get nodes
NAME         STATUS     ROLES                  AGE   VERSION
shudoon101   NotReady   control-plane,master   12m   v1.20.0
```
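The same initialization can also be expressed as a kubeadm configuration file, which is easier to keep in version control than a long command line. A sketch that mirrors the flags used above, using the kubeadm v1beta2 API (the file name is arbitrary):

```yaml
# kubeadm-config.yaml -- equivalent of the kubeadm init flags used above (sketch).
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.13.80
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.20.10
imageRepository: registry.aliyuncs.com/google_containers
networking:
  podSubnet: 10.244.0.0/16
```

It would then be applied with `kubeadm init --config kubeadm-config.yaml --v=6`.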
### Install Flannel (all nodes)

#### Flannel communication diagram

![enter image description here](https://files.ynotes.cn/flannel.png "enter image title here")

Download kube-flannel.yml:

```bash
$ wget https://github.com/coreos/flannel/raw/master/Documentation/kube-flannel.yml
$ cat kube-flannel.yml
```

```yaml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
    - min: 0
      max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['psp.flannel.unprivileged']
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
  - kind: ServiceAccount
    name: flannel
    namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
        - operator: Exists
          effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
        - name: install-cni
          image: quay.io/coreos/flannel:v0.14.0
          command:
            - cp
          args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
          volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      containers:
        - name: kube-flannel
          image: quay.io/coreos/flannel:v0.14.0
          command:
            - /opt/bin/flanneld
          args:
            - --ip-masq
            - --kube-subnet-mgr
          resources:
            requests:
              cpu: "100m"
              memory: "50Mi"
            limits:
              cpu: "100m"
              memory: "50Mi"
          securityContext:
            privileged: false
            capabilities:
              add: ["NET_ADMIN", "NET_RAW"]
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
```

Replace the image registry:

```bash
$ sed -i 's#quay.io/coreos/flannel#quay.mirrors.ustc.edu.cn/coreos/flannel#' kube-flannel.yml
```

```bash
$ kubectl apply -f kube-flannel.yml
```

#### Join the other nodes

##### Join the other nodes

```bash
$ kubeadm join 172.16.13.80:6443 --token vqlfov.pkv1r7fsucnvijix \
    --discovery-token-ca-cert-hash sha256:93f10f90ee14d64eaa3f5d6f7086673a7264ac9d00674853d39bf34fce4a5622
```

##### View the cluster node information

```sh
$ kubectl get node -o wide
NAME         STATUS     ROLES                  AGE     VERSION   INTERNAL-IP    EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION                CONTAINER-RUNTIME
shudoon101   Ready      control-plane,master   66m     v1.20.0   172.16.13.80   <none>        CentOS Linux 7 (Core)   3.10.0-1160.41.1.el7.x86_64   docker://19.3.0
shudoon102   Ready      <none>                 7m51s   v1.20.0   172.16.13.81   <none>        CentOS Linux 7 (Core)   3.10.0-1062.el7.x86_64        docker://19.3.0
shudoon103   NotReady   <none>                 4m33s   v1.20.0   172.16.13.82   <none>        CentOS Linux 7 (Core)   3.10.0-1160.41.1.el7.x86_64   docker://19.3.0
```

#### FAQ

##### 1. What is the difference between flannel's three backend modes?

```sh
udp mode: flanneld encapsulates packets in UDP in user space, so every packet crosses between user space and kernel space; worst performance, deprecated.
vxlan mode: a VTEP wraps packets with a VXLAN header inside UDP to build a virtual overlay network; traffic goes through the tunnel; good performance, suitable for around 100 nodes.
host-gw mode: each node installs routes to the other nodes' pod subnets; best performance, suitable for around 130 nodes.
```
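To see which backend a running flannel deployment is actually using, the rendered config and the VXLAN device can be inspected; a quick sketch:

```sh
# The net-conf.json in the ConfigMap shows the configured backend type.
$ kubectl -n kube-system get cm kube-flannel-cfg -o jsonpath='{.data.net-conf\.json}'
# The flannel.1 interface only exists when the vxlan backend is in use.
$ ip -d link show flannel.1
```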
##### 2. How to switch flannel from VxLAN to Direct Routing mode?

`With Direct Routing enabled, nodes in the same subnet use host-gw-style direct routes, while nodes in different subnets fall back to vxlan.`

```sh
$ vim kube-flannel.yml
```

```yaml
......
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",    # default pod network
      "Backend": {
        "Type": "vxlan",
        "Directrouting": true        # add this line
      }
    }
......
```

```sh
$ kubectl apply -f kube-flannel.yml
```

```sh
clusterrole.rbac.authorization.k8s.io/flannel configured
clusterrolebinding.rbac.authorization.k8s.io/flannel configured
serviceaccount/flannel unchanged
configmap/kube-flannel-cfg configured
daemonset.extensions/kube-flannel-ds-amd64 created
daemonset.extensions/kube-flannel-ds-arm64 created
daemonset.extensions/kube-flannel-ds-arm created
daemonset.extensions/kube-flannel-ds-ppc64le created
daemonset.extensions/kube-flannel-ds-s390x created
```

```sh
$ route -n
```

```sh
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         172.16.13.254   0.0.0.0         UG    100    0        0 ens192
10.244.0.0      0.0.0.0         255.255.255.0   U     0      0        0 cni0
10.244.1.0      172.16.13.81    255.255.255.0   UG    0      0        0 ens192      # same subnet: host-gw, direct route
10.244.2.0      172.16.13.82    255.255.255.0   UG    0      0        0 ens192      # same subnet: host-gw, direct route
10.244.4.0      10.244.4.0      255.255.255.0   UG    0      0        0 flannel.1   # different subnet: vxlan
172.16.13.0     0.0.0.0         255.255.255.0   U     100    0        0 ens192
172.17.0.0      0.0.0.0         255.255.0.0     U     0      0        0 docker0
```

##### 3. How to remove the flannel network configuration?

Delete flannel on the master node:

```sh
$ kubectl delete -f kube-flannel.yml
```

Clean up the files flannel leaves behind on each node:

```sh
$ ifconfig flannel.1 down
$ ip link delete flannel.1
$ rm -rf /var/lib/cni/
$ rm -f /etc/cni/net.d/*
$ systemctl restart kubelet
```

##### 4. How does a node leave the cluster?

On the k8s master node (a drain sketch follows at the end of this section):

```sh
$ kubectl delete node node-01
```

On the node that is leaving the cluster:

```sh
$ systemctl stop kubelet
$ rm -rf /etc/kubernetes/*
$ kubeadm reset
$ iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
```

##### 5. The join token has expired; how to create a new one?

`Method 1`:

```sh
$ kubeadm token create --ttl 0 --print-join-command
```

`Method 2`:

```sh
$ kubeadm token create    # generate a new token
```

```sh
$ kubeadm token list | awk -F" " '{print $1}' | tail -n 1    # list the newest token
```

```sh
hucls9.zea52rjxsmt0ze0b
```

```sh
$ openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^ .* //'    # get the CA cert hash
```

```sh
(stdin)= 93f10f90ee14d64eaa3f5d6f7086673a7264ac9d00674853d39bf34fce4a5622
```

```sh
$ kubeadm join 172.16.13.80:6443 --token hucls9.zea52rjxsmt0ze0b --discovery-token-ca-cert-hash sha256:93f10f90ee14d64eaa3f5d6f7086673a7264ac9d00674853d39bf34fce4a5622    # join the cluster from the new node
```
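Related to question 4: deleting a node straight away kills its pods without giving them a chance to reschedule; draining it first is gentler. A sketch, reusing the node name above (`--delete-local-data` is the flag name in the 1.20 kubectl):

```sh
# Evict workloads before removing the node from the cluster.
$ kubectl drain node-01 --ignore-daemonsets --delete-local-data
$ kubectl delete node node-01
```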

兜兜    2021-09-03 11:26:43    2021-10-19 14:36:12   

k8s

兜兜    2021-09-03 11:26:15    2022-01-25 09:20:47   

k8s nacos ceph
```sh
kubernetes: 1.18.20
nacos: 2.0.1
storage: ceph
```

Download the nacos deployment files:

```sh
git clone https://github.com/nacos-group/nacos-k8s.git
```

#### Deploy the database

Deploy the PVC:

```sh
$ cat > pvc.yaml <<EOF
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-dynamic-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: rook-ceph-block # the Ceph cluster was deployed with rook-ceph; its default block StorageClass is rook-ceph-block
  resources:
    requests:
      storage: 5Gi
EOF
```

```sh
$ kubectl apply -f pvc.yaml
```

```sh
$ kubectl apply -f mysql-ceph.yaml
```

```sh
$ kubectl get pods
mysql-8gcsd   1/1     Running   0          6m30s
```

#### Deploy NACOS

```sh
$ vim nacos-pvc-ceph.yaml
...
      serviceAccountName: cephfs-provisioner   # this account was never created, so remove this line
...
  volumeClaimTemplates:
    - metadata:
        name: plguindir
      spec:
        accessModes: [ "ReadWriteMany" ]
        storageClassName: "rook-cephfs"   # change to your own StorageClass name
        resources:
          requests:
            storage: 5Gi
    - metadata:
        name: datadir
      spec:
        accessModes: [ "ReadWriteMany" ]
        storageClassName: "rook-cephfs"   # change to your own StorageClass name
        resources:
          requests:
            storage: 5Gi
    - metadata:
        name: logdir
      spec:
        accessModes: [ "ReadWriteMany" ]
        storageClassName: "rook-cephfs"   # change to your own StorageClass name
        resources:
          requests:
            storage: 5Gi
  selector:
    matchLabels:
      app: nacos
```

```sh
$ kubectl apply -f nacos-pvc-ceph.yaml
```

Expose the nacos service:

```sh
$ cat >nacos-ingress.yaml <<EOF
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/ingress.class: nginx
  name: nacos-ingress
spec:
  rules:
  - host: tnacos.example.com
    http:
      paths:
      - path: /
        backend:
          serviceName: nacos-headless
          servicePort: 8848
  # This section is only required if TLS is to be enabled for the Ingress
  tls:
  - hosts:
    - tnacos.example.com
    secretName: shudoon-com-tls
EOF
```

```sh
$ kubectl apply -f nacos-ingress.yaml
```

#### Configure DNS

Point tnacos.example.com at the load balancer in front of the ingress controller.

Reference: https://nacos.io/zh-cn/docs/use-nacos-with-kubernetes.htm
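Before relying on the Ingress, it can be worth checking that the nacos pods are up and that the headless service resolves inside the cluster; a quick sketch (the throwaway busybox pod name is arbitrary):

```sh
# The label matches the selector shown in nacos-pvc-ceph.yaml above.
$ kubectl get pods -l app=nacos
# Resolve the headless service from inside the cluster.
$ kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup nacos-headless
```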

兜兜    2021-09-03 11:25:50    2022-01-25 09:20:35   

k8s
#### 1. Preparation

```sh
1. Change how the Spring Boot services start and pin server.port to a fixed port  # so that K8S can probe the port for health checks
2. Add the monitoring dependencies to pom.xml and adjust the monitoring config    # same reason: K8S health checks on the port
3. Add a Dockerfile and the other build files
4. Point the registry address in pom.xml at the in-cluster Nacos
5. Install helm
6. Create the helm charts
```

##### 1.1 Change how the Spring Boot service starts

```java
@EnableCustomConfig
@EnableRyFeignClients
@SpringCloudApplication
public class ShudoonHomeworkServerApplication {
    public static void main(String[] args) {
        //new StartCommand(args, false, true);   // StartCommand disabled
        SpringApplication.run(ShudoonHomeworkServerApplication.class, args);
    }
}
```

##### 1.2 Pin server.port to a fixed port

```yaml
server:
  port: 18009
```

##### 2.1 Add the monitoring dependencies to pom.xml

```xml
...
<!-- SpringBoot Actuator -->
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-actuator</artifactId>
</dependency>
<!-- metrics registry -->
<dependency>
    <groupId>io.micrometer</groupId>
    <artifactId>micrometer-registry-prometheus</artifactId>
    <version>1.5.1</version>
</dependency>
...
```

##### 2.2 Adjust the monitoring configuration

`Expose the monitoring endpoints`

```yaml
# monitoring
management:
  endpoints:
    web:
      exposure:
        include: "*"
  metrics:
    tags:
      application: ${spring.application.name}
```

##### 3.1 Add the Dockerfile and other files

`The Dockerfile lives in the code repository so everything is managed in one place.`

Dockerfile (`Spring Boot microservice`):

```sh
FROM openjdk:8
ARG server_port
ENV SERVER_PORT=$server_port
ADD target/*.jar /app.jar
EXPOSE ${SERVER_PORT}
ENTRYPOINT java -Dserver.port=${SERVER_PORT} -jar /app.jar
```

Dockerfile (`front-end web service`):

```sh
FROM harbor.example.com/other/nginx:1.20.0
MAINTAINER sheyinsong
COPY dist/ /usr/share/nginx/html/
COPY default.conf /etc/nginx/conf.d/default.conf
```

Add default.conf (`front-end web service`):

default.conf

```sh
server {
    listen 80;
    root /usr/share/nginx/html;
    try_files $uri $uri/ /index.html;
    index index.html;
}
```
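To build the microservice image from this Dockerfile, the port is passed in as a build argument; a hedged sketch where the image name and tag are placeholders, not the project's real names:

```sh
# server_port must match the fixed server.port configured above (18009 here).
$ docker build --build-arg server_port=18009 \
    -t harbor.example.com/shudoon/shudoon-homework-server:latest .
$ docker push harbor.example.com/shudoon/shudoon-homework-server:latest
```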
##### 4.1 Point the registry address in pom.xml at the in-cluster Nacos

`Nacos runs inside the K8S cluster, so the microservices use its in-cluster service address nacos-headless:8848`

```xml
...
<config.server-addr>nacos-headless:8848</config.server-addr>
<nacos.namespace>f1e1891f-d11e-4180-883e-d709f02c4040</nacos.namespace>
<!-- Nacos service discovery address -->
<discovery.server-addr>nacos-headless:8848</discovery.server-addr>
<dubbo.nacos.address>nacos://nacos-headless:8848</dubbo.nacos.address>
...
```

##### 5.1 Install and configure helm

5.1.1 Install and configure helm

```sh
$ wget https://get.helm.sh/helm-v3.5.4-linux-amd64.tar.gz
$ tar -zxvf helm-v3.5.4-linux-amd64.tar.gz
$ mv linux-amd64/helm /usr/local/bin/helm
$ helm version
version.BuildInfo{Version:"v3.5.4", GitCommit:"1b5edb69df3d3a08df77c9902dc17af864ff05d1", GitTreeState:"clean", GoVersion:"go1.15.11"}
$ helm plugin install https://github.com/chartmuseum/helm-push
$ helm repo add shudoon-local --username=shudoon --password=xxxxx https://harbor.example.com/chartrepo/shudoon/   # add the private repository shudoon-local
$ helm repo list
```

##### 6.1 Create the helm charts

6.1.1 Create the microservice chart

`The microservice chart adds a filebeat sidecar container and a skywalking init container.`

```sh
$ helm create springboot-demo
```

Configure deployment.yml:

```yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "springboot-demo.fullname" . }}
  labels:
    {{- include "springboot-demo.labels" . | nindent 4 }}
spec:
  {{- if not .Values.autoscaling.enabled }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "springboot-demo.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "springboot-demo.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "springboot-demo.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      initContainers:
        - name: {{ .Values.image3.name }}   # skywalking agent init container
          image: "{{ .Values.image3.repository }}:{{ .Values.image3.tag }}"
          imagePullPolicy: IfNotPresent
          command: ["sh"]
          args:
            [
              "-c",
              "mkdir -p /skywalking/agent && cp -r /usr/skywalking/agent/* /skywalking/agent",
            ]
          volumeMounts:
            - mountPath: /skywalking/agent
              name: sw-agent
      containers:
        - name: {{ .Values.image2.name }}   # filebeat sidecar container
          image: "{{ .Values.image2.repository }}:{{ .Values.image2.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          command:
            - "/bin/sh"
          args:
            - "-c"
            - "filebeat -c /etc/filebeat/filebeat.yml"
          volumeMounts:
            - name: app-logs
              mountPath: /log
            - name: filebeat-{{.Release.Name}}-config
              mountPath: /etc/filebeat/
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          env:
            - name: JAVA_TOOL_OPTIONS
              value: -javaagent:/usr/skywalking/agent/skywalking-agent.jar
            - name: SW_AGENT_NAME
              value: {{.Release.Name}}
            - name: SW_AGENT_COLLECTOR_BACKEND_SERVICES
              value: skywalking-oap:11800   # the skywalking service must already be installed in K8S
          volumeMounts:
            - name: app-logs
              mountPath: /serverlog
            - name: sw-agent
              mountPath: /usr/skywalking/agent
          ports:
            - name: http
              containerPort: {{ .Values.service.targetPort | default 80 }}
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /actuator/health/liveness
              port: {{ .Values.service.targetPort | default 80 }}
            initialDelaySeconds: 20
            failureThreshold: 15
            timeoutSeconds: 10
            periodSeconds: 5
          readinessProbe:
            httpGet:
              path: /actuator/health/readiness
              port: {{ .Values.service.targetPort | default 80 }}
            initialDelaySeconds: 20
            failureThreshold: 15
            timeoutSeconds: 10
            periodSeconds: 5
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      volumes:
        - name: app-logs
          emptyDir: {}
        - name: sw-agent
          emptyDir: {}
        - name: filebeat-{{.Release.Name}}-config
          configMap:
            name: filebeat-{{.Release.Name}}-config
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
```
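Before packaging, the template can be checked locally so indentation or templating mistakes surface early; a quick sketch with an arbitrary release name:

```sh
$ helm lint springboot-demo
$ helm template test-release springboot-demo | less
```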
| nindent 8 }} {{- end }} ``` configmap.yaml ```yml apiVersion: v1 kind: ConfigMap metadata: name: filebeat-{{.Release.Name}}-config data: filebeat.yml: | filebeat.inputs: - type: log enabled: true paths: - "/log/*/log_info.log" #日志路径 - "/log/*/*/log_info.log" - "/log/*/*/*/log_info.log" tags: ["{{ .Release.Name }}"] multiline.pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}' multiline.negate: true multiline.match: after exclude_lines: ['.*com.alibaba.nacos.naming.client.listener.*'] output.elasticsearch: #配置日志输出到elasticsearch hosts: ["xxxxx.elasticsearch.com"] username: "elastic" password: "xxxxx" index: "{{ .Release.Name }}-%{+yyyy.MM.dd}" setup.ilm.enabled: false setup.template.name: "{{ .Release.Name }}" setup.template.pattern: "{{ .Release.Name }}-*" ``` value.yml ```yml env: #JAVA优化参数,限制pod的内存使用 JAVA_OPTS: -XX:MaxRAMFraction=2 replicaCount: 2 image: #默认镜像,部署的时候指定镜像替换默认镜像 repository: nginx pullPolicy: IfNotPresent imagePullPolicy: Always tag: "" image2: #filebeat镜像 name: filebeat repository: harbor.example.com/shudoon/filebeat pullPolicy: IfNotPresent imagePullPolicy: Always tag: "7.4.2" image3: #skywalking镜像 name: skywalking-agent-sidecar repository: harbor.example.com/shudoon/skywalking-agent-sidecar pullPolicy: IfNotPresent imagePullPolicy: Always tag: "8.7.0-fixbug-1" imagePullSecrets: [] nameOverride: "" fullnameOverride: "" serviceAccount: create: true annotations: {} name: "" podAnnotations: {} podSecurityContext: {} securityContext: {} service: #服务配置 type: ClusterIP port: 80 targetPort: 80 ingress: #ingress关闭 annotations: kubernetes.io/ingress.class: "nginx" ingress.kubernetes.io/ssl-redirect: "false" nginx.ingress.kubernetes.io/proxy-body-size: 100m nginx.ingress.kubernetes.io/proxy-connect-timeout: "600" nginx.ingress.kubernetes.io/proxy-read-timeout: "600" nginx.ingress.kubernetes.io/proxy-send-timeout: "600" enabled: false host: chart-example.local tls: [] resources: #资源配置 limits: cpu: 1000m memory: 2048Mi requests: cpu: 100m memory: 256Mi autoscaling: enabled: false minReplicas: 1 maxReplicas: 100 targetCPUUtilizationPercentage: 80 nodeSelector: {} tolerations: [] affinity: {} ``` Chart.yaml ```yaml apiVersion: v2 appVersion: 1.16.0 description: A Helm chart for Kubernetes name: springboot-demo type: application version: 0.2.24-filebeat-skywalking-javaheap ``` 打包&推送chart到仓库 ```sh $ helm package springboot-demo $ helm push /root/springboot-demo-filebeat/0.2.24-filebeat-skywalking-javaheap.tgz shudoon-local #指定仓库的名字 $ helm repo update ``` 6.1.2 创建helm 前端web服务chart ```sh $ helm create web-demo ``` deployment.yaml ```yaml apiVersion: apps/v1 kind: Deployment metadata: name: {{ include "springboot-demo.fullname" . }} labels: {{- include "springboot-demo.labels" . | nindent 4 }} spec: {{- if not .Values.autoscaling.enabled }} replicas: {{ .Values.replicaCount }} {{- end }} selector: matchLabels: {{- include "springboot-demo.selectorLabels" . | nindent 6 }} template: metadata: {{- with .Values.podAnnotations }} annotations: {{- toYaml . | nindent 8 }} {{- end }} labels: {{- include "springboot-demo.selectorLabels" . | nindent 8 }} spec: {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} serviceAccountName: {{ include "springboot-demo.serviceAccountName" . 
}} securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} containers: - name: {{ .Chart.Name }} securityContext: {{- toYaml .Values.securityContext | nindent 12 }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - name: http containerPort: {{ .Values.containerPort | default 80 }} protocol: TCP livenessProbe: httpGet: host: path: / port: {{ .Values.containerPort | default 80 }} initialDelaySeconds: 5 failureThreshold: 10 timeoutSeconds: 10 periodSeconds: 5 resources: {{- toYaml .Values.resources | nindent 12 }} {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.affinity }} affinity: {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} ``` Chart.yaml ```yaml apiVersion: v2 name: web-demo description: A Helm chart for Kubernetes type: application version: 0.2.0 appVersion: "1.16.0" ``` 打包&推送chart到仓库 ```sh $ helm package web-demo $ helm push /root/web-demo-0.2.0.tgz shudoon-local #指定仓库的名字 $ helm repo update ```
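With both charts packaged and pushed, it can be worth installing the microservice chart by hand once before wiring it into CI, to confirm that the templates render and that the actuator probes pass. A minimal sketch (the release name `demo-service`, the image repository/tag and the port below are placeholders, not values from a real deployment):

```sh
# Refresh the local index of the private repo and confirm the chart version is visible
$ helm repo update
$ helm search repo shudoon-local/springboot-demo --versions

# Render first without installing, to inspect the Deployment/ConfigMap that would be created
$ helm upgrade -i demo-service shudoon-local/springboot-demo \
    --version 0.2.24-filebeat-skywalking-javaheap \
    --set image.repository=harbor.example.com/shudoon/demo-service \
    --set image.tag=master-abc1234-1 \
    --set service.targetPort=18009 \
    --set replicaCount=1 \
    --dry-run --debug

# Install for real (drop --dry-run) and watch the pods come up
$ helm upgrade -i demo-service shudoon-local/springboot-demo \
    --version 0.2.24-filebeat-skywalking-javaheap \
    --set image.repository=harbor.example.com/shudoon/demo-service \
    --set image.tag=master-abc1234-1 \
    --set service.targetPort=18009
$ kubectl get pods -l app.kubernetes.io/instance=demo-service -w
```

The label selector relies on the standard `selectorLabels` that `helm create` generates; the same `--set` keys are the ones the Jenkins pipelines later in this series pass automatically.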

兜兜    2021-09-03 11:25:28    2022-01-25 09:21:13   

k8s
#### Preparation

```sh
Install the pipeline-related plugins in Jenkins
```

#### Workflow

##### Spring Boot microservice build-and-deploy workflow

```sh
1. Jenkins pulls the code from GitLab
2. Maven compiles the microservice and packages the jar
3. docker build turns the jar into an image using the Dockerfile
4. docker push uploads the image to the Harbor registry
5. helm pulls the service image and chart from Harbor and deploys to the K8S cluster
6. An ingress exposes the front-end, the API services and the gateway service
```

Pipeline configuration

```Groovy
pipeline {
    agent any
    environment {
        REGISTRY_HOST = 'harbor.example.com'            // private registry domain
        REGISTRY_USERNAME = 'shudoon'
        REGISTRY_PASSWORD = 'xxxxxxx'
        REGISTRY_NAMESPACE = 'shudoon'                  // registry project/namespace
        REGISTRY_SECRET = 'shudoon-harbor-secret'       // k8s secret holding the registry credentials
        FILEBEAT_IMAGE = "filebeat"                     // filebeat image name
        SKYWALKING_IMAGE = "skywalking-agent-sidecar"   // skywalking agent image name
        HELM_LOCAL_REPO = 'shudoon-local'               // helm repo name
        HELM_TEMPLATE_NAME = 'springboot-demo'          // helm chart name
        HELM_TEMPLATE_VERSION = '0.2.24-filebeat-skywalking-javaheap'  // helm chart version
    }
    parameters {
        string defaultValue: 'shudoon-data-service', description: 'service name', name: 'SERVICE_NAME', trim: false
        gitParameter branchFilter: '.*', defaultValue: 'master', name: 'branch_or_tag', type: 'PT_BRANCH_TAG'
        string defaultValue: '1', description: 'replica count', name: 'REPLICAS', trim: false
        string defaultValue: '18004', description: 'service port', name: 'SERVER_PORT', trim: false
        string defaultValue: 'test2', description: 'build environment', name: 'BUILD_ENV', trim: false
        string defaultValue: 'http://git.example.com/shudoon-cloud-new/shudoon-cloud-service/shudoon-data.git', description: 'git url', name: 'GIT_URL', trim: false
    }
    stages {
        stage('Checkout code') {
            steps {
                sh 'pwd'
                // credentialsId refers to the username/password credential configured in Jenkins
                checkout([$class: 'GitSCM', branches: [[name: '${branch_or_tag}']], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: '7ace3f3e-4ef7-4005-aa6c-ae5aef462fac', url: '${GIT_URL}']]])
            }
        }
        stage('Resolve version') {
            steps {
                script {
                    TAG_BRANCH = sh(returnStdout: true, script: 'echo $branch_or_tag|awk -F/ \'{print $NF}\'').trim()
                    GIT_COMMITID = sh(returnStdout: true, script: 'git rev-parse --short HEAD').trim()
                    BUILD_IMAGE_TAG = "${TAG_BRANCH}-${GIT_COMMITID}-${BUILD_ID}"
                }
            }
        }
        stage('Build Maven project') {
            steps {
                sh 'pwd'
                sh "echo ${TAG_BRANCH}-${GIT_COMMITID}"
                sh "echo $BUILD_IMAGE_TAG"
                sh '/usr/local/maven3.6.2/bin/mvn -f src/pom.xml clean install -Dmaven.test.skip=true -P${BUILD_ENV}'
            }
        }
        stage('Build docker image') {
            steps {
                sh "cd src/${SERVICE_NAME};pwd;docker build --build-arg server_port=${SERVER_PORT} -t ${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/${SERVICE_NAME}:${BUILD_IMAGE_TAG} ."
            }
        }
        stage('Push image') {
            steps {
                sh "docker login -u ${REGISTRY_USERNAME} -p ${REGISTRY_PASSWORD} ${REGISTRY_HOST}"
                sh "docker push ${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/${SERVICE_NAME}:${BUILD_IMAGE_TAG}"
                sh "docker image rm ${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/${SERVICE_NAME}:${BUILD_IMAGE_TAG}"
            }
        }
        stage('Deploy to k8s') {
            steps {
                sh "/usr/local/bin/helm upgrade -i --kubeconfig /root/.kube/config --set image.repository=${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/${SERVICE_NAME},image.tag=${BUILD_IMAGE_TAG},image2.repository=${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/${FILEBEAT_IMAGE},image3.repository=${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/${SKYWALKING_IMAGE},imagePullSecrets[0].name=${REGISTRY_SECRET},replicaCount=${REPLICAS},service.targetPort=${SERVER_PORT} --version ${HELM_TEMPLATE_VERSION} ${SERVICE_NAME} ${HELM_LOCAL_REPO}/${HELM_TEMPLATE_NAME}"
            }
        }
    }
}
```

##### Front-end service build-and-deploy workflow

```sh
1. Jenkins pulls the code from GitLab
2. npm build compiles the front-end code into the dist static files
3. docker build turns the dist files into an image using the Dockerfile
4. docker push uploads the image to the Harbor registry
5. helm pulls the service image and chart from Harbor and deploys to the K8S cluster
6. An ingress exposes the front-end, the API services and the gateway service
```

Pipeline configuration

```Groovy
pipeline {
    agent any
    environment {
        VUE_APP_BASE_API = 'https://tgw2.example.com/'  // back-end API base URL
        REGISTRY_HOST = 'harbor.example.com'
        REGISTRY_USERNAME = 'shudoon'
        REGISTRY_PASSWORD = 'xxxxx'
        REGISTRY_NAMESPACE = 'shudoon'
        REGISTRY_SECRET = 'shudoon-harbor-secret'
        FILEBEAT_IMAGE = "filebeat"
        SKYWALKING_IMAGE = "skywalking-agent-sidecar"
        HELM_LOCAL_REPO = 'shudoon-local'
        HELM_TEMPLATE_NAME = 'web-demo'
        HELM_TEMPLATE_VERSION = '0.2.1-filebeat'
    }
    parameters {
        string defaultValue: 'dc-web', description: 'service name', name: 'SERVICE_NAME', trim: false
        gitParameter branchFilter: '.*', defaultValue: 'master', name: 'tag_branch', type: 'PT_BRANCH_TAG'
        string defaultValue: '1', description: 'replica count', name: 'REPLICAS', trim: false
        string defaultValue: '80', description: 'service port', name: 'SERVER_PORT', trim: false
        string defaultValue: 'http://git.example.com/webapp/report-pc.git', description: 'git url', name: 'GIT_URL', trim: false
    }
    stages {
        stage('Checkout code') {
            steps {
                sh 'pwd'
                sh "echo $VUE_APP_BASE_API"
                checkout([$class: 'GitSCM', branches: [[name: '${tag_branch}']], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: '7ace3f3e-4ef7-4005-aa6c-ae5aef462fac', url: '${GIT_URL}']]])
            }
        }
        stage('Resolve version') {
            steps {
                script {
                    TAG_BRANCH = sh(returnStdout: true, script: 'echo $tag_branch|awk -F/ \'{print $NF}\'').trim()
                    GIT_COMMITID = sh(returnStdout: true, script: 'git rev-parse --short HEAD').trim()
                    BUILD_IMAGE_TAG = "${TAG_BRANCH}-${GIT_COMMITID}-${BUILD_ID}"
                }
            }
        }
        stage('Build front-end node project') {
            tools {
                nodejs 'nodejs_12.18.1'
            }
            steps {
                sh 'node --version'
                sh 'npm --version'
                sh 'npm config set registry https://registry.npm.taobao.org'
                sh 'npm config get registry'
                sh 'npm install --unsafe-perm'
                sh 'npm run build:stage'
            }
        }
        stage('Build docker image') {
            steps {
                script {
                    def image = docker.build("${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/${SERVICE_NAME}:${BUILD_IMAGE_TAG}")
                }
            }
        }
        stage('Push image') {
            steps {
                sh "docker login -u ${REGISTRY_USERNAME} -p ${REGISTRY_PASSWORD} ${REGISTRY_HOST}"
                sh "docker push ${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/${SERVICE_NAME}:${BUILD_IMAGE_TAG}"
                sh "docker image rm ${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/${SERVICE_NAME}:${BUILD_IMAGE_TAG}"
            }
        }
        stage('Deploy to k8s') {
            steps {
                sh "/usr/local/bin/helm upgrade -i --set image.repository=${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/${SERVICE_NAME},image.tag=${BUILD_IMAGE_TAG},image2.repository=${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/${FILEBEAT_IMAGE},image3.repository=${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/${SKYWALKING_IMAGE},imagePullSecrets[0].name=${REGISTRY_SECRET},replicaCount=${REPLICAS},service.targetPort=${SERVER_PORT} --version ${HELM_TEMPLATE_VERSION} ${SERVICE_NAME} ${HELM_LOCAL_REPO}/${HELM_TEMPLATE_NAME}"
            }
        }
    }
}
```
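After a pipeline run finishes, the deployment can be verified from any machine that has the kubeconfig used by the `helm upgrade` step. A rough sketch, assuming the default `shudoon-data-service` release name from the parameters above and the standard labels generated by `helm create`:

```sh
# Release history and the objects it created
$ helm history shudoon-data-service
$ kubectl get deploy,pods -l app.kubernetes.io/instance=shudoon-data-service

# Logs of the filebeat sidecar and of the application container (container names come from the chart)
$ kubectl logs -l app.kubernetes.io/instance=shudoon-data-service -c filebeat --tail=20
$ kubectl logs -l app.kubernetes.io/instance=shudoon-data-service -c springboot-demo --tail=50

# Hit the actuator endpoint the probes use (18004 is the example SERVER_PORT)
$ POD=$(kubectl get pod -l app.kubernetes.io/instance=shudoon-data-service -o jsonpath='{.items[0].metadata.name}')
$ kubectl port-forward "$POD" 18004:18004 &
$ curl -s http://127.0.0.1:18004/actuator/health/readiness
```

If the rollout hangs, `kubectl describe pod` on a pending pod usually shows whether the image pull secret or the readiness probe is the problem.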

兜兜    2021-09-03 11:24:57    2022-01-25 09:20:30   

k8s
#### 1. Install SkyWalking with helm

##### Download skywalking-kubernetes

```sh
$ git clone https://github.com/apache/skywalking-kubernetes.git
```

##### Configure skywalking

```bash
$ cd skywalking-kubernetes/chart
$ vim skywalking/values-my-es.yaml
```

```yaml
oap:
  image:
    tag: 8.7.0-es7        # Set the right tag according to the existing Elasticsearch version
  storageType: elasticsearch7
ui:
  image:
    tag: 8.7.0
elasticsearch:
  enabled: false
  config:                 # For users of an existing elasticsearch cluster, takes effect when `elasticsearch.enabled` is false
    host: es-cn-xxxxx.elasticsearch.aliyuncs.com
    port:
      http: 9200
    user: "elastic"       # [optional]
    password: "xxxxxx"    # [optional]
```

##### Install with helm

```bash
$ export SKYWALKING_RELEASE_NAME=skywalking
$ export SKYWALKING_RELEASE_NAMESPACE=default
$ export REPO=skywalking
$ helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm
$ helm repo update
$ helm install "${SKYWALKING_RELEASE_NAME}" ${REPO}/skywalking -n "${SKYWALKING_RELEASE_NAMESPACE}" -f ./values-my-es.yaml
```

#### 2. Configure the SkyWalking agent for the microservices

Build the skywalking-agent-sidecar image (`used as the init container of the microservices`)

Download the SkyWalking agent

```bash
$ cd /opt/skywalking
$ wget https://dlcdn.apache.org/skywalking/8.7.0/apache-skywalking-apm-es7-8.7.0.tar.gz   # es7 means built for elasticsearch 7
$ tar xvf apache-skywalking-apm-es7-8.7.0.tar.gz
$ cd apache-skywalking-apm-bin-es7/agent
$ /bin/cp -r optional-plugins/* plugins/ -f   # note: only the springboot gateway needs this; other springboot microservices do not
```

Create the Dockerfile

```bash
$ cat Dockerfile
```

```bash
FROM busybox:latest
ENV LANG=C.UTF-8
RUN set -eux && mkdir -p /usr/skywalking/agent
ADD apache-skywalking-apm-bin-es7/agent /usr/skywalking/agent
WORKDIR /
```

Build the image

```bash
$ docker build . -t skywalking-agent-sidecar:8.7.0-fixbug-1
```

Modify deployment.yaml

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "springboot-demo.fullname" . }}
  labels:
    {{- include "springboot-demo.labels" . | nindent 4 }}
spec:
  {{- if not .Values.autoscaling.enabled }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "springboot-demo.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "springboot-demo.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "springboot-demo.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      initContainers:                            # add the SkyWalking init container
        - name: {{ .Values.image3.name }}
          image: "{{ .Values.image3.repository }}:{{ .Values.image3.tag }}"
          imagePullPolicy: IfNotPresent
          command: ["sh"]
          args:
            [
              "-c",
              "mkdir -p /skywalking/agent && cp -r /usr/skywalking/agent/* /skywalking/agent",
            ]
          volumeMounts:
            - mountPath: /skywalking/agent
              name: sw-agent
      containers:
        - name: {{ .Values.image2.name }}
          image: "{{ .Values.image2.repository }}:{{ .Values.image2.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          command:
            - "/bin/sh"
          args:
            - "-c"
            - "filebeat -c /etc/filebeat/filebeat.yml"
          volumeMounts:
            - name: app-logs
              mountPath: /log
            - name: filebeat-{{.Release.Name}}-config
              mountPath: /etc/filebeat/
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          env:
            - name: JAVA_TOOL_OPTIONS            # attach the SkyWalking javaagent to the JVM at startup
              value: -javaagent:/usr/skywalking/agent/skywalking-agent.jar
            - name: SW_AGENT_NAME                # service name shown in SkyWalking
              value: {{.Release.Name}}
            - name: SW_AGENT_COLLECTOR_BACKEND_SERVICES   # address of the SkyWalking OAP backend
              value: skywalking-oap:11800
          volumeMounts:
            - name: app-logs
              mountPath: /serverlog
            - name: sw-agent                     # agent files copied in by the init container
              mountPath: /usr/skywalking/agent
          ports:
            - name: http
              containerPort: {{ .Values.service.targetPort | default 80 }}
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /actuator/health/liveness
              port: {{ .Values.service.targetPort | default 80 }}
            initialDelaySeconds: 20
            failureThreshold: 15
            timeoutSeconds: 10
            periodSeconds: 5
          readinessProbe:
            httpGet:
              path: /actuator/health/readiness
              port: {{ .Values.service.targetPort | default 80 }}
            initialDelaySeconds: 20
            failureThreshold: 15
            timeoutSeconds: 10
            periodSeconds: 5
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      volumes:
        - name: app-logs
          emptyDir: {}
        - name: sw-agent                         # shared directory holding the SkyWalking agent
          emptyDir: {}
        - name: filebeat-{{.Release.Name}}-config
          configMap:
            name: filebeat-{{.Release.Name}}-config
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
```

Add the SkyWalking settings to values.yaml

```yaml
...
image3:
  name: skywalking-agent-sidecar
  repository: xxxxx-k8s-registry-test-registry-vpc.cn-shenzhen.cr.aliyuncs.com/xxxxx/skywalking-agent-sidecar
  pullPolicy: IfNotPresent
  imagePullPolicy: Always
  tag: "8.7.0-fixbug-1"
...
```
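Once a microservice has been redeployed with the chart above, a quick way to confirm the tracing setup end to end is to check the SkyWalking pods, make sure the init container actually copied the agent into the shared volume, and verify that the JVM picked up `JAVA_TOOL_OPTIONS`. A sketch with assumptions: the release name `shudoon-data-service` is only an example, and the `skywalking-ui` service name and port assume the `skywalking` release installed above:

```sh
# SkyWalking backend and UI pods (installed into the default namespace above)
$ kubectl get pods | grep skywalking
$ kubectl port-forward svc/skywalking-ui 8080:80 &   # then open http://127.0.0.1:8080

# Pick one pod of a redeployed microservice (example release: shudoon-data-service)
$ POD=$(kubectl get pod -l app.kubernetes.io/instance=shudoon-data-service -o jsonpath='{.items[0].metadata.name}')

# The init container should have copied the agent into the shared emptyDir volume
$ kubectl exec "$POD" -c springboot-demo -- ls /usr/skywalking/agent

# The JVM logs "Picked up JAVA_TOOL_OPTIONS: -javaagent:..." on startup when the agent is attached
$ kubectl logs "$POD" -c springboot-demo | grep "Picked up JAVA_TOOL_OPTIONS"
```

If the service still does not show up in the UI, the `SW_AGENT_COLLECTOR_BACKEND_SERVICES` value (`skywalking-oap:11800`) must match the OAP service name and namespace of the SkyWalking release.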
