K8s Installation
Environment preparation
- Disable SELinux and firewalld (see the commands below)
- Disable swap
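A minimal sketch of these steps, assuming a CentOS/RHEL-style system (adjust for your distribution):

setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config    # persist across reboots
systemctl disable --now firewalld
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab    # comment out the swap entry so it stays off after reboot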
hosts (/etc/hosts)
192.168.0.158 np0
192.168.0.229 np1
192.168.0.249 np2
192.168.0.148 np3
Set bridge parameters
cat << EOF > /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
user.max_user_namespaces=28633
EOF
sysctl -p /etc/sysctl.d/99-kubernetes-cri.conf
Enable IPVS support
Load the ip_vs kernel modules. kube-proxy uses iptables + ipset + ipvs to load-balance traffic to eligible Pods; without these modules it falls back to pure iptables mode.
cat > /etc/modules-load.d/ip_vs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF

# on kernels >= 4.19, nf_conntrack replaces nf_conntrack_ipv4
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
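To confirm the modules actually loaded, the same lsmod pattern used for containerd below applies here:

lsmod | grep -e ip_vs -e nf_conntrack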
Load kernel modules
cat << EOF > /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
lsmod | grep overlay
lsmod | grep br_netfilter
Deploy containerd
runc (container creation tool)
wget https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64
install -m 755 runc.amd64 /usr/local/sbin/runc
CNI plugins (container-to-container networking)
wget https://github.com/containernetworking/plugins/releases/download/v1.2.0/cni-plugins-linux-amd64-v1.2.0.tgz
mkdir -p /opt/cni/bin
tar Cxzvf /opt/cni/bin cni-plugins-linux-amd64-v1.2.0.tgz
Containerd
wget https://github.com/containerd/containerd/releases/download/v1.7.14/containerd-1.7.14-linux-amd64.tar.gz
tar Cxzvf /usr/local containerd-1.7.14-linux-amd64.tar.gz
wget https://raw.githubusercontent.com/containerd/containerd/main/containerd.service -O /usr/lib/systemd/system/containerd.service
systemctl daemon-reload && systemctl enable containerd
mkdir /etc/containerd
containerd config default > /etc/containerd/config.toml
cd /etc/containerd/
cp config.toml config.toml.orig
vi config.toml

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = true    # change false to true
[plugins."io.containerd.grpc.v1.cri"]
  # sandbox_image = "registry.k8s.io/pause:3.9"
  sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9"
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
  [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
    endpoint = ["http://mirrors.ustc.edu.cn"]
  [plugins."io.containerd.grpc.v1.cri".registry.mirrors."*"]
    endpoint = ["http://hub-mirror.c.163.com"]
systemctl restart containerd
netstat -nlput | grep containerd
kubernetes
repo
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
kubelet kubeadm kubectl
# yum list kubelet --showduplicates
yum install kubelet kubeadm kubectl
systemctl enable kubelet
systemctl status kubelet

kubelet is not healthy at this point; it becomes healthy after kubeadm init or join.
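Since kubeadm init below targets Kubernetes 1.28.2, pinning the packages to that version avoids version skew; a sketch, assuming the repo above provides these builds:

yum install kubelet-1.28.2 kubeadm-1.28.2 kubectl-1.28.2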
master
INIT
The calico network plugin requires pod-network-cidr to be set.
kubeadm init \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version=1.28.2 \
  --apiserver-advertise-address=192.168.0.249 \
  --service-cidr=10.1.0.0/16 \
  --pod-network-cidr=10.2.0.0/16
- apiserver-advertise-address: the master host's IP address
- service-cidr: IP range for internal Services; must not overlap with the pod network or the master network
- pod-network-cidr: IP range used for pod-to-pod communication in the cluster
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.0.249:6443 --token z1q425.gy4kgpp491c8nkq2 --discovery-token-ca-cert-hash sha256:0b02fa4069856afb9d17dba76527b7e7c630d799cc3c00c3cc36c8beaec0128c
calico
wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/calico.yaml
kubectl apply -f calico.yaml
kube-flannel
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

Edit net-conf.json so that Network matches the pod-network-cidr used at init:

  "Network": "10.2.0.0/16",
kubectl apply -f kube-flannel.yml
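A quick sanity check that the CNI pods start (the flannel/calico namespace differs between manifest versions, hence the cluster-wide listing):

kubectl get pods -A | grep -e flannel -e calico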
Node
kubectl get cs
kubectl get node
kubectl label node ${NODE} node-role.kubernetes.io/worker=worker
kubectl label node ${NODE} node-role.kubernetes.io/master=master
kubectl label node ${NODE} node-role.kubernetes.io/worker-    # a trailing "-" removes the label
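To confirm the labels took effect:

kubectl get node --show-labels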
Node Join
kubeadm join 192.168.0.249:6443 --token z1q425.gy4kgpp491c8nkq2 --discovery-token-ca-cert-hash sha256:0b02fa4069856afb9d17dba76527b7e7c630d799cc3c00c3cc36c8beaec0128c
Node delete
kubectl drain ${NODE} --delete-emptydir-data --force --ignore-daemonsets    # --delete-emptydir-data replaced the deprecated --delete-local-data
kubectl delete node ${NODE}
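Before a deleted node can re-join, its kubeadm state must be cleared; a sketch to run on the removed node:

kubeadm reset
rm -rf /etc/cni/net.d /root/.kube/config    # leftover CNI config and kubeconfig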
GUI
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml
kubectl apply -f recommended.yaml
kubectl proxy --address=0.0.0.0 --port=18001 --accept-hosts='^*$' &
http://192.168.0.158:18001/
http://192.168.0.158:18001/api
http://192.168.0.158:18001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/#/login
Getting a Bearer Token for ServiceAccount
Creating a Service Account
# y1.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
Creating a ClusterRoleBinding
# y2.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard    # must match the ServiceAccount namespace in y1.yaml
Creating a token
kubectl -n kubernetes-dashboard create token admin-user
Getting a long-lived Bearer Token for ServiceAccount
# y3.yaml
apiVersion: v1
kind: Secret
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token

kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d
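The decoded token can be pasted into the dashboard login page, or used directly against the API server; a sketch (API server address taken from the kubeadm init output above):

TOKEN=$(kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d)
curl -k -H "Authorization: Bearer $TOKEN" https://192.168.0.249:6443/api    # -k: self-signed cluster CA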
Remove the admin ServiceAccount and ClusterRoleBinding
kubectl -n kubernetes-dashboard delete serviceaccount admin-user
kubectl -n kubernetes-dashboard delete clusterrolebinding admin-user
kubelet Guide
Create token
# Expires after 24h
kubeadm token create --print-join-command
kubectl info
NODE=node3
# run on the master: scp /etc/kubernetes/admin.conf ${NODE}:/root/.kube/config
kubectl version --client
kubectl version --client --output=yaml
kubectl get apiservices
kubectl get cs
kubectl get node
kubectl describe node
kubectl describe pod nginx-deployment-86dcfdf4c6-5d47c
kubectl describe deployment nginx-deployment
kubectl get pods
kubectl get pods -l app=nginx
kubectl get pods --all-namespaces
Exposing services
kubectl get svc
# kubectl apply -f redis.yaml
# kubectl delete svc redis-svc
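The Service manifests below select pods labeled app: redis-pod, but no such workload is defined on this page; a hypothetical backing Deployment (kubectl create deployment labels the pod template app=<name>):

kubectl create deployment redis-pod --image=redis:alpine --port=6379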
ClusterIP (reachable inside the cluster)
apiVersion: v1
kind: Service
metadata:
  name: redis-svc
spec:
  selector:
    app: redis-pod
  type: ClusterIP
  ports:
  - protocol: TCP
    port: 6379
    targetPort: 6379
NodePort (reachable from outside the cluster)
kubectl apply -f redis.yaml

apiVersion: v1
kind: Service
metadata:
  name: redis-svc
spec:
  selector:
    app: redis-pod
  type: NodePort
  ports:
  - protocol: TCP
    port: 6379
    nodePort: 32379
    targetPort: 6379
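With the NodePort in place the service is reachable on any node IP; a sketch assuming redis-cli is installed on the client:

redis-cli -h 192.168.0.158 -p 32379 ping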
pods
# kubectl apply -f https://k8s.io/examples/application/deployment.yaml
# wget https://k8s.io/examples/application/deployment.yaml
kubectl apply -f deployment.yaml
# kubectl delete deployment nginx-deployment
# kubectl delete pods nginx-deployment-86dcfdf4c6-5d47c

or:

kubectl run nginx --image=nginx --port=8180
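Common follow-up commands for inspecting the resulting pods (using the example pod name from above):

kubectl logs nginx-deployment-86dcfdf4c6-5d47c
kubectl exec -it nginx-deployment-86dcfdf4c6-5d47c -- sh
kubectl port-forward deployment/nginx-deployment 8080:80    # local 8080 -> container 80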
ctr
ctr version
ctr namespaces ls
ctr container list
ctr images pull docker.io/library/nginx:alpine    # pull an image
# local HTTPS registry (-k skips certificate verification)
ctr images pull -k 192.168.0.242:8000/tongrds-center:latest
ctr images tag 192.168.0.242:8000/tongrds-center:latest tongrds-center
ctr images delete 192.168.0.242:8000/tongrds-center:latest
# ctr image import postgres.tar    # import a local image
ctr images list
ctr run -d docker.io/library/nginx:alpine nginx    # run a container
ctr container list
crictl
# images are visible only on the local node
crictl pull bitnami/milvus
crictl image
Error
node NotReady
A node stays NotReady until a CNI plugin (calico or flannel) is applied:

kubectl apply -f calico.yaml
kubectl apply -f kube-flannel.yml
dorisfe1 NotReady <none> 8s v1.28.2
registry.mirrors
A configuration property that is scheduled for removal:
WARN[0000] DEPRECATION: The `mirrors` property of `[plugins."io.containerd.grpc.v1.cri".registry]` is deprecated since containerd v1.5 and will be removed in containerd v2.0. Use `config_path` instead.
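A sketch of the config_path replacement the warning asks for (containerd 1.5+); the mirror endpoint reuses the one configured earlier:

# in /etc/containerd/config.toml:
#   [plugins."io.containerd.grpc.v1.cri".registry]
#     config_path = "/etc/containerd/certs.d"
mkdir -p /etc/containerd/certs.d/docker.io
cat << EOF > /etc/containerd/certs.d/docker.io/hosts.toml
server = "https://registry-1.docker.io"

[host."http://hub-mirror.c.163.com"]
  capabilities = ["pull", "resolve"]
EOF
systemctl restart containerd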
kubectl get node
The node is missing /etc/kubernetes/admin.conf (copied to ~/.kube/config as shown above), so kubectl falls back to localhost:8080:
E0325 16:13:46.489435 14081 memcache.go:265] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": dial tcp [::1]:8080: connect: connection refused
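The fix is the same kubeconfig copy shown in the kubectl info section; run on the master:

scp /etc/kubernetes/admin.conf ${NODE}:/root/.kube/config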
crictl info
crictl must be pointed at the containerd socket, otherwise it tries the removed dockershim socket (error below):

# /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 2
debug: false
pull-image-on-create: false
disable-pull-on-run: false
E0326 10:15:02.860305 3920 remote_runtime.go:616] "Status from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial unix /var/run/dockershim.sock: connect: no such file or directory\""