生产环境的Kubernetes搭建

需要支持systemd的系统

# 对应apiserver版本的可执行命令: https://www.downloadkubernetes.com/
# 下载慢的话使用这个项目进行下载: https://github.com/jcleng/downloadkubernetes
# Download the Kubernetes binaries matching the apiserver version.
# printf emits exactly one URL per line and xargs hands each line to wget;
# this avoids the GNU-only `xargs -d " "` and the trailing-space pitfalls
# of joining URLs with `echo -n` + backslash continuations.
printf '%s\n' \
  https://dl.k8s.io/v1.26.3/bin/linux/amd64/apiextensions-apiserver \
  https://dl.k8s.io/v1.26.3/bin/linux/amd64/kube-aggregator \
  https://dl.k8s.io/v1.26.3/bin/linux/amd64/kube-apiserver \
  https://dl.k8s.io/v1.26.3/bin/linux/amd64/kube-controller-manager \
  https://dl.k8s.io/v1.26.3/bin/linux/amd64/kube-log-runner \
  https://dl.k8s.io/v1.26.3/bin/linux/amd64/kube-proxy \
  https://dl.k8s.io/v1.26.3/bin/linux/amd64/kube-scheduler \
  https://dl.k8s.io/v1.26.3/bin/linux/amd64/kubeadm \
  https://dl.k8s.io/v1.26.3/bin/linux/amd64/kubectl \
  https://dl.k8s.io/v1.26.3/bin/linux/amd64/kubectl-convert \
  https://dl.k8s.io/v1.26.3/bin/linux/amd64/kubelet \
  https://dl.k8s.io/v1.26.3/bin/linux/amd64/mounter \
  | xargs -n1 wget
# 查看/sys/fs/cgroup是否开启
ls /sys/fs/cgroup
# blkio  cpu  cpuacct  cpu,cpuacct  cpuset  devices  freezer  hugetlb  memory  net_cls  net_cls,net_prio  net_prio  perf_event  pids  systemd

# https://github.com/containerd/containerd/blob/main/docs/getting-started.md
# 运行一个containerd
nix-shell -p containerd
containerd config default > ./containerd.toml
# 修改时, 配置 systemd cgroup 驱动 (cgroup v1/v2 均按此配置; 根据实际 runtime 段落修改):
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = true
# 重载沙箱(pause)镜像, 版本来自下面的images list
[plugins."io.containerd.grpc.v1.cri"]
  # sandbox_image = "docker.io/dyrnq/pause:3.9"
  sandbox_image = "dockerproxy.com/dyrnq/pause:3.9"
# containerd.toml配置的cni二进制文件需要下载解压到对应目录 https://github.com/containernetworking/plugins/releases
[plugins."io.containerd.grpc.v1.cri".cni]
  bin_dir = "/opt/cni/bin"

sudo containerd --config ./containerd.toml --log-level info
# 测试
# nerdctl工具: https://github.com/containerd/nerdctl/releases
export CONTAINERD_NAMESPACE=k8s.io
export CONTAINERD_ADDRESS=unix:///run/containerd/containerd.sock
# 可以修改[grpc]address的地址位置
# export CONTAINERD_ADDRESS=unix:///tmp/containerd.sock

nerdctl images
# bash 命令智能补全
echo 'source <(kubectl completion bash)'>>~/.bashrc
# 使用 kubeadm 初始化集群: https://kubernetes.io/zh-cn/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#%E5%87%86%E5%A4%87%E5%BC%80%E5%A7%8B
# 在此之前请部署好容器运行时
# 这里直接通过配置文件来初始化
# 查看配置的默认值,分别对应init和join命令
kubeadm config print init-defaults>init.yaml
kubeadm config print join-defaults>join.yaml

# 配置api说明: https://kubernetes.io/zh-cn/docs/reference/config-api/kubeadm-config.v1beta3/
# 配置完成之后可以先拉镜像,根据配置文件的imageRepository镜像地址,可以配置为, 修改之后list的镜像地址就会全部变更
imageRepository: docker.io/dyrnq
                 dockerproxy.com/dyrnq
# 不检查swap
failSwapOn: false
# api地址, 可以访问的地址
advertiseAddress: 192.168.122.252
# 节点名称,主机名称是node才能加入, 节点名称修改,certificatesDir证书(/var/lib/kubelet/config.yaml /etc/kubernetes/pki/ca.crt)也要重新生成
# 需要和主机名称一致
nodeRegistration:
  name: node

kubeadm config images list --config=init.yaml
# 由 registry.k8s.io 修改为 docker.io/dyrnq
# docker.io/dyrnq/kube-apiserver:v1.26.0
# docker.io/dyrnq/kube-controller-manager:v1.26.0
# docker.io/dyrnq/kube-scheduler:v1.26.0
# docker.io/dyrnq/kube-proxy:v1.26.0
# docker.io/dyrnq/pause:3.9
# docker.io/dyrnq/etcd:3.5.6-0
# docker.io/dyrnq/coredns:v1.9.3

kubeadm config images pull --config=init.yaml
# 手动下载: https://hub.docker.com/u/dyrnq

nerdctl -n k8s.io images
kubeadm reset # 如果是二次安装删除原来的配置
# 初始化, 会提示哪步错误,按照需求修改调整, 提示失败也没有关系, 配置文件生成成功即可
kubeadm init --config=init.yaml --ignore-preflight-errors=FileContent--proc-sys-net-bridge-bridge-nf-call-iptables --ignore-preflight-errors=SystemVerification
# kubeadm join --config=join.yaml

# 常见错误:
# crictl is required by the container runtime: executable file not found in $PATH
# crictl工具: https://github.com/kubernetes-sigs/cri-tools/releases
nix-shell -p cri-tools

# conntrack not found in system path
# 源码 https://www.netfilter.org/projects/conntrack-tools/downloads.html
nix-shell -p conntrack-tools
# iptables not found in system path
nix-shell -p iptables
#
echo "1">/proc/sys/net/ipv4/ip_forward

# 查看容器状态
export CONTAINER_RUNTIME_ENDPOINT=unix:///run/containerd/containerd.sock
crictl ps -a
# CONTAINER           IMAGE               CREATED             STATE               NAME                      ATTEMPT             POD ID              POD
# f0ec1c04df6ac       5d7c5dfd3ba18       8 minutes ago       Running             kube-controller-manager   0                   c493a2d49780d       kube-controller-manager-ubu
# fde9a16c1047c       a31e1d84401e6       8 minutes ago       Running             kube-apiserver            0                   6d00d3bdcedba       kube-apiserver-ubu
# 7fb5966615daa       dafd8ad70b156       8 minutes ago       Running             kube-scheduler            0                   5d14ec9c34ca5       kube-scheduler-ubu
# 04c6df48eb275       fce326961ae2d       8 minutes ago       Running             etcd                      0                   6f5e16e1a9441       etcd-ubu

crictl stop 49f4315e5cf58
crictl rm 49f4315e5cf58


# 生成的文件, 重新生成需要整个删除
/etc/kubernetes/
/etc/kubernetes/admin.conf # .kube/config
/etc/kubernetes/manifests/kube-apiserver.yaml
/var/lib/kubelet/config.yaml
# 手动启动服务
swapoff -a # 或者启动增加 --fail-swap-on=false
# 修改主机名称
vim /etc/hostname
hostname node

# 启动
# Run the kubelet in the foreground against the files kubeadm generated above:
#   --config: kubelet configuration (/var/lib/kubelet/config.yaml, see L118)
#   --kubeconfig: credentials/endpoint for the API server (/etc/kubernetes/kubelet.conf)
#   --container-runtime-endpoint: containerd's CRI socket (matches CONTAINERD_ADDRESS above)
kubelet --config=/var/lib/kubelet/config.yaml \
--kubeconfig=/etc/kubernetes/kubelet.conf \
--container-runtime-endpoint=unix:///run/containerd/containerd.sock

# kubectl 访问, 这一步很重要, 因为配置文件的 server: https://10.0.2.15:6443 而不是默认的8080
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
kubectl get all --all-namespaces
# "Attempting to register node" node="ubu"
kubectl get node
# NAME   STATUS   ROLES    AGE   VERSION
# node   Ready    <none>   11m   v1.26.3
#

# 网络(需要kube-proxy运行) https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml
kubectl get all -n kube-flannel
# open /run/flannel/subnet.env: no such file or directory
# https://github.com/kubernetes/kubernetes/issues/70202#issuecomment-481173403
# pod报错, 创建/run/flannel/subnet.env文件,写入:
FLANNEL_NETWORK=10.244.0.0/16
FLANNEL_SUBNET=10.244.0.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=true


# 用helm初始化一个test001服务
# https://github.com/helm/helm/releases
wget https://get.helm.sh/helm-v3.11.2-linux-amd64.tar.gz
# 测试访问: http://127.0.0.1:8080/
kubectl port-forward --address 0.0.0.0 service/test001 8080:80


# 节点加入
kubeadm token create

# 节点状态, 查看 STATUS NotReady 详细数据
kubectl describe node node
# Ingress Controller 推荐使用 traefik

# jcleng.gitee.io/minikube搭建k8s测试环境.md 配置ingress-nginx

# 问题排除 describe/logs
# 可以手动将文件对应的镜像进行修改后, 手动拉取

# helm安装好点
nerdctl -n k8s.io pull dyrnq/ingress-nginx-controller:v1.9.0-beta.0
nerdctl -n k8s.io pull dyrnq/kube-webhook-certgen:v20230407

# 如果 EXTERNAL-IP:PORT 无法访问, 使用服务的 Endpoints (Endpoints表示一个Service对应的所有Pod副本的访问地址) 排查pod是否正常运行
kubectl describe service traefik-dashboard-service
kubectl get endpoints
# CLUSTER-IP 用于集群内部pod通信, pod/node内是可以通信的,
# containerd.toml 中 conf_dir 指向的 CNI 配置文件, 例如 /etc/cni/net.d/87-podman-bridge.conflist :

{
    "cniVersion": "0.4.0",
    "name": "podman",
    "plugins": [
        {
            "type": "bridge",
            "bridge": "cni-podman0",
            "isGateway": true,
            "ipMasq": true,
            "ipam": {
                "type": "host-local",
                "routes": [
                    {
                        "dst": "0.0.0.0/0"
                    }
                ],
                "ranges": [
                    [
                        {
                            "subnet": "10.88.0.0/16",
                            "gateway": "10.88.0.1"
                        }
                    ]
                ]
            }
        },
        {
            "type": "portmap",
            "capabilities": {
                "portMappings": true
            }
        },
        {
            "type": "firewall"
        }
    ]
}