
Installing k8s on Ubuntu

Open firewall ports

(master node)

Open ports 6443, 2379, 2380, and 10250 on the master node.

# iptables -A INPUT -p tcp -m multiport --dports 6443,2379,2380,10250 -j ACCEPT
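
Note that this rule does not survive a reboot. As an optional sanity check and one way to persist it (the iptables-persistent package is just one option, shown here as a sketch):

# iptables -L INPUT -n --line-numbers | grep -E '6443|2379|2380|10250'
# apt-get install -y iptables-persistent
# netfilter-persistent save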

Load kernel modules

(all nodes)

The flannel plugin used later in this article depends on the br_netfilter kernel module.

Add the modules:

# tee /etc/modules-load.d/containerd.conf <<EOF
overlay
br_netfilter
EOF
# modprobe overlay
# modprobe br_netfilter
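
To confirm that both modules are loaded (a quick sanity check, not a required step):

# lsmod | grep -e overlay -e br_netfilter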

Set kernel parameters

(all nodes)

The flannel plugin used later in this article requires net.bridge.bridge-nf-call-ip6tables and net.bridge.bridge-nf-call-iptables to be enabled.

On the master node and worker nodes, set the kernel parameters:

tee /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

Apply the kernel parameters:

# sysctl --system
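
To confirm the settings took effect (optional check):

# sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward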

Disable the swap partition

(all nodes)

This matters: when memory runs low, Linux automatically swaps part of it out to disk, which degrades performance, so disabling swap is recommended. If swap is left on, kubelet will not work properly.

On the master node and worker nodes:

Disable it temporarily:

# swapoff -a

Disable it permanently:

# sed -ri 's/.*swap.*/#&/' /etc/fstab

Verify the result:

# free -m
               total        used        free      shared  buff/cache   available
Mem:            1968         230         697           3        1041        1569
Swap:              0           0           0

The Swap line now shows all zeros, so the change took effect.

Set a static IP (optional)

(all nodes)

Since this guide uses virtual machines, their IPs need to be pinned. If your hosts already use static IPs, skip this step; if they get addresses from DHCP, switching to static IPs is recommended.

On the master node and worker nodes, change /etc/netplan/00-installer-config.yaml to:

# This is the network config written by 'subiquity'
network:
  ethernets:
    enp0s5:
      addresses:
      - 10.211.55.11/24 # this host's IP and netmask
      gateway4: 10.211.55.1 # gateway
      nameservers:
        addresses:
        - 114.114.114.114 # DNS server
        search: []
  version: 2

Apply the change:

# netplan apply
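
A quick way to confirm the address and default route (the interface name enp0s5 matches the example above):

# ip addr show enp0s5
# ip route | grep default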

If needed, you can also edit /etc/hosts so the nodes can reach each other by hostname:

10.211.55.11  master
10.211.55.12  node1
10.211.55.13  node2

Set up time synchronization (optional)

(all nodes)

The k8s nodes need their clocks kept in sync.

# apt-get install -y chrony
# systemctl start chronyd
# systemctl enable chronyd
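
To check that time synchronization is actually working (optional):

# chronyc tracking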

Install containerd

(all nodes)

Install:

# curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
# add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
# apt-get update
# apt-get install -y containerd.io

Generate the containerd configuration file:

# containerd config default > /etc/containerd/config.toml

Edit /etc/containerd/config.toml as follows:

disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2

[cgroup]
  path = ""

[debug]
  address = ""
  format = ""
  gid = 0
  level = ""
  uid = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216
  tcp_address = ""
  tcp_tls_ca = ""
  tcp_tls_cert = ""
  tcp_tls_key = ""
  uid = 0

[metrics]
  address = ""
  grpc_histogram = false

[plugins]

  [plugins."io.containerd.gc.v1.scheduler"]
    deletion_threshold = 0
    mutation_threshold = 100
    pause_threshold = 0.02
    schedule_delay = "0s"
    startup_delay = "100ms"

  [plugins."io.containerd.grpc.v1.cri"]
    device_ownership_from_security_context = false
    disable_apparmor = false
    disable_cgroup = false
    disable_hugetlb_controller = true
    disable_proc_mount = false
    disable_tcp_service = true
    enable_selinux = false
    enable_tls_streaming = false
    enable_unprivileged_icmp = false
    enable_unprivileged_ports = false
    ignore_image_defined_volumes = false
    max_concurrent_downloads = 3
    max_container_log_line_size = 16384
    netns_mounts_under_state_dir = false
    restrict_oom_score_adj = false
    sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"
    selinux_category_range = 1024
    stats_collect_period = 10
    stream_idle_timeout = "4h0m0s"
    stream_server_address = "127.0.0.1"
    stream_server_port = "0"
    systemd_cgroup = false
    tolerate_missing_hugetlb_controller = true
    unset_seccomp_profile = ""

    [plugins."io.containerd.grpc.v1.cri".cni]
      bin_dir = "/opt/cni/bin"
      conf_dir = "/etc/cni/net.d"
      conf_template = ""
      ip_pref = ""
      max_conf_num = 1

    [plugins."io.containerd.grpc.v1.cri".containerd]
      default_runtime_name = "runc"
      disable_snapshot_annotations = true
      discard_unpacked_layers = false
      ignore_rdt_not_enabled_errors = false
      no_pivot = false
      snapshotter = "overlayfs"

      [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
        base_runtime_spec = ""
        cni_conf_dir = ""
        cni_max_conf_num = 0
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        runtime_engine = ""
        runtime_path = ""
        runtime_root = ""
        runtime_type = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]

      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]

        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          base_runtime_spec = ""
          cni_conf_dir = ""
          cni_max_conf_num = 0
          container_annotations = []
          pod_annotations = []
          privileged_without_host_devices = false
          runtime_engine = ""
          runtime_path = ""
          runtime_root = ""
          runtime_type = "io.containerd.runc.v2"

          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
            BinaryName = ""
            CriuImagePath = ""
            CriuPath = ""
            CriuWorkPath = ""
            IoGid = 0
            IoUid = 0
            NoNewKeyring = false
            NoPivotRoot = false
            Root = ""
            ShimCgroup = ""
            SystemdCgroup = true  # set to true

      [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
        base_runtime_spec = ""
        cni_conf_dir = ""
        cni_max_conf_num = 0
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        runtime_engine = ""
        runtime_path = ""
        runtime_root = ""
        runtime_type = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]

    [plugins."io.containerd.grpc.v1.cri".image_decryption]
      key_model = "node"

    [plugins."io.containerd.grpc.v1.cri".registry]
      config_path = ""

      [plugins."io.containerd.grpc.v1.cri".registry.auths]

      [plugins."io.containerd.grpc.v1.cri".registry.configs]

      [plugins."io.containerd.grpc.v1.cri".registry.headers]

      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."mirror.ccs.tencentyun.com"]
          endpoint = ["https://mirror.ccs.tencentyun.com"]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry.cn-hangzhou.aliyuncs.com/google_containers"]
          endpoint = ["https://registry.cn-hangzhou.aliyuncs.com/google_containers"]

    [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
      tls_cert_file = ""
      tls_key_file = ""

  [plugins."io.containerd.internal.v1.opt"]
    path = "/opt/containerd"

  [plugins."io.containerd.internal.v1.restart"]
    interval = "10s"

  [plugins."io.containerd.internal.v1.tracing"]
    sampling_ratio = 1.0
    service_name = "containerd"

  [plugins."io.containerd.metadata.v1.bolt"]
    content_sharing_policy = "shared"

  [plugins."io.containerd.monitor.v1.cgroups"]
    no_prometheus = false

  [plugins."io.containerd.runtime.v1.linux"]
    no_shim = false
    runtime = "runc"
    runtime_root = ""
    shim = "containerd-shim"
    shim_debug = false

  [plugins."io.containerd.runtime.v2.task"]
    platforms = ["linux/amd64"]
    sched_core = false

  [plugins."io.containerd.service.v1.diff-service"]
    default = ["walking"]

  [plugins."io.containerd.service.v1.tasks-service"]
    rdt_config_file = ""

  [plugins."io.containerd.snapshotter.v1.aufs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.btrfs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.devmapper"]
    async_remove = false
    base_image_size = ""
    discard_blocks = false
    fs_options = ""
    fs_type = ""
    pool_name = ""
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.native"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.overlayfs"]
    root_path = ""
    upperdir_label = false

  [plugins."io.containerd.snapshotter.v1.zfs"]
    root_path = ""

  [plugins."io.containerd.tracing.processor.v1.otlp"]
    endpoint = ""
    insecure = false
    protocol = ""

[proxy_plugins]

[stream_processors]

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar"

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar+gzip"

[timeouts]
  "io.containerd.timeout.bolt.open" = "0s"
  "io.containerd.timeout.shim.cleanup" = "5s"
  "io.containerd.timeout.shim.load" = "5s"
  "io.containerd.timeout.shim.shutdown" = "3s"
  "io.containerd.timeout.task.state" = "2s"

[ttrpc]
  address = ""
  gid = 0
  uid = 0
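
If you prefer not to edit the whole file by hand, the two key changes relative to the generated default (systemd cgroups for runc, and the Alibaba Cloud pause image) could also be applied with sed; this is only a sketch against the default config generated above, and the registry mirror entries would still need to be added manually:

# sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
# sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"#' /etc/containerd/config.toml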

Restart containerd:

# systemctl restart containerd
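
To make sure containerd came back up cleanly (optional check; ctr ships with the containerd.io package):

# systemctl is-active containerd
# ctr version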

Install kubeadm, kubectl, and kubelet

(all nodes)

Install the packages required to use the k8s apt repository:

# apt-get update && apt-get install -y apt-transport-https ca-certificates curl

Download the Alibaba Cloud public signing key:

# curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -

Add the k8s package source:

# cat > /etc/apt/sources.list.d/kubernetes.list <<EOF
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
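
On newer Ubuntu releases apt-key is deprecated; if it complains, a keyring-based alternative along these lines can be used instead (the keyring path /etc/apt/keyrings/kubernetes.gpg is just an example):

# mkdir -p /etc/apt/keyrings
# curl -fsSL https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | gpg --dearmor -o /etc/apt/keyrings/kubernetes.gpg
# echo "deb [signed-by=/etc/apt/keyrings/kubernetes.gpg] https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list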

Install kubeadm, kubectl, and kubelet; a specific version can also be requested:

# apt-get update && apt-get install -y kubeadm kubelet kubectl
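
For example, to see which versions the repository offers and install a specific one (1.26.0-00 here is only an example version string; check apt-cache madison for what is actually available):

# apt-cache madison kubeadm
# apt-get install -y kubelet=1.26.0-00 kubeadm=1.26.0-00 kubectl=1.26.0-00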

Pin the kubeadm, kubectl, and kubelet versions:

# apt-mark hold kubelet kubeadm kubectl

Enable kubelet to start on boot:

# systemctl enable kubelet

Initialize k8s on the master node

(master node)

Generate a kubeadm configuration on the master node:

# kubeadm config print init-defaults > kubeadm-config.yaml

Edit kubeadm-config.yaml to:

apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef   # credential worker nodes need to join the cluster; can be generated with kubeadm token create
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.211.55.11 # the master node's own IP
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: master
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.26.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16 # Pod network CIDR; matches flannel's default
scheduler: {}
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: systemd   # set to systemd

Deploy the master node:

# kubeadm init --config kubeadm-config.yaml
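
Once kubeadm init succeeds, its output suggests configuring kubectl access for the user that will operate the cluster; the usual steps are:

# mkdir -p $HOME/.kube
# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# chown $(id -u):$(id -g) $HOME/.kube/config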

Worker nodes join the cluster

(worker nodes)

On each worker node:

# kubeadm join 10.211.55.11:6443 --token abcdef.0123456789abcdef  --discovery-token-ca-cert-hash sha256:a0d243e7799b6156b2bf206d8932a5e7cea8c01d95a13420e31482bbb7005fc7 --cri-socket /run/containerd/containerd.sock --node-name node1

Here, --token is the token defined earlier in kubeadm-config.yaml; --discovery-token-ca-cert-hash can be found in the output of kubeadm init --config kubeadm-config.yaml, or computed with:

# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \
   openssl dgst -sha256 -hex | sed 's/^.* //'

--node-name specifies the name of the current node.
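
Alternatively, a complete join command (token and hash included) can be printed on the master node, with --cri-socket and --node-name appended as needed:

# kubeadm token create --print-join-command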

Install the flannel plugin

(master node)

On the master node:

# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# kubectl apply -f ./kube-flannel.yml
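
The flannel pods can be checked first; depending on the manifest version they run in either the kube-flannel or the kube-system namespace:

# kubectl get pods -A | grep flannel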

After a short while, check the nodes:

# kubectl get nodes
NAME     STATUS   ROLES           AGE     VERSION
master   Ready    control-plane   5h46m   v1.26.1
node1    Ready    <none>          3h4m    v1.26.1
node2    Ready    <none>          3h6m    v1.26.1

Once every node shows Ready, the k8s installation is complete.

This post is licensed under CC BY 4.0 by the author.