k8s install

test-env

kubernetes

prepare


yum install -y conntrack ipvsadm ipset jq sysstat curl iptables libseccomp

# run as root on each node
swapoff -a
systemctl disable firewalld
systemctl stop firewalld
iptables -F
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf

modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
lsmod | grep ip_vs

cfssl (node1)

mkdir -p /opt/local/cfssl
cd /opt/local/cfssl
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
mv cfssl_linux-amd64 cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
mv cfssljson_linux-amd64 cfssljson
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
mv cfssl-certinfo_linux-amd64 cfssl-certinfo
chmod +x *

create certs (node1)

  • create certs
    mkdir /opt/ssl
    cd /opt/ssl
    cat <<EOF > config.json
    {
    "signing": {
    "default": {
    "expiry": "87600h"
    },
    "profiles": {
    "kubernetes": {
    "usages": [
    "signing",
    "key encipherment",
    "server auth",
    "client auth"
    ],
    "expiry": "87600h"
    }
    }
    }
    }
    EOF
    cat <<EOF > csr.json
    {
    "CN": "kubernetes",
    "key": {
    "algo": "rsa",
    "size": 2048
    },
    "names": [
    {
    "C": "CN",
    "ST": "Chongqing",
    "L": "Chongqing",
    "O": "k8s",
    "OU": "System"
    }
    ]
    }
    EOF
    /opt/local/cfssl/cfssl gencert -initca csr.json | /opt/local/cfssl/cfssljson -bare ca

    mkdir -p /etc/kubernetes/ssl

    cp *.pem /etc/kubernetes/ssl
    cp ca.csr /etc/kubernetes/ssl

    ssh root@node2 "mkdir -p /etc/kubernetes/ssl/"
    ssh root@node3 "mkdir -p /etc/kubernetes/ssl/"

    scp *.pem *.csr root@node2:/etc/kubernetes/ssl/
    scp *.pem *.csr root@node3:/etc/kubernetes/ssl/

    # etcd
    cd /opt/ssl
    cat <<EOF > etcd-csr.json
    {
    "CN": "etcd",
    "hosts": [
    "127.0.0.1",
    "172.17.3.108",
    "172.17.3.140",
    "172.17.3.223",
    "172.17.3.224",
    "172.17.3.225",
    "172.17.3.226",
    "172.17.3.227",
    "172.17.3.228",
    "172.17.3.229",
    "172.17.3.230"
    ],
    "key": {
    "algo": "rsa",
    "size": 2048
    },
    "names": [
    {
    "C": "CN",
    "ST": "Chongqing",
    "L": "Chongqing",
    "O": "k8s",
    "OU": "System"
    }
    ]
    }
    EOF
    /opt/local/cfssl/cfssl gencert -ca=/opt/ssl/ca.pem \
    -ca-key=/opt/ssl/ca-key.pem \
    -config=/opt/ssl/config.json \
    -profile=kubernetes etcd-csr.json | /opt/local/cfssl/cfssljson -bare etcd

    cp etcd*.pem /etc/kubernetes/ssl/
    chmod 644 /etc/kubernetes/ssl/etcd-key.pem

    scp etcd*.pem root@node2:/etc/kubernetes/ssl/
    ssh root@node2 "chmod 644 /etc/kubernetes/ssl/etcd-key.pem"

    scp etcd*.pem root@node3:/etc/kubernetes/ssl/
    ssh root@node3 "chmod 644 /etc/kubernetes/ssl/etcd-key.pem"

    # admin
    cd /opt/ssl
    cat <<EOF > admin-csr.json
    {
    "CN": "admin",
    "hosts": [],
    "key": {
    "algo": "rsa",
    "size": 2048
    },
    "names": [
    {
    "C": "CN",
    "ST": "Chongqing",
    "L": "Chongqing",
    "O": "system:masters",
    "OU": "System"
    }
    ]
    }
    EOF

    /opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
    -ca-key=/etc/kubernetes/ssl/ca-key.pem \
    -config=/opt/ssl/config.json \
    -profile=kubernetes admin-csr.json | /opt/local/cfssl/cfssljson -bare admin

    cp admin*.pem /etc/kubernetes/ssl/
    scp admin*.pem root@node2:/etc/kubernetes/ssl/
    scp admin*.pem root@node3:/etc/kubernetes/ssl/

    # kubernetes certificate
    cd /opt/ssl
    cat <<EOF > kubernetes-csr.json
    {
    "CN": "kubernetes",
    "hosts": [
    "127.0.0.1",
    "172.17.3.108",
    "172.17.3.140",
    "172.17.3.223",
    "10.254.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
    ],
    "key": {
    "algo": "rsa",
    "size": 2048
    },
    "names": [
    {
    "C": "CN",
    "ST": "Chongqing",
    "L": "Chongqing",
    "O": "k8s",
    "OU": "System"
    }
    ]
    }
    EOF

    ## In the hosts field: 127.0.0.1 is the local host, 172.17.3.108 and 172.17.3.140 are the Master IPs (add one entry per Master). 10.254.0.1 is the kubernetes Service IP, usually the first IP of the service network. After the cluster is started it can be checked with kubectl get svc.
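    ## once the apiserver is up, the service IP can be confirmed with:
    ## kubectl get svc kubernetes    # CLUSTER-IP should be 10.254.0.1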

    /opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
    -ca-key=/etc/kubernetes/ssl/ca-key.pem \
    -config=/opt/ssl/config.json \
    -profile=kubernetes kubernetes-csr.json | /opt/local/cfssl/cfssljson -bare kubernetes

    cp kubernetes*.pem /etc/kubernetes/ssl/

    scp kubernetes*.pem root@node2:/etc/kubernetes/ssl/

    scp kubernetes*.pem root@node3:/etc/kubernetes/ssl/

    # kube-proxy
    cd /opt/ssl
    cat <<EOF > kube-proxy-csr.json
    {
    "CN": "system:kube-proxy",
    "hosts": [],
    "key": {
    "algo": "rsa",
    "size": 2048
    },
    "names": [
    {
    "C": "CN",
    "ST": "Chongqing",
    "L": "Chongqing",
    "O": "k8s",
    "OU": "System"
    }
    ]
    }
    EOF

    /opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
    -ca-key=/etc/kubernetes/ssl/ca-key.pem \
    -config=/opt/ssl/config.json \
    -profile=kubernetes kube-proxy-csr.json | /opt/local/cfssl/cfssljson -bare kube-proxy

    cp kube-proxy* /etc/kubernetes/ssl/
    scp kube-proxy* root@node2:/etc/kubernetes/ssl/
    scp kube-proxy* root@node3:/etc/kubernetes/ssl/

docker (each)

yum -y install docker
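# enable and start docker now; the kubelet unit used later requires docker.service
systemctl enable docker
systemctl start docker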

etcd (each)

mkdir -p /tmp/k8s-deps; cd /tmp/k8s-deps; wget https://github.com/coreos/etcd/releases/download/v3.3.8/etcd-v3.3.8-linux-amd64.tar.gz
ssh root@node2 "mkdir -p /tmp/k8s-deps; cd /tmp/k8s-deps; wget https://github.com/coreos/etcd/releases/download/v3.3.8/etcd-v3.3.8-linux-amd64.tar.gz"
ssh root@node3 "mkdir -p /tmp/k8s-deps; cd /tmp/k8s-deps; wget https://github.com/coreos/etcd/releases/download/v3.3.8/etcd-v3.3.8-linux-amd64.tar.gz"

# etcd
cd /tmp/k8s-deps; tar zxf etcd-v3.3.8-linux-amd64.tar.gz; cd etcd-v3.3.8-linux-amd64; mv etcd etcdctl /usr/bin/

ssh root@node2 "cd /tmp/k8s-deps; tar zxf etcd-v3.3.8-linux-amd64.tar.gz; cd etcd-v3.3.8-linux-amd64; mv etcd etcdctl /usr/bin/"
ssh root@node3 "cd /tmp/k8s-deps; tar zxf etcd-v3.3.8-linux-amd64.tar.gz; cd etcd-v3.3.8-linux-amd64; mv etcd etcdctl /usr/bin/"

# etcd-1
useradd etcd || true; mkdir -p /opt/etcd; chown -R etcd:etcd /opt/etcd
# etcd-2
ssh root@node2 "useradd etcd || true; mkdir -p /opt/etcd; chown -R etcd:etcd /opt/etcd"
# etcd-3
ssh root@node3 "useradd etcd || true; mkdir -p /opt/etcd; chown -R etcd:etcd /opt/etcd"

cat <<EOF > /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/opt/etcd/
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/usr/bin/etcd \\
--name=etcd1 \\
--cert-file=/etc/kubernetes/ssl/etcd.pem \\
--key-file=/etc/kubernetes/ssl/etcd-key.pem \\
--peer-cert-file=/etc/kubernetes/ssl/etcd.pem \\
--peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \\
--trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
--peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
--initial-advertise-peer-urls=https://172.17.3.108:2380 \\
--listen-peer-urls=https://172.17.3.108:2380 \\
--listen-client-urls=https://172.17.3.108:2379,http://127.0.0.1:2379 \\
--advertise-client-urls=https://172.17.3.108:2379 \\
--initial-cluster-token=k8s-etcd-cluster \\
--initial-cluster=etcd1=https://172.17.3.108:2380,etcd2=https://172.17.3.140:2380,etcd3=https://172.17.3.223:2380 \\
--initial-cluster-state=new \\
--data-dir=/opt/etcd/
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

cd /tmp/k8s-deps
cat <<EOF > etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/opt/etcd/
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/usr/bin/etcd \\
--name=etcd2 \\
--cert-file=/etc/kubernetes/ssl/etcd.pem \\
--key-file=/etc/kubernetes/ssl/etcd-key.pem \\
--peer-cert-file=/etc/kubernetes/ssl/etcd.pem \\
--peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \\
--trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
--peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
--initial-advertise-peer-urls=https://172.17.3.140:2380 \\
--listen-peer-urls=https://172.17.3.140:2380 \\
--listen-client-urls=https://172.17.3.140:2379,http://127.0.0.1:2379 \\
--advertise-client-urls=https://172.17.3.140:2379 \\
--initial-cluster-token=k8s-etcd-cluster \\
--initial-cluster=etcd1=https://172.17.3.108:2380,etcd2=https://172.17.3.140:2380,etcd3=https://172.17.3.223:2380 \\
--initial-cluster-state=new \\
--data-dir=/opt/etcd/
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
scp etcd.service root@node2:/etc/systemd/system/

cat <<EOF > etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/opt/etcd/
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/usr/bin/etcd \\
--name=etcd3 \\
--cert-file=/etc/kubernetes/ssl/etcd.pem \\
--key-file=/etc/kubernetes/ssl/etcd-key.pem \\
--peer-cert-file=/etc/kubernetes/ssl/etcd.pem \\
--peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \\
--trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
--peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
--initial-advertise-peer-urls=https://172.17.3.223:2380 \\
--listen-peer-urls=https://172.17.3.223:2380 \\
--listen-client-urls=https://172.17.3.223:2379,http://127.0.0.1:2379 \\
--advertise-client-urls=https://172.17.3.223:2379 \\
--initial-cluster-token=k8s-etcd-cluster \\
--initial-cluster=etcd1=https://172.17.3.108:2380,etcd2=https://172.17.3.140:2380,etcd3=https://172.17.3.223:2380 \\
--initial-cluster-state=new \\
--data-dir=/opt/etcd/
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
scp etcd.service root@node3:/etc/systemd/system/

echo "systemctl daemon-reload; systemctl enable etcd; systemctl start etcd; systemctl status etcd" > /tmp/init-etcd.sh && cd /tmp && nohup sh init-etcd.sh 2>&1 &

ssh root@node2 "echo \"systemctl daemon-reload; systemctl enable etcd; systemctl start etcd; systemctl status etcd\" > /tmp/init-etcd.sh && cd /tmp && nohup sh init-etcd.sh 2>&1 &"

ssh root@node3 "echo \"systemctl daemon-reload; systemctl enable etcd; systemctl start etcd; systemctl status etcd\" > /tmp/init-etcd.sh && cd /tmp && nohup sh init-etcd.sh 2>&1 &"

test etcd

etcdctl --endpoints=https://172.17.3.108:2379,https://172.17.3.140:2379,https://172.17.3.223:2379 \
--cert-file=/etc/kubernetes/ssl/etcd.pem \
--ca-file=/etc/kubernetes/ssl/ca.pem \
--key-file=/etc/kubernetes/ssl/etcd-key.pem \
cluster-health

master and node

cd /tmp && rm -rf kubernetes
wget https://dl.k8s.io/v1.11.1/kubernetes-server-linux-amd64.tar.gz
tar -xzvf kubernetes-server-linux-amd64.tar.gz; cd kubernetes
scp server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} root@node1:/usr/local/bin/
scp server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} root@node2:/usr/local/bin/
scp server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} root@node3:/usr/local/bin/

# api-server
TOKEN=`head -c 16 /dev/urandom | od -An -t x | tr -d ' '`
echo $TOKEN",kubelet-bootstrap,10001,\"system:bootstrappers\"" > /etc/kubernetes/token.csv
scp /etc/kubernetes/token.csv root@node2:/etc/kubernetes/
scp /etc/kubernetes/token.csv root@node3:/etc/kubernetes/
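# token.csv now contains a single line: <token>,kubelet-bootstrap,10001,"system:bootstrappers"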

# generate the audit policy file
cd /etc/kubernetes
cat <<EOF > audit-policy.yaml
# Log all requests at the Metadata level.
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
- level: Metadata
EOF
scp audit-policy.yaml root@node2:/etc/kubernetes/
scp audit-policy.yaml root@node3:/etc/kubernetes/

# kube-apiserver.service
cat <<EOF > /etc/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
User=root
ExecStart=/usr/local/bin/kube-apiserver \\
--admission-control=MutatingAdmissionWebhook,ValidatingAdmissionWebhook,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \\
--advertise-address=172.17.3.108 \\
--allow-privileged=true \\
--apiserver-count=3 \\
--audit-policy-file=/etc/kubernetes/audit-policy.yaml \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/kubernetes/audit.log \\
--authorization-mode=Node,RBAC \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--enable-swagger-ui=true \\
--etcd-cafile=/etc/kubernetes/ssl/ca.pem \\
--etcd-certfile=/etc/kubernetes/ssl/etcd.pem \\
--etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem \\
--etcd-servers=https://172.17.3.108:2379,https://172.17.3.140:2379,https://172.17.3.223:2379 \\
--event-ttl=1h \\
--kubelet-https=true \\
--insecure-bind-address=127.0.0.1 \\
--insecure-port=8080 \\
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--service-cluster-ip-range=10.254.0.0/18 \\
--service-node-port-range=1-65535 \\
--tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
--tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
--enable-bootstrap-token-auth \\
--token-auth-file=/etc/kubernetes/token.csv \\
--v=1
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# since k8s 1.8 you need to add --authorization-mode=Node
# since k8s 1.8 you need to add NodeRestriction to --admission-control
# since k8s 1.8 you need to add --audit-policy-file=/etc/kubernetes/audit-policy.yaml

# note --service-node-port-range (e.g. 30000-32000, set to 1-65535 above):
# this is the port range used when exposing services externally (NodePort); randomly assigned ports and explicitly requested ports must both fall inside this range.

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver

## for node2
cat <<EOF > /tmp/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
User=root
ExecStart=/usr/local/bin/kube-apiserver \\
--admission-control=MutatingAdmissionWebhook,ValidatingAdmissionWebhook,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \\
--advertise-address=172.17.3.140 \\
--allow-privileged=true \\
--apiserver-count=3 \\
--audit-policy-file=/etc/kubernetes/audit-policy.yaml \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/kubernetes/audit.log \\
--authorization-mode=Node,RBAC \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--enable-swagger-ui=true \\
--etcd-cafile=/etc/kubernetes/ssl/ca.pem \\
--etcd-certfile=/etc/kubernetes/ssl/etcd.pem \\
--etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem \\
--etcd-servers=https://172.17.3.108:2379,https://172.17.3.140:2379,https://172.17.3.223:2379 \\
--event-ttl=1h \\
--kubelet-https=true \\
--insecure-bind-address=127.0.0.1 \\
--insecure-port=8080 \\
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--service-cluster-ip-range=10.254.0.0/18 \\
--service-node-port-range=1-65535 \\
--tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
--tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
--enable-bootstrap-token-auth \\
--token-auth-file=/etc/kubernetes/token.csv \\
--v=1
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

EOF

scp /tmp/kube-apiserver.service root@node2:/etc/systemd/system/
ssh root@node2 "systemctl daemon-reload; systemctl enable kube-apiserver; systemctl start kube-apiserver; systemctl status kube-apiserver"

## for node3
cat <<EOF > /tmp/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
User=root
ExecStart=/usr/local/bin/kube-apiserver \\
--admission-control=MutatingAdmissionWebhook,ValidatingAdmissionWebhook,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \\
--advertise-address=172.17.3.223 \\
--allow-privileged=true \\
--apiserver-count=3 \\
--audit-policy-file=/etc/kubernetes/audit-policy.yaml \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/kubernetes/audit.log \\
--authorization-mode=Node,RBAC \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--enable-swagger-ui=true \\
--etcd-cafile=/etc/kubernetes/ssl/ca.pem \\
--etcd-certfile=/etc/kubernetes/ssl/etcd.pem \\
--etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem \\
--etcd-servers=https://172.17.3.108:2379,https://172.17.3.140:2379,https://172.17.3.223:2379 \\
--event-ttl=1h \\
--kubelet-https=true \\
--insecure-bind-address=127.0.0.1 \\
--insecure-port=8080 \\
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--service-cluster-ip-range=10.254.0.0/18 \\
--service-node-port-range=1-65535 \\
--tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
--tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
--enable-bootstrap-token-auth \\
--token-auth-file=/etc/kubernetes/token.csv \\
--v=1
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

EOF

scp /tmp/kube-apiserver.service root@node3:/etc/systemd/system/
ssh root@node3 "systemctl daemon-reload; systemctl enable kube-apiserver; systemctl start kube-apiserver; systemctl status kube-apiserver"

# kube-controller-manager

cat <<EOF > /etc/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
--address=0.0.0.0 \\
--master=http://127.0.0.1:8080 \\
--allocate-node-cidrs=true \\
--service-cluster-ip-range=10.254.0.0/18 \\
--cluster-cidr=10.254.64.0/18 \\
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--feature-gates=RotateKubeletServerCertificate=true \\
--experimental-cluster-signing-duration=86700h0m0s \\
--cluster-name=kubernetes \\
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/etc/kubernetes/ssl/ca.pem \\
--leader-elect=true \\
--node-monitor-grace-period=40s \\
--node-monitor-period=5s \\
--pod-eviction-timeout=5m0s \\
--v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

EOF

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager

scp /etc/systemd/system/kube-controller-manager.service root@node2:/etc/systemd/system/
ssh root@node2 "systemctl daemon-reload; systemctl enable kube-controller-manager; systemctl start kube-controller-manager; systemctl status kube-controller-manager"

# scheduler
cat <<EOF > /etc/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
--address=0.0.0.0 \\
--master=http://127.0.0.1:8080 \\
--leader-elect=true \\
--v=1
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler

scp /etc/systemd/system/kube-scheduler.service root@node2:/etc/systemd/system/
ssh root@node2 "systemctl daemon-reload; systemctl enable kube-scheduler; systemctl start kube-scheduler; systemctl status kube-scheduler"

kubectl get componentstatuses

ssh root@node2 "kubectl get componentstatuses"

# kubelet
cd
# first create the cluster role binding; the user is the one configured in token.csv on the masters
# this only needs to be created once
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

# set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://172.17.3.108:6443 \
--kubeconfig=bootstrap.kubeconfig

# set client credentials
kubectl config set-credentials kubelet-bootstrap \
--token=`cat /etc/kubernetes/token.csv | awk '{split($0,a,",");print a[1]}'` \
--kubeconfig=bootstrap.kubeconfig

# set the context
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig

# use the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

# copy the generated bootstrap.kubeconfig to all nodes
cp bootstrap.kubeconfig /etc/kubernetes/

scp bootstrap.kubeconfig root@node2:/etc/kubernetes/
scp bootstrap.kubeconfig root@node3:/etc/kubernetes/

# kubelet config
mkdir -p /var/lib/kubelet
ssh root@node2 "mkdir -p /var/lib/kubelet"
ssh root@node3 "mkdir -p /var/lib/kubelet"

cat <<EOF > /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \\
--cgroup-driver=cgroupfs \\
--hostname-override=node1 \\
--pod-infra-container-image=k8s.gcr.io/pause-amd64:3.1 \\
--experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \\
--feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \\
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
--cert-dir=/etc/kubernetes/ssl \\
--cluster_dns=10.254.0.2 \\
--cluster_domain=cluster.local. \\
--hairpin-mode promiscuous-bridge \\
--allow-privileged=true \\
--fail-swap-on=false \\
--serialize-image-pulls=false \\
--logtostderr=true \\
--max-pods=512 \\
--runtime-cgroups=/systemd/system.slice \\
--kubelet-cgroups=/systemd/system.slice \\
--v=2

[Install]
WantedBy=multi-user.target

EOF

# in the config above:
# node1 is this host's hostname
# 10.254.0.2 is the pre-allocated cluster DNS address
# cluster.local. is the kubernetes cluster domain
# k8s.gcr.io/pause-amd64:3.1 is the pod infra (pause) image

systemctl daemon-reload; systemctl enable kubelet; systemctl start kubelet; systemctl status kubelet

cat <<EOF > /tmp/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \\
--cgroup-driver=cgroupfs \\
--hostname-override=node2 \\
--pod-infra-container-image=k8s.gcr.io/pause-amd64:3.1 \\
--experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \\
--feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \\
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
--cert-dir=/etc/kubernetes/ssl \\
--cluster_dns=10.254.0.2 \\
--cluster_domain=cluster.local. \\
--hairpin-mode promiscuous-bridge \\
--allow-privileged=true \\
--fail-swap-on=false \\
--serialize-image-pulls=false \\
--logtostderr=true \\
--max-pods=512 \\
--runtime-cgroups=/systemd/system.slice \\
--kubelet-cgroups=/systemd/system.slice \\
--v=2

[Install]
WantedBy=multi-user.target

EOF
scp /tmp/kubelet.service root@node2:/etc/systemd/system/
ssh root@node2 "systemctl daemon-reload; systemctl enable kubelet; systemctl start kubelet; systemctl status kubelet"

cat <<EOF > /tmp/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \\
--cgroup-driver=cgroupfs \\
--hostname-override=node3 \\
--pod-infra-container-image=k8s.gcr.io/pause-amd64:3.1 \\
--experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \\
--feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \\
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
--cert-dir=/etc/kubernetes/ssl \\
--cluster_dns=10.254.0.2 \\
--cluster_domain=cluster.local. \\
--hairpin-mode promiscuous-bridge \\
--allow-privileged=true \\
--fail-swap-on=false \\
--serialize-image-pulls=false \\
--logtostderr=true \\
--max-pods=512 \\
--runtime-cgroups=/systemd/system.slice \\
--kubelet-cgroups=/systemd/system.slice \\
--v=2

[Install]
WantedBy=multi-user.target

EOF
scp /tmp/kubelet.service root@node3:/etc/systemd/system/
ssh root@node3 "systemctl daemon-reload; systemctl enable kubelet; systemctl start kubelet; systemctl status kubelet"

# find all csrs and approve them
kubectl get csr | grep -v NAME | awk '{print $1}' | xargs kubectl certificate approve
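# the nodes should register and appear shortly after their CSRs are approved
kubectl get nodes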

# kube-proxy
# configure all 3 hosts; the server is set to the local 127.0.0.1 on each
# set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=kube-proxy.kubeconfig

# set client credentials
kubectl config set-credentials kube-proxy \
--client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
--client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig

# set the context
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig

# use the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

# copy to every node that needs it
scp kube-proxy.kubeconfig root@node1:/etc/kubernetes/
scp kube-proxy.kubeconfig root@node2:/etc/kubernetes/
scp kube-proxy.kubeconfig root@node3:/etc/kubernetes/

mkdir -p /var/lib/kube-proxy
ssh root@node2 "mkdir -p /var/lib/kube-proxy"
ssh root@node3 "mkdir -p /var/lib/kube-proxy"

cat <<EOF > /etc/systemd/system/kube-proxy.service

[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \\
--bind-address=172.17.3.108 \\
--hostname-override=node1 \\
--cluster-cidr=10.254.64.0/18 \\
--masquerade-all \\
--proxy-mode=ipvs \\
--ipvs-min-sync-period=5s \\
--ipvs-sync-period=5s \\
--ipvs-scheduler=rr \\
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \\
--logtostderr=true \\
--v=1
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

EOF

cat <<EOF > /tmp/kube-proxy.service

[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \\
--bind-address=172.17.3.140 \\
--hostname-override=node2 \\
--cluster-cidr=10.254.64.0/18 \\
--masquerade-all \\
--proxy-mode=ipvs \\
--ipvs-min-sync-period=5s \\
--ipvs-sync-period=5s \\
--ipvs-scheduler=rr \\
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \\
--logtostderr=true \\
--v=1
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

EOF

scp /tmp/kube-proxy.service root@node2:/etc/systemd/system/

cat <<EOF > /tmp/kube-proxy.service

[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \\
--bind-address=172.17.3.223 \\
--hostname-override=node3 \\
--cluster-cidr=10.254.64.0/18 \\
--masquerade-all \\
--proxy-mode=ipvs \\
--ipvs-min-sync-period=5s \\
--ipvs-sync-period=5s \\
--ipvs-scheduler=rr \\
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \\
--logtostderr=true \\
--v=1
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

EOF

scp /tmp/kube-proxy.service root@node3:/etc/systemd/system/

ssh root@node1 "mkdir -p /var/lib/kube-proxy"
ssh root@node2 "mkdir -p /var/lib/kube-proxy"
ssh root@node3 "mkdir -p /var/lib/kube-proxy"

ssh root@node1 "systemctl daemon-reload; systemctl enable kube-proxy; systemctl start kube-proxy; systemctl status kube-proxy"
ssh root@node2 "systemctl daemon-reload; systemctl enable kube-proxy; systemctl start kube-proxy; systemctl status kube-proxy"
ssh root@node3 "systemctl daemon-reload; systemctl enable kube-proxy; systemctl start kube-proxy; systemctl status kube-proxy"


apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 172.17.3.223
clientConnection:
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 10.254.64.0/18
healthzBindAddress: 172.17.3.223:10256
hostnameOverride: node3
kind: KubeProxyConfiguration
metricsBindAddress: 172.17.3.223:10249
mode: "ipvs"

Calico

# note: edit the following options in calico.yaml:

# etcd endpoints

etcd_endpoints: "https://172.17.3.108:2379,https://172.17.3.140:2379,https://172.17.3.223:2379"

# etcd certificate paths
# If you're using TLS enabled etcd uncomment the following.
# You must also populate the Secret below with these files.
etcd_ca: "/calico-secrets/etcd-ca"
etcd_cert: "/calico-secrets/etcd-cert"
etcd_key: "/calico-secrets/etcd-key"

# base64-encoded etcd certificates (run the commands in parentheses and paste the output here)

data:
  etcd-key: (cat /etc/kubernetes/ssl/etcd-key.pem | base64 -w 0)
  etcd-cert: (cat /etc/kubernetes/ssl/etcd.pem | base64 -w 0)
  etcd-ca: (cat /etc/kubernetes/ssl/ca.pem | base64 -w 0)


# change the pod IP range

- name: CALICO_IPV4POOL_CIDR
  value: "10.254.64.0/18"
  • vi /etc/systemd/system/kubelet.service
    --network-plugin=cni \
    # reload the configuration and restart kubelet
    systemctl daemon-reload
    systemctl restart kubelet.service
    systemctl status kubelet.service

Install calicoctl

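A minimal sketch (the calicoctl version and config file below are assumptions, matched to the etcd cluster above):

wget -O /usr/local/bin/calicoctl https://github.com/projectcalico/calicoctl/releases/download/v3.1.3/calicoctl
chmod +x /usr/local/bin/calicoctl

mkdir -p /etc/calico
cat <<EOF > /etc/calico/calicoctl.cfg
apiVersion: projectcalico.org/v3
kind: CalicoAPIConfig
spec:
  datastoreType: "etcdv3"
  etcdEndpoints: "https://172.17.3.108:2379,https://172.17.3.140:2379,https://172.17.3.223:2379"
  etcdCACertFile: "/etc/kubernetes/ssl/ca.pem"
  etcdCertFile: "/etc/kubernetes/ssl/etcd.pem"
  etcdKeyFile: "/etc/kubernetes/ssl/etcd-key.pem"
EOF

calicoctl get nodes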

flannel

  • vi /usr/lib/systemd/system/docker.service
# either source the flannel subnet file and pass the values explicitly:
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}

# or source the docker options file generated by flannel's mk-docker-opts.sh:
EnvironmentFile=-/run/flannel/docker
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock $DOCKER_NETWORK_OPTIONS
etcdctl --endpoints=https://172.17.3.108:2379,https://172.17.3.140:2379,https://172.17.3.223:2379 \
--cert-file=/etc/kubernetes/ssl/etcd.pem \
--ca-file=/etc/kubernetes/ssl/ca.pem \
--key-file=/etc/kubernetes/ssl/etcd-key.pem \
set /flannel/network/config '{"Network":"10.254.64.0/18","SubnetLen":24,"Backend":{"Type":"vxlan"}}'

# check
etcdctl --endpoints=https://172.17.3.108:2379,https://172.17.3.140:2379,https://172.17.3.223:2379 \
--cert-file=/etc/kubernetes/ssl/etcd.pem \
--ca-file=/etc/kubernetes/ssl/ca.pem \
--key-file=/etc/kubernetes/ssl/etcd-key.pem \
get /flannel/network/config
# list
etcdctl --endpoints=https://172.17.3.108:2379,https://172.17.3.140:2379,https://172.17.3.223:2379 \
--cert-file=/etc/kubernetes/ssl/etcd.pem \
--ca-file=/etc/kubernetes/ssl/ca.pem \
--key-file=/etc/kubernetes/ssl/etcd-key.pem \
ls /flannel/network/subnets

rpm -ivh flannel-0.10.0-1.x86_64.rpm

cat <<EOF > /etc/sysconfig/flanneld
# Flanneld configuration options

# etcd url location. Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="https://172.17.3.108:2379,https://172.17.3.140:2379,https://172.17.3.223:2379"

# etcd config key. This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/flannel/network"

# Any additional options that you want to pass
#FLANNEL_OPTIONS=""

FLANNEL_OPTIONS="-ip-masq=true -etcd-cafile=/etc/kubernetes/ssl/ca.pem -etcd-certfile=/etc/kubernetes/ssl/etcd.pem -etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem -iface=eth0"
EOF
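# enable and start flanneld (service name assumed from the rpm), then restart docker so it picks up the flannel network settings
systemctl daemon-reload
systemctl enable flanneld
systemctl start flanneld
systemctl restart docker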

ingress

curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/namespace.yaml
curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/default-backend.yaml
curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/configmap.yaml
curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/tcp-services-configmap.yaml
curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/udp-services-configmap.yaml
curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/rbac.yaml

curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/with-rbac.yaml


kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/default-backend.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/configmap.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/tcp-services-configmap.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/udp-services-configmap.yaml

# deploy the Ingress RBAC resources
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/rbac.yaml

# deploy the Ingress Controller
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/with-rbac.yaml

ref

rook-ceph debug

api/v1/minial 500

ceph dashboard ac-role-create admin-no-iscsi

for scope in dashboard-settings log rgw prometheus grafana nfs-ganesha manager hosts rbd-image config-opt rbd-mirroring cephfs user osd pool monitor; do
ceph dashboard ac-role-add-scope-perms admin-no-iscsi ${scope} create delete read update;
done

ceph dashboard ac-user-set-roles admin admin-no-iscsi

# redis-cluster

kubectl -n redis exec -it redis-cluster-0 -- redis-cli --cluster create --cluster-replicas 1 \
$(kubectl -n redis get pods -l app=redis-cluster -o jsonpath='{range.items[*]}{.status.podIP}:6379 ')

Bull.im API Documentation

API base URL

https://api.bull.im

Common response headers

header      description     notes
x-session   session value
x-csrf      csrf token

Common request headers

Main common parameters:

header      description                              notes
x-session   taken from the header of the same name   (optional) falls back to the cookie
x-csrf      taken from the header of the same name   required for POST requests, protects against XSS/CSRF
uid         user id                                  (optional)
Locale      locale (86)                              (optional)
TimeZone    time zone (+8)                           (optional)
Lang        language (zh-CN)                         (optional)
IP          ip (127.0.0.1)                           (optional)
DevID       uuid                                     (optional)
Platform    "Android", "iOS", "Web"                  required
DevOS       windows, ios                             required
DevHW       {"cpu":4,"gpu":"","mem":"2GB"}           required

Common message format

Error messages:
success: http.statuscode == 200
failed: http.statuscode != 200

{
"code": xxxxx, // error code
"message": xxxx // error message
}

{
1001: "verify captcha code fail", // captcha code incorrect
1002: "need captcha code", // captcha code required
1003: "email/sms verifycode fail", // email/sms verification code incorrect
1004: "email/sms verifycode timeout", // email/sms verification code expired
1005: "update error", // update failed
1006: "update success", // update succeeded

1101: "user exist", // the account already exists
1102: "user not found", // the account does not exist
1103: "user auth error", // user authentication failed
1104: "login failed", // login failed
1105: "register failed", // registration failed
1106: "verify salt failed", // salt verification failed
1107: "Username exists", // username already exists
1108: "Email exists", // email already exists
1109: "Phone exists", // phone number already exists
1110: "Send sms/email failed", // failed to send the verification code

1201: "2FA Has Enabled", // 2FA is already enabled
1202: "2FA Verify Fail", // 2FA verification failed
1203: "2FA Bind Fail", // 2FA binding failed
1204: "2FA Has Disabled", // 2FA is not enabled
1205: "2FA need", // 2FA verification required
1206: "Verify user need", // user verification required
1207: "Verify user failed", // user verification failed
}

All POST/PUT endpoints will eventually add rate-limit-triggered captcha verification. Initially no captcha is required, but once the limit is triggered the API returns error code 1002 asking the client to resubmit with a captcha. A few endpoints require an image captcha by default.

Captcha parameters:
name | type | required | description
:---: | :---: | :---: | :---
generate_id | string | true | 20 characters
generate_code | string | true | 4 characters
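
For example, a request that has hit the rate limit can be resubmitted with the captcha fields attached (a sketch only; the endpoint, omitted headers and all values are illustrative):

curl -X POST https://api.bull.im/v1/login \
  -H 'Platform: Web' -H 'DevOS: windows' -H 'DevHW: {"cpu":4,"gpu":"","mem":"2GB"}' \
  -H 'x-csrf: <csrf token from a previous response>' \
  -d 'account=admin@bull.im' \
  -d 'password=<md5(inputpassword+salt)>' \
  -d 'salt=<salt>' \
  -d 'generate_id=rbrQk24qJbykUDIw4kCr' \
  -d 'generate_code=1234'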

1. Request an image captcha id

  • Endpoint

GET /v1/captcha/generate

  • Request parameters: none

  • Response

{
"code": 200,
"data": {
"generate_id": "rbrQk24qJbykUDIw4kCr",
"expire": 3600
}
}

2. Get the captcha image

  • Endpoint

GET /v1/captcha/generate/${captcha_id}.png

  • Request parameters: none

3. Get a QR code image

  • Endpoint

GET /v1/qrcode?text=${otpauth}

  • Request parameters: none

4. Get country/area codes

  • Endpoint

GET /v1/tools/areas

  • Request parameters: none

5. Send registration verification code

  • Endpoint

POST /v1/register/start

  • Request parameters:
name type required description
account string true phone: +86|15102366688 email: admin@bull.im

6. Resend registration verification code

  • Endpoint

POST /v1/register/start/vcode

  • Request parameters:
name type required description
account string true phone: +86|15102366688 email: admin@bull.im

7. Register an account

  • Endpoint

POST /v1/register

  • Request parameters:
name type required description
account string true phone: +86|15102366688 email: admin@bull.im
password string true md5(inputpassword+salt)
salt string true md5(code)
code string true len(6)
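
A sketch of the client-side derivation of salt and password described above (bash; inputpassword and the code value are placeholders):

code="123456"                                                            # 6-digit verification code
salt=$(echo -n "$code" | md5sum | awk '{print $1}')                      # salt = md5(code)
password=$(echo -n "inputpassword${salt}" | md5sum | awk '{print $1}')   # password = md5(inputpassword+salt)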

8. Get account info (salt) before login

  • Endpoint

GET /v1/login/start

  • Request parameters:
name type required description
account string true phone: +86|15102366688 email: admin@bull.im
  • Response:
{
"salt": "xxxxx"
}

9. Log in with account/password, log out

  • Endpoint

POST /v1/login (log in)
GET /v1/logout (log out)

  • Request parameters:
name type required description
account string true phone: +86|15102366688 email: admin@bull.im
password string true md5(inputpassword+salt)
salt string true
totp_code string false two-factor code, only present after 2FA has been enabled; if login reports that a code is required, show the 2FA input dialog and resubmit with this field (see big.one's 2FA dialog for reference)

10. Send verification code for code-based login

  • Endpoint

POST /v1/login/start/vcode

  • Request parameters:
name type required description
account string true phone: +86|15102366688 email: admin@bull.im

11. Log in with verification code

  • Endpoint

POST /v1/login/code

  • Request parameters:
name type required description
account string true phone: +86|15102366688 email: admin@bull.im
code string true len(6)
totp_code string false two-factor code, only present after 2FA has been enabled; if login reports that a code is required, show the 2FA input dialog and resubmit with this field (see big.one's 2FA dialog for reference)

12. Verify the account before password recovery

  • Endpoint

POST /v1/forget/password/start

  • Request parameters:
name type required description
account string true phone: +86|15102366688 email: admin@bull.im

13. Resend verification code for password recovery

  • Endpoint

POST /v1/forget/password/start/vcode

  • Request parameters:
name type required description
account string true phone: +86|15102366688 email: admin@bull.im

14. Reset password (password recovery)

  • Endpoint

POST /v1/forget/password

  • Request parameters:
name type required description
account string true phone: +86|15102366688 email: admin@bull.im
code string true len(6)
salt string true md5(code)
password string true md5(inputpassword+salt)

15. Get current user info

  • Requires login: true

  • Endpoint

GET /v1/account/

  • Request parameters: none

16. Send verification code (logged-in user)

  • Requires login: true

  • Endpoint

POST /v1/account/vcode

  • Request parameters:
name type required description
type string true only two values for now: email/phone
This is used when a logged-in user needs a verification code to change something. Before sending, let the user pick the verification method; the user info response contains phone_status and email_status indicating which methods are available. Whatever method was used to register is enabled (true) by default.

17. Change password using the current password

  • Requires login: true

  • Endpoint

POST /v1/account/reset/password

  • Request parameters:
name type required description
salt string true md5(code), the current salt
new_salt string true md5(code), the new salt
password string true md5(inputpassword+salt)
new_password string true md5(inputpassword+new_salt)
generate_id string false 20 characters (only required once rate-limited; not needed by default)
generate_code string false 6 characters (only required once rate-limited; not needed by default)

18. Change password using a verification code

  • Requires login: true

  • Endpoint

POST /v1/account/reset/password/code

  • Request parameters:
name type required description
type string true only two values for now: email/phone, matching the method used to send the code
code string true len(6)
salt string true md5(code)
password string true md5(inputpassword+salt)

19. 2FA (TOTP): get qrcodeUrl && secret

  • Requires login: true

  • Endpoint

GET /v1/acccount/totp/start

  • Request parameters: none
  • Response:
{
"code": 200,
"data": {
"otpauth": "otpauth%3A%2F%2Ftotp%2FBULL.im%3A8615102366689%3Fissuer%3DBULL.im%26secret%3DHLBUICZJTZBPSBIK",
"secret": "HLBUICZJTZBPSBIK"
}
}

Pass the otpauth value as the text parameter of the QR code endpoint above to display the 2FA binding QR code.
secret is the key; the user should keep it safe.

20. Enable 2FA

  • Requires login: true

  • Endpoint

POST /v1/account/totp/enable

  • Request parameters:
name type required description
code string true len(6)

21. Disable 2FA

  • Requires login: true

  • Endpoint

POST /v1/accoount/totp/disable

  • Request parameters:
name type required description
code string true len(6)

22. Update general user info

  • Requires login: true

  • Endpoint

PUT /v1/accoount

  • Request parameters:
name type required description
name string false nickname
nation string false country/region
timezone string false time zone
avatar string false avatar image URL
lang string false language

Only pass the fields that need to be updated.

23. Verify the code sent when a logged-in user changes email or phone

  • Requires login: true

  • Endpoint

POST /v1/accoount/verify/code

  • Request parameters:
name type required description
type string true only two values for now: email/phone, matching the method used to send the code
code string true len(6)

24. Change email (logged-in user)

  • Requires login: true

  • Endpoint

PUT /v1/accoount/email

  • Request parameters:
name type required description
account string true phone: +86|15102366688 email: admin@bull.im
code string true len(6)

Note: the account and verification code here are for the new address. Before changing, verify the current account by sending a code with /account/vcode and checking it with /account/verify/code; only then can the new account be submitted here.

25. Change phone number (logged-in user)

  • Requires login: true

  • Endpoint

PUT /v1/accoount/phone

  • Request parameters:
name type required description
account string true phone: +86|15102366688 email: admin@bull.im
code string true len(6)

Note: the account and verification code here are for the new phone number. Before changing, verify the current account by sending a code with /account/vcode and checking it with /account/verify/code; only then can the new account be submitted here.

26. Get an avatar upload token (logged-in user)

  • Requires login: true

  • Endpoint

GET /v1/accoount/upload/token

  • Request parameters: none

Redis Cluster

Redis cluster

A redis cluster running in Kubernetes.

:warning: Note: this repository is no longer actively maintained. While it served as a nice example to run Redis Cluster in Kubernetes when I wrote it, there are currently more stable solutions to spin up a cluster. I recommend looking at community-built Kubernetes Operators for Redis, or an actively maintained Helm chart.

If the cluster configuration of a redis node is lost in some way, it will come back with a different ID, which upsets the balance in the cluster (and probably in the Force). To prevent this, the setup uses a combination of Kubernetes StatefulSets and PersistentVolumeClaims to make sure the state of the cluster is maintained after rescheduling or failures.

Setup

kubectl apply -f redis-cluster.yml

This will spin up 6 redis-cluster pods one by one, which may take a while. After all pods are in a running state, you can initialize the cluster using the redis-cli in any of the pods. After the initialization, you will end up with 3 master and 3 slave nodes.

kubectl exec -it redis-cluster-0 -- redis-cli --cluster create --cluster-replicas 1 \
$(kubectl get pods -l app=redis-cluster -o jsonpath='{range.items[*]}{.status.podIP}:6379 ')

Adding nodes

Adding nodes to the cluster involves a few manual steps. First, let’s add two nodes:

kubectl scale statefulset redis-cluster --replicas=8

Have the first new node join the cluster as master:

kubectl exec redis-cluster-0 -- redis-cli --cluster add-node \
$(kubectl get pod redis-cluster-6 -o jsonpath='{.status.podIP}'):6379 \
$(kubectl get pod redis-cluster-0 -o jsonpath='{.status.podIP}'):6379

The second new node should join the cluster as slave. This will automatically bind to the master with the least slaves (in this case, redis-cluster-6)

kubectl exec redis-cluster-0 -- redis-cli --cluster add-node --cluster-slave \
$(kubectl get pod redis-cluster-7 -o jsonpath='{.status.podIP}'):6379 \
$(kubectl get pod redis-cluster-0 -o jsonpath='{.status.podIP}'):6379

Finally, automatically rebalance the masters:

kubectl exec redis-cluster-0 -- redis-cli --cluster rebalance --cluster-use-empty-masters \
$(kubectl get pod redis-cluster-0 -o jsonpath='{.status.podIP}'):6379

Removing nodes

Removing slaves

Slaves can be deleted safely. First, let’s get the id of the slave:

$ kubectl exec redis-cluster-7 -- redis-cli cluster nodes | grep myself
3f7cbc0a7e0720e37fcb63a81dc6e2bf738c3acf 172.17.0.11:6379 myself,slave 32f250e02451352e561919674b8b705aef4dbdc6 0 0 0 connected

Then delete it:

kubectl exec redis-cluster-0 -- redis-cli --cluster del-node \
$(kubectl get pod redis-cluster-0 -o jsonpath='{.status.podIP}'):6379 \
3f7cbc0a7e0720e37fcb63a81dc6e2bf738c3acf

Removing a master

To remove master nodes from the cluster, we first have to move the slots used by them to the rest of the cluster, to avoid data loss.

First, take note of the id of the master node we are removing:

$ kubectl exec redis-cluster-6 -- redis-cli cluster nodes | grep myself
27259a4ae75c616bbde2f8e8c6dfab2c173f2a1d 172.17.0.10:6379 myself,master - 0 0 9 connected 0-1364 5461-6826 10923-12287

Also note the id of any other master node:

$ kubectl exec redis-cluster-6 -- redis-cli cluster nodes | grep master | grep -v myself
32f250e02451352e561919674b8b705aef4dbdc6 172.17.0.4:6379 master - 0 1495120400893 2 connected 6827-10922
2a42aec405aca15ec94a2470eadf1fbdd18e56c9 172.17.0.6:6379 master - 0 1495120398342 8 connected 12288-16383
0990136c9a9d2e48ac7b36cfadcd9dbe657b2a72 172.17.0.2:6379 master - 0 1495120401395 1 connected 1365-5460

Then, use the reshard command to move all slots from redis-cluster-6:

kubectl exec redis-cluster-0 -- redis-cli --cluster reshard --cluster-yes \
--cluster-from 27259a4ae75c616bbde2f8e8c6dfab2c173f2a1d \
--cluster-to 32f250e02451352e561919674b8b705aef4dbdc6 \
--cluster-slots 16384 \
$(kubectl get pod redis-cluster-0 -o jsonpath='{.status.podIP}'):6379

After resharding, it is safe to delete the redis-cluster-6 master node:

kubectl exec redis-cluster-0 -- redis-cli --cluster del-node \
$(kubectl get pod redis-cluster-0 -o jsonpath='{.status.podIP}'):6379 \
27259a4ae75c616bbde2f8e8c6dfab2c173f2a1d

Finally, we can rebalance the remaining masters to evenly distribute slots:

kubectl exec redis-cluster-0 -- redis-cli --cluster rebalance --cluster-use-empty-masters \
$(kubectl get pod redis-cluster-0 -o jsonpath='{.status.podIP}'):6379

Scaling down

After the master has been resharded and both nodes are removed from the cluster, it is safe to scale down the statefulset:

kubectl scale statefulset redis-cluster --replicas=6

Cleaning up

kubectl delete statefulset,svc,configmap,pvc -l app=redis-cluster

AES + RSA API data encryption

Solution
Combine symmetric encryption (AES) with asymmetric encryption (RSA).

For every call, the client generates a random AES password and encrypts the request plaintext into request ciphertext. The AES password is then encrypted with the RSA public key, and this password ciphertext is sent to the backend together with the request ciphertext. The backend decrypts the password ciphertext with its RSA private key to recover the AES password, uses it to decrypt the request ciphertext, and processes the request. When returning the response, the backend encrypts the response plaintext with the same AES password and sends the response ciphertext to the client, which decrypts it with the AES password to get the response plaintext.

(diagram: 20180227173359994_biliyun)

Plaintext data is encrypted with the symmetric AES algorithm; the AES key is encrypted with RSA for transport, and the data is digitally signed.
Structure of the encrypted result: [signature(AES-key ciphertext + data ciphertext)][AES-key ciphertext][data ciphertext]
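
A rough illustration of the flow with openssl (the AES mode, key size and file names here are placeholders, not necessarily what the service uses):

openssl rand -hex 32 > aes.key                                                         # random per-call AES password
openssl enc -aes-256-cbc -salt -pass file:aes.key -in request.json -out request.enc    # encrypt the call plaintext
openssl rsautl -encrypt -pubin -inkey rsa_pub.pem -in aes.key -out aes.key.enc         # encrypt the AES password with the RSA public key
cat aes.key.enc request.enc | openssl dgst -sha256 -sign client_priv.pem -out request.sig   # sign (AES-key ciphertext + data ciphertext)

# server side: recover the AES password, then decrypt the call
openssl rsautl -decrypt -inkey rsa_priv.pem -in aes.key.enc -out aes.key
openssl enc -d -aes-256-cbc -pass file:aes.key -in request.enc -out request.json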

References
A Go implementation of signing and encrypting an API with AES ECB PKCS5Padding + RSA
Encrypting and decrypting data with AES and RSA
RSA summary

AES CFB reference for Node.js

A brief introduction to the five AES modes (GCM, CBC, CFB, etc.)