Tag Archives: etcd

[How to Modify] etcd-server-8-12: ERROR (spawn error)

My problem was here:

 vi etcd-server-startup.sh

#This is wrong

[program:etcd-server-7-12]
command=/opt/etcd/etcd-server-startup.sh              ; the program (relative uses PATH, can take args)
numprocs=1                                            ; number of process copies to start (def 1)
directory=/opt/etcd                                   ; directory to cwd to before exec (def no cwd)
autostart=true                                        ; start at supervisord start (default: true)
autorestart=true                                      ; restart at unexpected quit (default: true)
startsecs=30                                          ; number of secs prog must stay running (def. 1)
startretries=3                                        ; max # of serial start failures (default 3)
exitcodes=0,2                                         ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                       ; signal used to kill process (default TERM)
stopwaitsecs=10                                       ; max num secs to wait b4 SIGKILL (default 10)
user=etcd                                             ; setuid to this UNIX account to run the program
redirect_stderr=true                                  ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/etcd-server/etcd.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                          ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=5                              ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                           ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                           ; emit events on stdout writes (default false)


#Right
```bash
#!/bin/sh
./etcd --name etcd-server-8-12 \
    --data-dir /data/etcd/etcd-server \
    --listen-peer-urls https://192.168.118.12:2380 \
    --listen-client-urls https://192.168.118.12:2379,http://127.0.0.1:2379 \
    --quota-backend-bytes 8000000000 \
    --initial-advertise-peer-urls https://192.168.118.12:2380 \
    --advertise-client-urls https://192.168.118.12:2379,http://127.0.0.1:2379 \
    --initial-cluster  etcd-server-8-12=https://192.168.118.12:2380,etcd-server-8-21=https://192.168.118.21:2380,etcd-server-8-22=https://192.168.118.22:2380 \
    --ca-file ./certs/ca.pem \
    --cert-file ./certs/etcd-peer.pem \
    --key-file ./certs/etcd-peer-key.pem \
    --client-cert-auth  \
    --trusted-ca-file ./certs/ca.pem \
    --peer-ca-file ./certs/ca.pem \
    --peer-cert-file ./certs/etcd-peer.pem \
    --peer-key-file ./certs/etcd-peer-key.pem \
    --peer-client-cert-auth \
    --peer-trusted-ca-file ./certs/ca.pem \
    --log-output stdout
```

etcd start/stop commands (via supervisorctl)

 ~]# supervisorctl start etcd-server-7-12
 ~]# supervisorctl stop etcd-server-7-12
 ~]# supervisorctl restart etcd-server-7-12
 ~]# supervisorctl status etcd-server-7-12
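
If the program still shows a spawn error after fixing the script, the usual suspects are the script not being executable or supervisord not having re-read the [program] config. A quick hedged checklist (paths taken from the config above):

 ~]# chmod +x /opt/etcd/etcd-server-startup.sh       # spawn error is often just a missing execute bit
 ~]# supervisorctl update                            # re-read the [program:...] section
 ~]# supervisorctl tail etcd-server-7-12             # recent stdout (stderr is redirected to stdout here)
 ~]# tail -f /data/logs/etcd-server/etcd.stdout.log  # or follow the log file configured above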

[Solved] Elasticsearch-7.2.1 startup error: ERROR: [1] bootstrap checks failed

1. elasticsearch-7.2.1 startup error: the default discovery settings are unsuitable for production use; at least one of [discovery.seed_hosts, discovery.seed_providers, cluster.initial_master_nodes] must be configured.

[elsearch@slaver2 elasticsearch-7.2.1]$ ./bin/elasticsearch
future versions of Elasticsearch will require Java 11; your Java version from [/usr/local/soft/jdk1.8.0_281/jre] does not meet this requirement
[2021-03-23T15:13:43,592][INFO ][o.e.e.NodeEnvironment    ] [slaver2] using [1] data paths, mounts [[/ (/dev/mapper/centos-root)]], net usable_space [1.1gb], net total_space [9.9gb], types [xfs]
[2021-03-23T15:13:43,599][INFO ][o.e.e.NodeEnvironment    ] [slaver2] heap size [990.7mb], compressed ordinary object pointers [true]
[2021-03-23T15:13:43,605][INFO ][o.e.n.Node               ] [slaver2] node name [slaver2], node ID [FsI1qieBQ5Kn4MYh001oHQ], cluster name [elasticsearch]
[2021-03-23T15:13:43,607][INFO ][o.e.n.Node               ] [slaver2] version[7.2.1], pid[10143], build[default/tar/fe6cb20/2019-07-24T17:58:29.979462Z], OS[Linux/3.10.0-1160.el7.x86_64/amd64], JVM[Oracle Corporation/Java HotSpot(TM) 64-Bit Server VM/1.8.0_281/25.281-b09]
[2021-03-23T15:13:43,610][INFO ][o.e.n.Node               ] [slaver2] JVM home [/usr/local/soft/jdk1.8.0_281/jre]
[2021-03-23T15:13:43,612][INFO ][o.e.n.Node               ] [slaver2] JVM arguments [-Xms1g, -Xmx1g, -XX:+UseConcMarkSweepGC, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -Des.networkaddress.cache.ttl=60, -Des.networkaddress.cache.negative.ttl=10, -XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, -XX:-OmitStackTraceInFastThrow, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, -Dio.netty.recycler.maxCapacityPerThread=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, -Djava.io.tmpdir=/tmp/elasticsearch-6519446121284753262, -XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=data, -XX:ErrorFile=logs/hs_err_pid%p.log, -XX:+PrintGCDetails, -XX:+PrintGCDateStamps, -XX:+PrintTenuringDistribution, -XX:+PrintGCApplicationStoppedTime, -Xloggc:logs/gc.log, -XX:+UseGCLogFileRotation, -XX:NumberOfGCLogFiles=32, -XX:GCLogFileSize=64m, -Dio.netty.allocator.type=unpooled, -XX:MaxDirectMemorySize=536870912, -Des.path.home=/usr/local/soft/elasticsearch-7.2.1, -Des.path.conf=/usr/local/soft/elasticsearch-7.2.1/config, -Des.distribution.flavor=default, -Des.distribution.type=tar, -Des.bundled_jdk=true]
[2021-03-23T15:13:49,428][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [aggs-matrix-stats]
[2021-03-23T15:13:49,429][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [analysis-common]
[2021-03-23T15:13:49,431][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [data-frame]
[2021-03-23T15:13:49,433][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [ingest-common]
[2021-03-23T15:13:49,434][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [ingest-geoip]
[2021-03-23T15:13:49,435][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [ingest-user-agent]
[2021-03-23T15:13:49,435][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [lang-expression]
[2021-03-23T15:13:49,436][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [lang-mustache]
[2021-03-23T15:13:49,438][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [lang-painless]
[2021-03-23T15:13:49,439][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [mapper-extras]
[2021-03-23T15:13:49,441][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [parent-join]
[2021-03-23T15:13:49,443][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [percolator]
[2021-03-23T15:13:49,445][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [rank-eval]
[2021-03-23T15:13:49,446][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [reindex]
[2021-03-23T15:13:49,447][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [repository-url]
[2021-03-23T15:13:49,448][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [transport-netty4]
[2021-03-23T15:13:49,448][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [x-pack-ccr]
[2021-03-23T15:13:49,448][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [x-pack-core]
[2021-03-23T15:13:49,449][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [x-pack-deprecation]
[2021-03-23T15:13:49,449][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [x-pack-graph]
[2021-03-23T15:13:49,449][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [x-pack-ilm]
[2021-03-23T15:13:49,450][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [x-pack-logstash]
[2021-03-23T15:13:49,450][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [x-pack-ml]
[2021-03-23T15:13:49,450][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [x-pack-monitoring]
[2021-03-23T15:13:49,451][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [x-pack-rollup]
[2021-03-23T15:13:49,451][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [x-pack-security]
[2021-03-23T15:13:49,452][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [x-pack-sql]
[2021-03-23T15:13:49,456][INFO ][o.e.p.PluginsService     ] [slaver2] loaded module [x-pack-watcher]
[2021-03-23T15:13:49,460][INFO ][o.e.p.PluginsService     ] [slaver2] no plugins loaded
[2021-03-23T15:13:59,813][INFO ][o.e.x.s.a.s.FileRolesStore] [slaver2] parsed [0] roles from file [/usr/local/soft/elasticsearch-7.2.1/config/roles.yml]
[2021-03-23T15:14:01,757][INFO ][o.e.x.m.p.l.CppLogMessageHandler] [slaver2] [controller/10234] [Main.cc@110] controller (64 bit): Version 7.2.1 (Build 4ad685337be7fd) Copyright (c) 2019 Elasticsearch BV
[2021-03-23T15:14:03,624][DEBUG][o.e.a.ActionModule       ] [slaver2] Using REST wrapper from plugin org.elasticsearch.xpack.security.Security
[2021-03-23T15:14:05,122][INFO ][o.e.d.DiscoveryModule    ] [slaver2] using discovery type [zen] and seed hosts providers [settings]
[2021-03-23T15:14:09,123][INFO ][o.e.n.Node               ] [slaver2] initialized
[2021-03-23T15:14:09,125][INFO ][o.e.n.Node               ] [slaver2] starting ...
[2021-03-23T15:14:09,472][INFO ][o.e.t.TransportService   ] [slaver2] publish_address {192.168.110.135:9300}, bound_addresses {192.168.110.135:9300}
[2021-03-23T15:14:09,504][INFO ][o.e.b.BootstrapChecks    ] [slaver2] bound or publishing to a non-loopback address, enforcing bootstrap checks
ERROR: [1] bootstrap checks failed
[1]: the default discovery settings are unsuitable for production use; at least one of [discovery.seed_hosts, discovery.seed_providers, cluster.initial_master_nodes] must be configured
[2021-03-23T15:14:09,550][INFO ][o.e.n.Node               ] [slaver2] stopping ...
[2021-03-23T15:14:09,627][INFO ][o.e.n.Node               ] [slaver2] stopped
[2021-03-23T15:14:09,629][INFO ][o.e.n.Node               ] [slaver2] closing ...
[2021-03-23T15:14:09,681][INFO ][o.e.n.Node               ] [slaver2] closed
[2021-03-23T15:14:09,690][INFO ][o.e.x.m.p.NativeController] [slaver2] Native controller process has stopped - no new native processes can be started

Solution:

In the config directory of Elasticsearch, edit the elasticsearch.yml configuration file and add the following settings:

# replace the IP with your own; for multiple nodes list every node's address, for a single node one entry is enough
# configure at least one of the following three: [discovery.seed_hosts, discovery.seed_providers, cluster.initial_master_nodes]
#cluster.initial_master_nodes: ["node-1", "node-2"]
cluster.initial_master_nodes: ["192.168.110.135"]
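
For reference, the same change can be applied from the shell; this is a minimal sketch assuming the install path and user shown in the log above:

[elsearch@slaver2 elasticsearch-7.2.1]$ echo 'cluster.initial_master_nodes: ["192.168.110.135"]' >> config/elasticsearch.yml
[elsearch@slaver2 elasticsearch-7.2.1]$ ./bin/elasticsearch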

[Solved] panic: proto: duplicate enum registered: raftpb.EntryType

  Error information:

A future release will panic on registration conflicts. See:
https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict

panic: proto: duplicate enum registered: raftpb.EntryType

goroutine 1 [running]:
github.com/golang/protobuf/proto.RegisterEnum(0x2461722, 0x10, 0xc0002b29f0, 0xc0002b2a20)
	/user/local/gopath/pkg/mod/github.com/golang/[email protected]/proto/registry.go:104 +0x11f
github.com/coreos/etcd/raft/raftpb.init.0()
	/user/local/gopath/pkg/mod/github.com/coreos/etcd@v3.3.10+incompatible/raft/raftpb/raft.pb.go:508 +0x52
make: *** [show] error 2

Solution: add the following replace directive to go.mod:

replace go.etcd.io/etcd => github.com/coreos/etcd v3.3.10+incompatible
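
Equivalently, the directive can be added with go mod edit (a hedged one-liner; run it in the module root):

go mod edit -replace go.etcd.io/etcd=github.com/coreos/etcd@v3.3.10+incompatible
go mod tidy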

Deployment of etcd storage and flannel network configuration for a kubernetes/k8s multi-node cluster

article directory

    • 1. Project requirement analysis
    • 2. Project step deployment (master node):
      • [7] etcd binary upload
      • [8] create configuration file, command file, certificate
      • [9] use another terminal to copy the certificate and systemctl management service script to other nodes
      • [10] modify the configuration file under cfg
      • [11] check whether the cluster state is healthy
    • k8s multi-node deployment of flannel network configuration
      • [1] write the allocated subnet segment into etcd for flannel to use
      • [2] view the written information
      • [3] deploy the flannel component on all node nodes
      • [4] create the k8s working directory, copy the command files
      • [5] write the flannel startup script [the same on every node node]
      • [6] enable the flannel network function
      • [7] configure docker to connect to the flannel component [all node nodes are the same]
      • [8] view the bip subnet
      • [9] restart the docker service
      • [10] view the flannel network
      • [11] test connectivity between nodes
    • k8s multi-node etcd storage deployment

    1. Project requirement analysis: 192.168.60.10 is the master node; 192.168.60.100 and 192.168.60.60 are the node nodes, each running kubelet, kube-proxy, docker, flannel and etcd.

    2. Project step deployment (master node):

    // master node configuration

    [1] download the certificate-making tools (cfssl)

    [root@localhost ~]# hostnamectl set-hostname master
    [root@localhost ~]# su
    [root@master ~]# cd /usr/local/bin
    [root@master bin]# chmod +x *
    [root@master bin]# ls
    cfssl  cfssl-certinfo  cfssljson
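
    The download itself is not shown above; a minimal sketch of how the cfssl binaries are usually fetched (URLs and release number are assumptions, adjust to whatever mirror you use):

    [root@master ~]# curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl
    [root@master ~]# curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
    [root@master ~]# curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo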
    

    [2] define the CA certificate configuration

    [root@master ~]# mkdir -p k8s/etcd-cert
    [root@master ~]# cd k8s/etcd-cert
    [root@master etcd-cert]# cat > ca-config.json <<EOF
    {
      "signing": {
        "default": {
          "expiry": "87600h"
        },
        "profiles": {
          "www": {
             "expiry": "87600h",
             "usages": [
                "signing",
                "key encipherment",
                "server auth",
                "client auth"     
            ]  
          } 
        }         
      }
    }
    EOF
    

    [3] create the CA certificate signing request (CSR)

    [root@master etcd-cert]#cat > ca-csr.json <<EOF 
    {   
        "CN": "etcd CA",
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "L": "Beijing",
                "ST": "Beijing"
            }
        ]
    }
    EOF
    

    [4] generate the CA certificate

    [root@master etcd-cert]#cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
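
    If the command succeeds, the CA key pair should now be in the current directory (expected listing, as a rough check):

    [root@master etcd-cert]# ls ca*
    ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem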
    

    [5] create the server certificate request covering the three etcd node IPs (for communication verification between them)

    [root@master etcd-cert]#cat > server-csr.json <<EOF
    {
        "CN": "etcd",
        "hosts": [
        "192.168.60.10",
        "192.168.60.100",
        "192.168.60.60"
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "L": "BeiJing",
                "ST": "BeiJing"
            }
        ]
    }
    EOF
    

    [6] generate the etcd server certificate

    [root@master etcd-cert]#cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
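
    Likewise, this should leave the server key pair (used by all three etcd members) next to the CA files:

    [root@master etcd-cert]# ls server*
    server.csr  server-csr.json  server-key.pem  server.pem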
    

    [7] etcd binary upload

    [root@master k8s]# ls
    etcd-cert  etcd-v3.3.10-linux-amd64  etcd-v3.3.10-linux-amd64.tar.gz
    

    [8] create configuration file, command file, certificate

    [root@master k8s]# mkdir -p /opt/etcd/{cfg,bin,ssl}
    // command files (binaries)
    [root@master k8s]# cp etcd-v3.3.10-linux-amd64/etcd etcd-v3.3.10-linux-amd64/etcdctl /opt/etcd/bin/
    // certificates
    [root@master k8s]# cp etcd-cert/*.pem /opt/etcd/ssl/
    // upload the etcd.sh script, which generates the configuration file and the systemctl (systemd) service file
    [root@master k8s]# ls
    etcd-cert  etcd.sh  etcd-v3.3.10-linux-amd64  etcd-v3.3.10-linux-amd64.tar.gz
    [root@master k8s]#sh etcd.sh etcd01 192.168.60.10 etcd02=https://192.168.60.60:2380,etcd03=https://192.168.60.100:2380
    // check whether the etcd process has started
    [root@master ~]# ps -ef | grep etcd
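
    The etcd.sh script itself is not listed in this post. Judging from the files edited in step [10] below, the configuration it writes to /opt/etcd/cfg/etcd on the master presumably looks like this (shown only for reference, not taken from the script):

    #[Member]
    ETCD_NAME="etcd01"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://192.168.60.10:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.60.10:2379"

    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.60.10:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.60.10:2379"
    ETCD_INITIAL_CLUSTER="etcd01=https://192.168.60.10:2380,etcd02=https://192.168.60.60:2380,etcd03=https://192.168.60.100:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"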
    

    [9] use another terminal to copy the certificate and systemctl management service script to other nodes

    [root@master ~]# scp -r /opt/etcd/ [email protected]:/opt/
    [root@master ~]# scp -r /opt/etcd/ [email protected]:/opt/
    // copy the startup (service) script to the other nodes
    scp /usr/lib/systemd/system/etcd.service [email protected]:/usr/lib/systemd/system/
    scp /usr/lib/systemd/system/etcd.service [email protected]:/usr/lib/systemd/system/
    

    [10] modify the configuration file under cfg on the other two nodes

    // on the 192.168.60.60 node: mainly change the name and the IP addresses
    [root@node1 ~]# cd /opt/etcd/cfg/
    [root@node1 cfg]# ls
    etcd
    [root@node1 cfg]# vim etcd 
    #[Member]
    ETCD_NAME="etcd02"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://192.168.60.60:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.60.60:2379"
    
    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.60.60:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.60.60:2379"
    ETCD_INITIAL_CLUSTER="etcd01=https://192.168.60.10:2380,etcd02=https://192.168.60.60:2380,etcd03=https://192.168.60.100:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    [root@node1 cfg]# systemctl start etcd.service 
    [root@node1 cfg]# systemctl status etcd.service
    
    
    // on the 192.168.60.100 node: mainly change the name and the IP addresses
    [root@node2 ~]# cd /opt/etcd/cfg/
    [root@node2 cfg]# ls
    etcd
    [root@node2 cfg]# vim etcd 
    #[Member]
    ETCD_NAME="etcd03"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://192.168.60.100:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.60.100:2379"
    
    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.60.100:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.60.100:2379"
    ETCD_INITIAL_CLUSTER="etcd01=https://192.168.60.10:2380,etcd02=https://192.168.60.60:2380,etcd03=https://192.168.60.100:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    [root@node2 cfg]# systemctl start etcd.service 
    [root@node2 cfg]# systemctl status etcd.service
    

    [11] check whether the cluster state is healthy

    [root@master etcd-cert]# /opt/etcd//bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoint="https://192.168.60.10:2379,https://192.168.60.60:2379,https://192.168.60.100:2379" cluster-health
    member 59173e3f8aecc6c3 is healthy: got healthy result from https://192.168.60.100:2379
    member 8da25ad72397ec6e is healthy: got healthy result from https://192.168.60.10:2379
    member a21e580b9191cb20 is healthy: got healthy result from https://192.168.60.60:2379
    cluster is healthy
    [root@master etcd-cert]# 
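
    A related hedged check with the same flags lists the individual members (this etcdctl speaks the v2 API):

    [root@master etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.60.10:2379,https://192.168.60.60:2379,https://192.168.60.100:2379" member list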
    


    k8s multi-node deployment of the flannel network configuration

    1. Project requirement analysis: 192.168.60.10 is the master node; 192.168.60.100 and 192.168.60.60 are the node nodes, each running kubelet, kube-proxy, docker, flannel and etcd.

    2. Project step deployment:

    [1] write the allocated subnet segment into the etcd for flannel to use

    [root@master etcd-cert]# /opt/etcd/bin/etcdctl \
    --ca-file=ca.pem \
    --cert-file=server.pem \
    --key-file=server-key.pem \
    --endpoint="https://192.168.60.10:2379,https://192.168.60.60:2379,https://192.168.60.100:2379" \
    set /coreos.com/network/config '{"Network":"172.17.0.0/16","Backend":{"Type":"vxlan"}}'
    

    [2] view the written information

    [root@master etcd-cert]# /opt/etcd/bin/etcdctl \
    --ca-file=ca.pem \
    --cert-file=server.pem \
    --key-file=server-key.pem \
    --endpoint="https://192.168.60.10:2379,https://192.168.60.60:2379,https://192.168.60.100:2379" \
    get /coreos.com/network/config
    

    [3] deploy the flannel component on all node nodes

    // on the 192.168.60.60 node (node1)
    [root@node1 ~]# tar zxvf flannel-v0.10.0-linux-amd64.tar.gz 
    flanneld
    mk-docker-opts.sh
    README.md
    // on the 192.168.60.100 node (node2)
    [root@node2 ~]# tar zxvf flannel-v0.10.0-linux-amd64.tar.gz 
    flanneld
    mk-docker-opts.sh
    README.md
    

    [4] create k8s working directory, copy the command file

    // on the 192.168.60.60 node (node1)
    [root@node1 ~]# mkdir -p /opt/kubernetes/{cfg,bin,ssl}
    [root@node1 ~]# mv flanneld mk-docker-opts.sh /opt/kubernetes/bin/
    // on the 192.168.60.100 node (node2)
    [root@node2 ~]# mkdir -p /opt/kubernetes/{cfg,bin,ssl}
    [root@node2 ~]# mv flanneld mk-docker-opts.sh /opt/kubernetes/bin/
    

    [5] write the flannel component startup script [the same on every node node]

    [root@node1 ~]# vim flannel.sh
    #!/bin/bash
    ETCD_ENDPOINTS=${1:-"http://127.0.0.1:2379"}
    cat <<EOF >/opt/kubernetes/cfg/flanneld
    FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} \
    -etcd-cafile=/opt/etcd/ssl/ca.pem \
    -etcd-certfile=/opt/etcd/ssl/server.pem \
    -etcd-keyfile=/opt/etcd/ssl/server-key.pem"
    EOF
    
    cat <<EOF >/usr/lib/systemd/system/flanneld.service
    [Unit]
    Description=Flanneld overlay address etcd agent
    After=network-online.target network.target
    Before=docker.service
    [Service]
    Type=notify
    EnvironmentFile=/opt/kubernetes/cfg/flanneld
    ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
    ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
    Restart=on-failure
    [Install]
    WantedBy=multi-user.target
    EOF
    
    systemctl daemon-reload
    systemctl enable flanneld
    systemctl restart flanneld
    

    [6] enable the flannel component network function

    [root@node1 ~]# sh flannel.sh https://192.168.60.10:2379,https://192.168.60.60:2379,https://192.168.60.100:2379
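
    Before moving on, it is worth confirming that flanneld actually started and wrote its subnet file (a quick hedged check):

    [root@node1 ~]# systemctl status flanneld
    [root@node1 ~]# cat /run/flannel/subnet.env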
    

    [7] configure docker to connect flannel component [all node nodes are the same]

    [root@node1 ~]# vim /usr/lib/systemd/system/docker.service 
    EnvironmentFile=/run/flannel/subnet.env
    ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock
    

    [8] view the subnet specified for the docker bip startup option

    // on the 192.168.60.60 node (node1)
    [root@node1 ~]# cat /run/flannel/subnet.env 
    DOCKER_OPT_BIP="--bip=172.17.39.1/24"
    DOCKER_OPT_IPMASQ="--ip-masq=false"
    DOCKER_OPT_MTU="--mtu=1472"
    DOCKER_NETWORK_OPTIONS=" --bip=172.17.39.1/24 --ip-masq=false --mtu=1472"
    // on the 192.168.60.100 node (node2)
    [root@node2 ~]# cat /run/flannel/subnet.env 
    DOCKER_OPT_BIP="--bip=172.17.85.1/24"
    DOCKER_OPT_IPMASQ="--ip-masq=false"
    DOCKER_OPT_MTU="--mtu=1472"
    DOCKER_NETWORK_OPTIONS=" --bip=172.17.85.1/24 --ip-masq=false --mtu=1472"
    

    [9] restart docker service

    [root@node1 ~]# systemctl daemon-reload 
    [root@node1 ~]# systemctl restart docker.service 
    

    [10] view the flannel network

    // on node1 (192.168.60.60)
    [root@node1 ~]# ifconfig 
    docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
            inet 172.17.39.1  netmask 255.255.255.0  broadcast 172.17.39.255
            ether 02:42:b1:19:5b:a1  txqueuelen 0  (Ethernet)
            RX packets 0  bytes 0 (0.0 B)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 0  bytes 0 (0.0 B)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
    // on node2 (192.168.60.100)
    [root@node2 ~]# ifconfig 
    docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
            inet 172.17.85.1  netmask 255.255.255.0  broadcast 172.17.85.255
            ether 02:42:b5:54:91:f1  txqueuelen 0  (Ethernet)
            RX packets 0  bytes 0 (0.0 B)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 0  bytes 0 (0.0 B)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
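
    ifconfig above only shows docker0; with the vxlan backend, flanneld should also have created a flannel.1 interface on each node, which can be checked like this (hedged):

    [root@node1 ~]# ip -d link show flannel.1
    [root@node1 ~]# ip addr show flannel.1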
    

    [11] test the connectivity between nodes

    // at 192.168.60.60 node

    [root@node1 ~]# docker run -it centos:7 /bin/bash
    [root@2bbac9ebdc96 /]# yum install -y net-tools
    [root@2bbac9ebdc96 /]# ifconfig 
    eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1472
            inet 172.17.39.2  netmask 255.255.255.0  broadcast 172.17.39.255
            ether 02:42:ac:11:27:02  txqueuelen 0  (Ethernet)
            RX packets 15198  bytes 12444271 (11.8 MiB)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 7322  bytes 398889 (389.5 KiB)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
    [root@2bbac9ebdc96 /]# ping 172.17.85.2
    PING 172.17.85.2 (172.17.85.2) 56(84) bytes of data.
    64 bytes from 172.17.85.2: icmp_seq=1 ttl=60 time=1.08 ms
    64 bytes from 172.17.85.2: icmp_seq=2 ttl=60 time=0.523 ms
    64 bytes from 172.17.85.2: icmp_seq=3 ttl=60 time=0.619 ms
    64 bytes from 172.17.85.2: icmp_seq=4 ttl=60 time=2.24 ms
    

    // at 192.168.60.100 node

    [root@node2 ~]# docker run -it centos:7 /bin/bash
    [root@79995e04b320 /]# yum install -y net-tools
    [root@79995e04b320 /]# ifconfig 
    eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1472
            inet 172.17.85.2  netmask 255.255.255.0  broadcast 172.17.85.255
            ether 02:42:ac:11:55:02  txqueuelen 0  (Ethernet)
            RX packets 15299  bytes 12447552 (11.8 MiB)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 5864  bytes 320081 (312.5 KiB)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
    [root@79995e04b320 /]# ping 172.17.39.2
    PING 172.17.39.2 (172.17.39.2) 56(84) bytes of data.
    64 bytes from 172.17.39.2: icmp_seq=1 ttl=60 time=0.706 ms
    64 bytes from 172.17.39.2: icmp_seq=2 ttl=60 time=0.491 ms
    64 bytes from 172.17.39.2: icmp_seq=3 ttl=60 time=0.486 ms
    64 bytes from 172.17.39.2: icmp_seq=4 ttl=60 time=0.528 ms