1. Install the etcd cluster

[root@k8s-master ~]# yum install -y etcd 
[root@k8s-node-1 ~]# yum install -y etcd 
[root@k8s-node-2 ~]# yum install -y etcd 
[root@k8s-master ~]# grep -Ev "^$|#" /etc/etcd/etcd.conf 
ETCD_DATA_DIR="/var/lib/etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_NAME="node1"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.0.0.11:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://10.0.0.11:2379"
ETCD_INITIAL_CLUSTER="node1=http://10.0.0.11:2380,node2=http://10.0.0.12:2380,node3=http://10.0.0.13:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
[root@k8s-node-1 ~]# grep -Ev "^$|#" /etc/etcd/etcd.conf 
ETCD_DATA_DIR="/var/lib/etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_NAME="node2"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.0.0.12:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://10.0.0.12:2379"
ETCD_INITIAL_CLUSTER="node1=http://10.0.0.11:2380,node2=http://10.0.0.12:2380,node3=http://10.0.0.13:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
[root@k8s-node-2 ~]# grep -Ev "^$|#" /etc/etcd/etcd.conf 
ETCD_DATA_DIR="/var/lib/etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_NAME="node3"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.0.0.13:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://10.0.0.13:2379"
ETCD_INITIAL_CLUSTER="node1=http://10.0.0.11:2380,node2=http://10.0.0.12:2380,node3=http://10.0.0.13:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
[root@k8s-master ~]# systemctl enable  etcd
[root@k8s-master ~]# systemctl restart  etcd
[root@k8s-node-1 ~]# systemctl enable  etcd
[root@k8s-node-1 ~]# systemctl restart  etcd
[root@k8s-node-2 ~]# systemctl enable  etcd
[root@k8s-node-2 ~]# systemctl restart  etcd
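
With etcd enabled and restarted on all three machines, it is worth confirming that the members actually formed one cluster before layering anything on top. The two checks below are an added sanity step (not part of the original transcript); both are standard etcd v2 etcdctl subcommands:

[root@k8s-master ~]# etcdctl cluster-health
[root@k8s-master ~]# etcdctl member list

cluster-health should report all three members as healthy, and member list should show node1, node2 and node3 with the peer URLs configured above.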

2. Configure flannel

[root@k8s-master ~]# grep -Ev "^$|#"  /etc/sysconfig/flanneld 
FLANNEL_ETCD_ENDPOINTS="http://10.0.0.11:2379,http://10.0.0.12:2379,http://10.0.0.13:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
[root@k8s-master ~]# etcdctl mk /atomic.io/network/config   '{ "Network": "172.18.0.0/16" }'
{ "Network": "172.18.0.0/16" }
[root@k8s-master ~]# systemctl  restart flanneld
[root@k8s-master ~]# systemctl  restart docker
[root@k8s-node-1 ~]# grep -Ev "^$|#"  /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://10.0.0.11:2379,http://10.0.0.12:2379,http://10.0.0.13:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
[root@k8s-node-1 ~]# systemctl  restart flanneld
[root@k8s-node-1 ~]# systemctl  restart docker
[root@k8s-node-2 ~]# grep -Ev "^$|#"  /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://10.0.0.11:2379,http://10.0.0.12:2379,http://10.0.0.13:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
[root@k8s-node-2 ~]# systemctl  restart flanneld
[root@k8s-node-2 ~]# systemctl  restart docker
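
Before moving on, you can verify that flanneld read the network config and leased a subnet on each host. These checks are an addition to the original walkthrough and assume the stock CentOS flannel package, which writes its lease to /run/flannel/subnet.env (the path may differ on other builds):

[root@k8s-master ~]# etcdctl get /atomic.io/network/config
[root@k8s-master ~]# etcdctl ls /atomic.io/network/subnets
[root@k8s-master ~]# cat /run/flannel/subnet.env

Each host should hold one lease under /atomic.io/network/subnets, and after the docker restart the docker0 bridge should sit inside that host's leased subnet.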

3. Modify the configuration on the master1 node

[root@k8s-master ~]# grep -Ev "^$|#" /etc/kubernetes/apiserver
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBELET_PORT="--kubelet-port=10250"
KUBE_ETCD_SERVERS="--etcd-servers=http://10.0.0.11:2379,http://10.0.0.12:2379,http://10.0.0.13:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
KUBE_API_ARGS="--service-node-port-range=3000-50000"
[root@k8s-master ~]# grep -Ev "^$|#" /etc/kubernetes/config 
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://127.0.0.1:8080"
[root@k8s-master ~]# systemctl restart kube-apiserver.service 
[root@k8s-master ~]# systemctl restart kube-controller-manager.service kube-scheduler.service 
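
A quick way to confirm the master components came up and can reach etcd is kubectl's componentstatuses view (a suggested verification step, not in the original output):

[root@k8s-master ~]# kubectl get componentstatuses

scheduler, controller-manager and all three etcd members should show as Healthy.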

4. Install and configure the software on the master2 node

[root@k8s-node-1 ~]# yum install kubernetes-master.x86_64 -y
[root@k8s-node-1 ~]# scp -rp 10.0.0.11:/etc/kubernetes/apiserver /etc/kubernetes/apiserver
[root@k8s-node-1 ~]#  scp -rp 10.0.0.11:/etc/kubernetes/config /etc/kubernetes/config 
[root@k8s-node-1 ~]# systemctl enable kube-apiserver.service
[root@k8s-node-1 ~]# systemctl restart kube-apiserver.service
[root@k8s-node-1 ~]# systemctl enable kube-controller-manager.service
[root@k8s-node-1 ~]# systemctl restart kube-controller-manager.service
[root@k8s-node-1 ~]# systemctl enable kube-scheduler.service
[root@k8s-node-1 ~]# systemctl restart kube-scheduler.service
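
The same check can be run against the second master to make sure its copies of the components are healthy too (again, an added step):

[root@k8s-node-1 ~]# kubectl -s http://127.0.0.1:8080 get componentstatuses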

5. Install and configure keepalived on master1 and master2

[root@k8s-master ~]# yum install keepalived.x86_64 -y
[root@k8s-master ~]# cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived
global_defs {
   router_id LVS_DEVEL_11
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.10
    }
}
[root@k8s-master ~]# systemctl  enable  keepalived
[root@k8s-master ~]# systemctl  start   keepalived
[root@k8s-node-1 ~]# yum install keepalived.x86_64 -y
[root@k8s-node-1 ~]# cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived
global_defs {
   router_id LVS_DEVEL_12
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.10
    }
}
[root@k8s-node-1 ~]# systemctl  enable  keepalived
[root@k8s-node-1 ~]# systemctl  start   keepalived
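
To confirm failover actually works, you can stop keepalived on master1, watch the VIP move to master2, then start it again so the higher-priority node reclaims it. This test is an addition to the original walkthrough:

[root@k8s-master ~]# systemctl stop keepalived
[root@k8s-node-1 ~]# ip addr show eth0
[root@k8s-master ~]# systemctl start keepalived

While keepalived is stopped on master1, 10.0.0.10/32 should appear on eth0 of k8s-node-1; after the restart it should return to k8s-master, since MASTER state with priority 100 outranks BACKUP with priority 80.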

6. Point kubelet and kube-proxy on all node machines at the api-server VIP

[root@k8s-node-2 ~]# grep -Ev "^$|#" /etc/kubernetes/kubelet 
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=10.0.0.13"
KUBELET_API_SERVER="--api-servers=http://10.0.0.10:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=10.0.0.11:5000/pod-infrastructure:latest"
KUBELET_ARGS="--cluster_dns=10.254.230.254 --cluster_domain=cluster.local"
[root@k8s-node-2 ~]# grep -Ev "^$|#" /etc/kubernetes/config 
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://10.0.0.10:8080"
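
After editing the two files, restart kubelet and kube-proxy on every node so they reconnect through the VIP, and confirm the nodes register via the VIP address. These restart and verification commands are added here; they are not in the original transcript:

[root@k8s-node-2 ~]# systemctl restart kubelet.service kube-proxy.service
[root@k8s-node-2 ~]# kubectl -s http://10.0.0.10:8080 get nodes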

7. Check the VIP

[root@k8s-master ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:66:1b:c0 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.11/24 brd 10.0.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet 10.0.0.10/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe66:1bc0/64 scope link 
       valid_lft forever preferred_lft forever
4: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1472 qdisc noqueue state UP group default 
    link/ether 02:42:19:04:47:9a brd ff:ff:ff:ff:ff:ff
    inet 172.18.82.1/24 scope global docker0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:19ff:fe04:479a/64 scope link 
       valid_lft forever preferred_lft forever
10: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
    link/none 
    inet 172.18.82.0/16 scope global flannel0
       valid_lft forever preferred_lft forever
    inet6 fe80::2b78:92fc:6fe9:8144/64 scope link flags 800 
       valid_lft forever preferred_lft forever
12: vethde643da@if11: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1472 qdisc noqueue master docker0 state UP group default 
    link/ether 1a:ec:35:53:0b:ae brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet6 fe80::18ec:35ff:fe53:bae/64 scope link 
       valid_lft forever preferred_lft forever
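
As a final check that the VIP actually fronts a working apiserver, you can query the version endpoint through 10.0.0.10 (an added verification, assuming the insecure port 8080 configured above):

[root@k8s-master ~]# curl http://10.0.0.10:8080/version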

PS: k8s-node-1 is used as the master2 node in this setup.
