Building a Kubernetes Cluster with kubeadm

Note: This Kubernetes series uses Kubernetes 1.15.

1 Master Node Installation

1.1 System Environment Configuration

1.1.1 Setting Host Name

hostnamectl set-hostname kmaster-01
hostnamectl set-hostname knode-01
hostnamectl set-hostname knode-02

vi /etc/hosts

192.168.190.163 knode-01
192.168.190.164 knode-02
192.168.190.165 kmaster-01
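
As a quick optional check (not part of the original steps), confirm that the names resolve from each machine:

# Optional: verify that the /etc/hosts entries resolve
ping -c 1 kmaster-01
ping -c 1 knode-01
ping -c 1 knode-02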

1.1.2 Close Firewall

# Stop the firewall and disable it at boot
systemctl stop firewalld && systemctl disable firewalld

# View firewall status
systemctl status firewalld

# The status information is as follows
[root@kmaster-01 ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
   Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
   Active: inactive (dead)
     Docs: man:firewalld(1)

Aug 05 15:21:17 localhost.localdomain systemd[1]: Starting firewalld - dynamic firewall daemon...
Aug 05 15:21:18 localhost.localdomain systemd[1]: Started firewalld - dynamic firewall daemon.
Aug 12 14:22:20 kmaster-01 systemd[1]: Stopping firewalld - dynamic firewall daemon...
Aug 12 14:22:21 kmaster-01 systemd[1]: Stopped firewalld - dynamic firewall daemon.

1.1.3 Disable SELINUX

# Disable SELinux temporarily (the change is lost after a reboot)
setenforce 0

# Disable SELinux permanently: change SELINUX=enforcing to SELINUX=disabled
vi /etc/selinux/config

SELINUX=disabled

#View status information for selinux
/usr/sbin/sestatus

#The status information for selinux is as follows
[root@kmaster-01 ~]# /usr/sbin/sestatus
SELinux status:                 enabled
SELinuxfs mount:                /sys/fs/selinux
SELinux root directory:         /etc/selinux
Loaded policy name:             targeted
Current mode:                   permissive
Mode from config file:          disabled
Policy MLS status:              enabled
Policy deny_unknown status:     allowed
Max kernel policy version:      31

1.1.4 Comment Out the Automatic Mounting of Swap

vi  /etc/fstab
 #
 # /etc/fstab
 # Created by anaconda on Mon Jan 21 19:19:41 2019
 #
 # Accessible filesystems, by reference, are maintained under '/dev/disk'
 # See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
 #
 /dev/mapper/centos-root /                       xfs     defaults        0 0
 UUID=214b916c-ad23-4762-b916-65b53fce1920 /boot                   xfs     defaults        0     0
 #/dev/mapper/centos-swap swap                    swap    defaults        0 0
 

1.1.5 Create k8s.conf File

# Turn off swap so that kubelet works correctly
swapoff -a

# Create the k8s.conf file
vi /etc/sysctl.d/k8s.conf

# File content
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0 

# Run the following commands to make the changes take effect
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf

1.1.6 Prerequisites for kube-proxy IPVS Mode

#Ensure that the required modules can be loaded automatically after the node restarts
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

# Add execute permission, load the modules, and verify they are loaded
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

# Check that the required kernel modules have been loaded correctly
lsmod | grep -e ip_vs -e nf_conntrack_ipv4

# Install the ipset package
yum -y install ipset

#To facilitate viewing the proxy rules of ipvs, install the management tool ipvsadm
yum -y install ipvsadm

Tip: If these prerequisites are not met, kube-proxy falls back to iptables mode even when its configuration enables IPVS mode.
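
Once the cluster is up (section 1.4), you can confirm which mode kube-proxy is actually using. A minimal check, assuming ipvsadm was installed as above and the default kubeadm labels are in place:

# IPVS virtual servers should be listed for the cluster Service network
ipvsadm -Ln

# The kube-proxy logs should also mention the IPVS proxier
kubectl -n kube-system logs -l k8s-app=kube-proxy | grep -i ipvs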

1.1.7 Synchronize Time

1. Install ntpdate tool

yum -y install ntp ntpdate

2. Synchronize the system time with network time

ntpdate cn.pool.ntp.org

3. Write the system time to the hardware clock

hwclock --systohc
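
Optionally (an assumption, not part of the original steps), keep the clocks in sync with an hourly cron entry:

# Re-sync against the NTP pool every hour
(crontab -l 2>/dev/null; echo "0 * * * * /usr/sbin/ntpdate cn.pool.ntp.org") | crontab -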

1.2 Install and Configure Docker

1.2.1 Install Docker

Remove the old version

$ sudo yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-selinux \
                  docker-engine-selinux \
                  docker-engine

Install some necessary system tools

sudo yum install -y yum-utils device-mapper-persistent-data lvm2

Adding Software Source Information

sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

Update yum cache

sudo yum makecache fast

Install Docker-ce

sudo yum -y install docker-ce

1.2.2 Configure Docker

# Edit file
vim /etc/docker/daemon.json

# Simple configuration
{
  "registry-mirrors": ["https://1bbsr4jc.mirror.aliyuncs.com","https://registry.docker-cn.com"],
  "insecure-registries": ["192.168.190.164:5000"]
}



# More advanced configuration
{
  "graph": "/data/docker",
  "storage-driver": "overlay",
  "insecure-registries": ["registry.access.redhat.com","quay.io","harbor.od.com"],
  "bip": "172.7.21.1/24",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "live-restore": true
}

Note:

1. The bip value changes with the host IP; for example, for host 10.4.7.128 the Docker bridge network can be configured as 172.7.128.1/24.

2. insecure-registries: configure the address of your private image registry here.
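
After editing daemon.json, Docker has to be (re)started for the settings to take effect. A minimal sketch:

# Restart Docker so the daemon.json changes take effect
systemctl daemon-reload
systemctl enable docker && systemctl restart docker

# Confirm the registry mirrors were picked up
docker info | grep -A 2 "Registry Mirrors"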

Verify that the default policy for the FORWARD chain in the iptables filter table is ACCEPT.

# Verify that the default policy for the FORWARD chain in the iptables filter table is ACCEPT
iptables -nvL

Chain INPUT (policy ACCEPT 9 packets, 760 bytes)
 pkts bytes target     prot opt in     out     source            destination         
 
Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
 pkts bytes target     prot opt in     out     source               destination
 
# If it is not ACCEPT, change it
iptables  -P FORWARD  ACCEPT

1.3 Install and Configure kubeadm

1.3.1 Install kubeadm

1. Create the yum repository file

# Create the repo file; Aliyun's mirror is used here, but any other mirror works too
vim /etc/yum.repos.d/kubernetes.repo

# File content
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=1
enable=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
	https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

2. Install kubelet, kubeadm, kubectl

#Create metadata cache and install kubelet, kubeadm, kubectl

# Update yum source cache
yum -y makecache fast

# If no version is specified, the latest version is installed by default (1.15.2 at the time of writing).
# To pin an older version, for example 1.14.3:
yum install -y kubelet-1.14.3 kubeadm-1.14.3 kubectl-1.14.3

# This series uses 1.15.2:
yum install -y kubelet-1.15.2 kubeadm-1.15.2 kubectl-1.15.2

# Or simply install the latest version
yum install -y kubelet kubeadm kubectl

3. Enable kubelet at Boot

# Enable kubelet at boot and start it
systemctl enable kubelet && systemctl start kubelet

1.3.2 Configure kubeadm

Installing Kubernetes mostly means pulling its component images, and kubeadm already knows which base images a cluster needs. However, because of network restrictions in mainland China, these images cannot be pulled directly while setting up the environment. Pointing kubeadm at the image mirror service provided by Aliyun solves this problem.

1. Create configuration
# Export configuration file
kubeadm config print init-defaults > kubeadm.yml
2. Modify configuration
[root@k8s-master-01 kubernates]# vim kubeadm.yml 

apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # Change this to the server's IP address
  advertiseAddress: 10.4.7.128
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master-01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: ""
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
# Image repository: Aliyun mirror
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
# Set to the installed kubeadm/kubectl/kubelet version
kubernetesVersion: v1.15.2
networking:
  dnsDomain: cluster.local
  # Pod network CIDR
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
# Set the kube-proxy mode to ipvs
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs

3. List and pull the images
# Enter the directory where kubeadm.yml is located
cd /usr/local/kubernetes

# View the list of required images
kubeadm config images list --config kubeadm.yml

# Pull the images
kubeadm config images pull --config kubeadm.yml

1.4 Initialize the Master Node

Execute the following command to initialize the master node. It points kubeadm at the configuration file created above and adds the --experimental-upload-certs flag so that certificates are uploaded and distributed automatically when nodes are joined later. The trailing tee kubeadm-init.log saves the output to a log file.

1.4.1 Installation and Verification

1. Installation
kubeadm init --config=kubeadm.yml --experimental-upload-certs | tee kubeadm-init.log
2. Installation process
[root@k8s-master-01 kubernates]# kubeadm init --config=kubeadm.yml --experimental-upload-certs | tee kubeadm-init.log

## setup script
[init] Using Kubernetes version: v1.14.1
[preflight] Running pre-flight checks
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master-01 localhost] and IPs [10.4.7.128 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master-01 localhost] and IPs [10.4.7.128 127.0.0.1 ::1]
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master-01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.4.7.128]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 33.503334 seconds
[upload-config] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.14" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in ConfigMap "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
37b163df1ac944283189da3b1dc294b46d8cb20ae62786b033857a88a9818594
[mark-control-plane] Marking the node k8s-master-01 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master-01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.4.7.128:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:73104432e55bb8aeba8ed3daaa302ecae4be1134dc3d00d7c48b673521a3be03
3. Configuring kubectl
mkdir -p $HOME/.kube

sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
4. Verification of success
kubectl get node

# Being able to print out node information means success
NAME                STATUS     ROLES    AGE     VERSION
kubernetes-master   NotReady   master   8m40s   v1.14.1
5. kubeadm Cluster Initialization Process
  • init: Initialize the specified version
  • preflight: Check the environment and download the Docker images needed before initialization
  • kubelet-start: Generate the kubelet configuration file /var/lib/kubelet/config.yaml; the kubelet cannot start without this file, which is why it does not actually start successfully before initialization
  • Certificates: Generate the certificates used by Kubernetes and store them in the /etc/kubernetes/pki directory
  • KubeConfig: Generate the kubeconfig files and store them in the /etc/kubernetes directory; components use these files to communicate with each other
  • control-plane: Install the Master components using the YAML manifests in the /etc/kubernetes/manifests directory
  • Etcd: Install the etcd service using /etc/kubernetes/manifests/etcd.yaml
  • wait-control-plane: Wait for the Master components deployed by control-plane to start
  • apiclient: Check the status of the Master component services
  • uploadconfig: Store the configuration that was used in a ConfigMap
  • Kubelet: Configure the kubelet via a ConfigMap
  • patchnode: Update CNI information on the Node and record it as an annotation
  • mark-control-plane: Label the current node with the Master role and taint it as non-schedulable, so that by default the Master node is not used to run Pods
  • bootstrap-token: Generate the token that kubeadm join uses later to add nodes to the cluster
  • addons: Install the CoreDNS and kube-proxy add-ons

2 Node Installation

2.1 Basic Configuration

Adding slave nodes to the cluster is very simple: install the kubeadm, kubectl, and kubelet tools on the slave server, then join it with the kubeadm join command. The preparations are as follows:

  • Modify host name
  • Close firewall, disable selinux, disable swap
  • Configure k8s.conf
  • Open ipvs
  • Install and configure docker
  • Install the three tools (kubelet, kubeadm, kubectl)

Since the previous chapter already covered those steps in detail, they are not repeated here.
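
For convenience, a condensed sketch of those preparation steps, mirroring chapter 1 (the sed one-liner replaces the manual vi edit; adjust the host name and versions to your environment, and create the k8s.conf and ipvs.modules files from sections 1.1.5 and 1.1.6 on the node in the same way first):

# Condensed node preparation (same steps as chapter 1)
hostnamectl set-hostname knode-01
systemctl stop firewalld && systemctl disable firewalld
setenforce 0 && sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
swapoff -a                      # also comment out the swap line in /etc/fstab
modprobe br_netfilter && sysctl -p /etc/sysctl.d/k8s.conf
bash /etc/sysconfig/modules/ipvs.modules
yum install -y docker-ce kubelet-1.15.2 kubeadm-1.15.2 kubectl-1.15.2
systemctl enable docker kubelet && systemctl start docker kubelet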

2.2 Add slave to the cluster

# Join command
kubeadm join 10.4.7.128:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:73104432e55bb8aeba8ed3daaa302ecae4be1134dc3d00d7c48b673521a3be03
    
# Join Process Log
[root@node-01 ~]# kubeadm join 10.4.7.128:6443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:73104432e55bb8aeba8ed3daaa302ecae4be1134dc3d00d7c48b673521a3be03


[preflight] Running pre-flight checks
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.14" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

Notes:

  • token
    • The token appears in the kubeadm init log when the master is installed
    • It can also be printed with the kubeadm token list command
    • If the token has expired, create a new one with the kubeadm token create command (see the example after this list)
  • discovery-token-ca-cert-hash
    • The sha256 hash appears in the kubeadm init log when the master is installed
    • It can also be recomputed with: openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
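
Rather than assembling the token and hash by hand, a fresh token plus the full join command can be generated on the master in one step:

# Print a complete kubeadm join command with a freshly created token
kubeadm token create --print-join-command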

2.3 Verify the success of joining

2.3.1 View node status

[root@k8s-master-01 kubernates]# kubectl get nodes
NAME         STATUS     ROLES    AGE   VERSION
kmaster-01   NotReady   master   26m   v1.15.2
knode-01     NotReady   <none>   79s   v1.15.2
knode-02     NotReady   <none>   13s   v1.15.2

2.3.2 Recovering from a Failed Join

If a slave node runs into configuration problems while joining the master, run kubeadm reset on the slave node to reset its configuration and then re-join it with the kubeadm join command. To remove a node from the cluster, run kubectl delete node <NAME> on the master node.
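
A minimal sketch of that reset/re-join sequence (knode-01 is just an example node name):

# On the failed slave node: wipe the kubeadm state, then re-run kubeadm join
kubeadm reset

# On the master: remove the node object if it should leave the cluster
kubectl delete node knode-01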

2.3.3 Pod Status View

kubectl get pod -n kube-system -o wide

As you can see, coredns is not yet running; at this point we need to install a network plug-in.

3 Configure the Cluster Network

3.1 CNI

3.1.1 Container Network

Container networking is the mechanism by which containers connect to other containers, the host, and external networks. Container runtimes provide a variety of network modes, each offering a different experience. For example, Docker can configure the following networks for containers by default:

  • none: Adds the container to a container-specific network stack with no external connectivity.

  • host: Adds the container to the host's network stack with no isolation.

  • default bridge: The default network mode; containers can reach each other by IP address.

  • custom bridge: A user-defined bridge that offers more flexibility, isolation, and other conveniences.

3.1.2 What is CNI

CNI (Container Network Interface) is a standard, universal interface. Container platforms such as Docker, Kubernetes, and Mesos need container network solutions such as Flannel, Calico, and Weave. With a single standard interface, any network solution that implements the protocol can provide networking for any compliant container platform; CNI is that standard interface protocol.

3.1.3 CNI Plug-in in Kubernetes

CNI was created as a framework for dynamically applying the appropriate network configuration and resources when containers are created or destroyed. Plug-ins are responsible for configuring and managing the IP addresses of interfaces, and usually provide IP management, per-container IP allocation, and multi-host connectivity. When a container starts, the runtime calls the network plug-in to allocate an IP address and configure the network, and calls it again when the container is deleted to clean up those resources.

The runtime or orchestrator determines which network the container should join and which plug-in to call. The plug-in adds an interface into the container's network namespace as one side of a veth pair, then makes changes on the host, such as attaching the other end of the veth pair to a bridge. After that, it assigns an IP address and sets up routes by calling a separate IPAM (IP Address Management) plug-in.

In Kubernetes, the kubelet calls the plug-ins it finds at the appropriate time to automatically configure the network for Pods it launches.
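
For reference only, the CNI configuration that the kubelet reads from /etc/cni/net.d/ looks roughly like the following sketch of the standard bridge plug-in. Do not create this file by hand here; the Calico installation below drops its own configuration into that directory.

{
  "cniVersion": "0.3.1",
  "name": "mynet",
  "type": "bridge",
  "bridge": "cni0",
  "isGateway": true,
  "ipMasq": true,
  "ipam": {
    "type": "host-local",
    "subnet": "10.244.0.0/16"
  }
}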

The optional CNI plug-ins in Kubernetes are as follows:

  • Flannel
  • Calico
  • Canal
  • Weave

3.2 Calico

3.2.1 What is Calico

Calico provides a secure networking solution for containers and virtual machines and has been proven at production scale (in public clouds and in clusters of thousands of nodes). It integrates with Kubernetes, OpenShift, Docker, Mesos, DC/OS, and OpenStack.

Calico also enforces network security rules dynamically. Using Calico's simple policy language, you can achieve fine-grained control over communication between containers, virtual machine workloads, and bare-metal host endpoints.
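
As an illustration of that kind of fine-grained control, a standard Kubernetes NetworkPolicy (which Calico enforces once installed) can restrict ingress so that only frontend Pods may reach backend Pods. A sketch, where the app=backend and app=frontend labels are hypothetical:

kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: backend-allow-frontend-only
spec:
  podSelector:
    matchLabels:
      app: backend
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: frontend
EOF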

3.2.2 Install Calico

Refer to the official installation documentation: https://docs.projectcalico.org/v3.7/get-start/kubernetes/

# Run this on the Master node
kubectl apply -f https://docs.projectcalico.org/v3.7/manifests/calico.yaml

# The following output is displayed at installation time
[root@k8s-master-01 kubernates]# kubectl apply -f https://docs.projectcalico.org/v3.7/manifests/calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.extensions/calico-node created
serviceaccount/calico-node created
deployment.extensions/calico-kube-controllers created
serviceaccount/calico-kube-controllers created

Verify that the installation was successful

watch kubectl get pods --all-namespaces

# Wait until all Pods are in the Running state; note that this can take a while, typically 3-5 minutes.
Every 2.0s: kubectl get pods --all-namespaces                                                                                                    kubernetes-master: Fri May 10 18:16:51 2019

NAMESPACE     NAME                                        READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-8646dd497f-g2lln    1/1     Running   0          50m
kube-system   calico-node-8jrtp                           1/1     Running   0          50m
kube-system   coredns-8686dcc4fd-mhwfn                    1/1     Running   0          51m
kube-system   coredns-8686dcc4fd-xsxwk                    1/1     Running   0          51m
kube-system   etcd-kubernetes-master                      1/1     Running   0          50m
kube-system   kube-apiserver-kubernetes-master            1/1     Running   0          51m
kube-system   kube-controller-manager-kubernetes-master   1/1     Running   0          51m
kube-system   kube-proxy-p8mdw                            1/1     Running   0          51m
kube-system   kube-scheduler-kubernetes-master            1/1     Running   0          51m

So far, the basic environment has been deployed.

4 Run the first kubernetes container

4.1 Check component running status

kubectl get cs

# Output is as follows
NAME                 STATUS    MESSAGE             ERROR
# Scheduler: its main role is to schedule Pods onto Nodes
scheduler            Healthy   ok
# Controller manager: monitors cluster state and automatically restores it to the desired state, e.g. after a Node failure
controller-manager   Healthy   ok
# etcd: the cluster's key-value store
etcd-0               Healthy   {"health":"true"} 

4.2 Check Master status

kubectl cluster-info

# Output is as follows
# Main node status
Kubernetes master is running at https://192.168.190.165:6443

# DNS state
KubeDNS is running at https://192.168.190.165:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

4.3 Check the status of Nodes

kubectl get nodes

# The output is as follows: STATUS is Ready, which is normal.
[root@kmaster-01 calico]# kubectl get nodes
NAME         STATUS   ROLES    AGE   VERSION
kmaster-01   Ready    master   22m   v1.15.2
knode-01     Ready    <none>   11m   v1.15.2

4.4 Run the first container instance

# Use kubectl to create two nginx Pods (the smallest unit in which Kubernetes runs containers), listening on port 80
kubectl run nginx --image=nginx --replicas=2 --port=80

# Output is as follows
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created

4.5 View the status of all Pods

kubectl get pods

# The output is as follows; after a short wait, a STATUS of Running means the Pods are running successfully.
NAME                     READY   STATUS    RESTARTS   AGE
nginx-755464dd6c-qnmwp   1/1     Running   0          90m
nginx-755464dd6c-shqrp   1/1     Running   0          90m

4.6 View the Deployment

kubectl get deployment

# Output is as follows
NAME    READY   UP-TO-DATE   AVAILABLE   AGE
nginx   2/2     2            2           91m

4.7 Expose the Service for User Access

kubectl expose deployment nginx --port=80 --type=LoadBalancer

# Output is as follows
service/nginx exposed

4.8 View published services

kubectl get services

# Output is as follows
NAME         TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP      10.96.0.1        <none>        443/TCP        44h
# The nginx service has been published successfully, with port 80 mapped to NodePort 31738.
nginx        LoadBalancer   10.108.121.244   <pending>     80:31738/TCP   88m

4.9 See Service Details

kubectl describe service nginx

# Output is as follows
Name:                     nginx
Namespace:                default
Labels:                   run=nginx
Annotations:              <none>
Selector:                 run=nginx
Type:                     LoadBalancer
IP:                       10.108.121.244
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
NodePort:                 <unset>  31738/TCP
Endpoints:                192.168.17.5:80,192.168.8.134:80
Session Affinity:         None
External Traffic Policy:  Cluster
Events:                   <none>

4.10 Verify Success

Access the Master server through a browser:

http://192.168.190.165:31738/

At this point, Kubernetes serves the deployed nginx Pods in a load-balanced manner; if the nginx welcome page appears, the deployment was successful. The containers actually run on the Node machines, so accessing a Node's IP:Port works as well.
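
The same check can be done from the command line; the NodePort is open on every node, so any of the cluster IPs from section 1.1.1 should answer:

# An HTTP 200 response and the nginx welcome page indicate success
curl -I http://192.168.190.165:31738/
curl http://192.168.190.163:31738/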

4.11 Stop Service

kubectl delete deployment nginx

# Output is as follows
deployment.extensions "nginx" deleted
kubectl delete service nginx

# Output is as follows
service "nginx" deleted