Install a k8s master node on an Ubuntu 20.04 VM server.
sudo swapoff -a
sudo apt update
sudo apt install -y docker.io
sudo systemctl enable docker
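Note that swapoff -a only disables swap until the next reboot. A minimal sketch for keeping it off permanently and, optionally, switching Docker to the systemd cgroup driver that kubeadm recommends (assuming a standard /etc/fstab and no existing /etc/docker/daemon.json):
sudo sed -i '/ swap / s/^/#/' /etc/fstab   # comment out the swap entry so it stays off after a reboot
cat <<EOF | sudo tee /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
sudo systemctl restart docker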
Change kernel parameters and open ports for the master node.
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo modprobe br_netfilter
sudo sysctl --system
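The modprobe only lasts until the next reboot; a small sketch, assuming the standard modules-load.d mechanism, to load br_netfilter at boot and verify the settings took effect:
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables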
sudo ufw allow 8080/tcp #Kubernetes API Server
sudo ufw allow 64430:64439/tcp #Kubernetes API Server
sudo ufw allow 2379:2380/tcp #etcd server client API
sudo ufw allow 10250/tcp #Kubelet API
sudo ufw allow 10251/tcp #kube-scheduler
sudo ufw allow 10252/tcp # kube-controller-manager
sudo ufw allow 6443/tcp #Kubernetes API server
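These rules only take effect once ufw is actually active; if it is not, something like the following enables it and shows the resulting rule set (allow your SSH port first so you do not lock yourself out of the VM, assuming SSH on the default port 22):
sudo ufw allow 22/tcp
sudo ufw enable
sudo ufw status numbered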
I use the kubernetes-xenial repository on focal, but as of 2020/06/01 and 2020/09/17 I haven't found any issues with it.
sudo apt update && sudo apt install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt update
sudo apt install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
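A quick sanity check that the three packages are installed and held at their current version:
kubeadm version
kubectl version --client
kubelet --version
apt-mark showhold   # should list kubeadm, kubectl and kubelet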
There is no configuration by default.
$ kubectl config view
apiVersion: v1
clusters: null
contexts: null
current-context: ""
kind: Config
preferences: {}
users: null
$ sudo kubeadm init
....
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join {{ ip_of_your_server }}:6443 --token nydo9q.6pof.... \
--discovery-token-ca-cert-hash sha256:632f805fa......
Follow the instructions.
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
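At this point the control-plane node should already show up, although it stays NotReady until a Pod network add-on is installed:
kubectl get nodes   # the node is listed, but NotReady until the CNI add-on below is applied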
https://stackoverflow.com/questions/52893111/no-endpoints-available-for-service-kubernetes-dashboard
https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#pod-network
You must deploy a Container Network Interface (CNI) based Pod network add-on so that your Pods can communicate with each other.
Cluster DNS (CoreDNS) will not start up before a network is installed.
kubectl apply -f https://docs.projectcalico.org/v3.14/manifests/calico.yaml
https://docs.projectcalico.org/getting-started/kubernetes/requirements
Calico requires the following network ports (from the requirements page above).
Configuration                                     | Host(s)             | Connection type | Port/protocol
Calico networking (BGP)                           | All                 | Bidirectional   | TCP 179
Calico networking with IP-in-IP enabled (default) | All                 | Bidirectional   | IP-in-IP, often represented by its protocol number 4
Calico networking with VXLAN enabled              | All                 | Bidirectional   | UDP 4789
Calico networking with Typha enabled              | Typha agent hosts   | Incoming        | TCP 5473 (default)
flannel networking (VXLAN)                        | All                 | Bidirectional   | UDP 4789
All                                               | kube-apiserver host | Incoming        | Often TCP 443 or 6443*
etcd datastore                                    | etcd hosts          | Incoming        | Officially TCP 2379 but can vary
Without these rules, NodePort traffic can't be proxied to the other nodes.
sudo ufw allow 179/tcp
sudo ufw allow 4789/udp
sudo ufw allow 5473/tcp
sudo ufw allow 443/tcp
#sudo ufw allow 6443/tcp
#sudo ufw allow 2379/tcp
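Once the Calico manifest and the firewall rules above are in place, the calico and CoreDNS Pods should come up and the node should turn Ready; a quick check:
kubectl get pods -n kube-system
kubectl get nodes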
https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/
https://github.com/kubernetes/dashboard
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.4/aio/deploy/recommended.yaml
The dashboard is deployed into its own namespace, kubernetes-dashboard.
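A quick look at what was created in that namespace:
kubectl get pods,svc -n kubernetes-dashboard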
https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md
Create a user.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
Set up RBAC.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
Apply both of the manifests above.
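For example, if the two manifests above are saved as admin-user.yaml and admin-user-rbac.yaml (the file names are just my assumption here), applying them looks like:
kubectl apply -f admin-user.yaml
kubectl apply -f admin-user-rbac.yaml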
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
sudo service kubelet start
sudo ufw allow 8001/tcp
kubectl proxy # This occupies the terminal, so open another terminal for further work.
Access http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/
and enter the token from the command above.
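kubectl proxy listens only on localhost, so if the master is a remote VM you need a tunnel from your workstation; one possible sketch, assuming you can SSH into the server (replace user with your account name):
ssh -L 8001:127.0.0.1:8001 user@{{ ip_of_your_server }}   # run on your local machine
# then open the dashboard URL above in your local browser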
Done!!
RBAC is enabled by default.
$ kubectl api-versions
...
rbac.authorization.k8s.io/v1
rbac.authorization.k8s.io/v1beta1
...
https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/
mkdir config-exercise
cd config-exercise
vim config-demo
apiVersion: v1
kind: Config
preferences: {}
clusters:
- cluster:
  name: development
- cluster:
  name: scratch
users:
- name: developer
- name: experimenter
contexts:
- context:
  name: dev-frontend
- context:
  name: dev-storage
- context:
  name: exp-scratch
kubectl config --kubeconfig=config-demo set-cluster development --server=https://1.2.3.4 --certificate-authority=fake-ca-file
#kubectl config --kubeconfig=config-demo set-cluster scratch --server=https://5.6.7.8 --insecure-skip-tls-verify
kubectl config --kubeconfig=config-demo set-credentials developer --client-certificate=fake-cert-file --client-key=fake-key-seefile
kubectl config --kubeconfig=config-demo set-credentials experimenter --username=exp --password=some-password
kubectl config --kubeconfig=config-demo set-context dev-frontend --cluster=development --namespace=frontend --user=developer
kubectl config --kubeconfig=config-demo set-context dev-storage --cluster=development --namespace=storage --user=developer
kubectl config --kubeconfig=config-demo set-context exp-scratch --cluster=scratch --namespace=default --user=experimenter
kubectl config --kubeconfig=config-demo view
apiVersion: v1
clusters:
- cluster:
    certificate-authority: fake-ca-file
    server: https://{{ ip_of_your_server }}
  name: development
- cluster:
    insecure-skip-tls-verify: true
    server: https://{{ ip_of_your_server }}
  name: scratch
contexts:
- context:
    cluster: development
    namespace: frontend
    user: developer
  name: dev-frontend
- context:
    cluster: development
    namespace: storage
    user: developer
  name: dev-storage
- context:
    cluster: scratch
    namespace: default
    user: experimenter
  name: exp-scratch
current-context: ""
kind: Config
preferences: {}
users:
- name: developer
  user:
    client-certificate: fake-cert-file
    client-key: fake-key-seefile
- name: experimenter
  user:
    password: some-password
    username: exp
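With the file written, you can switch between the contexts and inspect only the active one (these commands are from the same kubernetes.io page):
kubectl config --kubeconfig=config-demo use-context dev-frontend
kubectl config --kubeconfig=config-demo view --minify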
Some errors and warnings I got from kubeadm init. The first was about the Docker cgroup driver:
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
On a machine with a single CPU the preflight check fails outright:
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR NumCPU]: the number of available CPUs 1 is less than the required 2
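If the VM really only has one CPU and it is just a lab machine, kubeadm can be told to skip that particular preflight check; not something I would do on a production control plane:
sudo kubeadm init --ignore-preflight-errors=NumCPU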
I had been running a DNS server on port 53 before the installation, and it prevented CoreDNS from running on that port.
The dashboard wasn't running even though I followed this manual.
$ kubectl describe pod/coredns-f9fd979d6-tptp7 -n kube-system
...
Warning FailedCreatePodSandBox 4m50s (x915 over 64m) kubelet, myhost.com (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "e35ec7621189a105239aa646442f96f95c6a15e379b150da8e4ca1d9865ac264" network for pod "coredns-f9fd979d6-tptp7": networkPlugin cni failed to set up pod "coredns-f9fd979d6-tptp7_kube-system" network: stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/
Killing the Pod doesn't work.
$ kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-6dfcd885bf-qm2hj 0/1 ContainerCreating 0 55m
calico-node-44bk4 0/1 CrashLoopBackOff 17 55m
coredns-f9fd979d6-tptp7 0/1 ContainerCreating 0 57m
coredns-f9fd979d6-wr5vc 0/1 ContainerCreating 0 57m
etcd-myhost.com 1/1 Running 0 57m
kube-apiserver-myhost.com 1/1 Running 0 57m
kube-controller-manager-myhost.com 1/1 Running 0 57m
kube-proxy-nwxcc 1/1 Running 0 57m
kube-scheduler-myhost.com 1/1 Running 0 57m
$ kubectl delete pod/calico-node-44bk4 -n kube-system
pod "calico-node-44bk4" deleted
$ kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-6dfcd885bf-qm2hj 0/1 ContainerCreating 0 60m
calico-node-7hwxs 0/1 Running 3 3m53s
coredns-f9fd979d6-tptp7 0/1 ContainerCreating 0 61m
coredns-f9fd979d6-wr5vc 0/1 ContainerCreating 0 61m
etcd-myhost.com 1/1 Running 0 61m
kube-apiserver-myhost.com 1/1 Running 0 61m
kube-controller-manager-myhost.com 1/1 Running 0 61m
kube-proxy-nwxcc 1/1 Running 0 61m
kube-scheduler-myhost.com 1/1 Running 0 61m
$ kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-6dfcd885bf-qm2hj 0/1 ContainerCreating 0 74m
calico-node-7hwxs 0/1 CrashLoopBackOff 7 18m
coredns-f9fd979d6-tptp7 0/1 ContainerCreating 0 76m
coredns-f9fd979d6-wr5vc 0/1 ContainerCreating 0 76m
etcd-myhost.com 1/1 Running 0 76m
kube-apiserver-myhost.com 1/1 Running 0 76m
kube-controller-manager-myhost.com 1/1 Running 0 76m
kube-proxy-nwxcc 1/1 Running 0 76m
kube-scheduler-myhost.com 1/1 Running 0 76m
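To dig into why calico-node keeps crash-looping, its container logs are usually the quickest pointer; a possible starting point, assuming the standard labels and container name from the Calico manifest:
kubectl -n kube-system logs -l k8s-app=calico-node -c calico-node --tail=50
kubectl -n kube-system describe daemonset calico-node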