Skip to content

Files

Latest commit

dff5425 · Aug 31, 2023

History

History
575 lines (447 loc) · 17.2 KB

01.md

File metadata and controls

575 lines (447 loc) · 17.2 KB

---===---

  1. K8s Core Concepts/useful-links.md
K8s Components
K8s Architecture
kubectl and K8s configuration

---===--- 02. Install Cluster/commands.md

Provision Infrastructure

move private key to .ssh folder and restrict access
mv ~/Downloads/k8s-node.pem ~/.ssh
chmod 400 ~/.ssh/k8s-node.pem
ssh into ec2 instance with its public ip
ssh -i ~/.ssh/k8s-node.pem [email protected]

Configure Infrastructure

sudo swapoff -a
set host names of nodes
sudo vim /etc/hosts
get the private IPs of each node and add these entries on each server
45.14.48.178 master
45.14.48.179 worker1
45.14.48.180 worker2
we can now use these names instead of typing the IPs, when nodes talk to each other. After that, assign a hostname to each of these servers.
on master server
sudo hostnamectl set-hostname master
on worker1 server
sudo hostnamectl set-hostname worker1
on worker2 server
sudo hostnamectl set-hostname worker2

Initialize K8s cluster

sudo kubeadm init

Check kubelet process running

service kubelet status
systemctl status kubelet

Check extended logs of kubelet service

journalctl -u kubelet

Access cluster as admin

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Kubectl commands

get node information
kubectl get node
get pods in kube-system namespace
kubectl get pod -n kube-system
get pods from all namespaces
kubectl get pod -A
get wide output
kubectl get pod -n kube-system -o wide

Install pod network plugin

download and install the manifest
kubectl apply -f https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s.yaml

Link to the Weave-net installation guide

check weave net status
kubectl exec -n kube-system weave-net-1jkl6 -c weave -- /home/weave/weave --local status

Join worker nodes

create and execute script
vim install-containerd.sh
chmod u+x install-containerd.sh
./install-containerd.sh
on master
kubeadm token create --help
kubeadm token create --print-join-command
copy the output command and execute on worker node as ROOT
sudo kubeadm join 172.31.43.99:6443 --token 9bds1l.3g9ypte9gf69b5ft --discovery-token-ca-cert-hash sha256:xxxx
start a test pod
kubectl run test --image=nginx

---===--- 02. Install Cluster/install-containerd.sh

#!/bin/bash
#
# Installs and configures containerd as the container runtime for a
# Kubernetes node. Run on every node before "kubeadm init"/"kubeadm join".

# --- Install and configure prerequisites ---

# Load the kernel modules containerd needs (overlay for the overlayfs
# snapshotter, br_netfilter so bridged pod traffic is visible to iptables)
# and persist them across reboots.
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

sudo modprobe overlay
sudo modprobe br_netfilter

# sysctl params required by setup; params persist across reboots.
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF

# Apply sysctl params without reboot.
sudo sysctl --system

# --- Install containerd ---
sudo apt-get update
sudo apt-get -y install containerd

# Configure containerd with defaults, switch to the systemd cgroup driver
# (required when kubelet uses systemd cgroups), and restart with this config.
sudo mkdir -p /etc/containerd
containerd config default | sudo tee /etc/containerd/config.toml
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml
sudo systemctl restart containerd
---===--- 02. Install Cluster/install-k8s-components.sh

#!/bin/bash
#
# Installs kubelet, kubeadm and kubectl from the Kubernetes apt repository,
# pinned to version 1.21.0. Run on every node.

# Install packages needed to use the Kubernetes apt repository.
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl

# Download the Google Cloud public signing key.
sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg

# Add the Kubernetes apt repository.
echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list

# Install kubelet, kubeadm & kubectl, and pin their versions.
sudo apt-get update

# Check available kubeadm versions (useful when executing manually).
apt-cache madison kubeadm

# Install version 1.21.0 for all components.
sudo apt-get install -y kubelet=1.21.0-00 kubeadm=1.21.0-00 kubectl=1.21.0-00

# apt-mark hold prevents the packages from being automatically upgraded
# or removed.
sudo apt-mark hold kubelet kubeadm kubectl
---===---

  1. Install Cluster/useful-links.md
provision infrastructure on aws
k8s installation
configure Infrastructure
container runtime
kubeadm
kubeconfig
kube-system namespace
networking in K8s - CNI
join worker nodes

---===--- 03. Deploy Application/commands.md

Kubectl commands

apply manifests
kubectl apply -f nginx-deployment.yaml
kubectl apply -f nginx-service.yaml
labels
kubectl get svc
kubectl describe svc {svc-name}
kubectl get ep

kubectl get svc --show-labels
kubectl get svc -l app=nginx

kubectl get pod --show-labels
kubectl get pod -l app=nginx
kubectl logs -l app=nginx

kubectl get pod -n kube-system --show-labels
kubectl logs -n kube-system -l="name=weave-net" -c weave

kubectl get node --show-labels
scaling deployments
kubectl scale --help
kubectl scale deployment {depl-name} --replicas 4
kubectl scale deployment {depl-name} --replicas 3

kubectl scale deployment {depl-name} --replicas 5 --record
kubectl rollout history deployment {depl-name}
create pods
kubectl run test-nginx-service --image=busybox
kubectl exec -it {pod-name} -- bash

---===--- 03. Deploy Application/nginx-deployment.yaml

# Deployment running 2 replicas of nginx:1.20; pods carry the
# label app=nginx so they can be selected by the Service.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.20

---===--- 03. Deploy Application/nginx-service.yaml

# ClusterIP Service exposing the nginx pods (selector app=nginx)
# on port 8080, forwarding to container port 80.
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
  labels:
    app: nginx
    svc: test-nginx
spec:
  selector:
    app: nginx
  ports:
    - protocol: TCP
      port: 8080
      targetPort: 80
---===--- 03. Deploy Application/useful-links.md

labels
DNS in K8s
service IP address
working with kubectl

---===--- 04. External Access/ingress.yaml

# Ingress routing /my-app on the load balancer's host name to the
# nginx-service on port 8080; rewrite-target strips the path prefix
# before forwarding to the backend.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: my-app-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
    - host: your-load-balancer-domain-name
      http:
        paths:
          - backend:
              service:
                name: nginx-service
                port:
                  number: 8080
            path: /my-app
            pathType: Exact
---===---
  1. External Access/load-balancer-svc.yaml

# LoadBalancer Service for the nginx pods: cloud LB -> nodePort 30000
# -> service port 8080 -> container port 80.
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
  labels:
    app: nginx
spec:
  type: LoadBalancer
  selector:
    app: nginx
  ports:
    - protocol: TCP
      port: 8080
      targetPort: 80
      nodePort: 30000
---===--- 04. External Access/node-port-svc.yaml

# NodePort Service for the nginx pods: reachable on every node's
# port 30000 -> service port 8080 -> container port 80.
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
  labels:
    app: nginx
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
    - protocol: TCP
      port: 8080
      targetPort: 80
      nodePort: 30000
---===--- 04. External Access/README.md

If you get an error on creating the ingress component related to the "nginx-controller-admission" webhook, then manually delete the ValidatingWebhookConfiguration and try again. To delete it:

kubectl get ValidatingWebhookConfiguration
kubectl delete ValidatingWebhookConfiguration {name}

Link to a more detailed description of the issue ---===--- 04. External Access/useful-links.md

service types
ingress
helm
install nginx-ingress-controller with helm
  1. Users and Permissions/cicd-binding.yaml

# RoleBinding granting the cicd-role to the jenkins ServiceAccount
# in the default namespace.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cicd-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cicd-role
subjects:
  - kind: ServiceAccount
    name: jenkins
    namespace: default
---===---
  1. Users and Permissions/cicd-role.yaml

# Namespaced Role for a CI/CD pipeline: create/update/list services
# (core API group) and deployments (apps API group).
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cicd-role
rules:
  - apiGroups:
      - ""          # "" means the core API group
    resources:
      - services
    verbs:
      - create
      - update
      - list
  - apiGroups:
      - apps
    resources:
      - deployments
    verbs:
      - create
      - update
      - list
---===---
  1. Users and Permissions/commands.md

Create client certificate

create 2048-bit RSA key
openssl genrsa -out dev-tom.key 2048
create user Certificate Signing Request
openssl req -new -key dev-tom.key -subj "/CN=tom" -out dev-tom.csr
get the base64 encoded value of the csr file
cat dev-tom.csr | base64 | tr -d "\n"
create CSR in k8s
kubectl apply -f dev-tom-csr.yaml
review CSR
kubectl get csr
approve CSR
kubectl certificate approve dev-tom
get dev-tom's signed certificate
kubectl get csr dev-tom -o yaml
save decoded cert to dev-tom.crt file
connect to cluster as dev-tom user
kubectl --server={api-server-address} \
--certificate-authority=/etc/kubernetes/pki/ca.crt \
--client-certificate=dev-tom.crt \
--client-key=dev-tom.key \
get pods
create cluster role & binding
kubectl create clusterrole dev-cr --dry-run=client -o yaml > dev-cr.yaml
kubectl create clusterrolebinding dev-crb --dry-run=client -o yaml > dev-crb.yaml
check user permissions as dev-tom
kubectl auth can-i get pod
check user permissions as admin
kubectl auth can-i get pod --as {user-name}

Create Service Account with Permissions

kubectl create serviceaccount jenkins-sa --dry-run=client -o yaml > jenkins-sa.yaml

kubectl create role cicd-role

kubectl create clusterrolebinding cicd-binding \
--clusterrole=cicd-role \
--serviceaccount=default:jenkins

Access with service account token

kubectl options

kubectl --server $server \
--certificate-authority /etc/kubernetes/pki/ca.crt \
--token $token \
--user jenkins \
get pods

---===--- 05. Users and Permissions/dev-crb.yaml

# ClusterRoleBinding granting the dev-cr ClusterRole to the
# certificate-authenticated user "tom".
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dev-crb
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: dev-cr
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: tom
---===---
  1. Users and Permissions/dev-cr.yaml

# ClusterRole for developers: full access to pods and services
# (core group); get/list/create on deployments and statefulsets (apps).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: dev-cr
rules:
  - apiGroups:
      - ""          # "" means the core API group
    resources:
      - pods
      - services
    verbs: ["*"]
  - apiGroups:
      - apps
    resources:
      - deployments
      # RBAC resource names are lowercase plural; "statefulSets" (as in
      # the original) would not match any resource.
      - statefulsets
    verbs:
      - get
      - list
      - create
---===---
  1. Users and Permissions/dev-tom-csr.yaml

# CertificateSigningRequest for user "tom": the kube-apiserver-client
# signer issues a client certificate used for kubectl authentication.
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: dev-tom
spec:
  # base64-encoded CSR file (output of: cat dev-tom.csr | base64 | tr -d "\n")
  request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZqQ0NBVDRDQVFBd0VURVBNQTBHQTFVRUF3d0dZVzVuWld4aE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRgpBQU9DQVE4QU1JSUJDZ0tDQVFFQTByczhJTHRHdTYxakx2dHhWTTJSVlRWMDNHWlJTWWw0dWluVWo4RElaWjBOCnR2MUZtRVFSd3VoaUZsOFEzcWl0Qm0wMUFSMkNJVXBGd2ZzSjZ4MXF3ckJzVkhZbGlBNVhwRVpZM3ExcGswSDQKM3Z3aGJlK1o2MVNrVHF5SVBYUUwrTWM5T1Nsbm0xb0R2N0NtSkZNMUlMRVI3QTVGZnZKOEdFRjJ6dHBoaUlFMwpub1dtdHNZb3JuT2wzc2lHQ2ZGZzR4Zmd4eW8ybmlneFNVekl1bXNnVm9PM2ttT0x1RVF6cXpkakJ3TFJXbWlECklmMXBMWnoyalVnald4UkhCM1gyWnVVV1d1T09PZnpXM01LaE8ybHEvZi9DdS8wYk83c0x0MCt3U2ZMSU91TFcKcW90blZtRmxMMytqTy82WDNDKzBERHk5aUtwbXJjVDBnWGZLemE1dHJRSURBUUFCb0FBd0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBR05WdmVIOGR4ZzNvK21VeVRkbmFjVmQ1N24zSkExdnZEU1JWREkyQTZ1eXN3ZFp1L1BVCkkwZXpZWFV0RVNnSk1IRmQycVVNMjNuNVJsSXJ3R0xuUXFISUh5VStWWHhsdnZsRnpNOVpEWllSTmU3QlJvYXgKQVlEdUI5STZXT3FYbkFvczFqRmxNUG5NbFpqdU5kSGxpT1BjTU1oNndLaTZzZFhpVStHYTJ2RUVLY01jSVUyRgpvU2djUWdMYTk0aEpacGk3ZnNMdm1OQUxoT045UHdNMGM1dVJVejV4T0dGMUtCbWRSeEgvbUNOS2JKYjFRQm1HCkkwYitEUEdaTktXTU0xMzhIQXdoV0tkNjVoVHdYOWl4V3ZHMkh4TG1WQzg0L1BHT0tWQW9FNkpsYWFHdTlQVmkKdjlOSjVaZlZrcXdCd0hKbzZXdk9xVlA3SVFjZmg3d0drWm89Ci0tLS0tRU5EIENFUlRJRklDQVRFIFJFUVVFU1QtLS0tLQo=
  signerName: kubernetes.io/kube-apiserver-client
  # requested certificate lifetime: 8640000 s = 100 days
  expirationSeconds: 8640000
  usages:
    - client auth
---===---
  1. Users and Permissions/jenkins-sa.yaml

# ServiceAccount used by the CI/CD pipeline; bound to cicd-role
# via cicd-binding.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jenkins
---===--- 05. Users and Permissions/useful-links.md

RBAC
certificates API
API groups
service account