⎈

Kubernetes

kubectl commands and YAML patterns for pods, deployments, services, networking, RBAC and cluster operations

Context & Cluster

Switch clusters, set defaults and inspect nodes

bash·List all contexts
kubectl config get-contexts
bash·Switch context
kubectl config use-context <context>
bash·Show current context
kubectl config current-context
bash·Set default namespace for context
kubectl config set-context --current --namespace=<namespace>
bash·List nodes
kubectl get nodes -o wide
bash·Describe a node
kubectl describe node <node>
bash·Show node resource usage
kubectl top nodes
bash·Cordon node (stop scheduling)
kubectl cordon <node>
bash·Drain node for maintenance
kubectl drain <node> --ignore-daemonsets --delete-emptydir-data
bash·Uncordon node
kubectl uncordon <node>
bash·Get cluster info
kubectl cluster-info
bash·Check client and API server version
kubectl version
bash·List all API resources
kubectl api-resources

Namespaces

Create and manage namespaces

bash·List namespaces
kubectl get namespaces
bash·Create namespace
kubectl create namespace staging
bash·Delete namespace (and all its resources)
kubectl delete namespace staging
bash·List all resources in a namespace
kubectl get all -n <namespace>
bash·List resources across all namespaces
kubectl get pods -A
yaml·Namespace manifest
apiVersion: v1
kind: Namespace
metadata:
  name: staging
  labels:
    env: staging
    team: platform

Pods

Run, inspect, debug and delete pods

bash·List pods
kubectl get pods -n <namespace>
bash·List pods with node and IP
kubectl get pods -n <namespace> -o wide
bash·Watch pods in real time
kubectl get pods -n <namespace> -w
bash·Describe pod
kubectl describe pod <pod> -n <namespace>
bash·Stream pod logs
kubectl logs -f <pod> -n <namespace>
bash·Logs for specific container in pod
kubectl logs -f <pod> -c <container> -n <namespace>
bash·Previous container logs (after crash)
kubectl logs <pod> -n <namespace> --previous
bash·Exec into pod
kubectl exec -it <pod> -n <namespace> -- /bin/sh
bash·Run ephemeral debug container
kubectl debug -it <pod> -n <namespace> --image=busybox --target=<container>
bash·Run throwaway debug pod
kubectl run debug --image=busybox --rm -it --restart=Never -- /bin/sh
bash·Copy file from pod
kubectl cp <namespace>/<pod>:/path/file ./file
bash·Copy file into pod
kubectl cp ./file <namespace>/<pod>:/path/file
bash·Delete pod
kubectl delete pod <pod> -n <namespace>
bash·Force delete stuck terminating pod
kubectl delete pod <pod> -n <namespace> --grace-period=0 --force
bash·Show pod resource usage
kubectl top pods -n <namespace> --sort-by=cpu

Deployments

Deploy, scale, update and roll back applications

bash·List deployments
kubectl get deployments -n <namespace>
bash·Describe deployment
kubectl describe deployment <name> -n <namespace>
bash·Apply manifest
kubectl apply -f deployment.yaml
bash·Scale deployment
kubectl scale deployment <name> --replicas=5 -n <namespace>
bash·Update image (rolling update)
kubectl set image deployment/<name> <container>=myrepo/myapp:2.0.0 -n <namespace>
bash·Force rolling restart
kubectl rollout restart deployment/<name> -n <namespace>
bash·Check rollout status
kubectl rollout status deployment/<name> -n <namespace>
bash·View rollout history
kubectl rollout history deployment/<name> -n <namespace>
bash·Roll back to previous revision
kubectl rollout undo deployment/<name> -n <namespace>
bash·Roll back to specific revision
kubectl rollout undo deployment/<name> --to-revision=3 -n <namespace>
bash·Patch deployment inline
kubectl patch deployment <name> -n <namespace> -p '{"spec":{"replicas":3}}'
yaml·Deployment manifest with probes and limits
apiVersion: apps/v1
kind: Deployment
metadata:
  name: api
  namespace: production
  labels:
    app: api
spec:
  replicas: 3
  selector:
    matchLabels:
      app: api
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  template:
    metadata:
      labels:
        app: api
    spec:
      containers:
        - name: api
          image: myrepo/api:1.0.0
          ports:
            - containerPort: 3000
          env:
            - name: NODE_ENV
              value: production
            - name: DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: db-secret
                  key: password
          resources:
            requests:
              cpu: 100m
              memory: 128Mi
            limits:
              cpu: 500m
              memory: 512Mi
          livenessProbe:
            httpGet:
              path: /health
              port: 3000
            initialDelaySeconds: 15
            periodSeconds: 20
          readinessProbe:
            httpGet:
              path: /ready
              port: 3000
            initialDelaySeconds: 5
            periodSeconds: 10
      topologySpreadConstraints:
        - maxSkew: 1
          topologyKey: kubernetes.io/hostname
          whenUnsatisfiable: DoNotSchedule
          labelSelector:
            matchLabels:
              app: api

Services & Ingress

Expose workloads internally and externally

bash·List services
kubectl get svc -n <namespace>
bash·Expose deployment as ClusterIP
kubectl expose deployment <name> --port=80 --target-port=3000 -n <namespace>
bash·Port-forward to service
kubectl port-forward svc/<name> 8080:80 -n <namespace>
bash·Port-forward to pod
kubectl port-forward pod/<pod> 8080:3000 -n <namespace>
bash·List ingresses
kubectl get ingress -A
bash·Describe ingress
kubectl describe ingress <name> -n <namespace>
yaml·ClusterIP + Ingress with TLS
apiVersion: v1
kind: Service
metadata:
  name: api
  namespace: production
spec:
  selector:
    app: api
  ports:
    - port: 80
      targetPort: 3000
  type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: api
  namespace: production
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
    cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - api.example.com
      secretName: api-tls
  rules:
    - host: api.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: api
                port:
                  number: 80

ConfigMaps & Secrets

Inject configuration and sensitive data into workloads

bash·Create ConfigMap from literals
kubectl create configmap app-config --from-literal=LOG_LEVEL=info --from-literal=PORT=3000 -n <namespace>
bash·Create ConfigMap from file
kubectl create configmap nginx-config --from-file=nginx.conf -n <namespace>
bash·Create Secret from literals
kubectl create secret generic db-secret --from-literal=password=mysecret -n <namespace>
bash·Create Secret from .env file
kubectl create secret generic app-secrets --from-env-file=.env -n <namespace>
bash·Create TLS secret from cert and key
kubectl create secret tls api-tls --cert=tls.crt --key=tls.key -n <namespace>
bash·Decode a secret value
kubectl get secret db-secret -n <namespace> -o jsonpath='{.data.password}' | base64 -d
bash·List all secrets
kubectl get secrets -n <namespace>
yaml·ConfigMap mounted as env vars and volume
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config
  namespace: production
data:
  LOG_LEVEL: info
  PORT: "3000"
  config.yaml: |
    server:
      timeout: 30s
    cache:
      ttl: 300
---
# In a Pod spec:
spec:
  containers:
    - name: api
      envFrom:
        - configMapRef:
            name: app-config       # all keys as env vars
      env:
        - name: LOG_LEVEL          # single key
          valueFrom:
            configMapKeyRef:
              name: app-config
              key: LOG_LEVEL
      volumeMounts:
        - name: config-vol
          mountPath: /etc/config
  volumes:
    - name: config-vol
      configMap:
        name: app-config

Autoscaling

HPA, VPA and cluster autoscaler patterns

bash·Create HPA (CPU-based)
kubectl autoscale deployment <name> --cpu-percent=70 --min=2 --max=20 -n <namespace>
bash·List HPAs
kubectl get hpa -n <namespace>
bash·Describe HPA (see current metrics)
kubectl describe hpa <name> -n <namespace>
bash·Delete HPA
kubectl delete hpa <name> -n <namespace>
yaml·HPA with CPU and memory metrics
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: api
  namespace: production
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: api
  minReplicas: 2
  maxReplicas: 20
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70
    - type: Resource
      resource:
        name: memory
        target:
          type: Utilization
          averageUtilization: 80
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 300
      policies:
        - type: Pods
          value: 2
          periodSeconds: 60
    scaleUp:
      stabilizationWindowSeconds: 0
      policies:
        - type: Pods
          value: 4
          periodSeconds: 30
yaml·PodDisruptionBudget
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: api-pdb
  namespace: production
spec:
  minAvailable: 2      # or use maxUnavailable: 1
  selector:
    matchLabels:
      app: api

RBAC

Roles, ClusterRoles, bindings and service accounts

bash·List roles in namespace
kubectl get roles -n <namespace>
bash·List cluster roles
kubectl get clusterroles
bash·List role bindings
kubectl get rolebindings -n <namespace>
bash·Create service account
kubectl create serviceaccount <name> -n <namespace>
bash·Check what a user can do
kubectl auth can-i --list --as=<user>
bash·Check specific permission
kubectl auth can-i create pods --as=<user> -n <namespace>
yaml·Role + RoleBinding for a service account
apiVersion: v1
kind: ServiceAccount
metadata:
  name: app-sa
  namespace: production
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: app-role
  namespace: production
rules:
  - apiGroups: [""]
    resources: ["pods", "pods/log"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["apps"]
    resources: ["deployments"]
    verbs: ["get", "list", "watch", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: app-role-binding
  namespace: production
subjects:
  - kind: ServiceAccount
    name: app-sa
    namespace: production
roleRef:
  kind: Role
  apiGroup: rbac.authorization.k8s.io
  name: app-role

Jobs & CronJobs

Run one-off and scheduled batch workloads

bash·List jobs
kubectl get jobs -n <namespace>
bash·List cronjobs
kubectl get cronjobs -n <namespace>
bash·Trigger CronJob manually
kubectl create job --from=cronjob/<name> <job-name> -n <namespace>
bash·Suspend a CronJob
kubectl patch cronjob <name> -p '{"spec":{"suspend":true}}' -n <namespace>
bash·Delete completed jobs
kubectl delete jobs --field-selector status.successful=1 -n <namespace>
yaml·Job manifest
apiVersion: batch/v1
kind: Job
metadata:
  name: db-migrate
  namespace: production
spec:
  backoffLimit: 3
  ttlSecondsAfterFinished: 600
  template:
    spec:
      restartPolicy: OnFailure
      containers:
        - name: migrate
          image: myrepo/api:1.0.0
          command: ["python", "manage.py", "migrate"]
          envFrom:
            - secretRef:
                name: app-secrets
yaml·CronJob manifest
apiVersion: batch/v1
kind: CronJob
metadata:
  name: cleanup
  namespace: production
spec:
  schedule: "0 2 * * *"          # daily at 02:00 UTC
  concurrencyPolicy: Forbid       # don't overlap runs
  successfulJobsHistoryLimit: 3
  failedJobsHistoryLimit: 1
  jobTemplate:
    spec:
      backoffLimit: 2
      template:
        spec:
          restartPolicy: OnFailure
          containers:
            - name: cleanup
              image: myrepo/worker:latest
              command: ["python", "-m", "tasks.cleanup"]

Debugging & Events

Diagnose failures with events, jsonpath and explain

bash·List events sorted by time
kubectl get events -n <namespace> --sort-by=.lastTimestamp
bash·List Warning events cluster-wide
kubectl get events -A --field-selector type=Warning
bash·Explain a resource field
kubectl explain deployment.spec.strategy
bash·Get resource as YAML
kubectl get deployment <name> -n <namespace> -o yaml
bash·JSONPath query — get image names
kubectl get pods -n <namespace> -o jsonpath='{.items[*].spec.containers[*].image}'
bash·Custom columns output
kubectl get pods -n <namespace> -o custom-columns='NAME:.metadata.name,STATUS:.status.phase,NODE:.spec.nodeName'
bash·List pods in CrashLoopBackOff
kubectl get pods -A | grep CrashLoopBackOff
bash·List pods not Running
kubectl get pods -A --field-selector=status.phase!=Running
bash·Diff live state vs local manifest
kubectl diff -f deployment.yaml
bash·Dry-run apply (validate manifest)
kubectl apply -f deployment.yaml --dry-run=server
bash·Label a resource
kubectl label pod <pod> -n <namespace> env=debug
bash·Annotate a resource
kubectl annotate deployment <name> -n <namespace> kubernetes.io/change-cause='bump to v2'