k8s-2. 使用

338 阅读2分钟

1. command

# View the merged kubeconfig (cluster configuration)
kubectl config view
# Background API-server proxy on port 8080, detached from the terminal
# NOTE(review): '^*$' is likely meant to be '^.*$' (accept any host) — confirm
nohup kubectl proxy --address=0.0.0.0 -p 8080  --accept-hosts='^*$' </dev/null >/dev/null 2>&1 &
# Background port-forward: expose elasticsearch-master-0:9300 on all interfaces
nohup kubectl port-forward --namespace logging es-elasticsearch-master-0 9300:9300 --address 0.0.0.0 </dev/null >/dev/null 2>&1 &
# Make the API server proxy accept requests from all hosts:
kubectl proxy --address='0.0.0.0' --accept-hosts='^*$' --port=8009
# RBAC audit: list all (cluster)role bindings and the ServiceAccounts they grant
kubectl get rolebindings,clusterrolebindings --all-namespaces -o custom-columns='KIND:kind,NAMESPACE:metadata.namespace,NAME:metadata.name,SERVICE_ACCOUNTS:subjects[?(@.kind=="ServiceAccount")].name'

## exec — run commands inside a running pod
# Inspect the pod's DNS resolver configuration
kubectl exec -it -n kuai-app ambassador-5555d7d565-2988b -- cat /etc/resolv.conf  
# Probe in-cluster reachability of the API server service IP (-k: skip TLS verify)
kubectl exec -it -n kuai-app ambassador-5555d7d565-2988b -- curl -k https://10.43.0.1

# List pods sorted by creation timestamp (oldest first)
kubectl get pods -n model-deployment --sort-by=.metadata.creationTimestamp
# List pods with their container images, one "pod: image, image," line each
kubectl get pods -n spark002 -o=jsonpath='{range .items[*]}{"\n"}{.metadata.name}{":\t"}{range .spec.containers[*]}{.image}{", "}{end}{end}' | sort

# Print "<uid>    <name>" for every pod in the cluster
kubectl get pods --all-namespaces -o go-template='{{range .items}}{{.metadata.uid}}    {{.metadata.name}}{{"\n"}}{{end}}'
# Show logs of the most recently listed spark-shell pod
kubectl get po -n spark001 |grep spark-shell |awk '{print $1}'|tail -n 1|xargs kubectl logs -n spark001
# Tail the last hour of logs for every matching pod in the namespace
for p in $(kubectl get po -n model-deployment |grep kuai-websocket-|awk '{print $1}'); do kubectl logs $p --since=1h -n model-deployment; done
# Show a namespace's resource quota limits and current usage
kubectl get quota -n spark000 --output=yaml quota
# List nodes together with their labels
kubectl get nodes --show-labels
# Dump a secret as YAML (data values are base64-encoded)
kubectl get --namespace=monitoring secret grafana-datasources -o yaml
# Extract and decode a docker-registry credential secret
kubectl get secret regcred --output="jsonpath={.data.\.dockerconfigjson}" | base64 --decode
# Generic base64 decode ("encode" is a placeholder for the encoded text)
echo "encode" |base64 -d

# Patch a pod's container image in place (name must match an existing container)
kubectl patch pod xxx -n kubeflow -p '{"spec":{"containers":[{"name":"xxx","image":"xxx"}]}}'
# Scale a deployment to 10 replicas
kubectl scale deployment.v1.apps/nginx-deployment --replicas=10


# Bulk-delete all pods whose status line contains "Error"
kubectl get pods -n spark-prod| grep Error | cut -d' ' -f 1  | xargs kubectl delete pod -n spark-prod
# Force-delete a stuck pod immediately (skips graceful termination)
kubectl delete pod <PODNAME> --grace-period=0 --force --namespace <NAMESPACE>
# Strip finalizers so a stuck PVC/PV can actually be removed
kubectl patch pvc xxx -n kubeflow -p '{"metadata":{"finalizers": []}}' --type=merge
kubectl patch pv <pv_name> -p '{"metadata": {"finalizers": null}}'
kubectl delete pv <pv_name> --grace-period=0 --force
# Node cleanup: stop every leftover kubepod systemd slice
for i in $(/usr/bin/systemctl list-unit-files --no-legend --no-pager -l | grep --color=never -o .*.slice | grep kubepod);do systemctl stop $i;done


# Enable kubectl bash tab-completion for future shells, then load it now
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc
# helm — add a chart repo, install / download a chart at a pinned version
# NOTE(review): kubernetes-charts-incubator.storage.googleapis.com has been
# decommissioned — verify the current repo URL before use
helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com
helm install incubator/etcd --version 0.6.3
helm fetch incubator/etcd --version 0.6.3

create

create pod

# Create a long-sleeping busybox pod, handy as a debug shell in the cluster.
# (unquoted EOF: the here-doc body contains no $ so nothing expands)
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
  labels:
    app: busybox
spec:
  containers:
  - image: harbor01.io/kuai/busybox:latest
    command:
      - sleep
      - "3600000"
    imagePullPolicy: IfNotPresent
    name: busybox
  restartPolicy: Always
EOF

create pv

# Create a 1Gi hostPath PersistentVolume.
# Fixes from original: "reclaimPolicy" is not a valid PV spec field and was
# written as a YAML list; the correct field is the scalar
# "persistentVolumeReclaimPolicy" (the original manifest is rejected by the API).
cat <<EOF | kubectl apply -f -
kind: PersistentVolume
apiVersion: v1
metadata:
  name: hostpath2
  labels:
    type: local
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  hostPath:
    path: "/tmp/data1"
EOF

2. create serviceaccount kubeconfig

# --- Gather the pieces needed to build a ServiceAccount kubeconfig ---
# your API server URL goes here
server=https://localhost:8443
# the name of the secret containing the service account token goes here
name=default-token-sg96k

# ca.crt stays base64-encoded (kubeconfig wants certificate-authority-data in
# base64); token and namespace are decoded for direct use.
# Fixed: "$name" is quoted so the commands are safe under word-splitting/globbing.
ca=$(kubectl get "secret/$name" -o jsonpath='{.data.ca\.crt}')
token=$(kubectl get "secret/$name" -o jsonpath='{.data.token}' | base64 --decode)
namespace=$(kubectl get "secret/$name" -o jsonpath='{.data.namespace}' | base64 --decode)

# Write a standalone kubeconfig authenticating with the SA token.
# The double-quoted string lets ${ca}, ${server}, ${token} expand inline.
echo "
apiVersion: v1
kind: Config
clusters:
- name: default-cluster
  cluster:
    certificate-authority-data: ${ca}
    server: ${server}
contexts:
- name: default-context
  context:
    cluster: default-cluster
    namespace: default
    user: default-user
current-context: default-context
users:
- name: default-user
  user:
    token: ${token}
" > sa.kubeconfig

3. API开发

  1. 反向代理访问ApiServer
kubectl proxy --address='0.0.0.0' --port=8002 --accept-hosts='.*'
  2. 客户端库: 参见 Kubernetes 官方客户端库文档（原链接 k8smeetup.github.io/docs/refere… 已截断）

  3. 使用 API 访问集群

# API server URL taken from the current kubeconfig context
APISERVER=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
# Bearer token of the default ServiceAccount's first secret
TOKEN=$(kubectl get secret $(kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode )
# Call the API without verifying the server certificate
# NOTE(review): the leading "$ " is a pasted shell prompt and the JSON below is
# sample output — neither is executable as part of a script
$ curl $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure
{
  "kind": "APIVersions",
  "versions": [
    "v1"
  ],
  "serverAddressByClientCIDRs": [
    {
      "clientCIDR": "0.0.0.0/0",
      "serverAddress": "10.0.1.149:443"
    }
  ]
}
# With certificate verification against the cluster CA:
curl $APISERVER/api --header "Authorization: Bearer $TOKEN" --cacert /etc/kubernetes/pki/ca.crt        
  1. admin api token: 创建 service account k8s-admin（kubectl apply -f k8s-kube-admin.yaml）
# Grant cluster-admin to the k8s-admin ServiceAccount.
# Fixes from original:
#  - subjects[0].name said "kuai-admin" but the ServiceAccount created below is
#    "k8s-admin" — the binding referenced a non-existent account.
#  - rbac.authorization.k8s.io/v1beta1 was removed in Kubernetes 1.22; use v1.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: k8s-admin
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: k8s-admin
    namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: k8s-admin
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile

# Find the token secret of the k8s-admin ServiceAccount and decode its token.
# Fixed: the original was missing command substitution $( ) — it assigned the
# literal command text to the variables instead of the command's output — and
# left $k8s_admin_secret_name unquoted.
k8s_admin_secret_name=$(kubectl get secret -n kube-system | grep k8s-admin | awk '{ print $1 }')

k8s_kube_admin_token=$(kubectl get secret -o jsonpath='{.data.token}' -n kube-system "$k8s_admin_secret_name" | base64 -d)