@TOC
1. 资源类型与别名
| 资源类型 | 缩写别名 |
| --- | --- |
| clusters | |
| componentstatuses | cs |
| configmaps | cm |
| cronjobs | cj |
| daemonsets | ds |
| deployments | deploy |
| endpoints | ep |
| events | ev |
| horizontalpodautoscalers | hpa |
| ingresses | ing |
| jobs | |
| limitranges | limits |
| namespaces | ns |
| networkpolicies | netpol |
| nodes | no |
| persistentvolumeclaims | pvc |
| persistentvolumes | pv |
| pods | po |
| podsecuritypolicies | psp |
| podtemplates | |
| replicasets | rs |
| replicationcontrollers | rc |
| resourcequotas | quota |
| secrets | |
| serviceaccounts | sa |
| services | svc |
| statefulsets | sts |
| storageclasses | sc |
2. 资源操作
其中关于 kubectl 的命令,不同 k8s 版本,可能会存在命令被废弃或者不支持的情况;
- 使用已被废弃的命令,k8s 会提示:
Flag --record has been deprecated, --record will be removed in the future
- 在旧版本上使用新版本才有的命令会提示:
Error: unknown flag: --include-uninitialized
See 'kubectl get --help' for usage.
2.1 创建对象
$ kubectl create -f ./my-manifest.yaml # 创建资源
$ kubectl create -f ./my1.yaml -f ./my2.yaml # 使用多个文件创建资源
$ kubectl create -f ./dir # 使用目录下的所有清单文件来创建资源
$ kubectl create -f https://git.io/vPieo # 使用 url 来创建资源
$ kubectl run nginx --image=nginx # 启动一个 nginx 实例
$ kubectl explain pods,svc # 获取 pod 和 svc 的文档
# 从 stdin 输入中创建多个 YAML 对象
$ cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Namespace
metadata:
name: my-namespace-1
---
apiVersion: v1
kind: Namespace
metadata:
name: my-namespace-2
EOF
2.2 显示和查找资源
# Get commands with basic output
$ kubectl get services # 列出所有 namespace 中的所有 service
$ kubectl get pods --all-namespaces # 列出所有 namespace 中的所有 pod
$ kubectl get pods -o wide # 列出所有 pod 并显示详细信息
$ kubectl get deployment my-dep # 列出指定 deployment
$ kubectl get pods --include-uninitialized # 列出该 namespace 中的所有 pod 包括未初始化的
# 使用详细输出来描述命令
$ kubectl describe nodes my-node
$ kubectl describe pods my-pod
$ kubectl get services --sort-by=.metadata.name # List Services Sorted by Name
# 根据重启次数排序列出 pod
$ kubectl get pods --sort-by='.status.containerStatuses[0].restartCount'
# 获取所有具有 app=cassandra 的 pod 中的 version 标签
$ kubectl get pods --selector=app=cassandra rc -o \
jsonpath='{.items[*].metadata.labels.version}'
# 获取所有节点的 ExternalIP
$ kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="ExternalIP")].address}'
# 列出属于某个 RC 的 Pod 的名字
# “jq”命令用于转换复杂的 jsonpath,参考 https://stedolan.github.io/jq/
$ sel=${$(kubectl get rc my-rc --output=json | jq -j '.spec.selector | to_entries | .[] | "\(.key)=\(.value),"')%?}
$ echo $(kubectl get pods --selector=$sel --output=jsonpath={.items..metadata.name})
# 查看哪些节点已就绪
$ JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}' \
&& kubectl get nodes -o jsonpath="$JSONPATH" | grep "Ready=True"
# 列出当前 Pod 中使用的 Secret
$ kubectl get pods -o json | jq '.items[].spec.containers[].env[]?.valueFrom.secretKeyRef.name' | grep -v null | sort | uniq
2.3 更新资源
$ kubectl rolling-update frontend-v1 -f frontend-v2.json # 滚动更新 pod frontend-v1
$ kubectl rolling-update frontend-v1 frontend-v2 --image=image:v2 # 更新资源名称并更新镜像
$ kubectl rolling-update frontend --image=image:v2 # 更新 frontend pod 中的镜像
$ kubectl rolling-update frontend-v1 frontend-v2 --rollback # 退出已存在的进行中的滚动更新
$ cat pod.json | kubectl replace -f - # 基于 stdin 输入的 JSON 替换 pod
# 强制替换,删除后重新创建资源。会导致服务中断。
$ kubectl replace --force -f ./pod.json
# 为 nginx RC 创建服务,启用本地 80 端口连接到容器上的 8000 端口
$ kubectl expose rc nginx --port=80 --target-port=8000
# 更新单容器 pod 的镜像版本(tag)到 v4
$ kubectl get pod mypod -o yaml | sed 's/\(image: myimage\):.*$/\1:v4/' | kubectl replace -f -
$ kubectl label pods my-pod new-label=awesome # 添加标签
$ kubectl annotate pods my-pod icon-url=http://goo.gl/XXBTWq # 添加注解
$ kubectl autoscale deployment foo --min=2 --max=10 # 自动扩展 deployment “foo”
2.4 修补资源
$ kubectl patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}' # 部分更新节点
# 更新容器镜像; spec.containers[*].name 是必须的,因为这是合并的关键字
$ kubectl patch pod valid-pod -p '{"spec":{"containers":[{"name":"kubernetes-serve-hostname","image":"new image"}]}}'
# 使用具有位置数组的 json 补丁更新容器镜像
$ kubectl patch pod valid-pod --type='json' -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"new image"}]'
# 使用具有位置数组的 json 补丁禁用 deployment 的 livenessProbe
$ kubectl patch deployment valid-deployment --type json -p='[{"op": "remove", "path": "/spec/template/spec/containers/0/livenessProbe"}]'
2.5 编辑资源
$ kubectl edit svc/docker-registry # 编辑名为 docker-registry 的 service
$ KUBE_EDITOR="nano" kubectl edit svc/docker-registry # 使用其它编辑器
2.6 scale 资源
$ kubectl scale --replicas=3 rs/foo # Scale a replicaset named 'foo' to 3
$ kubectl scale --replicas=3 -f foo.yaml # Scale a resource specified in "foo.yaml" to 3
$ kubectl scale --current-replicas=2 --replicas=3 deployment/mysql # If the deployment named mysql's current size is 2, scale mysql to 3
$ kubectl scale --replicas=5 rc/foo rc/bar rc/baz # Scale multiple replication controllers
在使用 scale 命令的时候需注意一点,scale 命令不支持对 po 进行操作,因为 po 属于最小的执行单元了,如果想对某个 pod 资源进行扩容,可操作该 pod 对应的 deployment。
[root@docker-54 resource]# kubectl get po
NAME READY STATUS RESTARTS AGE
httpd-deployment-59fc85cfcd-hg9p4 1/1 Running 0 6d2h
nfs-test-pd1 1/1 Running 0 22h
nfs-test-pd2 1/1 Running 0 22h
nginx-deployment2-dcbf9ff88-v2pzj 1/1 Running 0 6d2h
[root@docker-54 resource]#
[root@docker-54 resource]# kubectl scale po --replicas=3 nginx-deployment2-dcbf9ff88-v2pzj
Error from server (NotFound): the server could not find the requested resource
[root@docker-54 resource]#
[root@docker-54 resource]# kubectl get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
httpd-deployment 1/1 1 1 93d
nginx-deployment2 1/1 1 1 91d
[root@docker-54 resource]#
[root@docker-54 resource]# kubectl scale deploy --replicas=2 nginx-deployment2
deployment.apps/nginx-deployment2 scaled
[root@docker-54 resource]#
[root@docker-54 resource]# kubectl get po
NAME READY STATUS RESTARTS AGE
httpd-deployment-59fc85cfcd-hg9p4 1/1 Running 0 6d2h
nfs-test-pd1 1/1 Running 0 22h
nfs-test-pd2 1/1 Running 0 22h
nginx-deployment2-dcbf9ff88-qxtxn 0/1 ContainerCreating 0 6s
nginx-deployment2-dcbf9ff88-v2pzj 1/1 Running 0 6d2h
[root@docker-54 resource]#
[root@docker-54 resource]# kubectl get po
NAME READY STATUS RESTARTS AGE
httpd-deployment-59fc85cfcd-hg9p4 1/1 Running 0 6d2h
nfs-test-pd1 1/1 Running 0 22h
nfs-test-pd2 1/1 Running 0 22h
nginx-deployment2-dcbf9ff88-qxtxn 0/1 ContainerCreating 0 11s
nginx-deployment2-dcbf9ff88-v2pzj 1/1 Running 0 6d2h
[root@docker-54 resource]#
[root@docker-54 resource]# kubectl get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
httpd-deployment 1/1 1 1 93d
nginx-deployment2 2/2 2 2 91d
[root@docker-54 resource]#
2.7 删除资源
$ kubectl delete -f ./pod.json # 删除 pod.json 文件中定义的类型和名称的 pod
$ kubectl delete pod,service baz foo # 删除名为“baz”的 pod 和名为“foo”的 service
$ kubectl delete pods,services -l name=myLabel # 删除具有 name=myLabel 标签的 pod 和 service
$ kubectl delete pods,services -l name=myLabel --include-uninitialized # 删除具有 name=myLabel 标签的 pod 和 service,包括尚未初始化的
$ kubectl -n my-ns delete po,svc --all # 删除 my-ns namespace 下的所有 pod 和 service
在使用过程中,如果忘记了某个命令的用法,可以使用 --help 或 -h 选项:大多数 Kubernetes 命令都支持 --help 或 -h 选项。通过在命令后面添加 --help 或 -h,您可以获取该命令的帮助信息和使用方式示例。例如:
kubectl apply --help
使用 kubectl explain 命令:kubectl explain 命令非常有用,它可以提供有关 Kubernetes API 对象和字段的详细说明。通过指定对象类型和字段名称,您可以获取关于它们的定义、类型、说明等信息。例如:
kubectl explain pod.spec.containers
3. 格式化输出
- 输出 json 格式
-o json
- 仅打印资源名称
-o name
- 以纯文本格式输出,并附带显示额外信息(例如 pod 所在的节点)
-o wide
- 输出 yaml 格式
-o yaml
当我们部署了某个 deployment 之后,但是我忘记了 创建这个资源的 yaml 文件的位置,我想看下创建时是如何定义的,那么就可以使用 格式化输出中的 yaml 格式:
[root@docker-54 resource]# kubectl get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
httpd-deployment 1/1 1 1 93d
nginx-deployment2 2/2 2 2 91d
[root@docker-54 resource]#
[root@docker-54 resource]# kubectl get deploy nginx-deployment2 -o yaml
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"name":"nginx-deployment2","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"nginx:latest","name":"nginx2","ports":[{"containerPort":80}],"resources":{"limits":{"cpu":"0.5","memory":"512Mi"}}}]}}}}
creationTimestamp: "2023-07-04T05:50:42Z"
generation: 2
name: nginx-deployment2
namespace: default
resourceVersion: "5717734"
uid: b47a00ed-4c34-4da2-a882-a6988f2be4cb
spec:
progressDeadlineSeconds: 600
replicas: 2
revisionHistoryLimit: 10
selector:
matchLabels:
app: nginx2
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: nginx2
spec:
containers:
- image: nginx:latest
imagePullPolicy: Always
name: nginx2
ports:
- containerPort: 80
protocol: TCP
resources:
limits:
cpu: 500m
memory: 512Mi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
status:
availableReplicas: 2
conditions:
- lastTransitionTime: "2023-07-04T05:50:42Z"
lastUpdateTime: "2023-07-04T05:51:15Z"
message: ReplicaSet "nginx-deployment2-dcbf9ff88" has successfully progressed.
reason: NewReplicaSetAvailable
status: "True"
type: Progressing
- lastTransitionTime: "2023-10-03T08:55:09Z"
lastUpdateTime: "2023-10-03T08:55:09Z"
message: Deployment has minimum availability.
reason: MinimumReplicasAvailable
status: "True"
type: Available
observedGeneration: 2
readyReplicas: 2
replicas: 2
updatedReplicas: 2
[root@docker-54 resource]# grep -r "b47a00ed-4c34-4da2-a882-a6988f2be4cb" /etc/kubernetes/manifests/
[root@docker-54 resource]#
这里包含了集群默认补全的内容,比如 metadata.uid、status 这些,在创建的时候无需指定。并且这个输出的 yaml 文件为当前资源最新定义的描述,比如这里是 replicas: 2,接着修改副本数为 1 之后,再次执行输出:
[root@docker-54 resource]# kubectl scale --replicas=1 deploy nginx-deployment2
deployment.apps/nginx-deployment2 scaled
[root@docker-54 resource]#
[root@docker-54 resource]# kubectl get deploy nginx-deployment2 -o yaml
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"name":"nginx-deployment2","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"nginx2"}},"template":{"metadata":{"labels":{"app":"nginx2"}},"spec":{"containers":[{"image":"nginx:latest","name":"nginx2","ports":[{"containerPort":80}],"resources":{"limits":{"cpu":"0.5","memory":"512Mi"}}}]}}}}
creationTimestamp: "2023-07-04T05:50:42Z"
generation: 3
name: nginx-deployment2
namespace: default
resourceVersion: "5721829"
uid: b47a00ed-4c34-4da2-a882-a6988f2be4cb
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: nginx2
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: nginx2
spec:
containers:
- image: nginx:latest
imagePullPolicy: Always
name: nginx2
ports:
- containerPort: 80
protocol: TCP
resources:
limits:
cpu: 500m
memory: 512Mi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
status:
availableReplicas: 1
conditions:
- lastTransitionTime: "2023-07-04T05:50:42Z"
lastUpdateTime: "2023-07-04T05:51:15Z"
message: ReplicaSet "nginx-deployment2-dcbf9ff88" has successfully progressed.
reason: NewReplicaSetAvailable
status: "True"
type: Progressing
- lastTransitionTime: "2023-10-03T08:55:09Z"
lastUpdateTime: "2023-10-03T08:55:09Z"
message: Deployment has minimum availability.
reason: MinimumReplicasAvailable
status: "True"
type: Available
observedGeneration: 3
readyReplicas: 1
replicas: 1
updatedReplicas: 1
[root@docker-54 resource]#
最新输出结果中为 replicas: 1,可见即使使用命令对原先定义好的资源进行了修改,输出的也是最新的资源定义。