Environment: Vagrant + VirtualBox.
I have the following priority classes:
root@k8s-master:/# kubectl get priorityclass
NAME                      VALUE        GLOBAL-DEFAULT   AGE
cluster-health-priority   1000000000   false            33m   <-- created by me
default-priority          100          true             33m   <-- created by me
system-cluster-critical   2000000000   false            33m   <-- system
system-node-critical      2000001000   false            33m   <-- system
default-priority has been set as the globalDefault:
root@k8s-master:/# kubectl get priorityclass default-priority -o yaml
apiVersion: scheduling.k8s.io/v1
description: Used for all Pods without priorityClassName
globalDefault: true <------------------
kind: PriorityClass
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"scheduling.k8s.io/v1","description":"Used for all Pods without priorityClassName","globalDefault":true,"kind":"PriorityClass","metadata":{"annotations":{},"labels":{"addonmanager.kubernetes.io/mode":"Reconcile"},"name":"default-priority"},"value":100}
  creationTimestamp: "2019-07-15T16:48:23Z"
  generation: 1
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
  name: default-priority
  resourceVersion: "304"
  selfLink: /apis/scheduling.k8s.io/v1/priorityclasses/default-priority
  uid: 5bea6f73-a720-11e9-8343-0800278dc04d
value: 100
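For reference, cluster-health-priority was created the same way; a minimal manifest matching the values in the table above would look like this (the description wording here is just illustrative, not from my cluster):

apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: cluster-health-priority
value: 1000000000
globalDefault: false
description: Used for cluster health Pods  # illustrative wording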
I have some pods, which were created after the priority classes:

kube-state-metrics-874ccb958-b5spd   1/1   Running   0   9m18s   10.20.59.67   k8s-master   <none>   <none>
tmp-shell-one-59fb949cb5-b8khc       1/1   Running   1   47s     10.20.59.73   k8s-master   <none>   <none>

The kube-state-metrics pod is using the priority class cluster-health-priority:
root@k8s-master:/etc/kubernetes/addons# kubectl -n kube-system get pod kube-state-metrics-874ccb958-b5spd -o yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: "2019-07-15T16:48:24Z"
  generateName: kube-state-metrics-874ccb958-
  labels:
    k8s-app: kube-state-metrics
    pod-template-hash: 874ccb958
  name: kube-state-metrics-874ccb958-b5spd
  namespace: kube-system
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: kube-state-metrics-874ccb958
    uid: 5c64bf85-a720-11e9-8343-0800278dc04d
  resourceVersion: "548"
  selfLink: /api/v1/namespaces/kube-system/pods/kube-state-metrics-874ccb958-b5spd
  uid: 5c88143e-a720-11e9-8343-0800278dc04d
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: kube-role
            operator: In
            values:
            - master
  containers:
  - image: gcr.io/google_containers/kube-state-metrics:v1.6.0
    imagePullPolicy: Always
    name: kube-state-metrics
    ports:
    - containerPort: 8080
      name: http-metrics
      protocol: TCP
    readinessProbe:
      failureThreshold: 3
      httpGet:
        path: /healthz
        port: 8080
        scheme: HTTP
      initialDelaySeconds: 5
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 5
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-state-metrics-token-jvz5b
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: k8s-master
  nodeSelector:
    namespaces/default: "true"
  priorityClassName: cluster-health-priority <------------------------
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: kube-state-metrics
  serviceAccountName: kube-state-metrics
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoSchedule
    key: dedicated
    operator: Equal
    value: master
  - key: CriticalAddonsOnly
    operator: Exists
  volumes:
  - name: kube-state-metrics-token-jvz5b
    secret:
      defaultMode: 420
      secretName: kube-state-metrics-token-jvz5b
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2019-07-15T16:48:24Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2019-07-15T16:48:58Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2019-07-15T16:48:58Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2019-07-15T16:48:24Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: docker://a736dce98492b7d746079728b683a2c62f6adb1068075ccc521c5e57ba1e02d1
    image: gcr.io/google_containers/kube-state-metrics:v1.6.0
    imageID: docker-pullable://gcr.io/google_containers/kube-state-metrics@sha256:c98991f50115fe6188d7b4213690628f0149cf160ac47daf9f21366d7cc62740
    lastState: {}
    name: kube-state-metrics
    ready: true
    restartCount: 0
    state:
      running:
        startedAt: "2019-07-15T16:48:46Z"
  hostIP: 10.0.2.15
  phase: Running
  podIP: 10.20.59.67
  qosClass: BestEffort
  startTime: "2019-07-15T16:48:24Z"
The tmp-shell pod has nothing about priority classes at all:
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: "2019-07-15T16:56:49Z"
  generateName: tmp-shell-one-59fb949cb5-
  labels:
    pod-template-hash: 59fb949cb5
    run: tmp-shell-one
  name: tmp-shell-one-59fb949cb5-b8khc
  namespace: monitoring
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: tmp-shell-one-59fb949cb5
    uid: 89c3caa3-a721-11e9-8343-0800278dc04d
  resourceVersion: "1350"
  selfLink: /api/v1/namespaces/monitoring/pods/tmp-shell-one-59fb949cb5-b8khc
  uid: 89c71bad-a721-11e9-8343-0800278dc04d
spec:
  containers:
  - args:
    - /bin/bash
    image: nicolaka/netshoot
    imagePullPolicy: Always
    name: tmp-shell-one
    resources: {}
    stdin: true
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    tty: true
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: default-token-g9lnc
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: k8s-master
  nodeSelector:
    namespaces/default: "true"
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: default
  serviceAccountName: default
  terminationGracePeriodSeconds: 30
  volumes:
  - name: default-token-g9lnc
    secret:
      defaultMode: 420
      secretName: default-token-g9lnc
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2019-07-15T16:56:49Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2019-07-15T16:57:20Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2019-07-15T16:57:20Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2019-07-15T16:56:49Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: docker://545d4d029b440ebb694386abb09e0377164c87d1170ac79704f39d3167748bf5
    image: nicolaka/netshoot:latest
    imageID: docker-pullable://nicolaka/netshoot@sha256:b3e662a8730ee51c6b877b6043c5b2fa61862e15d535e9f90cf667267407753f
    lastState:
      terminated:
        containerID: docker://dfdfd0d991151e94411029f2d5a1a81d67b5b55d43dcda017aec28320bafc7d3
        exitCode: 130
        finishedAt: "2019-07-15T16:57:17Z"
        reason: Error
        startedAt: "2019-07-15T16:57:03Z"
    name: tmp-shell-one
    ready: true
    restartCount: 1
    state:
      running:
        startedAt: "2019-07-15T16:57:19Z"
  hostIP: 10.0.2.15
  phase: Running
  podIP: 10.20.59.73
  qosClass: BestEffort
  startTime: "2019-07-15T16:56:49Z"
According to the docs:
The globalDefault field indicates that the value of this PriorityClass should be used for Pods without a priorityClassName
and
Pod priority is specified by setting the priorityClassName field of podSpec. The integer value of priority is then resolved and populated to the priority field of podSpec
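If that is right, then after admission a pod that references cluster-health-priority should carry both fields in its spec, roughly like this (a sketch of the expected result, not output from my cluster):

spec:
  ...
  priorityClassName: cluster-health-priority
  priority: 1000000000  # integer value resolved from the PriorityClass

And tmp-shell, having no priorityClassName, should get priorityClassName: default-priority plus priority: 100 injected, since default-priority is the globalDefault.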
So, the questions are:

Why is the tmp-shell pod not using the priority class default-priority, even though it was created after that priority class, which has globalDefault set to true?
Why does the kube-state-metrics pod not have the priority field populated with the value resolved from the priority class cluster-health-priority in its podSpec? (Look at the .yaml above.)

The only way I can reproduce this is by disabling the Priority admission controller, by adding the argument --disable-admission-plugins=Priority to the kube-apiserver definition under /etc/kubernetes/manifests/kube-apiserver.yaml on the host running the API server.
According to the documentation, in v1.14 this plugin is enabled by default. Please make sure that it is enabled in your cluster as well.
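On a kubeadm-style setup you can check this directly in the static pod manifest (example check; the exact flags and plugin lists will differ per cluster):

root@k8s-master:/# grep admission-plugins /etc/kubernetes/manifests/kube-apiserver.yaml
    - --enable-admission-plugins=NodeRestriction

Priority is part of the default plugin set in v1.14, so it does not have to be listed in --enable-admission-plugins; it is only inactive if it shows up in --disable-admission-plugins.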