I have created a custom scheduler as a static pod, using the manifest below:
root@kmaster:~# cat /etc/kubernetes/manifests/my-scheduler.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-scheduler
    tier: control-plane
  name: my-scheduler
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-scheduler
    - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf
    - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf
    - --bind-address=127.0.0.1
    - --kubeconfig=/etc/kubernetes/scheduler.conf
    - --leader-elect=false
    - --port=10261
    - --secure-port=10269
    env:
    - name: no_proxy
      value: ',10.74.46.2,10.74.46.3,10.74.46.4'
    - name: NO_PROXY
      value: ',10.74.46.2,10.74.46.3,10.74.46.4'
    - name: HTTPS_PROXY
      value: http://127.0.0.1:3129
    - name: HTTP_PROXY
      value: http://127.0.0.1:3129
    image: k8s.gcr.io/kube-scheduler:v1.22.0
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10269
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    name: my-scheduler
    resources:
      requests:
        cpu: 100m
    startupProbe:
      failureThreshold: 24
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10269
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /etc/kubernetes/scheduler.conf
      name: kubeconfig
      readOnly: true
  hostNetwork: true
  priorityClassName: system-node-critical
  securityContext:
    seccompProfile:
      type: RuntimeDefault
  volumes:
  - hostPath:
      path: /etc/kubernetes/scheduler.conf
      type: FileOrCreate
    name: kubeconfig
status: {}
root@kmaster:~#
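As a quick sanity check of the scheduler process itself, the same /healthz endpoint that the livenessProbe above targets can be queried manually from the control-plane node (this is only a suggested check, using the --secure-port=10269 value from the manifest):

curl -k https://127.0.0.1:10269/healthz

A healthy scheduler answers with ok; anything else points to a problem with the scheduler itself rather than with the pod being scheduled.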
My custom scheduler pod is running successfully:
root@kmaster:~# kubectl get pods -n kube-system
NAME                              READY   STATUS    RESTARTS         AGE
coredns-78fcd69978-6dcft          1/1     Running   10 (2d19h ago)   47d
coredns-78fcd69978-8224d          1/1     Running   10 (2d19h ago)   47d
etcd-kmaster                      1/1     Running   4 (21h ago)      15d
kube-apiserver-kmaster            1/1     Running   68 (2d19h ago)   47d
kube-controller-manager-kmaster   1/1     Running   33 (21h ago)     47d
kube-flannel-ds-nvpgz             1/1     Running   11 (2d19h ago)   47d
kube-flannel-ds-xnvvw             1/1     Running   11 (2d19h ago)   47d
kube-flannel-ds-ztgql             1/1     Running   4 (2d19h ago)    47d
kube-proxy-h2t7s                  1/1     Running   4 (2d19h ago)    47d
kube-proxy-pq9t4                  1/1     Running   10 (2d19h ago)   47d
kube-proxy-vgcw7                  1/1     Running   8 (2d19h ago)    47d
kube-scheduler-kmaster            1/1     Running   0                21h
my-scheduler-kmaster              1/1     Running   0                7h17m
Then I created a pod that uses that scheduler:
root@kmaster:~# cat my_pod.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: webapp-color
  name: webapp-color
spec:
  containers:
  - image: 10.74.46.13:5000/webapp-color:v1
    name: webapp-color
  schedulerName: my-scheduler
root@kmaster:~#
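It is also worth confirming that the pod really carries the custom scheduler name (a suggested check, not part of the original session):

kubectl get pod webapp-color -o jsonpath='{.spec.schedulerName}'

This should print my-scheduler; if it prints default-scheduler, the spec above was not the one actually applied.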
But the pod stays in the Pending state:
root@kmaster:~# kubectl get pod
NAME           READY   STATUS    RESTARTS   AGE
webapp-color   0/1     Pending   0          4h12m
root@kmaster:~#
Describing the pod gives:
root@kmaster:~# kubectl describe pod webapp-color
Name:         webapp-color
Namespace:    default
Priority:     0
Node:         <none>
Labels:       run=webapp-color
Annotations:  <none>
Status:       Pending
IP:
IPs:          <none>
Containers:
  webapp-color:
    Image:        10.74.46.13:5000/webapp-color:v1
    Port:         <none>
    Host Port:    <none>
    Environment:  <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-mkh4z (ro)
Volumes:
  kube-api-access-mkh4z:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:                      <none>
root@kmaster:~#
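Because Events shows <none>, it is also worth querying the event list for this pod directly (a suggested check; the field selector uses kubectl's standard event fields):

kubectl get events -n default --field-selector involvedObject.name=webapp-color

If the custom scheduler were trying and failing to place the pod, FailedScheduling events would normally appear here; an empty list suggests no scheduler has picked the pod up at all.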
Kubernetes version:
root@kmaster:~# kubectl version --short
Client Version: v1.22.0
Server Version: v1.22.0
root@kmaster:~#
Please help me find out why the pod is stuck in Pending; I am not getting any clue. Pending suggests the custom scheduler is not scheduling it, but I cannot find a reason.
Regards
The job of the scheduler is to decide which node a pod will run on. Until a scheduler makes that decision, the pod stays in the Pending state, so it looks like your custom scheduler is not doing its job.
Since the pod's spec sets schedulerName: my-scheduler, the default scheduler will ignore it; only your custom scheduler can bind it. Start by looking at the logs of the custom scheduler pod:
kubectl logs my-scheduler-kmaster -n kube-system
to see whether it is receiving the pod and making any scheduling decisions.
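If the logs do not make the failure obvious, one way to isolate the problem (a suggested extra step, not part of the original answer) is to run the same image without schedulerName, so the default scheduler handles it; if that pod starts immediately, the cluster and image are fine and the issue is confined to my-scheduler:

kubectl run webapp-color-default --image=10.74.46.13:5000/webapp-color:v1

(The pod name webapp-color-default here is just an illustrative placeholder.)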