For some reason, the Pod [itom-itsma-backup-deployment-989867dc5-9w84b] is stuck in Terminating status. I have a Java client that monitors the Pod status using the io.fabric8.kubernetes-client library, with code like the following:
List<Pod> pods = client.pods().inNamespace(namespace).list().getItems();
But it returns a response like the one below:
status.phase is "Running" & status.containerStatuses.ready is "false"
I think the phase is not correct — it should be "Terminating". I get the same result if I run "kubectl get pod itom-itsma-backup-deployment-989867dc5-9w84b -n itsma1 -o yaml":
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"creationTimestamp": "2018-08-20T07:58:16Z",
"deletionGracePeriodSeconds": 30,
"deletionTimestamp": "2018-08-27T04:00:22Z",
"finalizers": [],
"generateName": "itom-itsma-backup-deployment-989867dc5-",
"labels": {
"app": "itom-itsma-backup",
"itsmaRelease": "2018.05",
"itsmaService": "itom-itsma-backup",
"name": "itom-itsma-backup-pod",
"pod-template-hash": "545423871"
},
"name": "itom-itsma-backup-deployment-989867dc5-9w84b",
"namespace": "itsma1",
"ownerReferences": [
{
"apiVersion": "extensions/v1beta1",
"kind": "ReplicaSet",
"blockOwnerDeletion": true,
"controller": true,
"name": "itom-itsma-backup-deployment-989867dc5",
"uid": "cba0ffdf-a44e-11e8-8740-005056b02030"
}
],
"resourceVersion": "1056727",
"selfLink": "/api/v1/namespaces/itsma1/pods/itom-itsma-backup-deployment-989867dc5-9w84b",
"uid": "cba3627a-a44e-11e8-8740-005056b02030"
},
"spec": {
"containers": [
{
"args": [],
"command": [],
"env": [
{
"name": "SUITE_TIME_ZONE",
"value": "Asia/Shanghai"
}
],
"envFrom": [],
"image": "shc-harbor-dev.hpeswlab.net/itsma/itom-itsma-backup:0.0.35",
"imagePullPolicy": "IfNotPresent",
"livenessProbe": {
"failureThreshold": 6,
"httpGet": {
"httpHeaders": [],
"path": "/health",
"port": 8081,
"scheme": "HTTP"
},
"initialDelaySeconds": 180,
"periodSeconds": 30,
"successThreshold": 1,
"timeoutSeconds": 10
},
"name": "itom-itsma-backup-pod",
"ports": [
{
"containerPort": 8081,
"name": "backend-port",
"protocol": "TCP"
}
],
"readinessProbe": {
"exec": {
"command": [
"/bin/sh",
"/app/healthz.sh"
]
},
"failureThreshold": 3,
"initialDelaySeconds": 30,
"periodSeconds": 10,
"successThreshold": 2,
"timeoutSeconds": 10
},
"resources": {
"limits": {
"cpu": "2",
"memory": "2Gi"
},
"requests": {
"cpu": "300m",
"memory": "512Mi"
}
},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/app/backup-service/global-volume",
"name": "backup-global-volume"
},
{
"mountPath": "/app/backup-service/db-volume",
"name": "backup-db-volume"
},
{
"mountPath": "/app/backup-service/smartanalytics-volume",
"name": "backup-smartanalytics-volume"
},
{
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
"name": "default-token-4w6dp",
"readOnly": true
}
]
}
],
"dnsPolicy": "ClusterFirst",
"hostAliases": [],
"imagePullSecrets": [
{
"name": "registrypullsecret"
}
],
"initContainers": [],
"nodeName": "shc-sma-cd77.hpeswlab.net",
"nodeSelector": {
"Worker": "label"
},
"restartPolicy": "Always",
"schedulerName": "default-scheduler",
"securityContext": {
"supplementalGroups": []
},
"serviceAccount": "default",
"serviceAccountName": "default",
"terminationGracePeriodSeconds": 30,
"tolerations": [
{
"effect": "NoExecute",
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"tolerationSeconds": 300
},
{
"effect": "NoExecute",
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"tolerationSeconds": 300
}
],
"volumes": [
{
"name": "backup-global-volume",
"persistentVolumeClaim": {
"claimName": "global-volume"
}
},
{
"name": "backup-db-volume",
"persistentVolumeClaim": {
"claimName": "db-volume"
}
},
{
"name": "backup-smartanalytics-volume",
"persistentVolumeClaim": {
"claimName": "smartanalytics-volume"
}
},
{
"name": "default-token-4w6dp",
"secret": {
"defaultMode": 420,
"items": [],
"secretName": "default-token-4w6dp"
}
}
]
},
"status": {
"conditions": [
{
"lastTransitionTime": "2018-08-20T07:58:16Z",
"status": "True",
"type": "Initialized"
},
{
"lastTransitionTime": "2018-08-27T03:58:47Z",
"message": "containers with unready status: [itom-itsma-backup-pod]",
"reason": "ContainersNotReady",
"status": "False",
"type": "Ready"
},
{
"lastTransitionTime": "2018-08-20T07:58:16Z",
"status": "True",
"type": "PodScheduled"
}
],
"containerStatuses": [
{
"containerID": "docker://26108b9c62bd0833b68f3ea94a17736e1b237cc6742b4b80b42808474e990a9b",
"image": "shc-harbor-dev.hpeswlab.net/itsma/itom-itsma-backup:0.0.35",
"imageID": "docker-pullable://shc-harbor-dev.hpeswlab.net/itsma/itom-itsma-backup@sha256:34045745e7968730bf760264cb395ca9976154b686cc1c3128a35c44a40bd2d6",
"lastState": {},
"name": "itom-itsma-backup-pod",
"ready": false,
"restartCount": 0,
"state": {
"running": {
"startedAt": "2018-08-20T07:59:24Z"
}
}
}
],
"hostIP": "15.119.88.77",
"initContainerStatuses": [],
"phase": "Running",
"podIP": "172.16.79.4",
"qosClass": "Burstable",
"startTime": "2018-08-20T07:58:16Z"
}
}
However, if I use "kubectl describe pod itom-itsma-backup-deployment-989867dc5-9w84b -n itsma1", the status is shown as "Terminating":
Name: itom-itsma-backup-deployment-989867dc5-9w84b
Namespace: itsma1
Node: shc-sma-cd77.hpeswlab.net/15.119.88.77
Start Time: Mon, 20 Aug 2018 15:58:16 +0800
Labels: app=itom-itsma-backup
itsmaRelease=2018.05
itsmaService=itom-itsma-backup
name=itom-itsma-backup-pod
pod-template-hash=545423871
Annotations: <none>
Status: Terminating (expires Mon, 27 Aug 2018 12:00:22 +0800)
Termination Grace Period: 30s
IP: 172.16.79.4
Controlled By: ReplicaSet/itom-itsma-backup-deployment-989867dc5
Containers:
itom-itsma-backup-pod:
Container ID: docker://26108b9c62bd0833b68f3ea94a17736e1b237cc6742b4b80b42808474e990a9b
Image: shc-harbor-dev.hpeswlab.net/itsma/itom-itsma-backup:0.0.35
Image ID: docker-pullable://shc-harbor-dev.hpeswlab.net/itsma/itom-itsma-backup@sha256:34045745e7968730bf760264cb395ca9976154b686cc1c3128a35c44a40bd2d6
Port: 8081/TCP
State: Running
Started: Mon, 20 Aug 2018 15:59:24 +0800
Ready: False
Restart Count: 0
Limits:
cpu: 2
memory: 2Gi
Requests:
cpu: 300m
memory: 512Mi
Liveness: http-get http://:8081/health delay=180s timeout=10s period=30s #success=1 #failure=6
Readiness: exec [/bin/sh /app/healthz.sh] delay=30s timeout=10s period=10s #success=2 #failure=3
Environment:
SUITE_TIME_ZONE: Asia/Shanghai
Mounts:
/app/backup-service/db-volume from backup-db-volume (rw)
/app/backup-service/global-volume from backup-global-volume (rw)
/app/backup-service/smartanalytics-volume from backup-smartanalytics-volume (rw)
/var/run/secrets/kubernetes.io/serviceaccount from default-token-4w6dp (ro)
Conditions:
Type Status
Initialized True
Ready False
PodScheduled True
Volumes:
backup-global-volume:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: global-volume
ReadOnly: false
backup-db-volume:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: db-volume
ReadOnly: false
backup-smartanalytics-volume:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: smartanalytics-volume
ReadOnly: false
default-token-4w6dp:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-4w6dp
Optional: false
QoS Class: Burstable
Node-Selectors: Worker=label
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Killing 1m (x51 over 1h) kubelet, shc-sma-cd77.hpeswlab.net Killing container with id docker://itom-itsma-backup-pod:Need to kill Pod
How can I get the "Terminating" status programmatically? Is there a method in io.fabric8.kubernetes-client that corresponds to "kubectl describe pod [pod name]"?