KubeDNS "x509: failed to load system roots and no roots provided" but curl works

1/24/2017

I have run into trouble with the latest version of Kubernetes (1.5.1). My setup is somewhat unusual: 5 Red Hat Enterprise Linux servers, 3 of them nodes and 2 of them masters. The two masters form an etcd cluster, and flannel has also been set up on bare metal. I see this log looping in the kube-dns container:
Failed to list *api.Endpoints: Get https://*.*.*.33:443/api/v1/endpoints?resourceVersion=0: x509: failed to load system roots and no roots provided

I have run a large number of tests on the certificates. curl works perfectly with the same credentials. The certificates were generated following the official Kubernetes recommendations.
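
For reference, the "x509: failed to load system roots and no roots provided" string comes from Go's certificate verification: it is raised when a TLS client has neither a usable system certificate pool nor an explicitly provided set of roots. The sketch below reproduces what my working curl test does, i.e. it loads the cluster CA explicitly before dialing (the address and CA path are the ones from my files below, adjust as needed):

// tlscheck.go: a minimal sketch, assuming the CA at /etc/kubernetes/pki/ca.pem
// and the apiserver at X.X.X.33:443, both taken from my setup.
package main

import (
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "io/ioutil"
    "log"
)

func main() {
    // Load the cluster CA explicitly, as curl does with --cacert.
    caPEM, err := ioutil.ReadFile("/etc/kubernetes/pki/ca.pem")
    if err != nil {
        log.Fatal(err)
    }
    pool := x509.NewCertPool()
    if !pool.AppendCertsFromPEM(caPEM) {
        log.Fatal("no certificate parsed from ca.pem")
    }

    // With RootCAs set, Go never consults the system roots. With it nil,
    // Go falls back to the system pool, and if that pool cannot be loaded
    // you get "x509: failed to load system roots and no roots provided".
    conn, err := tls.Dial("tcp", "X.X.X.33:443", &tls.Config{RootCAs: pool})
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()
    fmt.Println("TLS handshake OK, server cert subject:",
        conn.ConnectionState().PeerCertificates[0].Subject)
}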

Here are my various configuration files (with the IPs and hostnames censored where needed).

kube-apiserver.yml

{
  "kind": "Pod",
  "apiVersion": "v1",
  "metadata": {
    "name": "kube-apiserver",
    "namespace": "kube-system",
    "labels": {
      "component": "kube-apiserver",
      "tier": "control-plane"
    }
  },
  "spec": {
    "volumes": [
      {
        "name": "certs",
        "hostPath": {
          "path": "/etc/ssl/certs"
        }
      },
      {
        "name": "pki",
        "hostPath": {
          "path": "/etc/kubernetes"
        }
      }
    ],
    "containers": [
      {
        "name": "kube-apiserver",
        "image": "gcr.io/google_containers/kube-apiserver-amd64:v1.5.1",
        "command": [
          "/usr/local/bin/kube-apiserver",
          "--v=0",
          "--insecure-bind-address=127.0.0.1",
          "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
          "--service-cluster-ip-range=100.64.0.0/12",
          "--service-account-key-file=/etc/kubernetes/pki/apiserver-key.pem",
          "--client-ca-file=/etc/kubernetes/pki/ca.pem",
          "--tls-cert-file=/etc/kubernetes/pki/apiserver.pem",
          "--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem",
          "--secure-port=5443",
          "--allow-privileged",
          "--advertise-address=X.X.X.33",
          "--etcd-servers=http://X.X.X.33:2379,http://X.X.X.37:2379",
          "--kubelet-preferred-address-types=InternalIP,Hostname,ExternalIP"
        ],
        "resources": {
          "requests": {
            "cpu": "250m"
          }
        },
        "volumeMounts": [
          {
            "name": "certs",
            "mountPath": "/etc/ssl/certs"
          },
          {
            "name": "pki",
            "readOnly": true,
            "mountPath": "/etc/kubernetes/"
          }
        ],
        "livenessProbe": {
          "httpGet": {
            "path": "/healthz",
            "port": 8080,
            "host": "127.0.0.1"
          },
          "initialDelaySeconds": 15,
          "timeoutSeconds": 15
        }
      }
    ],
    "hostNetwork": true
  }
}

kube-controller-manager.yml

{
  "kind": "Pod",
  "apiVersion": "v1",
  "metadata": {
    "name": "kube-controller-manager",
    "namespace": "kube-system",
    "labels": {
      "component": "kube-controller-manager",
      "tier": "control-plane"
    }
  },
  "spec": {
    "volumes": [
      {
        "name": "pki",
        "hostPath": {
          "path": "/etc/kubernetes"
        }
      }
    ],
    "containers": [
      {
        "name": "kube-controller-manager",
        "image": "gcr.io/google_containers/kube-controller-manager-amd64:v1.5.1",
        "command": [
          "/usr/local/bin/kube-controller-manager",
          "--v=0",
          "--address=127.0.0.1",
          "--leader-elect=true",
          "--master=https://X.X.X.33",
          "--cluster-name= kubernetes",
          "--kubeconfig=/etc/kubernetes/kubeadminconfig",
          "--root-ca-file=/etc/kubernetes/pki/ca.pem",
          "--service-account-private-key-file=/etc/kubernetes/pki/apiserver-key.pem",
          "--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem",
          "--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem"
        ],
        "resources": {
          "requests": {
            "cpu": "200m"
          }
        },
        "volumeMounts": [
          {
            "name": "pki",
            "readOnly": true,
            "mountPath": "/etc/kubernetes/"
          }
        ],
        "livenessProbe": {
          "httpGet": {
            "path": "/healthz",
            "port": 10252,
            "host": "127.0.0.1"
          },
          "initialDelaySeconds": 15,
          "timeoutSeconds": 15
        }
      }
    ],
    "hostNetwork": true
  }
}

kube-scheduler.yml

{
  "kind": "Pod",
  "apiVersion": "v1",
  "metadata": {
    "name": "kube-scheduler",
    "namespace": "kube-system",
    "labels": {
      "component": "kube-scheduler",
      "tier": "control-plane"
    }
  },
  "spec": {
"volumes": [
      {
        "name": "pki",
        "hostPath": {
          "path": "/etc/kubernetes"
        }
      }
    ],
    "containers": [
      {
        "name": "kube-scheduler",
        "image": "gcr.io/google_containers/kube-scheduler-amd64:v1.5.1",
        "command": [
          "/usr/local/bin/kube-scheduler",
          "--v=0",
          "--address=127.0.0.1",
          "--leader-elect=true",
      "--kubeconfig=/etc/kubernetes/kubeadminconfig",
          "--master=https://X.X.X.33"
        ],
        "resources": {
          "requests": {
            "cpu": "100m"
          }
        },
       "volumeMounts": [
          {
            "name": "pki",
            "readOnly": true,
            "mountPath": "/etc/kubernetes/"
          }
        ],
        "livenessProbe": {
          "httpGet": {
            "path": "/healthz",
            "port": 10251,
            "host": "127.0.0.1"
          },
          "initialDelaySeconds": 15,
          "timeoutSeconds": 15
        }
      }
    ],
    "hostNetwork": true
  }
}

haproxy.yml

{
  "kind": "Pod",
  "apiVersion": "v1",
  "metadata": {
    "name": "haproxy",
    "namespace": "kube-system",
    "labels": {
      "component": "kube-apiserver",
      "tier": "control-plane"
    }
  },
  "spec": {
    "volumes": [
      {
        "name": "vol",
        "hostPath": {
          "path": "/etc/haproxy/haproxy.cfg"
        }
      }
    ],
    "containers": [
      {
        "name": "haproxy",
        "image": "docker.io/haproxy:1.7",
        "resources": {
          "requests": {
            "cpu": "250m"
          }
        },
        "volumeMounts": [
          {
            "name": "vol",
            "readOnly": true,
            "mountPath": "/usr/local/etc/haproxy/haproxy.cfg"
          }
        ]
      }
    ],
    "hostNetwork": true
  }
}

kubelet.service

[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service 
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet 
EnvironmentFile=/etc/kubernetes/kubelet
ExecStart=/usr/bin/kubelet \
        $KUBELET_ADDRESS \
        $KUBELET_POD_INFRA_CONTAINER \
        $KUBELET_ARGS \
        $KUBE_LOGTOSTDERR \
        $KUBE_ALLOW_PRIV \
        $KUBELET_NETWORK_ARGS \
        $KUBELET_DNS_ARGS
Restart=on-failure

[Install]
WantedBy=multi-user.target

kubelet

KUBELET_ADDRESS="--address=0.0.0.0 --port=10250"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS="--kubeconfig=/etc/kubernetes/kubeadminconfig --require-kubeconfig=true --pod-manifest-path=/etc/kubernetes/manifests"
KUBE_LOGTOSTDERR="--logtostderr=true --v=9"
KUBE_ALLOW_PRIV="--allow-privileged=true"
KUBELET_DNS_ARGS="--cluster-dns=100.64.0.10 --cluster-domain=cluster.local"
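
One consistency check worth writing down: the --cluster-dns address here must fall inside the --service-cluster-ip-range given to the apiserver. A trivial Go snippet confirming this for the values above:

package main

import (
    "fmt"
    "net"
)

func main() {
    // --service-cluster-ip-range from kube-apiserver.yml
    _, svcRange, _ := net.ParseCIDR("100.64.0.0/12")
    // --cluster-dns from the kubelet environment file
    dnsIP := net.ParseIP("100.64.0.10")
    fmt.Println(svcRange.Contains(dnsIP)) // prints true: the DNS IP is inside the range
}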

kubeadminconfig

apiVersion: v1
clusters:
- cluster:
    certificate-authority: /etc/kubernetes/pki/ca.pem
    server: https://X.X.X.33
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: admin
  name: admin@kubernetes
- context:
    cluster: kubernetes
    user: kubelet
  name: kubelet@kubernetes
current-context: admin@kubernetes
kind: Config
users:
- name: admin
  user:
    client-certificate: /etc/kubernetes/pki/admin.pem
    client-key: /etc/kubernetes/pki/admin-key.pem
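
Note that kube-dns itself does not read this kubeconfig: running as a pod, it normally authenticates through its service account and trusts the CA that Kubernetes mounts at /var/run/secrets/kubernetes.io/serviceaccount/ca.crt. If that file is missing or unparseable, the Go client falls back to the system roots of the container image, and when those cannot be loaded either, the result is exactly the error above. A small sketch of that check (the path is the default in-cluster mount point):

// cacheck.go: sketch of the check, assuming the default service-account
// mount path used by Kubernetes.
package main

import (
    "crypto/x509"
    "fmt"
    "io/ioutil"
    "log"
)

func main() {
    const caPath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
    caPEM, err := ioutil.ReadFile(caPath)
    if err != nil {
        // A missing file means the service-account volume was never
        // injected, leaving the client with no roots at all.
        log.Fatal(err)
    }
    pool := x509.NewCertPool()
    if !pool.AppendCertsFromPEM(caPEM) {
        log.Fatal("ca.crt is present but contains no parseable certificate")
    }
    fmt.Println("in-cluster CA loaded successfully")
}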

I have already read most of the questions on the internet that are even remotely related to this one, so I hope someone will have a hint for debugging this.

-- Gabriel Cupillard
kubernetes

0 Answers