GKE node pool stuck at 0 instances even though autoscaling is set to min 3 / max 5?

12/5/2019

I've created a cluster using Terraform with:

provider "google" {
  credentials = "${file("gcp.json")}"
  project     = "${var.gcp_project}"
  region      = "us-central1"
  zone        = "us-central1-c"
}

resource "google_container_cluster" "primary" {
  name     = "${var.k8s_cluster_name}"
  location = "us-central1-a"
  project  = "${var.gcp_project}"

  # We can't create a cluster with no node pool defined, but we want to only use
  # separately managed node pools. So we create the smallest possible default
  # node pool and immediately delete it.
  remove_default_node_pool = true
  initial_node_count = 1

  master_auth {
    username = ""
    password = ""

    client_certificate_config {
      issue_client_certificate = false
    }
  }
}

resource "google_container_node_pool" "primary_preemptible_nodes" {
  project  = "${var.gcp_project}"
  name     = "my-node-pool"
  location = "us-central1-a"
  cluster  = "${google_container_cluster.primary.name}"
  # node_count = 3

  autoscaling {
    min_node_count = 3
    max_node_count = 5
  }

  node_config {
    # preemptible  = true
    machine_type = "g1-small"

    metadata = {
      disable-legacy-endpoints = "true"
    }

    oauth_scopes = [
      "https://www.googleapis.com/auth/logging.write",
      "https://www.googleapis.com/auth/monitoring",
      "https://www.googleapis.com/auth/devstorage.read_only"
    ]
  }
}

Surprisingly, this node pool seems to be 'stuck' at 0 instances. Why? How can I diagnose this?
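
For reference, this is the kind of minimal variation I could try next (an untested sketch reusing the names from above, not something I've confirmed changes the behaviour): pinning an explicit starting size on the pool instead of relying on the autoscaler to bring it up from zero.

resource "google_container_node_pool" "primary_preemptible_nodes" {
  project  = "${var.gcp_project}"
  name     = "my-node-pool"
  location = "us-central1-a"
  cluster  = "${google_container_cluster.primary.name}"

  # Hypothetical change: create the pool with 3 nodes up front rather than
  # leaving the initial size unset and relying on autoscaling alone.
  initial_node_count = 3

  autoscaling {
    min_node_count = 3
    max_node_count = 5
  }

  # node_config omitted for brevity; same as above.
}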


-- Chris Stryczynski
google-kubernetes-engine
kubernetes
terraform
terraform-provider-gcp

0 Answers