GPUs not added to GKE node pool

3/23/2021

I am trying to create a GKE node pool with a GPU in my cluster. When I do a make apply, the guest_accelerator block is not applied, and the node pool is created with no GPUs in it.

Am I missing something in my config below?

Thanks

resource "google_container_cluster" "default" {
  provider       = google-beta
  project        = var.project_id
  name           = var.name
  location       = "us-central1"
  node_locations = ["us-central1-a", "us-central1-c", "us-central1-f"]
  network        = var.network
  subnetwork     = var.subnetwork

  ip_allocation_policy {
    cluster_ipv4_cidr_block  = var.ip_cidr.cluster
    services_ipv4_cidr_block = var.ip_cidr.services
  }

  remove_default_node_pool = true
  initial_node_count       = 1

  master_auth {
    username = ""
    password = ""

    client_certificate_config {
      issue_client_certificate = false
    }
  }

  addons_config {
    horizontal_pod_autoscaling {
      disabled = false
    }

    http_load_balancing {
      disabled = false
    }
  }

  private_cluster_config {
    enable_private_nodes    = true
	.
	.
	.
  }

  logging_service    = "logging.googleapis.com/kubernetes"
  monitoring_service = "monitoring.googleapis.com/kubernetes"

  workload_identity_config {
    identity_namespace = "${var.project_id}.svc.id.goog"
  }
}

resource "google_container_node_pool" "default" {
  provider = google-beta

  project            = google_container_cluster.default.project
  name               = var.pool_name
  location           = google_container_cluster.default.location
  node_locations     = ["us-central1-a", "us-central1-c"]
  cluster            = google_container_cluster.default.name
  initial_node_count = 2

  autoscaling {
    min_node_count = 1
    max_node_count = 4
  }

  node_config {
    machine_type    = "n1-standard-4"
    image_type      = "COS"
    disk_size_gb    = 100
    disk_type       = "pd-standard"
    local_ssd_count = 0
    preemptible     = false
    service_account = var.node_service_account
    guest_accelerator {
      type  = "nvidia-tesla-p4"
      count = 1
    }
    metadata = {
      disable-legacy-endpoints = "true"
    }
	
    shielded_instance_config {
      enable_secure_boot          = true
      enable_integrity_monitoring = true
    }
  }

  management {
    auto_repair  = true
    auto_upgrade = true
  }
}

Below is the output when I do a terraform plan:

  + resource "google_container_node_pool" "default" {
      + cluster             = "my-cluster"
      + id                  = (known after apply)
      + initial_node_count  = 1
      + instance_group_urls = (known after apply)
      + location            = "us-central1"
      + max_pods_per_node   = (known after apply)
      + name                = "my-gpu-nodes"
      + name_prefix         = (known after apply)
      + node_count          = (known after apply)
      + node_locations      = [
          + "us-central1-a",
          + "us-central1-c",
        ]
      + operation           = (known after apply)
      + project             = "my-project"
      + version             = (known after apply)

      + autoscaling {
          + max_node_count = 4
          + min_node_count = 1
        }

      + management {
          + auto_repair  = true
          + auto_upgrade = true
        }

      + node_config {
          + disk_size_gb      = 100
          + disk_type         = "pd-standard"
          + guest_accelerator = (known after apply)
          + image_type        = "COS"
          + local_ssd_count   = 0
          + machine_type      = "n1-standard-4"
          + metadata          = {
              + "disable-legacy-endpoints" = "true"
            }
          + oauth_scopes      = [
              + "https://www.googleapis.com/auth/cloud-platform",
            ]
          + preemptible       = false
          + tags              = []
          + taint             = (known after apply)

          + shielded_instance_config {
              + enable_integrity_monitoring = true
              + enable_secure_boot          = true
            }

          + workload_metadata_config {
              + node_metadata = "GKE_METADATA_SERVER"
            }
        }

      + upgrade_settings {
          + max_surge       = (known after apply)
          + max_unavailable = (known after apply)
        }
    }

Below is the Terraform debug output when it applies the config:

---[ REQUEST ]---------------------------------------
POST /v1beta1/projects/verily-surgical-cloud-dev/locations/us-central1/clusters/verily-surgical/nodePools?alt=json&prettyPrint=false HTTP/1.1
Host: container.googleapis.com
User-Agent: google-api-go-client/0.5 Terraform/0.13.3 (+https://www.terraform.io) Terraform-Plugin-SDK/2.4.4 terraform-provider-google-beta/dev
Content-Length: 703
Content-Type: application/json
X-Goog-Api-Client: gl-go/1.14.5 gdcl/20210308
Accept-Encoding: gzip

{
 "nodePool": {
  "autoscaling": {
   "enabled": true,
   "maxNodeCount": 4,
   "minNodeCount": 1
  },
  "config": {
   "diskSizeGb": 100,
   "diskType": "pd-standard",
   "imageType": "COS",
   "machineType": "n1-standard-4",
   "metadata": {
    "disable-legacy-endpoints": "true"
   },
   "shieldedInstanceConfig": {
    "enableIntegrityMonitoring": true,
    "enableSecureBoot": true
   },
   "workloadMetadataConfig": {
    "nodeMetadata": "GKE_METADATA_SERVER"
   }
  },
  "initialNodeCount": 2,
  "locations": [
   "us-central1-a",
   "us-central1-c"
  ],
  "management": {
   "autoRepair": true,
   "autoUpgrade": true
  },
  "name": "my-gpu-nodes"
 }
}
-- RandomQuests
google-kubernetes-engine
kubernetes
terraform

1 Answer

3/29/2021

Sometimes a particular GPU is not available in a particular location. Even if you try from the console, it shows "GPUs not available - choose another location". Try configuring the same thing from the console once before applying it through Terraform; it will give better context.
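As a rough check, something like the following gcloud listing should show which accelerator types each zone actually offers; the project and zones below are just the ones taken from your plan output and node pool config:

# List the GPU types offered in the zones the node pool uses.
# If nvidia-tesla-p4 does not appear for us-central1-a or us-central1-c,
# point node_locations at zones that do offer it.
gcloud compute accelerator-types list \
  --project my-project \
  --filter="zone:( us-central1-a us-central1-c )"

If the GPU type does show up for those zones, availability is likely not the problem and the node pool config itself is worth a second look.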

-- ragavi_ceg
Source: StackOverflow