From 05bc4f6c95e053a35eb829eaea776852f621ddc0 Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Thu, 14 Nov 2024 17:25:09 +0000 Subject: [PATCH 01/36] checkpoint --- scaletest/terraform/infra/gcp_cluster.tf | 1 + scaletest/terraform/infra/main.tf | 2 +- scaletest/terraform/k8s/cert-manager.tf | 78 +++++++++------ scaletest/terraform/k8s/coder.tf | 101 +++++++++++-------- scaletest/terraform/k8s/main.tf | 11 ++- scaletest/terraform/k8s/otel.tf | 121 +++++++++++++++-------- scaletest/terraform/k8s/prometheus.tf | 64 +++++++----- 7 files changed, 242 insertions(+), 136 deletions(-) diff --git a/scaletest/terraform/infra/gcp_cluster.tf b/scaletest/terraform/infra/gcp_cluster.tf index c37132c38071b..5032f2e6b93fa 100644 --- a/scaletest/terraform/infra/gcp_cluster.tf +++ b/scaletest/terraform/infra/gcp_cluster.tf @@ -1,5 +1,6 @@ data "google_compute_default_service_account" "default" { project = var.project_id + depends_on = [ google_project_service.api["compute.googleapis.com"] ] } locals { diff --git a/scaletest/terraform/infra/main.tf b/scaletest/terraform/infra/main.tf index 1724692b19f3a..0c07534b1ebd2 100644 --- a/scaletest/terraform/infra/main.tf +++ b/scaletest/terraform/infra/main.tf @@ -11,7 +11,7 @@ terraform { } } - required_version = "~> 1.5.0" + required_version = "~> 1.9.0" } provider "google" { diff --git a/scaletest/terraform/k8s/cert-manager.tf b/scaletest/terraform/k8s/cert-manager.tf index cfcb324b3ea0b..f0d5f099241a9 100644 --- a/scaletest/terraform/k8s/cert-manager.tf +++ b/scaletest/terraform/k8s/cert-manager.tf @@ -36,32 +36,54 @@ EOF ] } -resource "kubernetes_manifest" "cloudflare-cluster-issuer" { - manifest = { - apiVersion = "cert-manager.io/v1" - kind = "ClusterIssuer" - metadata = { - name = "cloudflare-issuer" - } - spec = { - acme = { - email = var.cloudflare_email - privateKeySecretRef = { - name = local.cloudflare_issuer_private_key_secret_name - } - solvers = [ - { - dns01 = { - cloudflare = { - apiTokenSecretRef = { - name = kubernetes_secret.cloudflare-api-key.metadata.0.name - key = "api-token" - } - } - } - } - ] - } - } - } +# resource "kubernetes_manifest" "cloudflare-cluster-issuer" { +# manifest = { +# apiVersion = "cert-manager.io/v1" +# kind = "ClusterIssuer" +# metadata = { +# name = "cloudflare-issuer" +# } +# spec = { +# acme = { +# email = var.cloudflare_email +# privateKeySecretRef = { +# name = local.cloudflare_issuer_private_key_secret_name +# } +# solvers = [ +# { +# dns01 = { +# cloudflare = { +# apiTokenSecretRef = { +# name = kubernetes_secret.cloudflare-api-key.metadata.0.name +# key = "api-token" +# } +# } +# } +# } +# ] +# } +# } +# } +# } + +resource "kubectl_manifest" "cloudflare-cluster-issuer" { + depends_on = [ helm_release.cert-manager ] + yaml_body = < Date: Thu, 14 Nov 2024 20:17:48 +0000 Subject: [PATCH 02/36] baseline with iter --- scaletest/terraform/infra/gcp_cluster.tf | 140 +++++------------------ scaletest/terraform/infra/gcp_db.tf | 17 +-- scaletest/terraform/infra/gcp_vpc.tf | 24 ++-- scaletest/terraform/infra/main.tf | 2 - scaletest/terraform/infra/outputs.tf | 124 ++++++++++---------- scaletest/terraform/infra/vars.tf | 29 +++-- scaletest/terraform/k8s/prometheus.tf | 4 +- 7 files changed, 130 insertions(+), 210 deletions(-) diff --git a/scaletest/terraform/infra/gcp_cluster.tf b/scaletest/terraform/infra/gcp_cluster.tf index 5032f2e6b93fa..ca3d20fdf925f 100644 --- a/scaletest/terraform/infra/gcp_cluster.tf +++ b/scaletest/terraform/infra/gcp_cluster.tf @@ -4,17 +4,35 @@ data 
"google_compute_default_service_account" "default" { } locals { - abs_module_path = abspath(path.module) - rel_kubeconfig_path = "../../.coderv2/${var.name}-cluster.kubeconfig" - cluster_kubeconfig_path = abspath("${local.abs_module_path}/${local.rel_kubeconfig_path}") + node_pools = flatten([ for i, deployment in var.deployments : [ + { + name = "${var.name}-${deployment.name}-coder" + zone = deployment.zone + size = deployment.coder_node_pool_size + cluster_i = i + }, + { + name = "${var.name}-${deployment.name}-workspaces" + zone = deployment.zone + size = deployment.workspaces_node_pool_size + cluster_i = i + }, + { + name = "${var.name}-${deployment.name}-misc" + zone = deployment.zone + size = deployment.misc_node_pool_size + cluster_i = i + } + ] ]) } -resource "google_container_cluster" "primary" { - name = var.name - location = var.zone +resource "google_container_cluster" "cluster" { + count = length(var.deployments) + name = "${var.name}-${var.deployments[count.index].name}" + location = var.deployments[count.index].zone project = var.project_id network = google_compute_network.vpc.name - subnetwork = google_compute_subnetwork.subnet.name + subnetwork = google_compute_subnetwork.subnet[count.index].name networking_mode = "VPC_NATIVE" default_max_pods_per_node = 256 ip_allocation_policy { # Required with networking_mode=VPC_NATIVE @@ -53,14 +71,15 @@ resource "google_container_cluster" "primary" { } } -resource "google_container_node_pool" "coder" { - name = "${var.name}-coder" - location = var.zone +resource "google_container_node_pool" "node_pool" { + count = length(local.node_pools) + name = local.node_pools[count.index].name + location = local.node_pools[count.index].zone project = var.project_id - cluster = google_container_cluster.primary.name + cluster = google_container_cluster.cluster[local.node_pools[count.index].cluster_i].name autoscaling { min_node_count = 1 - max_node_count = var.nodepool_size_coder + max_node_count = local.node_pools[count.index].size } node_config { oauth_scopes = [ @@ -88,100 +107,3 @@ resource "google_container_node_pool" "coder" { ignore_changes = [management[0].auto_repair, management[0].auto_upgrade, timeouts] } } - -resource "google_container_node_pool" "workspaces" { - name = "${var.name}-workspaces" - location = var.zone - project = var.project_id - cluster = google_container_cluster.primary.name - autoscaling { - min_node_count = 0 - total_max_node_count = var.nodepool_size_workspaces - } - management { - auto_upgrade = false - } - node_config { - oauth_scopes = [ - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/trace.append", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/service.management.readonly", - "https://www.googleapis.com/auth/servicecontrol", - ] - disk_size_gb = var.node_disk_size_gb - machine_type = var.nodepool_machine_type_workspaces - image_type = var.node_image_type - preemptible = var.node_preemptible - service_account = data.google_compute_default_service_account.default.email - tags = ["gke-node", "${var.project_id}-gke"] - labels = { - env = var.project_id - } - metadata = { - disable-legacy-endpoints = "true" - } - } - lifecycle { - ignore_changes = [management[0].auto_repair, management[0].auto_upgrade, timeouts] - } -} - -resource "google_container_node_pool" "misc" { - name = "${var.name}-misc" - location = var.zone - project = var.project_id - cluster = 
google_container_cluster.primary.name - node_count = var.state == "stopped" ? 0 : var.nodepool_size_misc - management { - auto_upgrade = false - } - node_config { - oauth_scopes = [ - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/trace.append", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/service.management.readonly", - "https://www.googleapis.com/auth/servicecontrol", - ] - disk_size_gb = var.node_disk_size_gb - machine_type = var.nodepool_machine_type_misc - image_type = var.node_image_type - preemptible = var.node_preemptible - service_account = data.google_compute_default_service_account.default.email - tags = ["gke-node", "${var.project_id}-gke"] - labels = { - env = var.project_id - } - metadata = { - disable-legacy-endpoints = "true" - } - } - lifecycle { - ignore_changes = [management[0].auto_repair, management[0].auto_upgrade, timeouts] - } -} - -resource "null_resource" "cluster_kubeconfig" { - depends_on = [google_container_cluster.primary] - triggers = { - path = local.cluster_kubeconfig_path - name = google_container_cluster.primary.name - project_id = var.project_id - zone = var.zone - } - provisioner "local-exec" { - command = < Date: Mon, 18 Nov 2024 18:12:14 +0000 Subject: [PATCH 03/36] move to new dir --- scaletest/terraform/infra/gcp_cluster.tf | 141 ++++++++++++++++++----- scaletest/terraform/infra/gcp_db.tf | 17 ++- scaletest/terraform/infra/gcp_vpc.tf | 24 ++-- scaletest/terraform/infra/main.tf | 4 +- scaletest/terraform/infra/outputs.tf | 124 ++++++++++---------- scaletest/terraform/infra/vars.tf | 29 ++--- scaletest/terraform/new/gcp_cluster.tf | 109 ++++++++++++++++++ scaletest/terraform/new/gcp_db.tf | 89 ++++++++++++++ scaletest/terraform/new/gcp_project.tf | 27 +++++ scaletest/terraform/new/gcp_vpc.tf | 41 +++++++ scaletest/terraform/new/main.tf | 18 +++ scaletest/terraform/new/vars.tf | 63 ++++++++++ 12 files changed, 556 insertions(+), 130 deletions(-) create mode 100644 scaletest/terraform/new/gcp_cluster.tf create mode 100644 scaletest/terraform/new/gcp_db.tf create mode 100644 scaletest/terraform/new/gcp_project.tf create mode 100644 scaletest/terraform/new/gcp_vpc.tf create mode 100644 scaletest/terraform/new/main.tf create mode 100644 scaletest/terraform/new/vars.tf diff --git a/scaletest/terraform/infra/gcp_cluster.tf b/scaletest/terraform/infra/gcp_cluster.tf index ca3d20fdf925f..c37132c38071b 100644 --- a/scaletest/terraform/infra/gcp_cluster.tf +++ b/scaletest/terraform/infra/gcp_cluster.tf @@ -1,38 +1,19 @@ data "google_compute_default_service_account" "default" { project = var.project_id - depends_on = [ google_project_service.api["compute.googleapis.com"] ] } locals { - node_pools = flatten([ for i, deployment in var.deployments : [ - { - name = "${var.name}-${deployment.name}-coder" - zone = deployment.zone - size = deployment.coder_node_pool_size - cluster_i = i - }, - { - name = "${var.name}-${deployment.name}-workspaces" - zone = deployment.zone - size = deployment.workspaces_node_pool_size - cluster_i = i - }, - { - name = "${var.name}-${deployment.name}-misc" - zone = deployment.zone - size = deployment.misc_node_pool_size - cluster_i = i - } - ] ]) + abs_module_path = abspath(path.module) + rel_kubeconfig_path = "../../.coderv2/${var.name}-cluster.kubeconfig" + cluster_kubeconfig_path = abspath("${local.abs_module_path}/${local.rel_kubeconfig_path}") } -resource "google_container_cluster" "cluster" { - count = 
length(var.deployments) - name = "${var.name}-${var.deployments[count.index].name}" - location = var.deployments[count.index].zone +resource "google_container_cluster" "primary" { + name = var.name + location = var.zone project = var.project_id network = google_compute_network.vpc.name - subnetwork = google_compute_subnetwork.subnet[count.index].name + subnetwork = google_compute_subnetwork.subnet.name networking_mode = "VPC_NATIVE" default_max_pods_per_node = 256 ip_allocation_policy { # Required with networking_mode=VPC_NATIVE @@ -71,15 +52,14 @@ resource "google_container_cluster" "cluster" { } } -resource "google_container_node_pool" "node_pool" { - count = length(local.node_pools) - name = local.node_pools[count.index].name - location = local.node_pools[count.index].zone +resource "google_container_node_pool" "coder" { + name = "${var.name}-coder" + location = var.zone project = var.project_id - cluster = google_container_cluster.cluster[local.node_pools[count.index].cluster_i].name + cluster = google_container_cluster.primary.name autoscaling { min_node_count = 1 - max_node_count = local.node_pools[count.index].size + max_node_count = var.nodepool_size_coder } node_config { oauth_scopes = [ @@ -107,3 +87,100 @@ resource "google_container_node_pool" "node_pool" { ignore_changes = [management[0].auto_repair, management[0].auto_upgrade, timeouts] } } + +resource "google_container_node_pool" "workspaces" { + name = "${var.name}-workspaces" + location = var.zone + project = var.project_id + cluster = google_container_cluster.primary.name + autoscaling { + min_node_count = 0 + total_max_node_count = var.nodepool_size_workspaces + } + management { + auto_upgrade = false + } + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/trace.append", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/service.management.readonly", + "https://www.googleapis.com/auth/servicecontrol", + ] + disk_size_gb = var.node_disk_size_gb + machine_type = var.nodepool_machine_type_workspaces + image_type = var.node_image_type + preemptible = var.node_preemptible + service_account = data.google_compute_default_service_account.default.email + tags = ["gke-node", "${var.project_id}-gke"] + labels = { + env = var.project_id + } + metadata = { + disable-legacy-endpoints = "true" + } + } + lifecycle { + ignore_changes = [management[0].auto_repair, management[0].auto_upgrade, timeouts] + } +} + +resource "google_container_node_pool" "misc" { + name = "${var.name}-misc" + location = var.zone + project = var.project_id + cluster = google_container_cluster.primary.name + node_count = var.state == "stopped" ? 
0 : var.nodepool_size_misc + management { + auto_upgrade = false + } + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/trace.append", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/service.management.readonly", + "https://www.googleapis.com/auth/servicecontrol", + ] + disk_size_gb = var.node_disk_size_gb + machine_type = var.nodepool_machine_type_misc + image_type = var.node_image_type + preemptible = var.node_preemptible + service_account = data.google_compute_default_service_account.default.email + tags = ["gke-node", "${var.project_id}-gke"] + labels = { + env = var.project_id + } + metadata = { + disable-legacy-endpoints = "true" + } + } + lifecycle { + ignore_changes = [management[0].auto_repair, management[0].auto_upgrade, timeouts] + } +} + +resource "null_resource" "cluster_kubeconfig" { + depends_on = [google_container_cluster.primary] + triggers = { + path = local.cluster_kubeconfig_path + name = google_container_cluster.primary.name + project_id = var.project_id + zone = var.zone + } + provisioner "local-exec" { + command = < Date: Tue, 19 Nov 2024 15:58:36 +0000 Subject: [PATCH 04/36] coderd --- scaletest/terraform/new/k8s_certmanager.tf | 57 +++++ scaletest/terraform/new/k8s_coder.tf | 280 +++++++++++++++++++++ scaletest/terraform/new/k8s_otel.tf | 62 +++++ scaletest/terraform/new/main.tf | 45 ++++ scaletest/terraform/new/vars.tf | 141 ++++++++++- 5 files changed, 577 insertions(+), 8 deletions(-) create mode 100644 scaletest/terraform/new/k8s_certmanager.tf create mode 100644 scaletest/terraform/new/k8s_coder.tf create mode 100644 scaletest/terraform/new/k8s_otel.tf diff --git a/scaletest/terraform/new/k8s_certmanager.tf b/scaletest/terraform/new/k8s_certmanager.tf new file mode 100644 index 0000000000000..c4ae7ac707025 --- /dev/null +++ b/scaletest/terraform/new/k8s_certmanager.tf @@ -0,0 +1,57 @@ +locals { + cert_manager_namespace = "cert-manager" + cert_manager_helm_repo = "https://charts.jetstack.io" + cert_manager_helm_chart = "cert-manager" + cert_manager_release_name = "cert-manager" + cert_manager_chart_version = "1.12.2" + cloudflare_issuer_private_key_secret_name = "cloudflare-issuer-private-key" +} + +resource "kubernetes_secret" "cloudflare-api-key" { + metadata { + name = "cloudflare-api-key-secret" + namespace = local.cert_manager_namespace + } + data = { + api-token = var.cloudflare_api_token + } +} + +resource "kubernetes_namespace" "cert-manager-namespace" { + metadata { + name = local.cert_manager_namespace + } +} + +resource "helm_release" "cert-manager" { + repository = local.cert_manager_helm_repo + chart = local.cert_manager_helm_chart + name = local.cert_manager_release_name + namespace = kubernetes_namespace.cert-manager-namespace.metadata.0.name + values = [< Date: Thu, 21 Nov 2024 18:07:08 +0000 Subject: [PATCH 05/36] non-dynamic providers --- scaletest/terraform/new/cf_dns.tf | 7 ++ scaletest/terraform/new/gcp_cluster.tf | 60 ++++++----- scaletest/terraform/new/gcp_db.tf | 4 +- scaletest/terraform/new/gcp_vpc.tf | 15 +-- scaletest/terraform/new/k8s_certmanager.tf | 8 ++ scaletest/terraform/new/k8s_coder.tf | 76 +++++++++++++- scaletest/terraform/new/k8s_otel.tf | 116 ++++++++++----------- scaletest/terraform/new/main.tf | 24 +++-- scaletest/terraform/new/vars.tf | 32 ++++-- 9 files changed, 226 insertions(+), 116 deletions(-) create mode 100644 scaletest/terraform/new/cf_dns.tf diff 
--git a/scaletest/terraform/new/cf_dns.tf b/scaletest/terraform/new/cf_dns.tf new file mode 100644 index 0000000000000..e28aaf22c721f --- /dev/null +++ b/scaletest/terraform/new/cf_dns.tf @@ -0,0 +1,7 @@ +resource "cloudflare_record" "coder" { + zone_id = var.cloudflare_zone_id + name = local.coder_subdomain + content = google_compute_address.coder["primary"].address + type = "A" + ttl = 3600 +} diff --git a/scaletest/terraform/new/gcp_cluster.tf b/scaletest/terraform/new/gcp_cluster.tf index ca3d20fdf925f..f23411bdd7dbe 100644 --- a/scaletest/terraform/new/gcp_cluster.tf +++ b/scaletest/terraform/new/gcp_cluster.tf @@ -4,35 +4,39 @@ data "google_compute_default_service_account" "default" { } locals { - node_pools = flatten([ for i, deployment in var.deployments : [ - { - name = "${var.name}-${deployment.name}-coder" - zone = deployment.zone - size = deployment.coder_node_pool_size - cluster_i = i - }, - { - name = "${var.name}-${deployment.name}-workspaces" - zone = deployment.zone - size = deployment.workspaces_node_pool_size - cluster_i = i - }, - { - name = "${var.name}-${deployment.name}-misc" - zone = deployment.zone - size = deployment.misc_node_pool_size - cluster_i = i + clusters = { + primary = { + region = "us-east1" + zone = "us-east1-c" + cidr = "10.200.0.0/24" } - ] ]) + } + node_pools = { + primary_coder = { + name = "coder" + cluster = "primary" + size = 1 + } + primary_workspaces = { + name = "workspaces" + cluster = "primary" + size = 1 + } + primary_misc = { + name = "misc" + cluster = "primary" + size = 1 + } + } } resource "google_container_cluster" "cluster" { - count = length(var.deployments) - name = "${var.name}-${var.deployments[count.index].name}" - location = var.deployments[count.index].zone + for_each = local.clusters + name = "${var.name}-${each.key}" + location = each.value.zone project = var.project_id network = google_compute_network.vpc.name - subnetwork = google_compute_subnetwork.subnet[count.index].name + subnetwork = google_compute_subnetwork.subnet[each.key].name networking_mode = "VPC_NATIVE" default_max_pods_per_node = 256 ip_allocation_policy { # Required with networking_mode=VPC_NATIVE @@ -72,14 +76,14 @@ resource "google_container_cluster" "cluster" { } resource "google_container_node_pool" "node_pool" { - count = length(local.node_pools) - name = local.node_pools[count.index].name - location = local.node_pools[count.index].zone + for_each = local.node_pools + name = each.value.name + location = local.clusters[each.value.cluster].zone project = var.project_id - cluster = google_container_cluster.cluster[local.node_pools[count.index].cluster_i].name + cluster = google_container_cluster.cluster[each.value.cluster].name autoscaling { min_node_count = 1 - max_node_count = local.node_pools[count.index].size + max_node_count = each.value.size } node_config { oauth_scopes = [ diff --git a/scaletest/terraform/new/gcp_db.tf b/scaletest/terraform/new/gcp_db.tf index 71d92c4a77c9f..4a394289b75bf 100644 --- a/scaletest/terraform/new/gcp_db.tf +++ b/scaletest/terraform/new/gcp_db.tf @@ -1,7 +1,7 @@ resource "google_sql_database_instance" "db" { name = "${var.name}-coder" project = var.project_id - region = var.deployments[0].region + region = local.clusters.primary.region database_version = var.cloudsql_version deletion_protection = false @@ -13,7 +13,7 @@ resource "google_sql_database_instance" "db" { availability_type = "ZONAL" location_preference { - zone = var.deployments[0].zone + zone = local.clusters.primary.zone } database_flags { diff --git 
a/scaletest/terraform/new/gcp_vpc.tf b/scaletest/terraform/new/gcp_vpc.tf index db557c5735b2d..56c2910996138 100644 --- a/scaletest/terraform/new/gcp_vpc.tf +++ b/scaletest/terraform/new/gcp_vpc.tf @@ -1,3 +1,4 @@ + resource "google_compute_network" "vpc" { project = var.project_id name = var.name @@ -8,19 +9,19 @@ resource "google_compute_network" "vpc" { } resource "google_compute_subnetwork" "subnet" { - count = length(var.deployments) - name = "${var.name}-${var.deployments[count.index].name}" + for_each = local.clusters + name = "${var.name}-${each.key}" project = var.project_id - region = var.deployments[count.index].region + region = each.value.region network = google_compute_network.vpc.name - ip_cidr_range = var.deployments[count.index].subnet_cidr + ip_cidr_range = each.value.cidr } resource "google_compute_address" "coder" { - count = length(var.deployments) + for_each = local.clusters project = var.project_id - region = var.deployments[count.index].region - name = "${var.name}-${var.deployments[count.index].name}-coder" + region = each.value.region + name = "${var.name}-${each.key}-coder" address_type = "EXTERNAL" network_tier = "PREMIUM" } diff --git a/scaletest/terraform/new/k8s_certmanager.tf b/scaletest/terraform/new/k8s_certmanager.tf index c4ae7ac707025..25cf69cf363d6 100644 --- a/scaletest/terraform/new/k8s_certmanager.tf +++ b/scaletest/terraform/new/k8s_certmanager.tf @@ -8,6 +8,8 @@ locals { } resource "kubernetes_secret" "cloudflare-api-key" { + provider = kubernetes.primary + metadata { name = "cloudflare-api-key-secret" namespace = local.cert_manager_namespace @@ -18,12 +20,16 @@ resource "kubernetes_secret" "cloudflare-api-key" { } resource "kubernetes_namespace" "cert-manager-namespace" { + provider = kubernetes.primary + metadata { name = local.cert_manager_namespace } } resource "helm_release" "cert-manager" { + provider = helm.primary + repository = local.cert_manager_helm_repo chart = local.cert_manager_helm_chart name = local.cert_manager_release_name @@ -35,6 +41,8 @@ EOF } resource "kubectl_manifest" "cloudflare-cluster-issuer" { + provider = kubectl.primary + depends_on = [ helm_release.cert-manager ] yaml_body = < Date: Mon, 2 Dec 2024 15:39:52 +0000 Subject: [PATCH 06/36] workspace proxies registered --- scaletest/terraform/new/gcp_cluster.tf | 40 ++++++++++++++++++++++++++ scaletest/terraform/new/k8s_coder.tf | 4 +++ 2 files changed, 44 insertions(+) diff --git a/scaletest/terraform/new/gcp_cluster.tf b/scaletest/terraform/new/gcp_cluster.tf index f23411bdd7dbe..ba3c8c115eaf2 100644 --- a/scaletest/terraform/new/gcp_cluster.tf +++ b/scaletest/terraform/new/gcp_cluster.tf @@ -10,6 +10,16 @@ locals { zone = "us-east1-c" cidr = "10.200.0.0/24" } + europe = { + region = "europe-west1" + zone = "europe-west1-b" + cidr = "10.201.0.0/24" + } + asia = { + region = "asia-southeast1" + zone = "asia-southeast1-a" + cidr = "10.202.0.0/24" + } } node_pools = { primary_coder = { @@ -27,6 +37,36 @@ locals { cluster = "primary" size = 1 } + europe_coder = { + name = "coder" + cluster = "europe" + size = 1 + } + europe_workspaces = { + name = "workspaces" + cluster = "europe" + size = 1 + } + europe_misc = { + name = "misc" + cluster = "europe" + size = 1 + } + asia_coder = { + name = "coder" + cluster = "asia" + size = 1 + } + asia_workspaces = { + name = "workspaces" + cluster = "asia" + size = 1 + } + asia_misc = { + name = "misc" + cluster = "asia" + size = 1 + } } } diff --git a/scaletest/terraform/new/k8s_coder.tf b/scaletest/terraform/new/k8s_coder.tf index 
0c6bdb9ec1fbf..c606988cd0a1f 100644 --- a/scaletest/terraform/new/k8s_coder.tf +++ b/scaletest/terraform/new/k8s_coder.tf @@ -32,6 +32,8 @@ resource "random_password" "provisionerd_psk" { } resource "kubernetes_secret" "coder-db" { + provider = kubernetes.primary + type = "Opaque" metadata { name = "coder-db-url" @@ -224,6 +226,8 @@ EOF } resource "helm_release" "provisionerd-chart" { + provider = helm.primary + repository = local.coder_helm_repo chart = local.provisionerd_helm_chart name = local.provisionerd_release_name From 8dc0243d6114ab83109c7bf139eaf149fc43a0c7 Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Mon, 2 Dec 2024 16:01:17 +0000 Subject: [PATCH 07/36] reset old dir --- scaletest/terraform/k8s/cert-manager.tf | 78 ++++++--------- scaletest/terraform/k8s/coder.tf | 101 ++++++++------------ scaletest/terraform/k8s/main.tf | 11 +-- scaletest/terraform/k8s/otel.tf | 121 +++++++++--------------- scaletest/terraform/k8s/prometheus.tf | 68 +++++-------- 5 files changed, 137 insertions(+), 242 deletions(-) diff --git a/scaletest/terraform/k8s/cert-manager.tf b/scaletest/terraform/k8s/cert-manager.tf index f0d5f099241a9..cfcb324b3ea0b 100644 --- a/scaletest/terraform/k8s/cert-manager.tf +++ b/scaletest/terraform/k8s/cert-manager.tf @@ -36,54 +36,32 @@ EOF ] } -# resource "kubernetes_manifest" "cloudflare-cluster-issuer" { -# manifest = { -# apiVersion = "cert-manager.io/v1" -# kind = "ClusterIssuer" -# metadata = { -# name = "cloudflare-issuer" -# } -# spec = { -# acme = { -# email = var.cloudflare_email -# privateKeySecretRef = { -# name = local.cloudflare_issuer_private_key_secret_name -# } -# solvers = [ -# { -# dns01 = { -# cloudflare = { -# apiTokenSecretRef = { -# name = kubernetes_secret.cloudflare-api-key.metadata.0.name -# key = "api-token" -# } -# } -# } -# } -# ] -# } -# } -# } -# } - -resource "kubectl_manifest" "cloudflare-cluster-issuer" { - depends_on = [ helm_release.cert-manager ] - yaml_body = < Date: Tue, 3 Dec 2024 23:55:34 +0000 Subject: [PATCH 08/36] eu --- scaletest/terraform/new/k8s_certmanager.tf | 117 ++++---- scaletest/terraform/new/k8s_coder.tf | 324 ++++++++++++++++++--- scaletest/terraform/new/main.tf | 24 ++ 3 files changed, 374 insertions(+), 91 deletions(-) diff --git a/scaletest/terraform/new/k8s_certmanager.tf b/scaletest/terraform/new/k8s_certmanager.tf index 25cf69cf363d6..730dd6f99d7b9 100644 --- a/scaletest/terraform/new/k8s_certmanager.tf +++ b/scaletest/terraform/new/k8s_certmanager.tf @@ -1,65 +1,68 @@ -locals { - cert_manager_namespace = "cert-manager" - cert_manager_helm_repo = "https://charts.jetstack.io" - cert_manager_helm_chart = "cert-manager" - cert_manager_release_name = "cert-manager" - cert_manager_chart_version = "1.12.2" - cloudflare_issuer_private_key_secret_name = "cloudflare-issuer-private-key" -} +# locals { +# cert_manager_namespace = "cert-manager" +# cert_manager_helm_repo = "https://charts.jetstack.io" +# cert_manager_helm_chart = "cert-manager" +# cert_manager_release_name = "cert-manager" +# cert_manager_chart_version = "1.16.2" +# cloudflare_issuer_private_key_secret_name = "cloudflare-issuer-private-key" +# } -resource "kubernetes_secret" "cloudflare-api-key" { - provider = kubernetes.primary +# resource "kubernetes_secret" "cloudflare-api-key" { +# provider = kubernetes.primary - metadata { - name = "cloudflare-api-key-secret" - namespace = local.cert_manager_namespace - } - data = { - api-token = var.cloudflare_api_token - } -} +# metadata { +# name = "cloudflare-api-key-secret" +# namespace = 
local.cert_manager_namespace +# } +# data = { +# api-token = var.cloudflare_api_token +# } +# } -resource "kubernetes_namespace" "cert-manager-namespace" { - provider = kubernetes.primary +# resource "kubernetes_namespace" "cert-manager-namespace" { +# provider = kubernetes.primary - metadata { - name = local.cert_manager_namespace - } -} +# metadata { +# name = local.cert_manager_namespace +# } +# } -resource "helm_release" "cert-manager" { - provider = helm.primary +# resource "helm_release" "cert-manager" { +# provider = helm.primary - repository = local.cert_manager_helm_repo - chart = local.cert_manager_helm_chart - name = local.cert_manager_release_name - namespace = kubernetes_namespace.cert-manager-namespace.metadata.0.name - values = [< Date: Fri, 6 Dec 2024 18:27:44 +0000 Subject: [PATCH 09/36] working eu --- scaletest/terraform/new/cf_dns.tf | 8 +++++++ scaletest/terraform/new/k8s_coder.tf | 36 ++++++++++++++++++++++++---- 2 files changed, 40 insertions(+), 4 deletions(-) diff --git a/scaletest/terraform/new/cf_dns.tf b/scaletest/terraform/new/cf_dns.tf index e28aaf22c721f..61c669801a36d 100644 --- a/scaletest/terraform/new/cf_dns.tf +++ b/scaletest/terraform/new/cf_dns.tf @@ -5,3 +5,11 @@ resource "cloudflare_record" "coder" { type = "A" ttl = 3600 } + +resource "cloudflare_record" "coder_europe" { + zone_id = var.cloudflare_zone_id + name = local.coder_europe_subdomain + content = google_compute_address.coder["europe"].address + type = "A" + ttl = 3600 +} diff --git a/scaletest/terraform/new/k8s_coder.tf b/scaletest/terraform/new/k8s_coder.tf index 31b44967e6f45..b9a48ea6013f5 100644 --- a/scaletest/terraform/new/k8s_coder.tf +++ b/scaletest/terraform/new/k8s_coder.tf @@ -278,7 +278,7 @@ resource "kubernetes_secret" "proxy_token_europe" { namespace = kubernetes_namespace.coder_europe.metadata.0.name } data = { - token = terraform_data.proxy_tokens.output.europe + token = trimspace(data.local_file.europe_proxy_token.content) } lifecycle { ignore_changes = [timeouts, wait_for_service_account_token] @@ -295,6 +295,7 @@ resource "helm_release" "coder_europe" { namespace = kubernetes_namespace.coder_europe.metadata.0.name values = [< ${path.module}/europe_proxy_token +echo -n $${asia_token} > ${path.module}/asia_proxy_token EOF } depends_on = [ data.http.coder_healthy ] } +data "local_file" "europe_proxy_token" { + filename = "${path.module}/europe_proxy_token" + depends_on = [ null_resource.proxy_tokens ] +} + +data "local_file" "asia_proxy_token" { + filename = "${path.module}/asia_proxy_token" + depends_on = [ null_resource.proxy_tokens ] +} + +# data "external" "proxy_tokens" { +# program = ["bash", "${path.module}/workspace_proxies.sh"] +# query = { +# coder_url = local.coder_url +# coder_admin_email = local.coder_admin_email +# coder_admin_password = local.coder_admin_password +# coder_admin_user = local.coder_admin_user +# coder_admin_full_name = local.coder_admin_full_name +# coder_license = var.coder_license + +# status_code = data.http.coder_healthy.status_code +# } + +# depends_on = [ data.http.coder_healthy ] +# } + From 3a75224bb51add091d886188ab0089b547044032 Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Fri, 6 Dec 2024 20:59:43 +0000 Subject: [PATCH 10/36] all working --- scaletest/terraform/new/cf_dns.tf | 13 +- scaletest/terraform/new/deployments.tf | 25 + .../new/{gcp_cluster.tf => gcp_clusters.tf} | 21 +- scaletest/terraform/new/gcp_db.tf | 4 +- scaletest/terraform/new/gcp_vpc.tf | 4 +- scaletest/terraform/new/k8s_certmanager.tf | 68 -- 
scaletest/terraform/new/k8s_coder.tf | 636 ------------------ scaletest/terraform/new/k8s_coder_asia.tf | 212 ++++++ scaletest/terraform/new/k8s_coder_europe.tf | 212 ++++++ scaletest/terraform/new/k8s_coder_primary.tf | 229 +++++++ scaletest/terraform/new/k8s_coder_proxies.tf | 63 ++ scaletest/terraform/new/k8s_otel.tf | 62 -- scaletest/terraform/new/main.tf | 28 +- scaletest/terraform/new/vars.tf | 23 +- 14 files changed, 781 insertions(+), 819 deletions(-) create mode 100644 scaletest/terraform/new/deployments.tf rename scaletest/terraform/new/{gcp_cluster.tf => gcp_clusters.tf} (88%) delete mode 100644 scaletest/terraform/new/k8s_certmanager.tf delete mode 100644 scaletest/terraform/new/k8s_coder.tf create mode 100644 scaletest/terraform/new/k8s_coder_asia.tf create mode 100644 scaletest/terraform/new/k8s_coder_europe.tf create mode 100644 scaletest/terraform/new/k8s_coder_primary.tf create mode 100644 scaletest/terraform/new/k8s_coder_proxies.tf delete mode 100644 scaletest/terraform/new/k8s_otel.tf diff --git a/scaletest/terraform/new/cf_dns.tf b/scaletest/terraform/new/cf_dns.tf index 61c669801a36d..5906741a81c7f 100644 --- a/scaletest/terraform/new/cf_dns.tf +++ b/scaletest/terraform/new/cf_dns.tf @@ -1,15 +1,8 @@ resource "cloudflare_record" "coder" { + for_each = local.deployments zone_id = var.cloudflare_zone_id - name = local.coder_subdomain - content = google_compute_address.coder["primary"].address - type = "A" - ttl = 3600 -} - -resource "cloudflare_record" "coder_europe" { - zone_id = var.cloudflare_zone_id - name = local.coder_europe_subdomain - content = google_compute_address.coder["europe"].address + name = each.value.subdomain + content = google_compute_address.coder[each.key].address type = "A" ttl = 3600 } diff --git a/scaletest/terraform/new/deployments.tf b/scaletest/terraform/new/deployments.tf new file mode 100644 index 0000000000000..0fed67ab7d2c9 --- /dev/null +++ b/scaletest/terraform/new/deployments.tf @@ -0,0 +1,25 @@ +locals { + deployments = { + primary = { + subdomain = "${var.name}-scaletest" + url = "http://${var.name}-scaletest.${var.cloudflare_domain}" + region = "us-east1" + zone = "us-east1-c" + cidr = "10.200.0.0/24" + } + europe = { + subdomain = "${var.name}-europe-scaletest" + url = "http://${var.name}-europe-scaletest.${var.cloudflare_domain}" + region = "europe-west1" + zone = "europe-west1-b" + cidr = "10.201.0.0/24" + } + asia = { + subdomain = "${var.name}-asia-scaletest" + url = "http://${var.name}-asia-scaletest.${var.cloudflare_domain}" + region = "asia-southeast1" + zone = "asia-southeast1-a" + cidr = "10.202.0.0/24" + } + } +} diff --git a/scaletest/terraform/new/gcp_cluster.tf b/scaletest/terraform/new/gcp_clusters.tf similarity index 88% rename from scaletest/terraform/new/gcp_cluster.tf rename to scaletest/terraform/new/gcp_clusters.tf index ba3c8c115eaf2..16865c9f5690b 100644 --- a/scaletest/terraform/new/gcp_cluster.tf +++ b/scaletest/terraform/new/gcp_clusters.tf @@ -4,23 +4,6 @@ data "google_compute_default_service_account" "default" { } locals { - clusters = { - primary = { - region = "us-east1" - zone = "us-east1-c" - cidr = "10.200.0.0/24" - } - europe = { - region = "europe-west1" - zone = "europe-west1-b" - cidr = "10.201.0.0/24" - } - asia = { - region = "asia-southeast1" - zone = "asia-southeast1-a" - cidr = "10.202.0.0/24" - } - } node_pools = { primary_coder = { name = "coder" @@ -71,7 +54,7 @@ locals { } resource "google_container_cluster" "cluster" { - for_each = local.clusters + for_each = local.deployments name = 
"${var.name}-${each.key}" location = each.value.zone project = var.project_id @@ -118,7 +101,7 @@ resource "google_container_cluster" "cluster" { resource "google_container_node_pool" "node_pool" { for_each = local.node_pools name = each.value.name - location = local.clusters[each.value.cluster].zone + location = local.deployments[each.value.cluster].zone project = var.project_id cluster = google_container_cluster.cluster[each.value.cluster].name autoscaling { diff --git a/scaletest/terraform/new/gcp_db.tf b/scaletest/terraform/new/gcp_db.tf index 4a394289b75bf..aa5dc1c5b923a 100644 --- a/scaletest/terraform/new/gcp_db.tf +++ b/scaletest/terraform/new/gcp_db.tf @@ -1,7 +1,7 @@ resource "google_sql_database_instance" "db" { name = "${var.name}-coder" project = var.project_id - region = local.clusters.primary.region + region = local.deployments.primary.region database_version = var.cloudsql_version deletion_protection = false @@ -13,7 +13,7 @@ resource "google_sql_database_instance" "db" { availability_type = "ZONAL" location_preference { - zone = local.clusters.primary.zone + zone = local.deployments.primary.zone } database_flags { diff --git a/scaletest/terraform/new/gcp_vpc.tf b/scaletest/terraform/new/gcp_vpc.tf index 56c2910996138..c9fd412aa3cb4 100644 --- a/scaletest/terraform/new/gcp_vpc.tf +++ b/scaletest/terraform/new/gcp_vpc.tf @@ -9,7 +9,7 @@ resource "google_compute_network" "vpc" { } resource "google_compute_subnetwork" "subnet" { - for_each = local.clusters + for_each = local.deployments name = "${var.name}-${each.key}" project = var.project_id region = each.value.region @@ -18,7 +18,7 @@ resource "google_compute_subnetwork" "subnet" { } resource "google_compute_address" "coder" { - for_each = local.clusters + for_each = local.deployments project = var.project_id region = each.value.region name = "${var.name}-${each.key}-coder" diff --git a/scaletest/terraform/new/k8s_certmanager.tf b/scaletest/terraform/new/k8s_certmanager.tf deleted file mode 100644 index 730dd6f99d7b9..0000000000000 --- a/scaletest/terraform/new/k8s_certmanager.tf +++ /dev/null @@ -1,68 +0,0 @@ -# locals { -# cert_manager_namespace = "cert-manager" -# cert_manager_helm_repo = "https://charts.jetstack.io" -# cert_manager_helm_chart = "cert-manager" -# cert_manager_release_name = "cert-manager" -# cert_manager_chart_version = "1.16.2" -# cloudflare_issuer_private_key_secret_name = "cloudflare-issuer-private-key" -# } - -# resource "kubernetes_secret" "cloudflare-api-key" { -# provider = kubernetes.primary - -# metadata { -# name = "cloudflare-api-key-secret" -# namespace = local.cert_manager_namespace -# } -# data = { -# api-token = var.cloudflare_api_token -# } -# } - -# resource "kubernetes_namespace" "cert-manager-namespace" { -# provider = kubernetes.primary - -# metadata { -# name = local.cert_manager_namespace -# } -# } - -# resource "helm_release" "cert-manager" { -# provider = helm.primary - -# repository = local.cert_manager_helm_repo -# chart = local.cert_manager_helm_chart -# name = local.cert_manager_release_name -# namespace = kubernetes_namespace.cert-manager-namespace.metadata.0.name -# values = [< ${path.module}/europe_proxy_token -echo -n $${asia_token} > ${path.module}/asia_proxy_token -EOF - } - - depends_on = [ data.http.coder_healthy ] -} - -data "local_file" "europe_proxy_token" { - filename = "${path.module}/europe_proxy_token" - depends_on = [ null_resource.proxy_tokens ] -} - -data "local_file" "asia_proxy_token" { - filename = "${path.module}/asia_proxy_token" - depends_on = [ 
null_resource.proxy_tokens ] -} - -# data "external" "proxy_tokens" { -# program = ["bash", "${path.module}/workspace_proxies.sh"] -# query = { -# coder_url = local.coder_url -# coder_admin_email = local.coder_admin_email -# coder_admin_password = local.coder_admin_password -# coder_admin_user = local.coder_admin_user -# coder_admin_full_name = local.coder_admin_full_name -# coder_license = var.coder_license - -# status_code = data.http.coder_healthy.status_code -# } - -# depends_on = [ data.http.coder_healthy ] -# } - diff --git a/scaletest/terraform/new/k8s_coder_asia.tf b/scaletest/terraform/new/k8s_coder_asia.tf new file mode 100644 index 0000000000000..6067621f01d75 --- /dev/null +++ b/scaletest/terraform/new/k8s_coder_asia.tf @@ -0,0 +1,212 @@ +resource "kubernetes_namespace" "coder_asia" { + provider = kubernetes.asia + + metadata { + name = local.coder_namespace + } + lifecycle { + ignore_changes = [timeouts, wait_for_default_service_account] + } +} + +resource "kubernetes_secret" "provisionerd_psk_asia" { + provider = kubernetes.asia + + type = "Opaque" + metadata { + name = "coder-provisioner-psk" + namespace = kubernetes_namespace.coder_asia.metadata.0.name + } + data = { + psk = random_password.provisionerd_psk.result + } + lifecycle { + ignore_changes = [timeouts, wait_for_service_account_token] + } +} + +resource "kubernetes_secret" "proxy_token_asia" { + provider = kubernetes.asia + + type = "Opaque" + metadata { + name = "coder-proxy-token" + namespace = kubernetes_namespace.coder_asia.metadata.0.name + } + data = { + token = trimspace(data.local_file.asia_proxy_token.content) + } + lifecycle { + ignore_changes = [timeouts, wait_for_service_account_token] + } +} + +resource "helm_release" "coder_asia" { + provider = helm.asia + + repository = local.coder_helm_repo + chart = local.coder_helm_chart + name = local.coder_release_name + version = var.coder_chart_version + namespace = kubernetes_namespace.coder_asia.metadata.0.name + values = [< ${path.module}/.coderv2/europe_proxy_token +echo -n $${asia_token} > ${path.module}/.coderv2/asia_proxy_token +EOF + } + + depends_on = [ data.http.coder_healthy ] +} + +data "local_file" "europe_proxy_token" { + filename = "${path.module}/.coderv2/europe_proxy_token" + depends_on = [ null_resource.proxy_tokens ] +} + +data "local_file" "asia_proxy_token" { + filename = "${path.module}/.coderv2/asia_proxy_token" + depends_on = [ null_resource.proxy_tokens ] +} diff --git a/scaletest/terraform/new/k8s_otel.tf b/scaletest/terraform/new/k8s_otel.tf deleted file mode 100644 index ed48e5d01ea70..0000000000000 --- a/scaletest/terraform/new/k8s_otel.tf +++ /dev/null @@ -1,62 +0,0 @@ -# # Terraform configuration for OpenTelemetry Operator - -# locals { -# otel_namespace = "opentelemetry-operator-system" -# otel_operator_helm_repo = "https://open-telemetry.github.io/opentelemetry-helm-charts" -# otel_operator_helm_chart = "opentelemetry-operator" -# otel_operator_release_name = "opentelemetry-operator" -# otel_operator_chart_version = "0.34.1" -# } - -# resource "kubernetes_namespace" "otel-namespace" { -# metadata { -# name = local.otel_namespace -# } -# lifecycle { -# ignore_changes = [timeouts, wait_for_default_service_account] -# } -# } - -# resource "helm_release" "otel-operator" { -# repository = local.otel_operator_helm_repo -# chart = local.otel_operator_helm_chart -# name = local.otel_operator_release_name -# namespace = kubernetes_namespace.otel-namespace.metadata.0.name -# values = [< Date: Fri, 6 Dec 2024 21:01:13 +0000 Subject: [PATCH 
11/36] fmt --- scaletest/terraform/new/cf_dns.tf | 10 ++--- scaletest/terraform/new/deployments.tf | 24 ++++++------ scaletest/terraform/new/gcp_clusters.tf | 40 ++++++++++---------- scaletest/terraform/new/k8s_coder_asia.tf | 2 +- scaletest/terraform/new/k8s_coder_europe.tf | 2 +- scaletest/terraform/new/k8s_coder_primary.tf | 2 +- scaletest/terraform/new/k8s_coder_proxies.tf | 22 +++++------ scaletest/terraform/new/main.tf | 12 +++--- 8 files changed, 57 insertions(+), 57 deletions(-) diff --git a/scaletest/terraform/new/cf_dns.tf b/scaletest/terraform/new/cf_dns.tf index 5906741a81c7f..eaaff28ce03a0 100644 --- a/scaletest/terraform/new/cf_dns.tf +++ b/scaletest/terraform/new/cf_dns.tf @@ -1,8 +1,8 @@ resource "cloudflare_record" "coder" { for_each = local.deployments - zone_id = var.cloudflare_zone_id - name = each.value.subdomain - content = google_compute_address.coder[each.key].address - type = "A" - ttl = 3600 + zone_id = var.cloudflare_zone_id + name = each.value.subdomain + content = google_compute_address.coder[each.key].address + type = "A" + ttl = 3600 } diff --git a/scaletest/terraform/new/deployments.tf b/scaletest/terraform/new/deployments.tf index 0fed67ab7d2c9..938943a2a8c16 100644 --- a/scaletest/terraform/new/deployments.tf +++ b/scaletest/terraform/new/deployments.tf @@ -2,24 +2,24 @@ locals { deployments = { primary = { subdomain = "${var.name}-scaletest" - url = "http://${var.name}-scaletest.${var.cloudflare_domain}" - region = "us-east1" - zone = "us-east1-c" - cidr = "10.200.0.0/24" + url = "http://${var.name}-scaletest.${var.cloudflare_domain}" + region = "us-east1" + zone = "us-east1-c" + cidr = "10.200.0.0/24" } europe = { subdomain = "${var.name}-europe-scaletest" - url = "http://${var.name}-europe-scaletest.${var.cloudflare_domain}" - region = "europe-west1" - zone = "europe-west1-b" - cidr = "10.201.0.0/24" + url = "http://${var.name}-europe-scaletest.${var.cloudflare_domain}" + region = "europe-west1" + zone = "europe-west1-b" + cidr = "10.201.0.0/24" } asia = { subdomain = "${var.name}-asia-scaletest" - url = "http://${var.name}-asia-scaletest.${var.cloudflare_domain}" - region = "asia-southeast1" - zone = "asia-southeast1-a" - cidr = "10.202.0.0/24" + url = "http://${var.name}-asia-scaletest.${var.cloudflare_domain}" + region = "asia-southeast1" + zone = "asia-southeast1-a" + cidr = "10.202.0.0/24" } } } diff --git a/scaletest/terraform/new/gcp_clusters.tf b/scaletest/terraform/new/gcp_clusters.tf index 16865c9f5690b..7b8993fc0fd11 100644 --- a/scaletest/terraform/new/gcp_clusters.tf +++ b/scaletest/terraform/new/gcp_clusters.tf @@ -1,54 +1,54 @@ data "google_compute_default_service_account" "default" { - project = var.project_id - depends_on = [ google_project_service.api["compute.googleapis.com"] ] + project = var.project_id + depends_on = [google_project_service.api["compute.googleapis.com"]] } locals { node_pools = { primary_coder = { - name = "coder" + name = "coder" cluster = "primary" - size = 1 + size = 1 } primary_workspaces = { - name = "workspaces" + name = "workspaces" cluster = "primary" - size = 1 + size = 1 } primary_misc = { - name = "misc" + name = "misc" cluster = "primary" - size = 1 + size = 1 } europe_coder = { - name = "coder" + name = "coder" cluster = "europe" - size = 1 + size = 1 } europe_workspaces = { - name = "workspaces" + name = "workspaces" cluster = "europe" - size = 1 + size = 1 } europe_misc = { - name = "misc" + name = "misc" cluster = "europe" - size = 1 + size = 1 } asia_coder = { - name = "coder" + name = "coder" 
cluster = "asia" - size = 1 + size = 1 } asia_workspaces = { - name = "workspaces" + name = "workspaces" cluster = "asia" - size = 1 + size = 1 } asia_misc = { - name = "misc" + name = "misc" cluster = "asia" - size = 1 + size = 1 } } } diff --git a/scaletest/terraform/new/k8s_coder_asia.tf b/scaletest/terraform/new/k8s_coder_asia.tf index 6067621f01d75..bd237416f6e6a 100644 --- a/scaletest/terraform/new/k8s_coder_asia.tf +++ b/scaletest/terraform/new/k8s_coder_asia.tf @@ -141,7 +141,7 @@ EOF resource "helm_release" "provisionerd_asia" { provider = helm.asia - + repository = local.coder_helm_repo chart = local.provisionerd_helm_chart name = local.provisionerd_release_name diff --git a/scaletest/terraform/new/k8s_coder_europe.tf b/scaletest/terraform/new/k8s_coder_europe.tf index 7609f6eb783e5..244ec4e8de6c2 100644 --- a/scaletest/terraform/new/k8s_coder_europe.tf +++ b/scaletest/terraform/new/k8s_coder_europe.tf @@ -141,7 +141,7 @@ EOF resource "helm_release" "provisionerd_europe" { provider = helm.europe - + repository = local.coder_helm_repo chart = local.provisionerd_helm_chart name = local.provisionerd_release_name diff --git a/scaletest/terraform/new/k8s_coder_primary.tf b/scaletest/terraform/new/k8s_coder_primary.tf index c91464c7d8795..a18698b335788 100644 --- a/scaletest/terraform/new/k8s_coder_primary.tf +++ b/scaletest/terraform/new/k8s_coder_primary.tf @@ -158,7 +158,7 @@ EOF resource "helm_release" "provisionerd_chart" { provider = helm.primary - + repository = local.coder_helm_repo chart = local.provisionerd_helm_chart name = local.provisionerd_release_name diff --git a/scaletest/terraform/new/k8s_coder_proxies.tf b/scaletest/terraform/new/k8s_coder_proxies.tf index cc9887957d598..7dfe4ca60da12 100644 --- a/scaletest/terraform/new/k8s_coder_proxies.tf +++ b/scaletest/terraform/new/k8s_coder_proxies.tf @@ -2,24 +2,24 @@ data "http" "coder_healthy" { url = local.deployments.primary.url // Wait up to 5 minutes for DNS to propogate retry { - attempts = 30 + attempts = 30 min_delay_ms = 10000 } lifecycle { postcondition { - condition = self.status_code == 200 - error_message = "${self.url} returned an unhealthy status code" + condition = self.status_code == 200 + error_message = "${self.url} returned an unhealthy status code" } } - depends_on = [ helm_release.coder_primary, cloudflare_record.coder["primary"] ] + depends_on = [helm_release.coder_primary, cloudflare_record.coder["primary"]] } resource "null_resource" "proxy_tokens" { provisioner "local-exec" { - interpreter = [ "/bin/bash", "-c" ] - command = < ${path.module}/.coderv2/asia_proxy_token EOF } - depends_on = [ data.http.coder_healthy ] + depends_on = [data.http.coder_healthy] } data "local_file" "europe_proxy_token" { - filename = "${path.module}/.coderv2/europe_proxy_token" - depends_on = [ null_resource.proxy_tokens ] + filename = "${path.module}/.coderv2/europe_proxy_token" + depends_on = [null_resource.proxy_tokens] } data "local_file" "asia_proxy_token" { - filename = "${path.module}/.coderv2/asia_proxy_token" - depends_on = [ null_resource.proxy_tokens ] + filename = "${path.module}/.coderv2/asia_proxy_token" + depends_on = [null_resource.proxy_tokens] } diff --git a/scaletest/terraform/new/main.tf b/scaletest/terraform/new/main.tf index 8124e1fb0d854..57a294710c5b5 100644 --- a/scaletest/terraform/new/main.tf +++ b/scaletest/terraform/new/main.tf @@ -51,28 +51,28 @@ provider "cloudflare" { } provider "kubernetes" { - alias = "primary" + alias = "primary" host = 
"https://${google_container_cluster.cluster["primary"].endpoint}" cluster_ca_certificate = base64decode(google_container_cluster.cluster["primary"].master_auth.0.cluster_ca_certificate) token = data.google_client_config.default.access_token } provider "kubernetes" { - alias = "europe" + alias = "europe" host = "https://${google_container_cluster.cluster["europe"].endpoint}" cluster_ca_certificate = base64decode(google_container_cluster.cluster["europe"].master_auth.0.cluster_ca_certificate) token = data.google_client_config.default.access_token } provider "kubernetes" { - alias = "asia" + alias = "asia" host = "https://${google_container_cluster.cluster["asia"].endpoint}" cluster_ca_certificate = base64decode(google_container_cluster.cluster["asia"].master_auth.0.cluster_ca_certificate) token = data.google_client_config.default.access_token } provider "kubectl" { - alias = "primary" + alias = "primary" host = "https://${google_container_cluster.cluster["primary"].endpoint}" cluster_ca_certificate = base64decode(google_container_cluster.cluster["primary"].master_auth.0.cluster_ca_certificate) token = data.google_client_config.default.access_token @@ -80,7 +80,7 @@ provider "kubectl" { } provider "kubectl" { - alias = "europe" + alias = "europe" host = "https://${google_container_cluster.cluster["europe"].endpoint}" cluster_ca_certificate = base64decode(google_container_cluster.cluster["europe"].master_auth.0.cluster_ca_certificate) token = data.google_client_config.default.access_token @@ -88,7 +88,7 @@ provider "kubectl" { } provider "kubectl" { - alias = "asia" + alias = "asia" host = "https://${google_container_cluster.cluster["asia"].endpoint}" cluster_ca_certificate = base64decode(google_container_cluster.cluster["asia"].master_auth.0.cluster_ca_certificate) token = data.google_client_config.default.access_token From 285e9a89e3ced1f6cc0ac9901a50cf2667af455a Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Fri, 6 Dec 2024 21:06:14 +0000 Subject: [PATCH 12/36] rename --- .../terraform/{new => action}/deployments.tf | 0 .../{new => action}/k8s_coder_proxies.tf | 0 scaletest/terraform/new/cf_dns.tf | 8 - scaletest/terraform/new/gcp_clusters.tf | 136 ----------- scaletest/terraform/new/gcp_db.tf | 89 ------- scaletest/terraform/new/gcp_project.tf | 27 --- scaletest/terraform/new/gcp_vpc.tf | 42 ---- scaletest/terraform/new/k8s_coder_asia.tf | 212 ---------------- scaletest/terraform/new/k8s_coder_europe.tf | 212 ---------------- scaletest/terraform/new/k8s_coder_primary.tf | 229 ------------------ scaletest/terraform/new/main.tf | 123 ---------- scaletest/terraform/new/vars.tf | 185 -------------- 12 files changed, 1263 deletions(-) rename scaletest/terraform/{new => action}/deployments.tf (100%) rename scaletest/terraform/{new => action}/k8s_coder_proxies.tf (100%) delete mode 100644 scaletest/terraform/new/cf_dns.tf delete mode 100644 scaletest/terraform/new/gcp_clusters.tf delete mode 100644 scaletest/terraform/new/gcp_db.tf delete mode 100644 scaletest/terraform/new/gcp_project.tf delete mode 100644 scaletest/terraform/new/gcp_vpc.tf delete mode 100644 scaletest/terraform/new/k8s_coder_asia.tf delete mode 100644 scaletest/terraform/new/k8s_coder_europe.tf delete mode 100644 scaletest/terraform/new/k8s_coder_primary.tf delete mode 100644 scaletest/terraform/new/main.tf delete mode 100644 scaletest/terraform/new/vars.tf diff --git a/scaletest/terraform/new/deployments.tf b/scaletest/terraform/action/deployments.tf similarity index 100% rename from 
scaletest/terraform/new/deployments.tf rename to scaletest/terraform/action/deployments.tf diff --git a/scaletest/terraform/new/k8s_coder_proxies.tf b/scaletest/terraform/action/k8s_coder_proxies.tf similarity index 100% rename from scaletest/terraform/new/k8s_coder_proxies.tf rename to scaletest/terraform/action/k8s_coder_proxies.tf diff --git a/scaletest/terraform/new/cf_dns.tf b/scaletest/terraform/new/cf_dns.tf deleted file mode 100644 index eaaff28ce03a0..0000000000000 --- a/scaletest/terraform/new/cf_dns.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "cloudflare_record" "coder" { - for_each = local.deployments - zone_id = var.cloudflare_zone_id - name = each.value.subdomain - content = google_compute_address.coder[each.key].address - type = "A" - ttl = 3600 -} diff --git a/scaletest/terraform/new/gcp_clusters.tf b/scaletest/terraform/new/gcp_clusters.tf deleted file mode 100644 index 7b8993fc0fd11..0000000000000 --- a/scaletest/terraform/new/gcp_clusters.tf +++ /dev/null @@ -1,136 +0,0 @@ -data "google_compute_default_service_account" "default" { - project = var.project_id - depends_on = [google_project_service.api["compute.googleapis.com"]] -} - -locals { - node_pools = { - primary_coder = { - name = "coder" - cluster = "primary" - size = 1 - } - primary_workspaces = { - name = "workspaces" - cluster = "primary" - size = 1 - } - primary_misc = { - name = "misc" - cluster = "primary" - size = 1 - } - europe_coder = { - name = "coder" - cluster = "europe" - size = 1 - } - europe_workspaces = { - name = "workspaces" - cluster = "europe" - size = 1 - } - europe_misc = { - name = "misc" - cluster = "europe" - size = 1 - } - asia_coder = { - name = "coder" - cluster = "asia" - size = 1 - } - asia_workspaces = { - name = "workspaces" - cluster = "asia" - size = 1 - } - asia_misc = { - name = "misc" - cluster = "asia" - size = 1 - } - } -} - -resource "google_container_cluster" "cluster" { - for_each = local.deployments - name = "${var.name}-${each.key}" - location = each.value.zone - project = var.project_id - network = google_compute_network.vpc.name - subnetwork = google_compute_subnetwork.subnet[each.key].name - networking_mode = "VPC_NATIVE" - default_max_pods_per_node = 256 - ip_allocation_policy { # Required with networking_mode=VPC_NATIVE - - } - release_channel { - # Setting release channel as STABLE can cause unexpected cluster upgrades. 
- channel = "UNSPECIFIED" - } - initial_node_count = 1 - remove_default_node_pool = true - - network_policy { - enabled = true - } - depends_on = [ - google_project_service.api["container.googleapis.com"] - ] - monitoring_config { - enable_components = ["SYSTEM_COMPONENTS"] - managed_prometheus { - enabled = false - } - } - workload_identity_config { - workload_pool = "${data.google_project.project.project_id}.svc.id.goog" - } - - - lifecycle { - ignore_changes = [ - maintenance_policy, - release_channel, - remove_default_node_pool - ] - } -} - -resource "google_container_node_pool" "node_pool" { - for_each = local.node_pools - name = each.value.name - location = local.deployments[each.value.cluster].zone - project = var.project_id - cluster = google_container_cluster.cluster[each.value.cluster].name - autoscaling { - min_node_count = 1 - max_node_count = each.value.size - } - node_config { - oauth_scopes = [ - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/trace.append", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/service.management.readonly", - "https://www.googleapis.com/auth/servicecontrol", - ] - disk_size_gb = var.node_disk_size_gb - machine_type = var.nodepool_machine_type_coder - image_type = var.node_image_type - preemptible = var.node_preemptible - service_account = data.google_compute_default_service_account.default.email - tags = ["gke-node", "${var.project_id}-gke"] - labels = { - env = var.project_id - } - metadata = { - disable-legacy-endpoints = "true" - } - } - lifecycle { - ignore_changes = [management[0].auto_repair, management[0].auto_upgrade, timeouts] - } -} diff --git a/scaletest/terraform/new/gcp_db.tf b/scaletest/terraform/new/gcp_db.tf deleted file mode 100644 index aa5dc1c5b923a..0000000000000 --- a/scaletest/terraform/new/gcp_db.tf +++ /dev/null @@ -1,89 +0,0 @@ -resource "google_sql_database_instance" "db" { - name = "${var.name}-coder" - project = var.project_id - region = local.deployments.primary.region - database_version = var.cloudsql_version - deletion_protection = false - - depends_on = [google_service_networking_connection.private_vpc_connection] - - settings { - tier = var.cloudsql_tier - activation_policy = "ALWAYS" - availability_type = "ZONAL" - - location_preference { - zone = local.deployments.primary.zone - } - - database_flags { - name = "max_connections" - value = var.cloudsql_max_connections - } - - ip_configuration { - ipv4_enabled = false - private_network = google_compute_network.vpc.id - } - - insights_config { - query_insights_enabled = true - query_string_length = 1024 - record_application_tags = false - record_client_address = false - } - } - - lifecycle { - ignore_changes = [deletion_protection, timeouts] - } -} - -resource "google_sql_database" "coder" { - project = var.project_id - instance = google_sql_database_instance.db.id - name = "${var.name}-coder" - # required for postgres, otherwise db fails to delete - deletion_policy = "ABANDON" - lifecycle { - ignore_changes = [deletion_policy] - } -} - -resource "random_password" "coder_postgres_password" { - length = 12 -} - -resource "random_password" "prometheus_postgres_password" { - length = 12 -} - -resource "google_sql_user" "coder" { - project = var.project_id - instance = google_sql_database_instance.db.id - name = "${var.name}-coder" - type = "BUILT_IN" - password = random_password.coder_postgres_password.result - # required for postgres, 
otherwise user fails to delete - deletion_policy = "ABANDON" - lifecycle { - ignore_changes = [deletion_policy, password] - } -} - -resource "google_sql_user" "prometheus" { - project = var.project_id - instance = google_sql_database_instance.db.id - name = "${var.name}-prometheus" - type = "BUILT_IN" - password = random_password.prometheus_postgres_password.result - # required for postgres, otherwise user fails to delete - deletion_policy = "ABANDON" - lifecycle { - ignore_changes = [deletion_policy, password] - } -} - -locals { - coder_db_url = "postgres://${google_sql_user.coder.name}:${urlencode(random_password.coder_postgres_password.result)}@${google_sql_database_instance.db.private_ip_address}/${google_sql_database.coder.name}?sslmode=disable" -} diff --git a/scaletest/terraform/new/gcp_project.tf b/scaletest/terraform/new/gcp_project.tf deleted file mode 100644 index 1073a621c33e0..0000000000000 --- a/scaletest/terraform/new/gcp_project.tf +++ /dev/null @@ -1,27 +0,0 @@ -locals { - project_apis = [ - "cloudtrace", - "compute", - "container", - "logging", - "monitoring", - "servicemanagement", - "servicenetworking", - "sqladmin", - "stackdriver", - "storage-api", - ] -} - -data "google_project" "project" { - project_id = var.project_id -} - -resource "google_project_service" "api" { - for_each = toset(local.project_apis) - project = data.google_project.project.project_id - service = "${each.value}.googleapis.com" - - disable_dependent_services = false - disable_on_destroy = false -} diff --git a/scaletest/terraform/new/gcp_vpc.tf b/scaletest/terraform/new/gcp_vpc.tf deleted file mode 100644 index c9fd412aa3cb4..0000000000000 --- a/scaletest/terraform/new/gcp_vpc.tf +++ /dev/null @@ -1,42 +0,0 @@ - -resource "google_compute_network" "vpc" { - project = var.project_id - name = var.name - auto_create_subnetworks = "false" - depends_on = [ - google_project_service.api["compute.googleapis.com"] - ] -} - -resource "google_compute_subnetwork" "subnet" { - for_each = local.deployments - name = "${var.name}-${each.key}" - project = var.project_id - region = each.value.region - network = google_compute_network.vpc.name - ip_cidr_range = each.value.cidr -} - -resource "google_compute_address" "coder" { - for_each = local.deployments - project = var.project_id - region = each.value.region - name = "${var.name}-${each.key}-coder" - address_type = "EXTERNAL" - network_tier = "PREMIUM" -} - -resource "google_compute_global_address" "sql_peering" { - project = var.project_id - name = "${var.name}-sql-peering" - purpose = "VPC_PEERING" - address_type = "INTERNAL" - prefix_length = 16 - network = google_compute_network.vpc.id -} - -resource "google_service_networking_connection" "private_vpc_connection" { - network = google_compute_network.vpc.id - service = "servicenetworking.googleapis.com" - reserved_peering_ranges = [google_compute_global_address.sql_peering.name] -} diff --git a/scaletest/terraform/new/k8s_coder_asia.tf b/scaletest/terraform/new/k8s_coder_asia.tf deleted file mode 100644 index bd237416f6e6a..0000000000000 --- a/scaletest/terraform/new/k8s_coder_asia.tf +++ /dev/null @@ -1,212 +0,0 @@ -resource "kubernetes_namespace" "coder_asia" { - provider = kubernetes.asia - - metadata { - name = local.coder_namespace - } - lifecycle { - ignore_changes = [timeouts, wait_for_default_service_account] - } -} - -resource "kubernetes_secret" "provisionerd_psk_asia" { - provider = kubernetes.asia - - type = "Opaque" - metadata { - name = "coder-provisioner-psk" - namespace = 
kubernetes_namespace.coder_asia.metadata.0.name - } - data = { - psk = random_password.provisionerd_psk.result - } - lifecycle { - ignore_changes = [timeouts, wait_for_service_account_token] - } -} - -resource "kubernetes_secret" "proxy_token_asia" { - provider = kubernetes.asia - - type = "Opaque" - metadata { - name = "coder-proxy-token" - namespace = kubernetes_namespace.coder_asia.metadata.0.name - } - data = { - token = trimspace(data.local_file.asia_proxy_token.content) - } - lifecycle { - ignore_changes = [timeouts, wait_for_service_account_token] - } -} - -resource "helm_release" "coder_asia" { - provider = helm.asia - - repository = local.coder_helm_repo - chart = local.coder_helm_chart - name = local.coder_release_name - version = var.coder_chart_version - namespace = kubernetes_namespace.coder_asia.metadata.0.name - values = [< Date: Fri, 6 Dec 2024 21:11:39 +0000 Subject: [PATCH 13/36] typo --- scaletest/terraform/action/k8s_coder_proxies.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scaletest/terraform/action/k8s_coder_proxies.tf b/scaletest/terraform/action/k8s_coder_proxies.tf index 7dfe4ca60da12..1a7f8f5ca71dc 100644 --- a/scaletest/terraform/action/k8s_coder_proxies.tf +++ b/scaletest/terraform/action/k8s_coder_proxies.tf @@ -1,6 +1,6 @@ data "http" "coder_healthy" { url = local.deployments.primary.url - // Wait up to 5 minutes for DNS to propogate + // Wait up to 5 minutes for DNS to propagate retry { attempts = 30 min_delay_ms = 10000 From 50065768d5cf3dc11ccddf9801a1b9ce84961527 Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Mon, 9 Dec 2024 17:14:39 +0000 Subject: [PATCH 14/36] scenarios --- scaletest/terraform/action/deployments.tf | 33 ++++++++++ scaletest/terraform/action/k8s_coder_asia.tf | 65 +++++++++++++++++++ .../terraform/action/k8s_coder_proxies.tf | 63 ------------------ 3 files changed, 98 insertions(+), 63 deletions(-) delete mode 100644 scaletest/terraform/action/k8s_coder_proxies.tf diff --git a/scaletest/terraform/action/deployments.tf b/scaletest/terraform/action/deployments.tf index 938943a2a8c16..d8237f858d374 100644 --- a/scaletest/terraform/action/deployments.tf +++ b/scaletest/terraform/action/deployments.tf @@ -22,4 +22,37 @@ locals { cidr = "10.202.0.0/24" } } + + scenarios = { + small = { + coder = { + nodepool_size = 1 + machine_type = "t2d-standard-4" + replicas = 1 + cpu_request = "1000m" + mem_request = "6Gi" + cpu_limit = "2000m" + mem_limit = "12Gi" + } + provisionerd = { + replicas = 1 + cpu_request = "100m" + mem_request = "1Gi" + cpu_limit = "1000m" + mem_limit = "1Gi" + } + workspaces = { + nodepool_size = 1 + machine_type = "t2d-standard-4" + cpu_request = "100m" + mem_request = "128Mi" + cpu_limit = "100m" + mem_limit = "128Mi" + } + cloudsql = { + tier = "db-f1-micro" + replicas = 1 + } + } + } } diff --git a/scaletest/terraform/action/k8s_coder_asia.tf b/scaletest/terraform/action/k8s_coder_asia.tf index 2d22173498e85..95249174ed1da 100644 --- a/scaletest/terraform/action/k8s_coder_asia.tf +++ b/scaletest/terraform/action/k8s_coder_asia.tf @@ -81,6 +81,7 @@ resource "helm_release" "provisionerd_asia" { name = local.provisionerd_release_name version = var.provisionerd_chart_version namespace = kubernetes_namespace.coder_asia.metadata.0.name +<<<<<<< HEAD values = [templatefile("${path.module}/coder_helm_values.tftpl", { workspace_proxy = false, provisionerd = true, @@ -101,4 +102,68 @@ resource "helm_release" "provisionerd_asia" { cpu_limit = local.scenarios[var.scenario].provisionerd.cpu_limit, 
mem_limit = local.scenarios[var.scenario].provisionerd.mem_limit, })] +======= + values = [<>>>>>> 2751240f8 (scenarios) } diff --git a/scaletest/terraform/action/k8s_coder_proxies.tf b/scaletest/terraform/action/k8s_coder_proxies.tf deleted file mode 100644 index 1a7f8f5ca71dc..0000000000000 --- a/scaletest/terraform/action/k8s_coder_proxies.tf +++ /dev/null @@ -1,63 +0,0 @@ -data "http" "coder_healthy" { - url = local.deployments.primary.url - // Wait up to 5 minutes for DNS to propagate - retry { - attempts = 30 - min_delay_ms = 10000 - } - - lifecycle { - postcondition { - condition = self.status_code == 200 - error_message = "${self.url} returned an unhealthy status code" - } - } - - depends_on = [helm_release.coder_primary, cloudflare_record.coder["primary"]] -} - -resource "null_resource" "proxy_tokens" { - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = < ${path.module}/.coderv2/europe_proxy_token -echo -n $${asia_token} > ${path.module}/.coderv2/asia_proxy_token -EOF - } - - depends_on = [data.http.coder_healthy] -} - -data "local_file" "europe_proxy_token" { - filename = "${path.module}/.coderv2/europe_proxy_token" - depends_on = [null_resource.proxy_tokens] -} - -data "local_file" "asia_proxy_token" { - filename = "${path.module}/.coderv2/asia_proxy_token" - depends_on = [null_resource.proxy_tokens] -} From 896d2fbd9fd30f9c211a591d15fbcd46dee64986 Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Mon, 9 Dec 2024 17:51:35 +0000 Subject: [PATCH 15/36] emojis --- scaletest/terraform/action/deployments.tf | 6 +++++- scaletest/terraform/action/k8s_coder_asia.tf | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/scaletest/terraform/action/deployments.tf b/scaletest/terraform/action/deployments.tf index d8237f858d374..8b18e285ee28d 100644 --- a/scaletest/terraform/action/deployments.tf +++ b/scaletest/terraform/action/deployments.tf @@ -49,9 +49,13 @@ locals { cpu_limit = "100m" mem_limit = "128Mi" } + misc = { + nodepool_size = 1 + machine_type = "t2d-standard-4" + } cloudsql = { tier = "db-f1-micro" - replicas = 1 + max_connections = 500 } } } diff --git a/scaletest/terraform/action/k8s_coder_asia.tf b/scaletest/terraform/action/k8s_coder_asia.tf index 95249174ed1da..20b46705f73e8 100644 --- a/scaletest/terraform/action/k8s_coder_asia.tf +++ b/scaletest/terraform/action/k8s_coder_asia.tf @@ -148,7 +148,7 @@ coder: replicaCount: "${local.scenarios[var.scenario].provisionerd.replicas}" resources: requests: - cpu: "${local.scenarios[var.scenario].provisionerd.request}" + cpu: "${local.scenarios[var.scenario].provisionerd.cpu_request}" memory: "${local.scenarios[var.scenario].provisionerd.mem_request}" limits: cpu: "${local.scenarios[var.scenario].provisionerd.cpu_limit}" From 6b170349714c2b8f612ff3a82e50293a13770d43 Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Mon, 9 Dec 2024 18:19:52 +0000 Subject: [PATCH 16/36] fmt --- scaletest/terraform/action/deployments.tf | 32 +++++++++++------------ 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/scaletest/terraform/action/deployments.tf b/scaletest/terraform/action/deployments.tf index 8b18e285ee28d..bd46b2629208d 100644 --- a/scaletest/terraform/action/deployments.tf +++ b/scaletest/terraform/action/deployments.tf @@ -27,34 +27,34 @@ locals { small = { coder = { nodepool_size = 1 - machine_type = "t2d-standard-4" - replicas = 1 - cpu_request = "1000m" - mem_request = "6Gi" - cpu_limit = "2000m" - mem_limit = "12Gi" + machine_type = "t2d-standard-4" + replicas = 1 + 
cpu_request = "1000m" + mem_request = "6Gi" + cpu_limit = "2000m" + mem_limit = "12Gi" } provisionerd = { - replicas = 1 + replicas = 1 cpu_request = "100m" mem_request = "1Gi" - cpu_limit = "1000m" - mem_limit = "1Gi" + cpu_limit = "1000m" + mem_limit = "1Gi" } workspaces = { nodepool_size = 1 - machine_type = "t2d-standard-4" - cpu_request = "100m" - mem_request = "128Mi" - cpu_limit = "100m" - mem_limit = "128Mi" + machine_type = "t2d-standard-4" + cpu_request = "100m" + mem_request = "128Mi" + cpu_limit = "100m" + mem_limit = "128Mi" } misc = { nodepool_size = 1 - machine_type = "t2d-standard-4" + machine_type = "t2d-standard-4" } cloudsql = { - tier = "db-f1-micro" + tier = "db-f1-micro" max_connections = 500 } } From 57322836ef851cf3aa40e3157e41e61a6214cbfe Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Mon, 9 Dec 2024 18:24:21 +0000 Subject: [PATCH 17/36] fmt: --- scaletest/terraform/action/deployments.tf | 37 ----------------------- 1 file changed, 37 deletions(-) diff --git a/scaletest/terraform/action/deployments.tf b/scaletest/terraform/action/deployments.tf index bd46b2629208d..938943a2a8c16 100644 --- a/scaletest/terraform/action/deployments.tf +++ b/scaletest/terraform/action/deployments.tf @@ -22,41 +22,4 @@ locals { cidr = "10.202.0.0/24" } } - - scenarios = { - small = { - coder = { - nodepool_size = 1 - machine_type = "t2d-standard-4" - replicas = 1 - cpu_request = "1000m" - mem_request = "6Gi" - cpu_limit = "2000m" - mem_limit = "12Gi" - } - provisionerd = { - replicas = 1 - cpu_request = "100m" - mem_request = "1Gi" - cpu_limit = "1000m" - mem_limit = "1Gi" - } - workspaces = { - nodepool_size = 1 - machine_type = "t2d-standard-4" - cpu_request = "100m" - mem_request = "128Mi" - cpu_limit = "100m" - mem_limit = "128Mi" - } - misc = { - nodepool_size = 1 - machine_type = "t2d-standard-4" - } - cloudsql = { - tier = "db-f1-micro" - max_connections = 500 - } - } - } } From 24c8b1f23e6587bf67aaf1249d1deed3ea755301 Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Tue, 10 Dec 2024 17:47:25 +0000 Subject: [PATCH 18/36] start traffic --- scaletest/terraform/action/coderd.tf | 165 ++++++++++++++++++ scaletest/terraform/action/deployments.tf | 25 --- scaletest/terraform/action/k8s_coder_asia.tf | 2 +- .../terraform/action/k8s_coder_europe.tf | 2 +- scaletest/terraform/action/main.tf | 5 + scaletest/terraform/action/vars.tf | 21 +++ .../terraform/action/workspace_traffic.tf | 95 ++++++++++ 7 files changed, 288 insertions(+), 27 deletions(-) create mode 100644 scaletest/terraform/action/coderd.tf delete mode 100644 scaletest/terraform/action/deployments.tf create mode 100644 scaletest/terraform/action/workspace_traffic.tf diff --git a/scaletest/terraform/action/coderd.tf b/scaletest/terraform/action/coderd.tf new file mode 100644 index 0000000000000..02ae3f272f664 --- /dev/null +++ b/scaletest/terraform/action/coderd.tf @@ -0,0 +1,165 @@ +data "http" "coder_healthy" { + url = local.deployments.primary.url + // Wait up to 5 minutes for DNS to propagate + retry { + attempts = 30 + min_delay_ms = 10000 + } + + lifecycle { + postcondition { + condition = self.status_code == 200 + error_message = "${self.url} returned an unhealthy status code" + } + } + + depends_on = [helm_release.coder_primary, cloudflare_record.coder["primary"]] +} + +resource "null_resource" "api_key" { + provisioner "local-exec" { + interpreter = ["/bin/bash", "-c"] + command = < ${path.module}/.coderv2/api_key +EOF + } + + depends_on = [data.http.coder_healthy] +} + +data "local_file" "api_key" { + 
filename = "${path.module}/.coderv2/api_key" + depends_on = [null_resource.api_key] +} + +resource "coderd_license" "license" { + license = var.coder_license + lifecycle { + create_before_destroy = true + } +} + +resource "coderd_workspace_proxy" "europe" { + name = "europe" + display_name = "Europe" + icon = "/emojis/1f950.png" + + depends_on = [coderd_license.license] +} + +resource "coderd_workspace_proxy" "asia" { + name = "asia" + display_name = "Asia" + icon = "/emojis/1f35b.png" + + depends_on = [coderd_license.license] +} + +resource "local_file" "kubernetes_template" { + filename = "${path.module}/.coderv2/templates/kubernetes/main.tf" + content = < Date: Tue, 10 Dec 2024 19:23:58 +0000 Subject: [PATCH 19/36] back to curl --- scaletest/terraform/action/coder_proxies.tf | 3 + scaletest/terraform/action/coder_templates.tf | 51 +++++ scaletest/terraform/action/coderd.tf | 165 ---------------- scaletest/terraform/action/k8s_coder_asia.tf | 2 +- .../terraform/action/k8s_coder_europe.tf | 2 +- scaletest/terraform/action/vars.tf | 18 +- .../terraform/action/workspace_traffic.tf | 178 +++++++++--------- 7 files changed, 155 insertions(+), 264 deletions(-) delete mode 100644 scaletest/terraform/action/coderd.tf diff --git a/scaletest/terraform/action/coder_proxies.tf b/scaletest/terraform/action/coder_proxies.tf index 6af3ef82bb392..a6769c045b235 100644 --- a/scaletest/terraform/action/coder_proxies.tf +++ b/scaletest/terraform/action/coder_proxies.tf @@ -20,8 +20,11 @@ resource "null_resource" "api_key" { provisioner "local-exec" { interpreter = ["/bin/bash", "-c"] command = <>>>>>> 46f631c0c (back to curl) curl '${local.deployments.primary.url}/api/v2/users/first' \ --data-raw $'{"email":"${local.coder_admin_email}","password":"${local.coder_admin_password}","username":"${local.coder_admin_user}","name":"${local.coder_admin_full_name}","trial":false}' \ --insecure --silent --output /dev/null diff --git a/scaletest/terraform/action/coder_templates.tf b/scaletest/terraform/action/coder_templates.tf index c2334a488a85a..f77490b8ac880 100644 --- a/scaletest/terraform/action/coder_templates.tf +++ b/scaletest/terraform/action/coder_templates.tf @@ -98,7 +98,11 @@ resource "kubernetes_config_map" "template" { } } +<<<<<<< HEAD resource "kubernetes_job" "push_template" { +======= +resource "kubernetes_pod" "push_template" { +>>>>>>> 46f631c0c (back to curl) provider = kubernetes.primary metadata { @@ -109,6 +113,7 @@ resource "kubernetes_job" "push_template" { } } spec { +<<<<<<< HEAD completions = 1 template { metadata {} @@ -157,4 +162,50 @@ resource "kubernetes_job" "push_template" { } } wait_for_completion = true +======= + affinity { + node_affinity { + required_during_scheduling_ignored_during_execution { + node_selector_term { + match_expressions { + key = "cloud.google.com/gke-nodepool" + operator = "In" + values = ["${google_container_node_pool.node_pool["primary_misc"].name}"] + } + } + } + } + } + container { + name = "cli" + image = "${var.coder_image_repo}:${var.coder_image_tag}" + command = [ + "/opt/coder", + "--verbose", + "--url=${local.deployments.primary.url}", + "--token=${trimspace(data.local_file.api_key.content)}", + "templates", + "push", + "--directory=/template", + "--yes", + "kubernetes" + ] + volume_mount { + name = "coder-template" + mount_path = "/template" + } + } + volume { + name = "coder-template" + config_map { + name = kubernetes_config_map.template.metadata.0.name + items { + key = "main.tf" + path = "main.tf" + } + } + } + restart_policy = "Never" + } 
+>>>>>>> 46f631c0c (back to curl) } diff --git a/scaletest/terraform/action/coderd.tf b/scaletest/terraform/action/coderd.tf deleted file mode 100644 index 02ae3f272f664..0000000000000 --- a/scaletest/terraform/action/coderd.tf +++ /dev/null @@ -1,165 +0,0 @@ -data "http" "coder_healthy" { - url = local.deployments.primary.url - // Wait up to 5 minutes for DNS to propagate - retry { - attempts = 30 - min_delay_ms = 10000 - } - - lifecycle { - postcondition { - condition = self.status_code == 200 - error_message = "${self.url} returned an unhealthy status code" - } - } - - depends_on = [helm_release.coder_primary, cloudflare_record.coder["primary"]] -} - -resource "null_resource" "api_key" { - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = < ${path.module}/.coderv2/api_key -EOF - } - - depends_on = [data.http.coder_healthy] -} - -data "local_file" "api_key" { - filename = "${path.module}/.coderv2/api_key" - depends_on = [null_resource.api_key] -} - -resource "coderd_license" "license" { - license = var.coder_license - lifecycle { - create_before_destroy = true - } -} - -resource "coderd_workspace_proxy" "europe" { - name = "europe" - display_name = "Europe" - icon = "/emojis/1f950.png" - - depends_on = [coderd_license.license] -} - -resource "coderd_workspace_proxy" "asia" { - name = "asia" - display_name = "Asia" - icon = "/emojis/1f35b.png" - - depends_on = [coderd_license.license] -} - -resource "local_file" "kubernetes_template" { - filename = "${path.module}/.coderv2/templates/kubernetes/main.tf" - content = < Date: Wed, 11 Dec 2024 19:14:44 +0000 Subject: [PATCH 20/36] push template --- scaletest/terraform/action/coder_proxies.tf | 3 -- scaletest/terraform/action/coder_templates.tf | 51 ------------------- .../terraform/action/workspace_traffic.tf | 14 ++++- 3 files changed, 13 insertions(+), 55 deletions(-) diff --git a/scaletest/terraform/action/coder_proxies.tf b/scaletest/terraform/action/coder_proxies.tf index a6769c045b235..6af3ef82bb392 100644 --- a/scaletest/terraform/action/coder_proxies.tf +++ b/scaletest/terraform/action/coder_proxies.tf @@ -20,11 +20,8 @@ resource "null_resource" "api_key" { provisioner "local-exec" { interpreter = ["/bin/bash", "-c"] command = <>>>>>> 46f631c0c (back to curl) curl '${local.deployments.primary.url}/api/v2/users/first' \ --data-raw $'{"email":"${local.coder_admin_email}","password":"${local.coder_admin_password}","username":"${local.coder_admin_user}","name":"${local.coder_admin_full_name}","trial":false}' \ --insecure --silent --output /dev/null diff --git a/scaletest/terraform/action/coder_templates.tf b/scaletest/terraform/action/coder_templates.tf index f77490b8ac880..c2334a488a85a 100644 --- a/scaletest/terraform/action/coder_templates.tf +++ b/scaletest/terraform/action/coder_templates.tf @@ -98,11 +98,7 @@ resource "kubernetes_config_map" "template" { } } -<<<<<<< HEAD resource "kubernetes_job" "push_template" { -======= -resource "kubernetes_pod" "push_template" { ->>>>>>> 46f631c0c (back to curl) provider = kubernetes.primary metadata { @@ -113,7 +109,6 @@ resource "kubernetes_pod" "push_template" { } } spec { -<<<<<<< HEAD completions = 1 template { metadata {} @@ -162,50 +157,4 @@ resource "kubernetes_pod" "push_template" { } } wait_for_completion = true -======= - affinity { - node_affinity { - required_during_scheduling_ignored_during_execution { - node_selector_term { - match_expressions { - key = "cloud.google.com/gke-nodepool" - operator = "In" - values = 
["${google_container_node_pool.node_pool["primary_misc"].name}"] - } - } - } - } - } - container { - name = "cli" - image = "${var.coder_image_repo}:${var.coder_image_tag}" - command = [ - "/opt/coder", - "--verbose", - "--url=${local.deployments.primary.url}", - "--token=${trimspace(data.local_file.api_key.content)}", - "templates", - "push", - "--directory=/template", - "--yes", - "kubernetes" - ] - volume_mount { - name = "coder-template" - mount_path = "/template" - } - } - volume { - name = "coder-template" - config_map { - name = kubernetes_config_map.template.metadata.0.name - items { - key = "main.tf" - path = "main.tf" - } - } - } - restart_policy = "Never" - } ->>>>>>> 46f631c0c (back to curl) } diff --git a/scaletest/terraform/action/workspace_traffic.tf b/scaletest/terraform/action/workspace_traffic.tf index a58637979d166..a0db132725456 100644 --- a/scaletest/terraform/action/workspace_traffic.tf +++ b/scaletest/terraform/action/workspace_traffic.tf @@ -25,7 +25,19 @@ # container { # name = "cli" # image = "${var.coder_image_repo}:${var.coder_image_tag}" -# command = ["/opt/coder --verbose --url=${local.deployments.primary.url} --token=${trimspace(data.local_file.api_key.content)} exp scaletest create-workspaces --count ${var.workspace_count} --template=kubernetes --concurrency ${var.workspace_create_concurrency} --no-cleanup"] +# command = [ +# "/opt/coder", +# "--verbose", +# "--url=${local.deployments.primary.url}", +# "--token=${trimspace(data.local_file.api_key.content)}", +# "exp", +# "scaletest", +# "create-workspaces", +# "--count=${var.workspace_count}", +# "--template=kubernetes", +# "--concurrency=${var.workspace_create_concurrency}", +# "--no-cleanup" +# ] # } # restart_policy = "Never" # } From a2313ac2f60bc9a86ea36b1edc08018f8eddfad4 Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Wed, 11 Dec 2024 21:26:33 +0000 Subject: [PATCH 21/36] tpl --- scaletest/terraform/action/k8s_coder_asia.tf | 23 -------------------- 1 file changed, 23 deletions(-) diff --git a/scaletest/terraform/action/k8s_coder_asia.tf b/scaletest/terraform/action/k8s_coder_asia.tf index 20b46705f73e8..1233754fdfbbe 100644 --- a/scaletest/terraform/action/k8s_coder_asia.tf +++ b/scaletest/terraform/action/k8s_coder_asia.tf @@ -81,28 +81,6 @@ resource "helm_release" "provisionerd_asia" { name = local.provisionerd_release_name version = var.provisionerd_chart_version namespace = kubernetes_namespace.coder_asia.metadata.0.name -<<<<<<< HEAD - values = [templatefile("${path.module}/coder_helm_values.tftpl", { - workspace_proxy = false, - provisionerd = true, - primary_url = null, - proxy_token = null, - db_secret = null, - ip_address = null, - provisionerd_psk = kubernetes_secret.provisionerd_psk_asia.metadata.0.name, - access_url = local.deployments.primary.url, - node_pool = google_container_node_pool.node_pool["asia_coder"].name, - release_name = local.coder_release_name, - experiments = var.coder_experiments, - image_repo = var.coder_image_repo, - image_tag = var.coder_image_tag, - replicas = local.scenarios[var.scenario].provisionerd.replicas, - cpu_request = local.scenarios[var.scenario].provisionerd.cpu_request, - mem_request = local.scenarios[var.scenario].provisionerd.mem_request, - cpu_limit = local.scenarios[var.scenario].provisionerd.cpu_limit, - mem_limit = local.scenarios[var.scenario].provisionerd.mem_limit, - })] -======= values = [<>>>>>> 2751240f8 (scenarios) } From 1e6ac51185198dc79e094670093aa27e4ad42076 Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Wed, 11 Dec 2024 
21:30:33 +0000 Subject: [PATCH 22/36] parity --- scaletest/terraform/action/main.tf | 5 - scaletest/terraform/action/vars.tf | 21 ---- .../terraform/action/workspace_traffic.tf | 109 ------------------ 3 files changed, 135 deletions(-) delete mode 100644 scaletest/terraform/action/workspace_traffic.tf diff --git a/scaletest/terraform/action/main.tf b/scaletest/terraform/action/main.tf index 4c7e5fdcbb217..57a294710c5b5 100644 --- a/scaletest/terraform/action/main.tf +++ b/scaletest/terraform/action/main.tf @@ -38,11 +38,6 @@ terraform { source = "cloudflare/cloudflare" version = "~> 4.0" } - - coderd = { - source = "coder/coderd" - version = "~> 0.0.8" - } } required_version = "~> 1.9.0" diff --git a/scaletest/terraform/action/vars.tf b/scaletest/terraform/action/vars.tf index ddd8162c7f570..264110e239845 100644 --- a/scaletest/terraform/action/vars.tf +++ b/scaletest/terraform/action/vars.tf @@ -85,24 +85,3 @@ variable "provisionerd_image_tag" { description = "Tag to use for Provisionerd image." default = "latest" } - -// Traffic -# variable "traffic_bytes_per_tick" { -# description = "Number of bytes to send per tick." -# default = 1024 -# } - -# variable "traffic_tick_interval" { -# description = "Interval between ticks." -# default = "10s" -# } - -variable "workspace_count" { - description = "Number of workspaces to create." - default = 10 -} - -variable "workspace_create_concurrency" { - description = "Number of workspaces to create concurrently." - default = 10 -} diff --git a/scaletest/terraform/action/workspace_traffic.tf b/scaletest/terraform/action/workspace_traffic.tf deleted file mode 100644 index a0db132725456..0000000000000 --- a/scaletest/terraform/action/workspace_traffic.tf +++ /dev/null @@ -1,109 +0,0 @@ -# resource "kubernetes_pod" "create_workspaces" { -# provider = kubernetes.primary - -# metadata { -# name = "${var.name}-create-workspaces" -# namespace = kubernetes_namespace.coder_primary.metadata.0.name -# labels = { -# "app.kubernetes.io/name" = "${var.name}-create-workspaces" -# } -# } -# spec { -# affinity { -# node_affinity { -# required_during_scheduling_ignored_during_execution { -# node_selector_term { -# match_expressions { -# key = "cloud.google.com/gke-nodepool" -# operator = "In" -# values = ["${google_container_node_pool.node_pool["primary_misc"].name}"] -# } -# } -# } -# } -# } -# container { -# name = "cli" -# image = "${var.coder_image_repo}:${var.coder_image_tag}" -# command = [ -# "/opt/coder", -# "--verbose", -# "--url=${local.deployments.primary.url}", -# "--token=${trimspace(data.local_file.api_key.content)}", -# "exp", -# "scaletest", -# "create-workspaces", -# "--count=${var.workspace_count}", -# "--template=kubernetes", -# "--concurrency=${var.workspace_create_concurrency}", -# "--no-cleanup" -# ] -# } -# restart_policy = "Never" -# } - -# depends_on = [ coderd_template.kubernetes ] -# } - -# resource "time_sleep" "wait_for_baseline" { -# depends_on = [kubernetes_pod.create_workspaces] - -# create_duration = "600s" -# } - -# resource "kubernetes_pod" "workspace_traffic_primary" { -# provider = kubernetes.primary - -# metadata { -# name = "${var.name}-traffic" -# namespace = kubernetes_namespace.coder.metadata.0.name -# labels = { -# "app.kubernetes.io/name" = "${var.name}-traffic" -# } -# } -# spec { -# affinity { -# node_affinity { -# required_during_scheduling_ignored_during_execution { -# node_selector_term { -# match_expressions { -# key = "cloud.google.com/gke-nodepool" -# operator = "In" -# values = 
["${google_container_node_pool.node_pool["primary_misc"].name}"] -# } -# } -# } -# } -# } -# container { -# name = "cli" -# image = "${var.coder_image_repo}:${var.coder_image_tag}" -# command = ["/opt/coder --verbose --url=${local.deployments.primary.url} --token=${trimspace(local_file.api_key.content)} exp scaletest workspace-traffic --concurrency=0 --bytes-per-tick=${var.traffic_bytes_per_tick} --tick-interval=${var.traffic_tick_interval} --scaletest-prometheus-wait=60s"] - -# env { -# name = "CODER_URL" -# value = local.deployments.primary.url -# } -# env { -# name = "CODER_TOKEN" -# value = trimspace(local_file.api_key.content) -# } -# env { -# name = "CODER_SCALETEST_PROMETHEUS_ADDRESS" -# value = "0.0.0.0:21112" -# } -# env { -# name = "CODER_SCALETEST_JOB_TIMEOUT" -# value = "30m" -# } -# port { -# container_port = 21112 -# name = "prometheus-http" -# protocol = "TCP" -# } -# } -# restart_policy = "Never" -# } - -# depends_on = [time_sleep.wait_for_baseline] -# } From 1d9177d62f174f23584f54943790d242e8e69850 Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Wed, 11 Dec 2024 23:24:16 +0000 Subject: [PATCH 23/36] 3 templates --- .../terraform/action/coder_helm_values.tftpl | 4 +- scaletest/terraform/action/coder_templates.tf | 162 +++++++++++++++++- scaletest/terraform/action/k8s_coder_asia.tf | 1 + .../terraform/action/k8s_coder_europe.tf | 2 + .../terraform/action/k8s_coder_primary.tf | 2 + 5 files changed, 166 insertions(+), 5 deletions(-) diff --git a/scaletest/terraform/action/coder_helm_values.tftpl b/scaletest/terraform/action/coder_helm_values.tftpl index 7de0c598a1780..df8eada62eaa3 100644 --- a/scaletest/terraform/action/coder_helm_values.tftpl +++ b/scaletest/terraform/action/coder_helm_values.tftpl @@ -34,7 +34,9 @@ coder: - name: "CODER_URL" value: "${access_url}" - name: "CODER_PROVISIONERD_TAGS" - value: "scope=organization" + value: "scope=organization,deployment=${deployment}" + - name: "CODER_PROVISIONER_DAEMON_NAME" + value: "${deployment}" - name: "CODER_CONFIG_DIR" value: "/tmp/config" %{~ endif ~} diff --git a/scaletest/terraform/action/coder_templates.tf b/scaletest/terraform/action/coder_templates.tf index c2334a488a85a..8b9c8a83bc2bb 100644 --- a/scaletest/terraform/action/coder_templates.tf +++ b/scaletest/terraform/action/coder_templates.tf @@ -85,7 +85,7 @@ resource "local_file" "kubernetes_template" { EOF } -resource "kubernetes_config_map" "template" { +resource "kubernetes_config_map" "template_primary" { provider = kubernetes.primary metadata { @@ -98,7 +98,7 @@ resource "kubernetes_config_map" "template" { } } -resource "kubernetes_job" "push_template" { +resource "kubernetes_job" "push_template_primary" { provider = kubernetes.primary metadata { @@ -137,8 +137,10 @@ resource "kubernetes_job" "push_template" { "templates", "push", "--directory=/home/coder/template", + "--provisioner-tag=scope=organization", + "--provisioner-tag=deployment=primary", "--yes", - "kubernetes" + "kubernetes-primary" ] volume_mount { name = "coder-template" @@ -149,7 +151,159 @@ resource "kubernetes_job" "push_template" { volume { name = "coder-template" config_map { - name = kubernetes_config_map.template.metadata.0.name + name = kubernetes_config_map.template_primary.metadata.0.name + } + } + restart_policy = "Never" + } + } + } + wait_for_completion = true +} + +resource "kubernetes_config_map" "template_europe" { + provider = kubernetes.europe + + metadata { + name = "coder-template" + namespace = kubernetes_namespace.coder_europe.metadata.0.name + } + + data = { + 
"main.tf" = local_file.kubernetes_template.content + } +} + +resource "kubernetes_job" "push_template_europe" { + provider = kubernetes.europe + + metadata { + name = "${var.name}-push-template" + namespace = kubernetes_namespace.coder_europe.metadata.0.name + labels = { + "app.kubernetes.io/name" = "${var.name}-push-template" + } + } + spec { + completions = 1 + template { + metadata {} + spec { + affinity { + node_affinity { + required_during_scheduling_ignored_during_execution { + node_selector_term { + match_expressions { + key = "cloud.google.com/gke-nodepool" + operator = "In" + values = ["${google_container_node_pool.node_pool["europe_misc"].name}"] + } + } + } + } + } + container { + name = "cli" + image = "${var.coder_image_repo}:${var.coder_image_tag}" + command = [ + "/opt/coder", + "--verbose", + "--url=${local.deployments.primary.url}", + "--token=${trimspace(data.local_file.api_key.content)}", + "templates", + "push", + "--directory=/home/coder/template", + "--provisioner-tag=scope=organization", + "--provisioner-tag=deployment=europe", + "--yes", + "kubernetes-europe" + ] + volume_mount { + name = "coder-template" + mount_path = "/home/coder/template/main.tf" + sub_path = "main.tf" + } + } + volume { + name = "coder-template" + config_map { + name = kubernetes_config_map.template_europe.metadata.0.name + } + } + restart_policy = "Never" + } + } + } + wait_for_completion = true +} + +resource "kubernetes_config_map" "template_asia" { + provider = kubernetes.asia + + metadata { + name = "coder-template" + namespace = kubernetes_namespace.coder_asia.metadata.0.name + } + + data = { + "main.tf" = local_file.kubernetes_template.content + } +} + +resource "kubernetes_job" "push_template_asia" { + provider = kubernetes.asia + + metadata { + name = "${var.name}-push-template" + namespace = kubernetes_namespace.coder_asia.metadata.0.name + labels = { + "app.kubernetes.io/name" = "${var.name}-push-template" + } + } + spec { + completions = 1 + template { + metadata {} + spec { + affinity { + node_affinity { + required_during_scheduling_ignored_during_execution { + node_selector_term { + match_expressions { + key = "cloud.google.com/gke-nodepool" + operator = "In" + values = ["${google_container_node_pool.node_pool["asia_misc"].name}"] + } + } + } + } + } + container { + name = "cli" + image = "${var.coder_image_repo}:${var.coder_image_tag}" + command = [ + "/opt/coder", + "--verbose", + "--url=${local.deployments.primary.url}", + "--token=${trimspace(data.local_file.api_key.content)}", + "templates", + "push", + "--directory=/home/coder/template", + "--provisioner-tag=scope=organization", + "--provisioner-tag=deployment=asia", + "--yes", + "kubernetes-asia" + ] + volume_mount { + name = "coder-template" + mount_path = "/home/coder/template/main.tf" + sub_path = "main.tf" + } + } + volume { + name = "coder-template" + config_map { + name = kubernetes_config_map.template_asia.metadata.0.name } } restart_policy = "Never" diff --git a/scaletest/terraform/action/k8s_coder_asia.tf b/scaletest/terraform/action/k8s_coder_asia.tf index 1233754fdfbbe..ccb7084d4a1f3 100644 --- a/scaletest/terraform/action/k8s_coder_asia.tf +++ b/scaletest/terraform/action/k8s_coder_asia.tf @@ -70,6 +70,7 @@ resource "helm_release" "coder_asia" { mem_request = local.scenarios[var.scenario].coder.mem_request, cpu_limit = local.scenarios[var.scenario].coder.cpu_limit, mem_limit = local.scenarios[var.scenario].coder.mem_limit, + deployment = "asia", })] } diff --git a/scaletest/terraform/action/k8s_coder_europe.tf 
b/scaletest/terraform/action/k8s_coder_europe.tf index bb6140aef7ea6..fd65035f70d7c 100644 --- a/scaletest/terraform/action/k8s_coder_europe.tf +++ b/scaletest/terraform/action/k8s_coder_europe.tf @@ -70,6 +70,7 @@ resource "helm_release" "coder_europe" { mem_request = local.scenarios[var.scenario].coder.mem_request, cpu_limit = local.scenarios[var.scenario].coder.cpu_limit, mem_limit = local.scenarios[var.scenario].coder.mem_limit, + deployment = "europe", })] } @@ -100,5 +101,6 @@ resource "helm_release" "provisionerd_europe" { mem_request = local.scenarios[var.scenario].provisionerd.mem_request, cpu_limit = local.scenarios[var.scenario].provisionerd.cpu_limit, mem_limit = local.scenarios[var.scenario].provisionerd.mem_limit, + deployment = "europe", })] } diff --git a/scaletest/terraform/action/k8s_coder_primary.tf b/scaletest/terraform/action/k8s_coder_primary.tf index 68dc7fc80a561..5e2d9b2ecedc7 100644 --- a/scaletest/terraform/action/k8s_coder_primary.tf +++ b/scaletest/terraform/action/k8s_coder_primary.tf @@ -90,6 +90,7 @@ resource "helm_release" "coder_primary" { mem_request = local.scenarios[var.scenario].coder.mem_request, cpu_limit = local.scenarios[var.scenario].coder.cpu_limit, mem_limit = local.scenarios[var.scenario].coder.mem_limit, + deployment = "primary", })] } @@ -120,5 +121,6 @@ resource "helm_release" "provisionerd_chart" { mem_request = local.scenarios[var.scenario].provisionerd.mem_request, cpu_limit = local.scenarios[var.scenario].provisionerd.cpu_limit, mem_limit = local.scenarios[var.scenario].provisionerd.mem_limit, + deployment = "primary", })] } From 60c86215541cb97513046c79ac27137c6edeab45 Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Wed, 11 Dec 2024 23:51:07 +0000 Subject: [PATCH 24/36] 3 create worksapces --- .../terraform/action/coder_workspaces.tf | 177 ++++++++++++++++++ scaletest/terraform/action/k8s_coder_asia.tf | 83 +++----- scaletest/terraform/action/scenarios.tf | 39 ++-- scaletest/terraform/action/vars.tf | 5 + 4 files changed, 224 insertions(+), 80 deletions(-) create mode 100644 scaletest/terraform/action/coder_workspaces.tf diff --git a/scaletest/terraform/action/coder_workspaces.tf b/scaletest/terraform/action/coder_workspaces.tf new file mode 100644 index 0000000000000..e1132da78da97 --- /dev/null +++ b/scaletest/terraform/action/coder_workspaces.tf @@ -0,0 +1,177 @@ +locals { + create_workspace_timeout = "600s" +} + +resource "kubernetes_job" "create_workspaces_primary" { + provider = kubernetes.primary + + metadata { + name = "${var.name}-create-workspaces" + namespace = kubernetes_namespace.coder_primary.metadata.0.name + labels = { + "app.kubernetes.io/name" = "${var.name}-create-workspaces" + } + } + spec { + completions = 1 + template { + metadata {} + spec { + affinity { + node_affinity { + required_during_scheduling_ignored_during_execution { + node_selector_term { + match_expressions { + key = "cloud.google.com/gke-nodepool" + operator = "In" + values = ["${google_container_node_pool.node_pool["primary_misc"].name}"] + } + } + } + } + } + container { + name = "cli" + image = "${var.coder_image_repo}:${var.coder_image_tag}" + command = [ + "/opt/coder", + "--verbose", + "--url=${local.deployments.primary.url}", + "--token=${trimspace(data.local_file.api_key.content)}", + "exp", + "scaletest", + "create-workspaces", + "--count=${local.scenarios[var.scenario].workspaces.count_per_deployment}", + "--template=kubernetes-primary", + "--concurrency=${var.workspace_create_concurrency}", + "--no-cleanup" + ] + } + restart_policy = 
"Never" + } + } + } + wait_for_completion = true + + timeouts { + create = local.create_workspace_timeout + } + + depends_on = [kubernetes_job.push_template_primary] +} + +resource "kubernetes_job" "create_workspaces_europe" { + provider = kubernetes.europe + + metadata { + name = "${var.name}-create-workspaces" + namespace = kubernetes_namespace.coder_europe.metadata.0.name + labels = { + "app.kubernetes.io/name" = "${var.name}-create-workspaces" + } + } + spec { + completions = 1 + template { + metadata {} + spec { + affinity { + node_affinity { + required_during_scheduling_ignored_during_execution { + node_selector_term { + match_expressions { + key = "cloud.google.com/gke-nodepool" + operator = "In" + values = ["${google_container_node_pool.node_pool["europe_misc"].name}"] + } + } + } + } + } + container { + name = "cli" + image = "${var.coder_image_repo}:${var.coder_image_tag}" + command = [ + "/opt/coder", + "--verbose", + "--url=${local.deployments.primary.url}", + "--token=${trimspace(data.local_file.api_key.content)}", + "exp", + "scaletest", + "create-workspaces", + "--count=${local.scenarios[var.scenario].workspaces.count_per_deployment}", + "--template=kubernetes-europe", + "--concurrency=${var.workspace_create_concurrency}", + "--no-cleanup" + ] + } + restart_policy = "Never" + } + } + } + wait_for_completion = true + + timeouts { + create = local.create_workspace_timeout + } + + depends_on = [kubernetes_job.push_template_europe] +} + +resource "kubernetes_job" "create_workspaces_asia" { + provider = kubernetes.asia + + metadata { + name = "${var.name}-create-workspaces" + namespace = kubernetes_namespace.coder_asia.metadata.0.name + labels = { + "app.kubernetes.io/name" = "${var.name}-create-workspaces" + } + } + spec { + completions = 1 + template { + metadata {} + spec { + affinity { + node_affinity { + required_during_scheduling_ignored_during_execution { + node_selector_term { + match_expressions { + key = "cloud.google.com/gke-nodepool" + operator = "In" + values = ["${google_container_node_pool.node_pool["asia_misc"].name}"] + } + } + } + } + } + container { + name = "cli" + image = "${var.coder_image_repo}:${var.coder_image_tag}" + command = [ + "/opt/coder", + "--verbose", + "--url=${local.deployments.primary.url}", + "--token=${trimspace(data.local_file.api_key.content)}", + "exp", + "scaletest", + "create-workspaces", + "--count=${local.scenarios[var.scenario].workspaces.count_per_deployment}", + "--template=kubernetes-asia", + "--concurrency=${var.workspace_create_concurrency}", + "--no-cleanup" + ] + } + restart_policy = "Never" + } + } + } + wait_for_completion = true + + timeouts { + create = local.create_workspace_timeout + } + + depends_on = [kubernetes_job.push_template_asia] +} diff --git a/scaletest/terraform/action/k8s_coder_asia.tf b/scaletest/terraform/action/k8s_coder_asia.tf index ccb7084d4a1f3..6f36bb3ae23fe 100644 --- a/scaletest/terraform/action/k8s_coder_asia.tf +++ b/scaletest/terraform/action/k8s_coder_asia.tf @@ -82,66 +82,25 @@ resource "helm_release" "provisionerd_asia" { name = local.provisionerd_release_name version = var.provisionerd_chart_version namespace = kubernetes_namespace.coder_asia.metadata.0.name - values = [< Date: Thu, 12 Dec 2024 00:29:13 +0000 Subject: [PATCH 25/36] traffic works --- scaletest/terraform/action/coder_traffic.tf | 192 ++++++++++++++++++++ 1 file changed, 192 insertions(+) create mode 100644 scaletest/terraform/action/coder_traffic.tf diff --git a/scaletest/terraform/action/coder_traffic.tf 
b/scaletest/terraform/action/coder_traffic.tf new file mode 100644 index 0000000000000..d277758ae32ed --- /dev/null +++ b/scaletest/terraform/action/coder_traffic.tf @@ -0,0 +1,192 @@ +locals { + workspace_traffic_job_timeout = "300s" + workspace_traffic_duration = "60s" + bytes_per_tick = 1024 + tick_interval = "100ms" +} + +resource "kubernetes_job" "workspace_traffic_primary" { + provider = kubernetes.primary + + metadata { + name = "${var.name}-workspace-traffic" + namespace = kubernetes_namespace.coder_primary.metadata.0.name + labels = { + "app.kubernetes.io/name" = "${var.name}-workspace-traffic" + } + } + spec { + completions = 1 + template { + metadata {} + spec { + affinity { + node_affinity { + required_during_scheduling_ignored_during_execution { + node_selector_term { + match_expressions { + key = "cloud.google.com/gke-nodepool" + operator = "In" + values = ["${google_container_node_pool.node_pool["primary_misc"].name}"] + } + } + } + } + } + container { + name = "cli" + image = "${var.coder_image_repo}:${var.coder_image_tag}" + command = [ + "/opt/coder", + "--verbose", + "--url=${local.deployments.primary.url}", + "--token=${trimspace(data.local_file.api_key.content)}", + "exp", + "scaletest", + "workspace-traffic", + "--concurrency=0", + "--bytes-per-tick=${local.bytes_per_tick}", + "--tick-interval=${local.tick_interval}", + "--scaletest-prometheus-wait=60s", + ] + env { + name = "CODER_SCALETEST_JOB_TIMEOUT" + value = local.workspace_traffic_duration + } + } + restart_policy = "Never" + } + } + } + wait_for_completion = true + + timeouts { + create = local.workspace_traffic_job_timeout + } + + depends_on = [kubernetes_job.create_workspaces_primary] +} + +resource "kubernetes_job" "workspace_traffic_europe" { + provider = kubernetes.europe + + metadata { + name = "${var.name}-workspace-traffic" + namespace = kubernetes_namespace.coder_europe.metadata.0.name + labels = { + "app.kubernetes.io/name" = "${var.name}-workspace-traffic" + } + } + spec { + completions = 1 + template { + metadata {} + spec { + affinity { + node_affinity { + required_during_scheduling_ignored_during_execution { + node_selector_term { + match_expressions { + key = "cloud.google.com/gke-nodepool" + operator = "In" + values = ["${google_container_node_pool.node_pool["europe_misc"].name}"] + } + } + } + } + } + container { + name = "cli" + image = "${var.coder_image_repo}:${var.coder_image_tag}" + command = [ + "/opt/coder", + "--verbose", + "--url=${local.deployments.primary.url}", + "--token=${trimspace(data.local_file.api_key.content)}", + "exp", + "scaletest", + "workspace-traffic", + "--concurrency=0", + "--bytes-per-tick=${local.bytes_per_tick}", + "--tick-interval=${local.tick_interval}", + "--scaletest-prometheus-wait=60s", + ] + env { + name = "CODER_SCALETEST_JOB_TIMEOUT" + value = local.workspace_traffic_duration + } + } + restart_policy = "Never" + } + } + } + wait_for_completion = true + + timeouts { + create = local.workspace_traffic_job_timeout + } + + depends_on = [kubernetes_job.create_workspaces_europe] +} + +resource "kubernetes_job" "workspace_traffic_asia" { + provider = kubernetes.asia + + metadata { + name = "${var.name}-workspace-traffic" + namespace = kubernetes_namespace.coder_asia.metadata.0.name + labels = { + "app.kubernetes.io/name" = "${var.name}-workspace-traffic" + } + } + spec { + completions = 1 + template { + metadata {} + spec { + affinity { + node_affinity { + required_during_scheduling_ignored_during_execution { + node_selector_term { + match_expressions { + key = 
"cloud.google.com/gke-nodepool" + operator = "In" + values = ["${google_container_node_pool.node_pool["asia_misc"].name}"] + } + } + } + } + } + container { + name = "cli" + image = "${var.coder_image_repo}:${var.coder_image_tag}" + command = [ + "/opt/coder", + "--verbose", + "--url=${local.deployments.primary.url}", + "--token=${trimspace(data.local_file.api_key.content)}", + "exp", + "scaletest", + "workspace-traffic", + "--concurrency=0", + "--bytes-per-tick=${local.bytes_per_tick}", + "--tick-interval=${local.tick_interval}", + "--scaletest-prometheus-wait=60s", + ] + env { + name = "CODER_SCALETEST_JOB_TIMEOUT" + value = local.workspace_traffic_duration + } + } + restart_policy = "Never" + } + } + } + wait_for_completion = true + + timeouts { + create = local.workspace_traffic_job_timeout + } + + depends_on = [kubernetes_job.create_workspaces_asia] +} From 807e34234927f8167d3c188b2ca2226eba6990e1 Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Tue, 17 Dec 2024 19:41:42 +0000 Subject: [PATCH 26/36] working loadgen with pprof download --- scaletest/terraform/action/coder_pprof.tf | 67 +++++++++++++++++++ scaletest/terraform/action/coder_templates.tf | 6 ++ scaletest/terraform/action/coder_traffic.tf | 28 ++++++-- scaletest/terraform/action/k8s_coder_asia.tf | 4 ++ .../terraform/action/k8s_coder_europe.tf | 4 ++ .../terraform/action/k8s_coder_primary.tf | 4 +- scaletest/terraform/action/kubeconfig.tftpl | 17 +++++ 7 files changed, 122 insertions(+), 8 deletions(-) create mode 100644 scaletest/terraform/action/coder_pprof.tf create mode 100644 scaletest/terraform/action/kubeconfig.tftpl diff --git a/scaletest/terraform/action/coder_pprof.tf b/scaletest/terraform/action/coder_pprof.tf new file mode 100644 index 0000000000000..5646fe3450ede --- /dev/null +++ b/scaletest/terraform/action/coder_pprof.tf @@ -0,0 +1,67 @@ +locals { + pprof_interval = "30s" + pprof_duration = "300s" +} + +resource "local_file" "kubeconfig" { + for_each = local.deployments + + content = templatefile("${path.module}/kubeconfig.tftpl", { + name = google_container_cluster.cluster[each.key].name + endpoint = "https://${google_container_cluster.cluster[each.key].endpoint}" + cluster_ca_certificate = google_container_cluster.cluster[each.key].master_auth[0].cluster_ca_certificate + access_token = data.google_client_config.default.access_token + }) + filename = "${path.module}/.coderv2/kubeconfig/${each.key}.yaml" +} + +resource "null_resource" "pprof" { + provisioner "local-exec" { + interpreter = ["/bin/bash", "-c"] + command = < Date: Tue, 17 Dec 2024 19:42:24 +0000 Subject: [PATCH 27/36] fmt --- scaletest/terraform/action/coder_pprof.tf | 14 +++++++------- scaletest/terraform/action/coder_templates.tf | 6 +++--- scaletest/terraform/action/coder_traffic.tf | 2 +- scaletest/terraform/action/k8s_coder_asia.tf | 4 ++-- scaletest/terraform/action/k8s_coder_europe.tf | 4 ++-- scaletest/terraform/action/k8s_coder_primary.tf | 2 +- 6 files changed, 16 insertions(+), 16 deletions(-) diff --git a/scaletest/terraform/action/coder_pprof.tf b/scaletest/terraform/action/coder_pprof.tf index 5646fe3450ede..f0766bf023796 100644 --- a/scaletest/terraform/action/coder_pprof.tf +++ b/scaletest/terraform/action/coder_pprof.tf @@ -1,14 +1,14 @@ locals { - pprof_interval = "30s" - pprof_duration = "300s" + pprof_interval = "30s" + pprof_duration = "300s" } resource "local_file" "kubeconfig" { for_each = local.deployments - content = templatefile("${path.module}/kubeconfig.tftpl", { - name = 
google_container_cluster.cluster[each.key].name - endpoint = "https://${google_container_cluster.cluster[each.key].endpoint}" + content = templatefile("${path.module}/kubeconfig.tftpl", { + name = google_container_cluster.cluster[each.key].name + endpoint = "https://${google_container_cluster.cluster[each.key].endpoint}" cluster_ca_certificate = google_container_cluster.cluster[each.key].master_auth[0].cluster_ca_certificate access_token = data.google_client_config.default.access_token }) @@ -18,7 +18,7 @@ resource "local_file" "kubeconfig" { resource "null_resource" "pprof" { provisioner "local-exec" { interpreter = ["/bin/bash", "-c"] - command = < Date: Wed, 18 Dec 2024 05:10:50 +0000 Subject: [PATCH 28/36] large scenario --- scaletest/terraform/action/coder_pprof.tf | 37 +- scaletest/terraform/action/coder_traffic.tf | 12 +- .../terraform/action/coder_workspaces.tf | 5 +- scaletest/terraform/action/gcp_clusters.tf | 5 + scaletest/terraform/action/plan.log | 1772 +++++++++++++++++ scaletest/terraform/action/scenarios.tf | 28 +- 6 files changed, 1825 insertions(+), 34 deletions(-) create mode 100644 scaletest/terraform/action/plan.log diff --git a/scaletest/terraform/action/coder_pprof.tf b/scaletest/terraform/action/coder_pprof.tf index f0766bf023796..8be483d3638cf 100644 --- a/scaletest/terraform/action/coder_pprof.tf +++ b/scaletest/terraform/action/coder_pprof.tf @@ -1,6 +1,12 @@ locals { pprof_interval = "30s" - pprof_duration = "300s" + pprof_duration = "30m" + + pprof_ports = { + primary = 6061 + europe = 7061 + asia = 8061 + } } resource "local_file" "kubeconfig" { @@ -16,6 +22,9 @@ resource "local_file" "kubeconfig" { } resource "null_resource" "pprof" { + for_each = { + primary = {} + } provisioner "local-exec" { interpreter = ["/bin/bash", "-c"] command = </regions//zones/] +data.google_project.project: Read complete after 1s [id=projects/sandbox-20241217-k7h9cj] +google_project_service.api["stackdriver"]: Refreshing state... [id=sandbox-20241217-k7h9cj/stackdriver.googleapis.com] +google_project_service.api["storage-api"]: Refreshing state... [id=sandbox-20241217-k7h9cj/storage-api.googleapis.com] +google_project_service.api["servicemanagement"]: Refreshing state... [id=sandbox-20241217-k7h9cj/servicemanagement.googleapis.com] +google_project_service.api["compute"]: Refreshing state... [id=sandbox-20241217-k7h9cj/compute.googleapis.com] +google_project_service.api["logging"]: Refreshing state... [id=sandbox-20241217-k7h9cj/logging.googleapis.com] +google_project_service.api["cloudtrace"]: Refreshing state... [id=sandbox-20241217-k7h9cj/cloudtrace.googleapis.com] +google_project_service.api["monitoring"]: Refreshing state... [id=sandbox-20241217-k7h9cj/monitoring.googleapis.com] +google_project_service.api["container"]: Refreshing state... [id=sandbox-20241217-k7h9cj/container.googleapis.com] +google_project_service.api["sqladmin"]: Refreshing state... [id=sandbox-20241217-k7h9cj/sqladmin.googleapis.com] +google_project_service.api["servicenetworking"]: Refreshing state... [id=sandbox-20241217-k7h9cj/servicenetworking.googleapis.com] +data.google_compute_default_service_account.default: Reading... +google_compute_network.vpc: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/global/networks/d17-3] +cloudflare_record.coder["primary"]: Refreshing state... [id=21e416c5e43250da81de73b475f30f87] +cloudflare_record.coder["europe"]: Refreshing state... [id=82fbe51332a53858d9f068f9c88d49db] +cloudflare_record.coder["asia"]: Refreshing state... 
[id=f5c434e083f2f2cd06e750e8cf1541f9] +google_compute_global_address.sql_peering: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/global/addresses/d17-3-sql-peering] +google_compute_subnetwork.subnet["asia"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/regions/asia-southeast1/subnetworks/d17-3-asia] +google_compute_subnetwork.subnet["primary"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/regions/us-east1/subnetworks/d17-3-primary] +google_compute_subnetwork.subnet["europe"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/regions/europe-west1/subnetworks/d17-3-europe] +data.google_compute_default_service_account.default: Read complete after 1s [id=projects/sandbox-20241217-k7h9cj/serviceAccounts/267709392779-compute@developer.gserviceaccount.com] +google_service_networking_connection.private_vpc_connection: Refreshing state... [id=projects%2Fsandbox-20241217-k7h9cj%2Fglobal%2Fnetworks%2Fd17-3:servicenetworking.googleapis.com] +google_container_cluster.cluster["europe"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/europe-west1-b/clusters/d17-3-europe] +google_container_cluster.cluster["asia"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/asia-southeast1-a/clusters/d17-3-asia] +google_container_cluster.cluster["primary"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/us-east1-c/clusters/d17-3-primary] +google_sql_database_instance.db: Refreshing state... [id=d17-3-coder] +google_sql_database.coder: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/instances/d17-3-coder/databases/d17-3-coder] +google_sql_user.coder: Refreshing state... [id=d17-3-coder//d17-3-coder] +google_sql_user.prometheus: Refreshing state... [id=d17-3-prometheus//d17-3-coder] +google_container_node_pool.node_pool["primary_workspaces"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/us-east1-c/clusters/d17-3-primary/nodePools/workspaces] +google_container_node_pool.node_pool["primary_misc"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/us-east1-c/clusters/d17-3-primary/nodePools/misc] +google_container_node_pool.node_pool["asia_workspaces"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/asia-southeast1-a/clusters/d17-3-asia/nodePools/workspaces] +google_container_node_pool.node_pool["europe_coder"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/europe-west1-b/clusters/d17-3-europe/nodePools/coder] +google_container_node_pool.node_pool["asia_misc"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/asia-southeast1-a/clusters/d17-3-asia/nodePools/misc] +google_container_node_pool.node_pool["europe_misc"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/europe-west1-b/clusters/d17-3-europe/nodePools/misc] +google_container_node_pool.node_pool["primary_coder"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/us-east1-c/clusters/d17-3-primary/nodePools/coder] +google_container_node_pool.node_pool["asia_coder"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/asia-southeast1-a/clusters/d17-3-asia/nodePools/coder] +google_container_node_pool.node_pool["europe_workspaces"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/europe-west1-b/clusters/d17-3-europe/nodePools/workspaces] +local_file.kubeconfig["asia"]: Refreshing state... [id=67e73f2cb69ccee72f709b6d867ef847d15801e2] +local_file.kubeconfig["europe"]: Refreshing state... 
[id=f597afb96c32a7571ba3a81cc18f9f2c9fe50204] +local_file.kubeconfig["primary"]: Refreshing state... [id=1caab4f9002954e4a59be717dab5487cd1096614] +kubernetes_namespace.coder_primary: Refreshing state... [id=coder] +local_file.kubernetes_template: Refreshing state... [id=619345fcc362d300d43b054926122573f0cfa82d] +kubernetes_namespace.coder_europe: Refreshing state... [id=coder] +kubernetes_namespace.coder_asia: Refreshing state... [id=coder] +kubernetes_secret.provisionerd_psk_primary: Refreshing state... [id=coder/coder-provisioner-psk] +kubernetes_config_map.template_primary: Refreshing state... [id=coder/coder-template] +kubernetes_secret.provisionerd_psk_europe: Refreshing state... [id=coder/coder-provisioner-psk] +kubernetes_config_map.template_europe: Refreshing state... [id=coder/coder-template] +kubernetes_secret.provisionerd_psk_asia: Refreshing state... [id=coder/coder-provisioner-psk] +kubernetes_config_map.template_asia: Refreshing state... [id=coder/coder-template] + +Terraform used the selected providers to generate the following execution +plan. Resource actions are indicated with the following symbols: + + create +-/+ destroy and then create replacement + <= read (data resources) + +Terraform will perform the following actions: + + # data.http.coder_healthy will be read during apply + # (depends on a resource or a module with changes pending) + <= data "http" "coder_healthy" { + + body = (known after apply) + + id = (known after apply) + + response_body = (known after apply) + + response_body_base64 = (known after apply) + + response_headers = (known after apply) + + status_code = (known after apply) + + url = "http://d17-3-scaletest.f0ssel.io" + + + retry { + + attempts = 30 + + min_delay_ms = 10000 + } + } + + # data.local_file.api_key will be read during apply + # (depends on a resource or a module with changes pending) + <= data "local_file" "api_key" { + + content = (known after apply) + + content_base64 = (known after apply) + + content_base64sha256 = (known after apply) + + content_base64sha512 = (known after apply) + + content_md5 = (known after apply) + + content_sha1 = (known after apply) + + content_sha256 = (known after apply) + + content_sha512 = (known after apply) + + filename = "./.coderv2/api_key" + + id = (known after apply) + } + + # data.local_file.asia_proxy_token will be read during apply + # (depends on a resource or a module with changes pending) + <= data "local_file" "asia_proxy_token" { + + content = (known after apply) + + content_base64 = (known after apply) + + content_base64sha256 = (known after apply) + + content_base64sha512 = (known after apply) + + content_md5 = (known after apply) + + content_sha1 = (known after apply) + + content_sha256 = (known after apply) + + content_sha512 = (known after apply) + + filename = "./.coderv2/asia_proxy_token" + + id = (known after apply) + } + + # data.local_file.europe_proxy_token will be read during apply + # (depends on a resource or a module with changes pending) + <= data "local_file" "europe_proxy_token" { + + content = (known after apply) + + content_base64 = (known after apply) + + content_base64sha256 = (known after apply) + + content_base64sha512 = (known after apply) + + content_md5 = (known after apply) + + content_sha1 = (known after apply) + + content_sha256 = (known after apply) + + content_sha512 = (known after apply) + + filename = "./.coderv2/europe_proxy_token" + + id = (known after apply) + } + + # helm_release.coder_asia will be created + + resource "helm_release" "coder_asia" { + + 
atomic = false + + chart = "coder" + + cleanup_on_fail = false + + create_namespace = false + + dependency_update = false + + disable_crd_hooks = false + + disable_openapi_validation = false + + disable_webhooks = false + + force_update = false + + id = (known after apply) + + lint = false + + manifest = (known after apply) + + max_history = 0 + + metadata = (known after apply) + + name = "d17-3-coder" + + namespace = "coder" + + pass_credentials = false + + recreate_pods = false + + render_subchart_notes = true + + replace = false + + repository = "https://helm.coder.com/v2" + + reset_values = false + + reuse_values = false + + skip_crds = false + + status = "deployed" + + timeout = 300 + + values = [ + + <<-EOT + coder: + workspaceProxy: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "cloud.google.com/gke-nodepool" + operator: "In" + values: ["coder"] + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: "kubernetes.io/hostname" + labelSelector: + matchExpressions: + - key: "app.kubernetes.io/instance" + operator: "In" + values: ["d17-3-coder"] + env: + - name: "CODER_ACCESS_URL" + value: "http://d17-3-asia-scaletest.f0ssel.io" + - name: CODER_PRIMARY_ACCESS_URL + value: "http://d17-3-scaletest.f0ssel.io" + - name: CODER_PROXY_SESSION_TOKEN + valueFrom: + secretKeyRef: + key: token + name: "coder-proxy-token" + - name: "CODER_CACHE_DIRECTORY" + value: "/tmp/coder" + - name: "CODER_TELEMETRY_ENABLE" + value: "false" + - name: "CODER_LOGGING_HUMAN" + value: "/dev/null" + - name: "CODER_LOGGING_STACKDRIVER" + value: "/dev/stderr" + - name: "CODER_PROMETHEUS_ENABLE" + value: "true" + - name: "CODER_VERBOSE" + value: "true" + - name: "CODER_EXPERIMENTS" + value: "" + - name: "CODER_DANGEROUS_DISABLE_RATE_LIMITS" + value: "true" + image: + repo: ghcr.io/coder/coder + tag: latest + replicaCount: "3" + resources: + requests: + cpu: "3000m" + memory: "12Gi" + limits: + cpu: "3000m" + memory: "12Gi" + securityContext: + readOnlyRootFilesystem: true + service: + enable: true + sessionAffinity: None + loadBalancerIP: "34.124.188.45" + volumeMounts: + - mountPath: "/tmp" + name: cache + readOnly: false + volumes: + - emptyDir: + sizeLimit: 1024Mi + name: cache + EOT, + ] + + verify = false + + version = "2.18.1" + + wait = true + + wait_for_jobs = false + } + + # helm_release.coder_europe will be created + + resource "helm_release" "coder_europe" { + + atomic = false + + chart = "coder" + + cleanup_on_fail = false + + create_namespace = false + + dependency_update = false + + disable_crd_hooks = false + + disable_openapi_validation = false + + disable_webhooks = false + + force_update = false + + id = (known after apply) + + lint = false + + manifest = (known after apply) + + max_history = 0 + + metadata = (known after apply) + + name = "d17-3-coder" + + namespace = "coder" + + pass_credentials = false + + recreate_pods = false + + render_subchart_notes = true + + replace = false + + repository = "https://helm.coder.com/v2" + + reset_values = false + + reuse_values = false + + skip_crds = false + + status = "deployed" + + timeout = 300 + + values = [ + + <<-EOT + coder: + workspaceProxy: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "cloud.google.com/gke-nodepool" + operator: "In" + values: ["coder"] + podAntiAffinity: + 
preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: "kubernetes.io/hostname" + labelSelector: + matchExpressions: + - key: "app.kubernetes.io/instance" + operator: "In" + values: ["d17-3-coder"] + env: + - name: "CODER_ACCESS_URL" + value: "http://d17-3-europe-scaletest.f0ssel.io" + - name: CODER_PRIMARY_ACCESS_URL + value: "http://d17-3-scaletest.f0ssel.io" + - name: CODER_PROXY_SESSION_TOKEN + valueFrom: + secretKeyRef: + key: token + name: "coder-proxy-token" + - name: "CODER_CACHE_DIRECTORY" + value: "/tmp/coder" + - name: "CODER_TELEMETRY_ENABLE" + value: "false" + - name: "CODER_LOGGING_HUMAN" + value: "/dev/null" + - name: "CODER_LOGGING_STACKDRIVER" + value: "/dev/stderr" + - name: "CODER_PROMETHEUS_ENABLE" + value: "true" + - name: "CODER_VERBOSE" + value: "true" + - name: "CODER_EXPERIMENTS" + value: "" + - name: "CODER_DANGEROUS_DISABLE_RATE_LIMITS" + value: "true" + image: + repo: ghcr.io/coder/coder + tag: latest + replicaCount: "3" + resources: + requests: + cpu: "3000m" + memory: "12Gi" + limits: + cpu: "3000m" + memory: "12Gi" + securityContext: + readOnlyRootFilesystem: true + service: + enable: true + sessionAffinity: None + loadBalancerIP: "35.205.158.154" + volumeMounts: + - mountPath: "/tmp" + name: cache + readOnly: false + volumes: + - emptyDir: + sizeLimit: 1024Mi + name: cache + EOT, + ] + + verify = false + + version = "2.18.1" + + wait = true + + wait_for_jobs = false + } + + # helm_release.coder_primary will be created + + resource "helm_release" "coder_primary" { + + atomic = false + + chart = "coder" + + cleanup_on_fail = false + + create_namespace = false + + dependency_update = false + + disable_crd_hooks = false + + disable_openapi_validation = false + + disable_webhooks = false + + force_update = false + + id = (known after apply) + + lint = false + + manifest = (known after apply) + + max_history = 0 + + metadata = (known after apply) + + name = "d17-3-coder" + + namespace = "coder" + + pass_credentials = false + + recreate_pods = false + + render_subchart_notes = true + + replace = false + + repository = "https://helm.coder.com/v2" + + reset_values = false + + reuse_values = false + + skip_crds = false + + status = "deployed" + + timeout = 300 + + values = [ + + <<-EOT + coder: + workspaceProxy: false + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "cloud.google.com/gke-nodepool" + operator: "In" + values: ["coder"] + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: "kubernetes.io/hostname" + labelSelector: + matchExpressions: + - key: "app.kubernetes.io/instance" + operator: "In" + values: ["d17-3-coder"] + env: + - name: "CODER_ACCESS_URL" + value: "http://d17-3-scaletest.f0ssel.io" + - name: "CODER_PG_CONNECTION_URL" + valueFrom: + secretKeyRef: + name: "coder-db-url" + key: url + - name: "CODER_PROVISIONER_DAEMONS" + value: "0" + - name: CODER_PROVISIONER_DAEMON_PSK + valueFrom: + secretKeyRef: + key: psk + name: "coder-provisioner-psk" + - name: "CODER_PROMETHEUS_COLLECT_AGENT_STATS" + value: "true" + - name: "CODER_PROMETHEUS_COLLECT_DB_METRICS" + value: "true" + - name: "CODER_PPROF_ENABLE" + value: "true" + - name: "CODER_CACHE_DIRECTORY" + value: "/tmp/coder" + - name: "CODER_TELEMETRY_ENABLE" + value: "false" + - name: "CODER_LOGGING_HUMAN" + value: "/dev/null" + - name: "CODER_LOGGING_STACKDRIVER" + value: "/dev/stderr" + - name: 
"CODER_PROMETHEUS_ENABLE" + value: "true" + - name: "CODER_VERBOSE" + value: "true" + - name: "CODER_EXPERIMENTS" + value: "" + - name: "CODER_DANGEROUS_DISABLE_RATE_LIMITS" + value: "true" + image: + repo: ghcr.io/coder/coder + tag: latest + replicaCount: "3" + resources: + requests: + cpu: "3000m" + memory: "12Gi" + limits: + cpu: "3000m" + memory: "12Gi" + securityContext: + readOnlyRootFilesystem: true + service: + enable: true + sessionAffinity: None + loadBalancerIP: "34.23.114.144" + volumeMounts: + - mountPath: "/tmp" + name: cache + readOnly: false + volumes: + - emptyDir: + sizeLimit: 1024Mi + name: cache + EOT, + ] + + verify = false + + version = "2.18.1" + + wait = true + + wait_for_jobs = false + } + + # helm_release.provisionerd_asia will be created + + resource "helm_release" "provisionerd_asia" { + + atomic = false + + chart = "coder-provisioner" + + cleanup_on_fail = false + + create_namespace = false + + dependency_update = false + + disable_crd_hooks = false + + disable_openapi_validation = false + + disable_webhooks = false + + force_update = false + + id = (known after apply) + + lint = false + + manifest = (known after apply) + + max_history = 0 + + metadata = (known after apply) + + name = "d17-3-provisionerd" + + namespace = "coder" + + pass_credentials = false + + recreate_pods = false + + render_subchart_notes = true + + replace = false + + repository = "https://helm.coder.com/v2" + + reset_values = false + + reuse_values = false + + skip_crds = false + + status = "deployed" + + timeout = 300 + + values = [ + + <<-EOT + coder: + workspaceProxy: false + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "cloud.google.com/gke-nodepool" + operator: "In" + values: ["coder"] + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: "kubernetes.io/hostname" + labelSelector: + matchExpressions: + - key: "app.kubernetes.io/instance" + operator: "In" + values: ["d17-3-coder"] + env: + - name: "CODER_URL" + value: "http://d17-3-scaletest.f0ssel.io" + - name: "CODER_PROVISIONERD_TAGS" + value: "scope=organization,deployment=asia" + - name: "CODER_PROVISIONER_DAEMON_NAME" + value: "asia" + - name: "CODER_CONFIG_DIR" + value: "/tmp/config" + - name: "CODER_CACHE_DIRECTORY" + value: "/tmp/coder" + - name: "CODER_TELEMETRY_ENABLE" + value: "false" + - name: "CODER_LOGGING_HUMAN" + value: "/dev/null" + - name: "CODER_LOGGING_STACKDRIVER" + value: "/dev/stderr" + - name: "CODER_PROMETHEUS_ENABLE" + value: "true" + - name: "CODER_VERBOSE" + value: "true" + - name: "CODER_EXPERIMENTS" + value: "" + - name: "CODER_DANGEROUS_DISABLE_RATE_LIMITS" + value: "true" + image: + repo: ghcr.io/coder/coder + tag: latest + replicaCount: "30" + resources: + requests: + cpu: "100m" + memory: "512Mi" + limits: + cpu: "1000m" + memory: "1Gi" + securityContext: + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: "/tmp" + name: cache + readOnly: false + volumes: + - emptyDir: + sizeLimit: 1024Mi + name: cache + EOT, + ] + + verify = false + + version = "2.18.1" + + wait = true + + wait_for_jobs = false + } + + # helm_release.provisionerd_europe will be created + + resource "helm_release" "provisionerd_europe" { + + atomic = false + + chart = "coder-provisioner" + + cleanup_on_fail = false + + create_namespace = false + + dependency_update = false + + disable_crd_hooks = false + + disable_openapi_validation = false + + disable_webhooks = false + + 
force_update = false + + id = (known after apply) + + lint = false + + manifest = (known after apply) + + max_history = 0 + + metadata = (known after apply) + + name = "d17-3-provisionerd" + + namespace = "coder" + + pass_credentials = false + + recreate_pods = false + + render_subchart_notes = true + + replace = false + + repository = "https://helm.coder.com/v2" + + reset_values = false + + reuse_values = false + + skip_crds = false + + status = "deployed" + + timeout = 300 + + values = [ + + <<-EOT + coder: + workspaceProxy: false + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "cloud.google.com/gke-nodepool" + operator: "In" + values: ["coder"] + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: "kubernetes.io/hostname" + labelSelector: + matchExpressions: + - key: "app.kubernetes.io/instance" + operator: "In" + values: ["d17-3-coder"] + env: + - name: "CODER_URL" + value: "http://d17-3-scaletest.f0ssel.io" + - name: "CODER_PROVISIONERD_TAGS" + value: "scope=organization,deployment=europe" + - name: "CODER_PROVISIONER_DAEMON_NAME" + value: "europe" + - name: "CODER_CONFIG_DIR" + value: "/tmp/config" + - name: "CODER_CACHE_DIRECTORY" + value: "/tmp/coder" + - name: "CODER_TELEMETRY_ENABLE" + value: "false" + - name: "CODER_LOGGING_HUMAN" + value: "/dev/null" + - name: "CODER_LOGGING_STACKDRIVER" + value: "/dev/stderr" + - name: "CODER_PROMETHEUS_ENABLE" + value: "true" + - name: "CODER_VERBOSE" + value: "true" + - name: "CODER_EXPERIMENTS" + value: "" + - name: "CODER_DANGEROUS_DISABLE_RATE_LIMITS" + value: "true" + image: + repo: ghcr.io/coder/coder + tag: latest + replicaCount: "30" + resources: + requests: + cpu: "100m" + memory: "512Mi" + limits: + cpu: "1000m" + memory: "1Gi" + securityContext: + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: "/tmp" + name: cache + readOnly: false + volumes: + - emptyDir: + sizeLimit: 1024Mi + name: cache + EOT, + ] + + verify = false + + version = "2.18.1" + + wait = true + + wait_for_jobs = false + } + + # helm_release.provisionerd_primary will be created + + resource "helm_release" "provisionerd_primary" { + + atomic = false + + chart = "coder-provisioner" + + cleanup_on_fail = false + + create_namespace = false + + dependency_update = false + + disable_crd_hooks = false + + disable_openapi_validation = false + + disable_webhooks = false + + force_update = false + + id = (known after apply) + + lint = false + + manifest = (known after apply) + + max_history = 0 + + metadata = (known after apply) + + name = "d17-3-provisionerd" + + namespace = "coder" + + pass_credentials = false + + recreate_pods = false + + render_subchart_notes = true + + replace = false + + repository = "https://helm.coder.com/v2" + + reset_values = false + + reuse_values = false + + skip_crds = false + + status = "deployed" + + timeout = 300 + + values = [ + + <<-EOT + coder: + workspaceProxy: false + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "cloud.google.com/gke-nodepool" + operator: "In" + values: ["coder"] + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: "kubernetes.io/hostname" + labelSelector: + matchExpressions: + - key: "app.kubernetes.io/instance" + operator: "In" + values: ["d17-3-coder"] + env: + - name: "CODER_URL" + value: 
"http://d17-3-scaletest.f0ssel.io" + - name: "CODER_PROVISIONERD_TAGS" + value: "scope=organization,deployment=primary" + - name: "CODER_PROVISIONER_DAEMON_NAME" + value: "primary" + - name: "CODER_CONFIG_DIR" + value: "/tmp/config" + - name: "CODER_CACHE_DIRECTORY" + value: "/tmp/coder" + - name: "CODER_TELEMETRY_ENABLE" + value: "false" + - name: "CODER_LOGGING_HUMAN" + value: "/dev/null" + - name: "CODER_LOGGING_STACKDRIVER" + value: "/dev/stderr" + - name: "CODER_PROMETHEUS_ENABLE" + value: "true" + - name: "CODER_VERBOSE" + value: "true" + - name: "CODER_EXPERIMENTS" + value: "" + - name: "CODER_DANGEROUS_DISABLE_RATE_LIMITS" + value: "true" + image: + repo: ghcr.io/coder/coder + tag: latest + replicaCount: "30" + resources: + requests: + cpu: "100m" + memory: "512Mi" + limits: + cpu: "1000m" + memory: "1Gi" + securityContext: + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: "/tmp" + name: cache + readOnly: false + volumes: + - emptyDir: + sizeLimit: 1024Mi + name: cache + EOT, + ] + + verify = false + + version = "2.18.1" + + wait = true + + wait_for_jobs = false + } + + # kubernetes_job.create_workspaces_asia will be created + + resource "kubernetes_job" "create_workspaces_asia" { + + id = (known after apply) + + wait_for_completion = true + + + metadata { + + generation = (known after apply) + + labels = { + + "app.kubernetes.io/name" = "d17-3-create-workspaces" + } + + name = "d17-3-create-workspaces" + + namespace = "coder" + + resource_version = (known after apply) + + uid = (known after apply) + } + + + spec { + + backoff_limit = 6 + + completion_mode = (known after apply) + + completions = 1 + + parallelism = 1 + + + selector (known after apply) + + + template { + + metadata { + + generation = (known after apply) + + name = (known after apply) + + resource_version = (known after apply) + + uid = (known after apply) + } + + spec { + + automount_service_account_token = true + + dns_policy = "ClusterFirst" + + enable_service_links = true + + host_ipc = false + + host_network = false + + host_pid = false + + hostname = (known after apply) + + node_name = (known after apply) + + restart_policy = "Never" + + scheduler_name = (known after apply) + + service_account_name = (known after apply) + + share_process_namespace = false + + termination_grace_period_seconds = 30 + + + affinity { + + node_affinity { + + required_during_scheduling_ignored_during_execution { + + node_selector_term { + + match_expressions { + + key = "cloud.google.com/gke-nodepool" + + operator = "In" + + values = [ + + "misc", + ] + } + } + } + } + } + + + container { + + command = (known after apply) + + image = "ghcr.io/coder/coder:latest" + + image_pull_policy = (known after apply) + + name = "cli" + + stdin = false + + stdin_once = false + + termination_message_path = "/dev/termination-log" + + termination_message_policy = (known after apply) + + tty = false + + + resources (known after apply) + } + + + image_pull_secrets (known after apply) + + + readiness_gate (known after apply) + } + } + } + + + timeouts { + + create = "600s" + } + } + + # kubernetes_job.create_workspaces_europe will be created + + resource "kubernetes_job" "create_workspaces_europe" { + + id = (known after apply) + + wait_for_completion = true + + + metadata { + + generation = (known after apply) + + labels = { + + "app.kubernetes.io/name" = "d17-3-create-workspaces" + } + + name = "d17-3-create-workspaces" + + namespace = "coder" + + resource_version = (known after apply) + + uid = (known after apply) + } + + + spec { + + 
backoff_limit = 6 + + completion_mode = (known after apply) + + completions = 1 + + parallelism = 1 + + + selector (known after apply) + + + template { + + metadata { + + generation = (known after apply) + + name = (known after apply) + + resource_version = (known after apply) + + uid = (known after apply) + } + + spec { + + automount_service_account_token = true + + dns_policy = "ClusterFirst" + + enable_service_links = true + + host_ipc = false + + host_network = false + + host_pid = false + + hostname = (known after apply) + + node_name = (known after apply) + + restart_policy = "Never" + + scheduler_name = (known after apply) + + service_account_name = (known after apply) + + share_process_namespace = false + + termination_grace_period_seconds = 30 + + + affinity { + + node_affinity { + + required_during_scheduling_ignored_during_execution { + + node_selector_term { + + match_expressions { + + key = "cloud.google.com/gke-nodepool" + + operator = "In" + + values = [ + + "misc", + ] + } + } + } + } + } + + + container { + + command = (known after apply) + + image = "ghcr.io/coder/coder:latest" + + image_pull_policy = (known after apply) + + name = "cli" + + stdin = false + + stdin_once = false + + termination_message_path = "/dev/termination-log" + + termination_message_policy = (known after apply) + + tty = false + + + resources (known after apply) + } + + + image_pull_secrets (known after apply) + + + readiness_gate (known after apply) + } + } + } + + + timeouts { + + create = "600s" + } + } + + # kubernetes_job.create_workspaces_primary will be created + + resource "kubernetes_job" "create_workspaces_primary" { + + id = (known after apply) + + wait_for_completion = true + + + metadata { + + generation = (known after apply) + + labels = { + + "app.kubernetes.io/name" = "d17-3-create-workspaces" + } + + name = "d17-3-create-workspaces" + + namespace = "coder" + + resource_version = (known after apply) + + uid = (known after apply) + } + + + spec { + + backoff_limit = 6 + + completion_mode = (known after apply) + + completions = 1 + + parallelism = 1 + + + selector (known after apply) + + + template { + + metadata { + + generation = (known after apply) + + name = (known after apply) + + resource_version = (known after apply) + + uid = (known after apply) + } + + spec { + + automount_service_account_token = true + + dns_policy = "ClusterFirst" + + enable_service_links = true + + host_ipc = false + + host_network = false + + host_pid = false + + hostname = (known after apply) + + node_name = (known after apply) + + restart_policy = "Never" + + scheduler_name = (known after apply) + + service_account_name = (known after apply) + + share_process_namespace = false + + termination_grace_period_seconds = 30 + + + affinity { + + node_affinity { + + required_during_scheduling_ignored_during_execution { + + node_selector_term { + + match_expressions { + + key = "cloud.google.com/gke-nodepool" + + operator = "In" + + values = [ + + "misc", + ] + } + } + } + } + } + + + container { + + command = (known after apply) + + image = "ghcr.io/coder/coder:latest" + + image_pull_policy = (known after apply) + + name = "cli" + + stdin = false + + stdin_once = false + + termination_message_path = "/dev/termination-log" + + termination_message_policy = (known after apply) + + tty = false + + + resources (known after apply) + } + + + image_pull_secrets (known after apply) + + + readiness_gate (known after apply) + } + } + } + + + timeouts { + + create = "600s" + } + } + + # kubernetes_job.push_template_asia will 
be created + + resource "kubernetes_job" "push_template_asia" { + + id = (known after apply) + + wait_for_completion = true + + + metadata { + + generation = (known after apply) + + labels = { + + "app.kubernetes.io/name" = "d17-3-push-template" + } + + name = "d17-3-push-template" + + namespace = "coder" + + resource_version = (known after apply) + + uid = (known after apply) + } + + + spec { + + backoff_limit = 6 + + completion_mode = (known after apply) + + completions = 1 + + parallelism = 1 + + + selector (known after apply) + + + template { + + metadata { + + generation = (known after apply) + + name = (known after apply) + + resource_version = (known after apply) + + uid = (known after apply) + } + + spec { + + automount_service_account_token = true + + dns_policy = "ClusterFirst" + + enable_service_links = true + + host_ipc = false + + host_network = false + + host_pid = false + + hostname = (known after apply) + + node_name = (known after apply) + + restart_policy = "Never" + + scheduler_name = (known after apply) + + service_account_name = (known after apply) + + share_process_namespace = false + + termination_grace_period_seconds = 30 + + + affinity { + + node_affinity { + + required_during_scheduling_ignored_during_execution { + + node_selector_term { + + match_expressions { + + key = "cloud.google.com/gke-nodepool" + + operator = "In" + + values = [ + + "misc", + ] + } + } + } + } + } + + + container { + + command = (known after apply) + + image = "ghcr.io/coder/coder:latest" + + image_pull_policy = (known after apply) + + name = "cli" + + stdin = false + + stdin_once = false + + termination_message_path = "/dev/termination-log" + + termination_message_policy = (known after apply) + + tty = false + + + resources (known after apply) + + + volume_mount { + + mount_path = "/home/coder/template/main.tf" + + mount_propagation = "None" + + name = "coder-template" + + read_only = false + + sub_path = "main.tf" + } + } + + + image_pull_secrets (known after apply) + + + readiness_gate (known after apply) + + + volume { + + name = "coder-template" + + + config_map { + + default_mode = "0644" + + name = "coder-template" + } + } + } + } + } + } + + # kubernetes_job.push_template_europe will be created + + resource "kubernetes_job" "push_template_europe" { + + id = (known after apply) + + wait_for_completion = true + + + metadata { + + generation = (known after apply) + + labels = { + + "app.kubernetes.io/name" = "d17-3-push-template" + } + + name = "d17-3-push-template" + + namespace = "coder" + + resource_version = (known after apply) + + uid = (known after apply) + } + + + spec { + + backoff_limit = 6 + + completion_mode = (known after apply) + + completions = 1 + + parallelism = 1 + + + selector (known after apply) + + + template { + + metadata { + + generation = (known after apply) + + name = (known after apply) + + resource_version = (known after apply) + + uid = (known after apply) + } + + spec { + + automount_service_account_token = true + + dns_policy = "ClusterFirst" + + enable_service_links = true + + host_ipc = false + + host_network = false + + host_pid = false + + hostname = (known after apply) + + node_name = (known after apply) + + restart_policy = "Never" + + scheduler_name = (known after apply) + + service_account_name = (known after apply) + + share_process_namespace = false + + termination_grace_period_seconds = 30 + + + affinity { + + node_affinity { + + required_during_scheduling_ignored_during_execution { + + node_selector_term { + + match_expressions { + + key = 
"cloud.google.com/gke-nodepool" + + operator = "In" + + values = [ + + "misc", + ] + } + } + } + } + } + + + container { + + command = (known after apply) + + image = "ghcr.io/coder/coder:latest" + + image_pull_policy = (known after apply) + + name = "cli" + + stdin = false + + stdin_once = false + + termination_message_path = "/dev/termination-log" + + termination_message_policy = (known after apply) + + tty = false + + + resources (known after apply) + + + volume_mount { + + mount_path = "/home/coder/template/main.tf" + + mount_propagation = "None" + + name = "coder-template" + + read_only = false + + sub_path = "main.tf" + } + } + + + image_pull_secrets (known after apply) + + + readiness_gate (known after apply) + + + volume { + + name = "coder-template" + + + config_map { + + default_mode = "0644" + + name = "coder-template" + } + } + } + } + } + } + + # kubernetes_job.push_template_primary will be created + + resource "kubernetes_job" "push_template_primary" { + + id = (known after apply) + + wait_for_completion = true + + + metadata { + + generation = (known after apply) + + labels = { + + "app.kubernetes.io/name" = "d17-3-push-template" + } + + name = "d17-3-push-template" + + namespace = "coder" + + resource_version = (known after apply) + + uid = (known after apply) + } + + + spec { + + backoff_limit = 6 + + completion_mode = (known after apply) + + completions = 1 + + parallelism = 1 + + + selector (known after apply) + + + template { + + metadata { + + generation = (known after apply) + + name = (known after apply) + + resource_version = (known after apply) + + uid = (known after apply) + } + + spec { + + automount_service_account_token = true + + dns_policy = "ClusterFirst" + + enable_service_links = true + + host_ipc = false + + host_network = false + + host_pid = false + + hostname = (known after apply) + + node_name = (known after apply) + + restart_policy = "Never" + + scheduler_name = (known after apply) + + service_account_name = (known after apply) + + share_process_namespace = false + + termination_grace_period_seconds = 30 + + + affinity { + + node_affinity { + + required_during_scheduling_ignored_during_execution { + + node_selector_term { + + match_expressions { + + key = "cloud.google.com/gke-nodepool" + + operator = "In" + + values = [ + + "misc", + ] + } + } + } + } + } + + + container { + + command = (known after apply) + + image = "ghcr.io/coder/coder:latest" + + image_pull_policy = (known after apply) + + name = "cli" + + stdin = false + + stdin_once = false + + termination_message_path = "/dev/termination-log" + + termination_message_policy = (known after apply) + + tty = false + + + resources (known after apply) + + + volume_mount { + + mount_path = "/home/coder/template/main.tf" + + mount_propagation = "None" + + name = "coder-template" + + read_only = false + + sub_path = "main.tf" + } + } + + + image_pull_secrets (known after apply) + + + readiness_gate (known after apply) + + + volume { + + name = "coder-template" + + + config_map { + + default_mode = "0644" + + name = "coder-template" + } + } + } + } + } + } + + # kubernetes_job.workspace_traffic_asia will be created + + resource "kubernetes_job" "workspace_traffic_asia" { + + id = (known after apply) + + wait_for_completion = true + + + metadata { + + generation = (known after apply) + + labels = { + + "app.kubernetes.io/name" = "d17-3-workspace-traffic" + } + + name = "d17-3-workspace-traffic" + + namespace = "coder" + + resource_version = (known after apply) + + uid = (known after apply) + } + + + 
spec { + + backoff_limit = 6 + + completion_mode = (known after apply) + + completions = 1 + + parallelism = 1 + + + selector (known after apply) + + + template { + + metadata { + + generation = (known after apply) + + name = (known after apply) + + resource_version = (known after apply) + + uid = (known after apply) + } + + spec { + + automount_service_account_token = true + + dns_policy = "ClusterFirst" + + enable_service_links = true + + host_ipc = false + + host_network = false + + host_pid = false + + hostname = (known after apply) + + node_name = (known after apply) + + restart_policy = "Never" + + scheduler_name = (known after apply) + + service_account_name = (known after apply) + + share_process_namespace = false + + termination_grace_period_seconds = 30 + + + affinity { + + node_affinity { + + required_during_scheduling_ignored_during_execution { + + node_selector_term { + + match_expressions { + + key = "cloud.google.com/gke-nodepool" + + operator = "In" + + values = [ + + "misc", + ] + } + } + } + } + } + + + container { + + command = (known after apply) + + image = "ghcr.io/coder/coder:latest" + + image_pull_policy = (known after apply) + + name = "cli" + + stdin = false + + stdin_once = false + + termination_message_path = "/dev/termination-log" + + termination_message_policy = (known after apply) + + tty = false + + + env { + + name = "CODER_SCALETEST_JOB_TIMEOUT" + + value = "300s" + } + + + resources (known after apply) + } + + + image_pull_secrets (known after apply) + + + readiness_gate (known after apply) + } + } + } + + + timeouts { + + create = "420s" + } + } + + # kubernetes_job.workspace_traffic_europe will be created + + resource "kubernetes_job" "workspace_traffic_europe" { + + id = (known after apply) + + wait_for_completion = true + + + metadata { + + generation = (known after apply) + + labels = { + + "app.kubernetes.io/name" = "d17-3-workspace-traffic" + } + + name = "d17-3-workspace-traffic" + + namespace = "coder" + + resource_version = (known after apply) + + uid = (known after apply) + } + + + spec { + + backoff_limit = 6 + + completion_mode = (known after apply) + + completions = 1 + + parallelism = 1 + + + selector (known after apply) + + + template { + + metadata { + + generation = (known after apply) + + name = (known after apply) + + resource_version = (known after apply) + + uid = (known after apply) + } + + spec { + + automount_service_account_token = true + + dns_policy = "ClusterFirst" + + enable_service_links = true + + host_ipc = false + + host_network = false + + host_pid = false + + hostname = (known after apply) + + node_name = (known after apply) + + restart_policy = "Never" + + scheduler_name = (known after apply) + + service_account_name = (known after apply) + + share_process_namespace = false + + termination_grace_period_seconds = 30 + + + affinity { + + node_affinity { + + required_during_scheduling_ignored_during_execution { + + node_selector_term { + + match_expressions { + + key = "cloud.google.com/gke-nodepool" + + operator = "In" + + values = [ + + "misc", + ] + } + } + } + } + } + + + container { + + command = (known after apply) + + image = "ghcr.io/coder/coder:latest" + + image_pull_policy = (known after apply) + + name = "cli" + + stdin = false + + stdin_once = false + + termination_message_path = "/dev/termination-log" + + termination_message_policy = (known after apply) + + tty = false + + + env { + + name = "CODER_SCALETEST_JOB_TIMEOUT" + + value = "300s" + } + + + resources (known after apply) + } + + + image_pull_secrets 
(known after apply) + + + readiness_gate (known after apply) + } + } + } + + + timeouts { + + create = "420s" + } + } + + # kubernetes_job.workspace_traffic_primary will be created + + resource "kubernetes_job" "workspace_traffic_primary" { + + id = (known after apply) + + wait_for_completion = true + + + metadata { + + generation = (known after apply) + + labels = { + + "app.kubernetes.io/name" = "d17-3-workspace-traffic" + } + + name = "d17-3-workspace-traffic" + + namespace = "coder" + + resource_version = (known after apply) + + uid = (known after apply) + } + + + spec { + + backoff_limit = 6 + + completion_mode = (known after apply) + + completions = 1 + + parallelism = 1 + + + selector (known after apply) + + + template { + + metadata { + + generation = (known after apply) + + name = (known after apply) + + resource_version = (known after apply) + + uid = (known after apply) + } + + spec { + + automount_service_account_token = true + + dns_policy = "ClusterFirst" + + enable_service_links = true + + host_ipc = false + + host_network = false + + host_pid = false + + hostname = (known after apply) + + node_name = (known after apply) + + restart_policy = "Never" + + scheduler_name = (known after apply) + + service_account_name = (known after apply) + + share_process_namespace = false + + termination_grace_period_seconds = 30 + + + affinity { + + node_affinity { + + required_during_scheduling_ignored_during_execution { + + node_selector_term { + + match_expressions { + + key = "cloud.google.com/gke-nodepool" + + operator = "In" + + values = [ + + "misc", + ] + } + } + } + } + } + + + container { + + command = (known after apply) + + image = "ghcr.io/coder/coder:latest" + + image_pull_policy = (known after apply) + + name = "cli" + + stdin = false + + stdin_once = false + + termination_message_path = "/dev/termination-log" + + termination_message_policy = (known after apply) + + tty = false + + + env { + + name = "CODER_SCALETEST_JOB_TIMEOUT" + + value = "300s" + } + + + resources (known after apply) + } + + + image_pull_secrets (known after apply) + + + readiness_gate (known after apply) + } + } + } + + + timeouts { + + create = "420s" + } + } + + # kubernetes_secret.coder_db will be created + + resource "kubernetes_secret" "coder_db" { + + data = (sensitive value) + + id = (known after apply) + + type = "Opaque" + + wait_for_service_account_token = true + + + metadata { + + generation = (known after apply) + + name = "coder-db-url" + + namespace = "coder" + + resource_version = (known after apply) + + uid = (known after apply) + } + } + + # kubernetes_secret.proxy_token_asia will be created + + resource "kubernetes_secret" "proxy_token_asia" { + + data = (sensitive value) + + id = (known after apply) + + type = "Opaque" + + wait_for_service_account_token = true + + + metadata { + + generation = (known after apply) + + name = "coder-proxy-token" + + namespace = "coder" + + resource_version = (known after apply) + + uid = (known after apply) + } + } + + # kubernetes_secret.proxy_token_europe will be created + + resource "kubernetes_secret" "proxy_token_europe" { + + data = (sensitive value) + + id = (known after apply) + + type = "Opaque" + + wait_for_service_account_token = true + + + metadata { + + generation = (known after apply) + + name = "coder-proxy-token" + + namespace = "coder" + + resource_version = (known after apply) + + uid = (known after apply) + } + } + + # local_file.kubeconfig["asia"] must be replaced +-/+ resource "local_file" "kubeconfig" { + ~ content = (sensitive 
value) # forces replacement + ~ content_base64sha256 = "ChsjhIZFYD1lrZUEY/t3R9IqIfitDSo0cZa+Ghi9TNw=" -> (known after apply) + ~ content_base64sha512 = "j6538ThzmjKMy2O+immgLHgisp84fXvGy6yPMclhqVioqFAq41+IX4tDkLxSljh3KgZLV0RHrFP95brgaT5kYw==" -> (known after apply) + ~ content_md5 = "252bb519143e7c3f6da69d5db4efd2c6" -> (known after apply) + ~ content_sha1 = "67e73f2cb69ccee72f709b6d867ef847d15801e2" -> (known after apply) + ~ content_sha256 = "0a1b23848645603d65ad950463fb7747d22a21f8ad0d2a347196be1a18bd4cdc" -> (known after apply) + ~ content_sha512 = "8fae77f138739a328ccb63be8a69a02c7822b29f387d7bc6cbac8f31c961a958a8a8502ae35f885f8b4390bc529638772a064b574447ac53fde5bae0693e6463" -> (known after apply) + ~ id = "67e73f2cb69ccee72f709b6d867ef847d15801e2" -> (known after apply) + # (3 unchanged attributes hidden) + } + + # local_file.kubeconfig["europe"] must be replaced +-/+ resource "local_file" "kubeconfig" { + ~ content = (sensitive value) # forces replacement + ~ content_base64sha256 = "rp27dn0J7NB/0ic1w4bcTSpclDisikp+bKcqs0tulIo=" -> (known after apply) + ~ content_base64sha512 = "Hddg/XNvES9rSiesqQglLWvZgxhgJeRywZ1JcBx47RHwQuSsLsbWLBKArfDT/J/K5Datgi8BxwRNWDc6d1U7gQ==" -> (known after apply) + ~ content_md5 = "6adc0d8b5e215839d20e2e26771701bb" -> (known after apply) + ~ content_sha1 = "f597afb96c32a7571ba3a81cc18f9f2c9fe50204" -> (known after apply) + ~ content_sha256 = "ae9dbb767d09ecd07fd22735c386dc4d2a5c9438ac8a4a7e6ca72ab34b6e948a" -> (known after apply) + ~ content_sha512 = "1dd760fd736f112f6b4a27aca908252d6bd983186025e472c19d49701c78ed11f042e4ac2ec6d62c1280adf0d3fc9fcae436ad822f01c7044d58373a77553b81" -> (known after apply) + ~ id = "f597afb96c32a7571ba3a81cc18f9f2c9fe50204" -> (known after apply) + # (3 unchanged attributes hidden) + } + + # local_file.kubeconfig["primary"] must be replaced +-/+ resource "local_file" "kubeconfig" { + ~ content = (sensitive value) # forces replacement + ~ content_base64sha256 = "lKpnFyQi6ZjtcWZ8ptAM8GTSkLDpv6HPJePk/MyM5H8=" -> (known after apply) + ~ content_base64sha512 = "v0awiiwAfQsnU446qMXxWv1ks529P/+0KEWtlsDO/1Hz+nv1lOV0r9tSFqIfIA8ka/uPOcXcGkBJ8NiRK5+kMA==" -> (known after apply) + ~ content_md5 = "fd30e86fee98342ae062c502021ed792" -> (known after apply) + ~ content_sha1 = "1caab4f9002954e4a59be717dab5487cd1096614" -> (known after apply) + ~ content_sha256 = "94aa67172422e998ed71667ca6d00cf064d290b0e9bfa1cf25e3e4fccc8ce47f" -> (known after apply) + ~ content_sha512 = "bf46b08a2c007d0b27538e3aa8c5f15afd64b39dbd3fffb42845ad96c0ceff51f3fa7bf594e574afdb5216a21f200f246bfb8f39c5dc1a4049f0d8912b9fa430" -> (known after apply) + ~ id = "1caab4f9002954e4a59be717dab5487cd1096614" -> (known after apply) + # (3 unchanged attributes hidden) + } + + # null_resource.api_key will be created + + resource "null_resource" "api_key" { + + id = (known after apply) + } + + # null_resource.asia_proxy_token will be created + + resource "null_resource" "asia_proxy_token" { + + id = (known after apply) + } + + # null_resource.destroy_workspaces["asia"] will be created + + resource "null_resource" "destroy_workspaces" { + + id = (known after apply) + } + + # null_resource.destroy_workspaces["europe"] will be created + + resource "null_resource" "destroy_workspaces" { + + id = (known after apply) + } + + # null_resource.destroy_workspaces["primary"] will be created + + resource "null_resource" "destroy_workspaces" { + + id = (known after apply) + } + + # null_resource.europe_proxy_token will be created + + resource "null_resource" "europe_proxy_token" { + + id = (known 
after apply)
+ }
+
+ # null_resource.license will be created
+ + resource "null_resource" "license" {
+ + id = (known after apply)
+ }
+
+ # null_resource.pprof["primary"] will be created
+ + resource "null_resource" "pprof" {
+ + id = (known after apply)
+ }
+
+ # time_sleep.wait_baseline will be created
+ + resource "time_sleep" "wait_baseline" {
+ + create_duration = "60s"
+ + id = (known after apply)
+ }
+
+Plan: 30 to add, 0 to change, 3 to destroy.
+
+Do you want to perform these actions?
+ Terraform will perform the actions described above.
+ Only 'yes' will be accepted to approve.
+
+ Enter a value:
+
+Interrupt received.
+Please wait for Terraform to exit or data loss may occur.
+Gracefully shutting down...
+
diff --git a/scaletest/terraform/action/scenarios.tf b/scaletest/terraform/action/scenarios.tf
index 25f85e2f1efdf..2c053cfddea5d 100644
--- a/scaletest/terraform/action/scenarios.tf
+++ b/scaletest/terraform/action/scenarios.tf
@@ -20,7 +20,7 @@ locals {
 workspaces = {
 count_per_deployment = 10
 nodepool_size = 1
- machine_type = "t2d-standard-4"
+ machine_type = "t2d-standard-8"
 cpu_request = "100m"
 mem_request = "128Mi"
 cpu_limit = "100m"
@@ -31,7 +31,7 @@ locals {
 machine_type = "t2d-standard-4"
 }
 cloudsql = {
- tier = "db-f1-micro"
+ tier = "db-custom-4-15360"
 max_connections = 500
 }
 }
@@ -73,32 +73,32 @@ locals {
 large = {
 coder = {
 nodepool_size = 3
- machine_type = "t2d-standard-8"
+ machine_type = "c2d-standard-8"
 replicas = 3
- cpu_request = "1000m"
- mem_request = "6Gi"
- cpu_limit = "2000m"
+ cpu_request = "3000m"
+ mem_request = "12Gi"
+ cpu_limit = "3000m"
 mem_limit = "12Gi"
 }
 provisionerd = {
- replicas = 1
+ replicas = 50
 cpu_request = "100m"
- mem_request = "1Gi"
+ mem_request = "256Mi"
 cpu_limit = "1000m"
 mem_limit = "1Gi"
 }
 workspaces = {
- count_per_deployment = 10
- nodepool_size = 1
- machine_type = "t2d-standard-8"
+ count_per_deployment = 700
+ nodepool_size = 3
+ machine_type = "c2d-standard-32"
 cpu_request = "100m"
- mem_request = "128Mi"
+ mem_request = "512Mi"
 cpu_limit = "100m"
- mem_limit = "128Mi"
+ mem_limit = "512Mi"
 }
 misc = {
 nodepool_size = 1
- machine_type = "t2d-standard-4"
+ machine_type = "c2d-standard-32"
 }
 cloudsql = {
 tier = "db-custom-2-7680"

From 7d7fd9994fd51ef9d0ba072838c5daeeb576c79e Mon Sep 17 00:00:00 2001
From: Garrett Delfosse
Date: Wed, 18 Dec 2024 05:11:32 +0000
Subject: [PATCH 29/36] remove log

---
 scaletest/terraform/action/plan.log | 1772 ---------------------------
 1 file changed, 1772 deletions(-)
 delete mode 100644 scaletest/terraform/action/plan.log

diff --git a/scaletest/terraform/action/plan.log b/scaletest/terraform/action/plan.log
deleted file mode 100644
index 759b94032fa18..0000000000000
--- a/scaletest/terraform/action/plan.log
+++ /dev/null
@@ -1,1772 +0,0 @@
-random_password.coder_postgres_password: Refreshing state... [id=none]
-random_password.provisionerd_psk: Refreshing state... [id=none]
-random_password.prometheus_postgres_password: Refreshing state... [id=none]
-data.google_client_config.default: Reading...
-data.google_project.project: Reading...
-google_compute_address.coder["asia"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/regions/asia-southeast1/addresses/d17-3-asia-coder]
-google_compute_address.coder["europe"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/regions/europe-west1/addresses/d17-3-europe-coder]
-google_compute_address.coder["primary"]: Refreshing state...
[id=projects/sandbox-20241217-k7h9cj/regions/us-east1/addresses/d17-3-primary-coder]
-data.google_client_config.default: Read complete after 1s [id=projects//regions//zones/]
-data.google_project.project: Read complete after 1s [id=projects/sandbox-20241217-k7h9cj]
-google_project_service.api["stackdriver"]: Refreshing state... [id=sandbox-20241217-k7h9cj/stackdriver.googleapis.com]
-google_project_service.api["storage-api"]: Refreshing state... [id=sandbox-20241217-k7h9cj/storage-api.googleapis.com]
-google_project_service.api["servicemanagement"]: Refreshing state... [id=sandbox-20241217-k7h9cj/servicemanagement.googleapis.com]
-google_project_service.api["compute"]: Refreshing state... [id=sandbox-20241217-k7h9cj/compute.googleapis.com]
-google_project_service.api["logging"]: Refreshing state... [id=sandbox-20241217-k7h9cj/logging.googleapis.com]
-google_project_service.api["cloudtrace"]: Refreshing state... [id=sandbox-20241217-k7h9cj/cloudtrace.googleapis.com]
-google_project_service.api["monitoring"]: Refreshing state... [id=sandbox-20241217-k7h9cj/monitoring.googleapis.com]
-google_project_service.api["container"]: Refreshing state... [id=sandbox-20241217-k7h9cj/container.googleapis.com]
-google_project_service.api["sqladmin"]: Refreshing state... [id=sandbox-20241217-k7h9cj/sqladmin.googleapis.com]
-google_project_service.api["servicenetworking"]: Refreshing state... [id=sandbox-20241217-k7h9cj/servicenetworking.googleapis.com]
-data.google_compute_default_service_account.default: Reading...
-google_compute_network.vpc: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/global/networks/d17-3]
-cloudflare_record.coder["primary"]: Refreshing state... [id=21e416c5e43250da81de73b475f30f87]
-cloudflare_record.coder["europe"]: Refreshing state... [id=82fbe51332a53858d9f068f9c88d49db]
-cloudflare_record.coder["asia"]: Refreshing state... [id=f5c434e083f2f2cd06e750e8cf1541f9]
-google_compute_global_address.sql_peering: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/global/addresses/d17-3-sql-peering]
-google_compute_subnetwork.subnet["asia"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/regions/asia-southeast1/subnetworks/d17-3-asia]
-google_compute_subnetwork.subnet["primary"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/regions/us-east1/subnetworks/d17-3-primary]
-google_compute_subnetwork.subnet["europe"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/regions/europe-west1/subnetworks/d17-3-europe]
-data.google_compute_default_service_account.default: Read complete after 1s [id=projects/sandbox-20241217-k7h9cj/serviceAccounts/267709392779-compute@developer.gserviceaccount.com]
-google_service_networking_connection.private_vpc_connection: Refreshing state... [id=projects%2Fsandbox-20241217-k7h9cj%2Fglobal%2Fnetworks%2Fd17-3:servicenetworking.googleapis.com]
-google_container_cluster.cluster["europe"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/europe-west1-b/clusters/d17-3-europe]
-google_container_cluster.cluster["asia"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/asia-southeast1-a/clusters/d17-3-asia]
-google_container_cluster.cluster["primary"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/us-east1-c/clusters/d17-3-primary]
-google_sql_database_instance.db: Refreshing state... [id=d17-3-coder]
-google_sql_database.coder: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/instances/d17-3-coder/databases/d17-3-coder]
-google_sql_user.coder: Refreshing state...
[id=d17-3-coder//d17-3-coder]
-google_sql_user.prometheus: Refreshing state... [id=d17-3-prometheus//d17-3-coder]
-google_container_node_pool.node_pool["primary_workspaces"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/us-east1-c/clusters/d17-3-primary/nodePools/workspaces]
-google_container_node_pool.node_pool["primary_misc"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/us-east1-c/clusters/d17-3-primary/nodePools/misc]
-google_container_node_pool.node_pool["asia_workspaces"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/asia-southeast1-a/clusters/d17-3-asia/nodePools/workspaces]
-google_container_node_pool.node_pool["europe_coder"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/europe-west1-b/clusters/d17-3-europe/nodePools/coder]
-google_container_node_pool.node_pool["asia_misc"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/asia-southeast1-a/clusters/d17-3-asia/nodePools/misc]
-google_container_node_pool.node_pool["europe_misc"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/europe-west1-b/clusters/d17-3-europe/nodePools/misc]
-google_container_node_pool.node_pool["primary_coder"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/us-east1-c/clusters/d17-3-primary/nodePools/coder]
-google_container_node_pool.node_pool["asia_coder"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/asia-southeast1-a/clusters/d17-3-asia/nodePools/coder]
-google_container_node_pool.node_pool["europe_workspaces"]: Refreshing state... [id=projects/sandbox-20241217-k7h9cj/locations/europe-west1-b/clusters/d17-3-europe/nodePools/workspaces]
-local_file.kubeconfig["asia"]: Refreshing state... [id=67e73f2cb69ccee72f709b6d867ef847d15801e2]
-local_file.kubeconfig["europe"]: Refreshing state... [id=f597afb96c32a7571ba3a81cc18f9f2c9fe50204]
-local_file.kubeconfig["primary"]: Refreshing state... [id=1caab4f9002954e4a59be717dab5487cd1096614]
-kubernetes_namespace.coder_primary: Refreshing state... [id=coder]
-local_file.kubernetes_template: Refreshing state... [id=619345fcc362d300d43b054926122573f0cfa82d]
-kubernetes_namespace.coder_europe: Refreshing state... [id=coder]
-kubernetes_namespace.coder_asia: Refreshing state... [id=coder]
-kubernetes_secret.provisionerd_psk_primary: Refreshing state... [id=coder/coder-provisioner-psk]
-kubernetes_config_map.template_primary: Refreshing state... [id=coder/coder-template]
-kubernetes_secret.provisionerd_psk_europe: Refreshing state... [id=coder/coder-provisioner-psk]
-kubernetes_config_map.template_europe: Refreshing state... [id=coder/coder-template]
-kubernetes_secret.provisionerd_psk_asia: Refreshing state... [id=coder/coder-provisioner-psk]
-kubernetes_config_map.template_asia: Refreshing state... [id=coder/coder-template]
-
-Terraform used the selected providers to generate the following execution
-plan.
Resource actions are indicated with the following symbols: - + create --/+ destroy and then create replacement - <= read (data resources) - -Terraform will perform the following actions: - - # data.http.coder_healthy will be read during apply - # (depends on a resource or a module with changes pending) - <= data "http" "coder_healthy" { - + body = (known after apply) - + id = (known after apply) - + response_body = (known after apply) - + response_body_base64 = (known after apply) - + response_headers = (known after apply) - + status_code = (known after apply) - + url = "http://d17-3-scaletest.f0ssel.io" - - + retry { - + attempts = 30 - + min_delay_ms = 10000 - } - } - - # data.local_file.api_key will be read during apply - # (depends on a resource or a module with changes pending) - <= data "local_file" "api_key" { - + content = (known after apply) - + content_base64 = (known after apply) - + content_base64sha256 = (known after apply) - + content_base64sha512 = (known after apply) - + content_md5 = (known after apply) - + content_sha1 = (known after apply) - + content_sha256 = (known after apply) - + content_sha512 = (known after apply) - + filename = "./.coderv2/api_key" - + id = (known after apply) - } - - # data.local_file.asia_proxy_token will be read during apply - # (depends on a resource or a module with changes pending) - <= data "local_file" "asia_proxy_token" { - + content = (known after apply) - + content_base64 = (known after apply) - + content_base64sha256 = (known after apply) - + content_base64sha512 = (known after apply) - + content_md5 = (known after apply) - + content_sha1 = (known after apply) - + content_sha256 = (known after apply) - + content_sha512 = (known after apply) - + filename = "./.coderv2/asia_proxy_token" - + id = (known after apply) - } - - # data.local_file.europe_proxy_token will be read during apply - # (depends on a resource or a module with changes pending) - <= data "local_file" "europe_proxy_token" { - + content = (known after apply) - + content_base64 = (known after apply) - + content_base64sha256 = (known after apply) - + content_base64sha512 = (known after apply) - + content_md5 = (known after apply) - + content_sha1 = (known after apply) - + content_sha256 = (known after apply) - + content_sha512 = (known after apply) - + filename = "./.coderv2/europe_proxy_token" - + id = (known after apply) - } - - # helm_release.coder_asia will be created - + resource "helm_release" "coder_asia" { - + atomic = false - + chart = "coder" - + cleanup_on_fail = false - + create_namespace = false - + dependency_update = false - + disable_crd_hooks = false - + disable_openapi_validation = false - + disable_webhooks = false - + force_update = false - + id = (known after apply) - + lint = false - + manifest = (known after apply) - + max_history = 0 - + metadata = (known after apply) - + name = "d17-3-coder" - + namespace = "coder" - + pass_credentials = false - + recreate_pods = false - + render_subchart_notes = true - + replace = false - + repository = "https://helm.coder.com/v2" - + reset_values = false - + reuse_values = false - + skip_crds = false - + status = "deployed" - + timeout = 300 - + values = [ - + <<-EOT - coder: - workspaceProxy: true - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "cloud.google.com/gke-nodepool" - operator: "In" - values: ["coder"] - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - podAffinityTerm: - topologyKey: 
"kubernetes.io/hostname" - labelSelector: - matchExpressions: - - key: "app.kubernetes.io/instance" - operator: "In" - values: ["d17-3-coder"] - env: - - name: "CODER_ACCESS_URL" - value: "http://d17-3-asia-scaletest.f0ssel.io" - - name: CODER_PRIMARY_ACCESS_URL - value: "http://d17-3-scaletest.f0ssel.io" - - name: CODER_PROXY_SESSION_TOKEN - valueFrom: - secretKeyRef: - key: token - name: "coder-proxy-token" - - name: "CODER_CACHE_DIRECTORY" - value: "/tmp/coder" - - name: "CODER_TELEMETRY_ENABLE" - value: "false" - - name: "CODER_LOGGING_HUMAN" - value: "/dev/null" - - name: "CODER_LOGGING_STACKDRIVER" - value: "/dev/stderr" - - name: "CODER_PROMETHEUS_ENABLE" - value: "true" - - name: "CODER_VERBOSE" - value: "true" - - name: "CODER_EXPERIMENTS" - value: "" - - name: "CODER_DANGEROUS_DISABLE_RATE_LIMITS" - value: "true" - image: - repo: ghcr.io/coder/coder - tag: latest - replicaCount: "3" - resources: - requests: - cpu: "3000m" - memory: "12Gi" - limits: - cpu: "3000m" - memory: "12Gi" - securityContext: - readOnlyRootFilesystem: true - service: - enable: true - sessionAffinity: None - loadBalancerIP: "34.124.188.45" - volumeMounts: - - mountPath: "/tmp" - name: cache - readOnly: false - volumes: - - emptyDir: - sizeLimit: 1024Mi - name: cache - EOT, - ] - + verify = false - + version = "2.18.1" - + wait = true - + wait_for_jobs = false - } - - # helm_release.coder_europe will be created - + resource "helm_release" "coder_europe" { - + atomic = false - + chart = "coder" - + cleanup_on_fail = false - + create_namespace = false - + dependency_update = false - + disable_crd_hooks = false - + disable_openapi_validation = false - + disable_webhooks = false - + force_update = false - + id = (known after apply) - + lint = false - + manifest = (known after apply) - + max_history = 0 - + metadata = (known after apply) - + name = "d17-3-coder" - + namespace = "coder" - + pass_credentials = false - + recreate_pods = false - + render_subchart_notes = true - + replace = false - + repository = "https://helm.coder.com/v2" - + reset_values = false - + reuse_values = false - + skip_crds = false - + status = "deployed" - + timeout = 300 - + values = [ - + <<-EOT - coder: - workspaceProxy: true - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "cloud.google.com/gke-nodepool" - operator: "In" - values: ["coder"] - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - podAffinityTerm: - topologyKey: "kubernetes.io/hostname" - labelSelector: - matchExpressions: - - key: "app.kubernetes.io/instance" - operator: "In" - values: ["d17-3-coder"] - env: - - name: "CODER_ACCESS_URL" - value: "http://d17-3-europe-scaletest.f0ssel.io" - - name: CODER_PRIMARY_ACCESS_URL - value: "http://d17-3-scaletest.f0ssel.io" - - name: CODER_PROXY_SESSION_TOKEN - valueFrom: - secretKeyRef: - key: token - name: "coder-proxy-token" - - name: "CODER_CACHE_DIRECTORY" - value: "/tmp/coder" - - name: "CODER_TELEMETRY_ENABLE" - value: "false" - - name: "CODER_LOGGING_HUMAN" - value: "/dev/null" - - name: "CODER_LOGGING_STACKDRIVER" - value: "/dev/stderr" - - name: "CODER_PROMETHEUS_ENABLE" - value: "true" - - name: "CODER_VERBOSE" - value: "true" - - name: "CODER_EXPERIMENTS" - value: "" - - name: "CODER_DANGEROUS_DISABLE_RATE_LIMITS" - value: "true" - image: - repo: ghcr.io/coder/coder - tag: latest - replicaCount: "3" - resources: - requests: - cpu: "3000m" - memory: "12Gi" - limits: - cpu: "3000m" - memory: "12Gi" - 
securityContext: - readOnlyRootFilesystem: true - service: - enable: true - sessionAffinity: None - loadBalancerIP: "35.205.158.154" - volumeMounts: - - mountPath: "/tmp" - name: cache - readOnly: false - volumes: - - emptyDir: - sizeLimit: 1024Mi - name: cache - EOT, - ] - + verify = false - + version = "2.18.1" - + wait = true - + wait_for_jobs = false - } - - # helm_release.coder_primary will be created - + resource "helm_release" "coder_primary" { - + atomic = false - + chart = "coder" - + cleanup_on_fail = false - + create_namespace = false - + dependency_update = false - + disable_crd_hooks = false - + disable_openapi_validation = false - + disable_webhooks = false - + force_update = false - + id = (known after apply) - + lint = false - + manifest = (known after apply) - + max_history = 0 - + metadata = (known after apply) - + name = "d17-3-coder" - + namespace = "coder" - + pass_credentials = false - + recreate_pods = false - + render_subchart_notes = true - + replace = false - + repository = "https://helm.coder.com/v2" - + reset_values = false - + reuse_values = false - + skip_crds = false - + status = "deployed" - + timeout = 300 - + values = [ - + <<-EOT - coder: - workspaceProxy: false - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "cloud.google.com/gke-nodepool" - operator: "In" - values: ["coder"] - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - podAffinityTerm: - topologyKey: "kubernetes.io/hostname" - labelSelector: - matchExpressions: - - key: "app.kubernetes.io/instance" - operator: "In" - values: ["d17-3-coder"] - env: - - name: "CODER_ACCESS_URL" - value: "http://d17-3-scaletest.f0ssel.io" - - name: "CODER_PG_CONNECTION_URL" - valueFrom: - secretKeyRef: - name: "coder-db-url" - key: url - - name: "CODER_PROVISIONER_DAEMONS" - value: "0" - - name: CODER_PROVISIONER_DAEMON_PSK - valueFrom: - secretKeyRef: - key: psk - name: "coder-provisioner-psk" - - name: "CODER_PROMETHEUS_COLLECT_AGENT_STATS" - value: "true" - - name: "CODER_PROMETHEUS_COLLECT_DB_METRICS" - value: "true" - - name: "CODER_PPROF_ENABLE" - value: "true" - - name: "CODER_CACHE_DIRECTORY" - value: "/tmp/coder" - - name: "CODER_TELEMETRY_ENABLE" - value: "false" - - name: "CODER_LOGGING_HUMAN" - value: "/dev/null" - - name: "CODER_LOGGING_STACKDRIVER" - value: "/dev/stderr" - - name: "CODER_PROMETHEUS_ENABLE" - value: "true" - - name: "CODER_VERBOSE" - value: "true" - - name: "CODER_EXPERIMENTS" - value: "" - - name: "CODER_DANGEROUS_DISABLE_RATE_LIMITS" - value: "true" - image: - repo: ghcr.io/coder/coder - tag: latest - replicaCount: "3" - resources: - requests: - cpu: "3000m" - memory: "12Gi" - limits: - cpu: "3000m" - memory: "12Gi" - securityContext: - readOnlyRootFilesystem: true - service: - enable: true - sessionAffinity: None - loadBalancerIP: "34.23.114.144" - volumeMounts: - - mountPath: "/tmp" - name: cache - readOnly: false - volumes: - - emptyDir: - sizeLimit: 1024Mi - name: cache - EOT, - ] - + verify = false - + version = "2.18.1" - + wait = true - + wait_for_jobs = false - } - - # helm_release.provisionerd_asia will be created - + resource "helm_release" "provisionerd_asia" { - + atomic = false - + chart = "coder-provisioner" - + cleanup_on_fail = false - + create_namespace = false - + dependency_update = false - + disable_crd_hooks = false - + disable_openapi_validation = false - + disable_webhooks = false - + force_update = false - + id = (known after apply) - + lint = 
false - + manifest = (known after apply) - + max_history = 0 - + metadata = (known after apply) - + name = "d17-3-provisionerd" - + namespace = "coder" - + pass_credentials = false - + recreate_pods = false - + render_subchart_notes = true - + replace = false - + repository = "https://helm.coder.com/v2" - + reset_values = false - + reuse_values = false - + skip_crds = false - + status = "deployed" - + timeout = 300 - + values = [ - + <<-EOT - coder: - workspaceProxy: false - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "cloud.google.com/gke-nodepool" - operator: "In" - values: ["coder"] - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - podAffinityTerm: - topologyKey: "kubernetes.io/hostname" - labelSelector: - matchExpressions: - - key: "app.kubernetes.io/instance" - operator: "In" - values: ["d17-3-coder"] - env: - - name: "CODER_URL" - value: "http://d17-3-scaletest.f0ssel.io" - - name: "CODER_PROVISIONERD_TAGS" - value: "scope=organization,deployment=asia" - - name: "CODER_PROVISIONER_DAEMON_NAME" - value: "asia" - - name: "CODER_CONFIG_DIR" - value: "/tmp/config" - - name: "CODER_CACHE_DIRECTORY" - value: "/tmp/coder" - - name: "CODER_TELEMETRY_ENABLE" - value: "false" - - name: "CODER_LOGGING_HUMAN" - value: "/dev/null" - - name: "CODER_LOGGING_STACKDRIVER" - value: "/dev/stderr" - - name: "CODER_PROMETHEUS_ENABLE" - value: "true" - - name: "CODER_VERBOSE" - value: "true" - - name: "CODER_EXPERIMENTS" - value: "" - - name: "CODER_DANGEROUS_DISABLE_RATE_LIMITS" - value: "true" - image: - repo: ghcr.io/coder/coder - tag: latest - replicaCount: "30" - resources: - requests: - cpu: "100m" - memory: "512Mi" - limits: - cpu: "1000m" - memory: "1Gi" - securityContext: - readOnlyRootFilesystem: true - volumeMounts: - - mountPath: "/tmp" - name: cache - readOnly: false - volumes: - - emptyDir: - sizeLimit: 1024Mi - name: cache - EOT, - ] - + verify = false - + version = "2.18.1" - + wait = true - + wait_for_jobs = false - } - - # helm_release.provisionerd_europe will be created - + resource "helm_release" "provisionerd_europe" { - + atomic = false - + chart = "coder-provisioner" - + cleanup_on_fail = false - + create_namespace = false - + dependency_update = false - + disable_crd_hooks = false - + disable_openapi_validation = false - + disable_webhooks = false - + force_update = false - + id = (known after apply) - + lint = false - + manifest = (known after apply) - + max_history = 0 - + metadata = (known after apply) - + name = "d17-3-provisionerd" - + namespace = "coder" - + pass_credentials = false - + recreate_pods = false - + render_subchart_notes = true - + replace = false - + repository = "https://helm.coder.com/v2" - + reset_values = false - + reuse_values = false - + skip_crds = false - + status = "deployed" - + timeout = 300 - + values = [ - + <<-EOT - coder: - workspaceProxy: false - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "cloud.google.com/gke-nodepool" - operator: "In" - values: ["coder"] - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - podAffinityTerm: - topologyKey: "kubernetes.io/hostname" - labelSelector: - matchExpressions: - - key: "app.kubernetes.io/instance" - operator: "In" - values: ["d17-3-coder"] - env: - - name: "CODER_URL" - value: "http://d17-3-scaletest.f0ssel.io" - - name: "CODER_PROVISIONERD_TAGS" - value: 
"scope=organization,deployment=europe" - - name: "CODER_PROVISIONER_DAEMON_NAME" - value: "europe" - - name: "CODER_CONFIG_DIR" - value: "/tmp/config" - - name: "CODER_CACHE_DIRECTORY" - value: "/tmp/coder" - - name: "CODER_TELEMETRY_ENABLE" - value: "false" - - name: "CODER_LOGGING_HUMAN" - value: "/dev/null" - - name: "CODER_LOGGING_STACKDRIVER" - value: "/dev/stderr" - - name: "CODER_PROMETHEUS_ENABLE" - value: "true" - - name: "CODER_VERBOSE" - value: "true" - - name: "CODER_EXPERIMENTS" - value: "" - - name: "CODER_DANGEROUS_DISABLE_RATE_LIMITS" - value: "true" - image: - repo: ghcr.io/coder/coder - tag: latest - replicaCount: "30" - resources: - requests: - cpu: "100m" - memory: "512Mi" - limits: - cpu: "1000m" - memory: "1Gi" - securityContext: - readOnlyRootFilesystem: true - volumeMounts: - - mountPath: "/tmp" - name: cache - readOnly: false - volumes: - - emptyDir: - sizeLimit: 1024Mi - name: cache - EOT, - ] - + verify = false - + version = "2.18.1" - + wait = true - + wait_for_jobs = false - } - - # helm_release.provisionerd_primary will be created - + resource "helm_release" "provisionerd_primary" { - + atomic = false - + chart = "coder-provisioner" - + cleanup_on_fail = false - + create_namespace = false - + dependency_update = false - + disable_crd_hooks = false - + disable_openapi_validation = false - + disable_webhooks = false - + force_update = false - + id = (known after apply) - + lint = false - + manifest = (known after apply) - + max_history = 0 - + metadata = (known after apply) - + name = "d17-3-provisionerd" - + namespace = "coder" - + pass_credentials = false - + recreate_pods = false - + render_subchart_notes = true - + replace = false - + repository = "https://helm.coder.com/v2" - + reset_values = false - + reuse_values = false - + skip_crds = false - + status = "deployed" - + timeout = 300 - + values = [ - + <<-EOT - coder: - workspaceProxy: false - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "cloud.google.com/gke-nodepool" - operator: "In" - values: ["coder"] - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - podAffinityTerm: - topologyKey: "kubernetes.io/hostname" - labelSelector: - matchExpressions: - - key: "app.kubernetes.io/instance" - operator: "In" - values: ["d17-3-coder"] - env: - - name: "CODER_URL" - value: "http://d17-3-scaletest.f0ssel.io" - - name: "CODER_PROVISIONERD_TAGS" - value: "scope=organization,deployment=primary" - - name: "CODER_PROVISIONER_DAEMON_NAME" - value: "primary" - - name: "CODER_CONFIG_DIR" - value: "/tmp/config" - - name: "CODER_CACHE_DIRECTORY" - value: "/tmp/coder" - - name: "CODER_TELEMETRY_ENABLE" - value: "false" - - name: "CODER_LOGGING_HUMAN" - value: "/dev/null" - - name: "CODER_LOGGING_STACKDRIVER" - value: "/dev/stderr" - - name: "CODER_PROMETHEUS_ENABLE" - value: "true" - - name: "CODER_VERBOSE" - value: "true" - - name: "CODER_EXPERIMENTS" - value: "" - - name: "CODER_DANGEROUS_DISABLE_RATE_LIMITS" - value: "true" - image: - repo: ghcr.io/coder/coder - tag: latest - replicaCount: "30" - resources: - requests: - cpu: "100m" - memory: "512Mi" - limits: - cpu: "1000m" - memory: "1Gi" - securityContext: - readOnlyRootFilesystem: true - volumeMounts: - - mountPath: "/tmp" - name: cache - readOnly: false - volumes: - - emptyDir: - sizeLimit: 1024Mi - name: cache - EOT, - ] - + verify = false - + version = "2.18.1" - + wait = true - + wait_for_jobs = false - } - - # kubernetes_job.create_workspaces_asia 
will be created - + resource "kubernetes_job" "create_workspaces_asia" { - + id = (known after apply) - + wait_for_completion = true - - + metadata { - + generation = (known after apply) - + labels = { - + "app.kubernetes.io/name" = "d17-3-create-workspaces" - } - + name = "d17-3-create-workspaces" - + namespace = "coder" - + resource_version = (known after apply) - + uid = (known after apply) - } - - + spec { - + backoff_limit = 6 - + completion_mode = (known after apply) - + completions = 1 - + parallelism = 1 - - + selector (known after apply) - - + template { - + metadata { - + generation = (known after apply) - + name = (known after apply) - + resource_version = (known after apply) - + uid = (known after apply) - } - + spec { - + automount_service_account_token = true - + dns_policy = "ClusterFirst" - + enable_service_links = true - + host_ipc = false - + host_network = false - + host_pid = false - + hostname = (known after apply) - + node_name = (known after apply) - + restart_policy = "Never" - + scheduler_name = (known after apply) - + service_account_name = (known after apply) - + share_process_namespace = false - + termination_grace_period_seconds = 30 - - + affinity { - + node_affinity { - + required_during_scheduling_ignored_during_execution { - + node_selector_term { - + match_expressions { - + key = "cloud.google.com/gke-nodepool" - + operator = "In" - + values = [ - + "misc", - ] - } - } - } - } - } - - + container { - + command = (known after apply) - + image = "ghcr.io/coder/coder:latest" - + image_pull_policy = (known after apply) - + name = "cli" - + stdin = false - + stdin_once = false - + termination_message_path = "/dev/termination-log" - + termination_message_policy = (known after apply) - + tty = false - - + resources (known after apply) - } - - + image_pull_secrets (known after apply) - - + readiness_gate (known after apply) - } - } - } - - + timeouts { - + create = "600s" - } - } - - # kubernetes_job.create_workspaces_europe will be created - + resource "kubernetes_job" "create_workspaces_europe" { - + id = (known after apply) - + wait_for_completion = true - - + metadata { - + generation = (known after apply) - + labels = { - + "app.kubernetes.io/name" = "d17-3-create-workspaces" - } - + name = "d17-3-create-workspaces" - + namespace = "coder" - + resource_version = (known after apply) - + uid = (known after apply) - } - - + spec { - + backoff_limit = 6 - + completion_mode = (known after apply) - + completions = 1 - + parallelism = 1 - - + selector (known after apply) - - + template { - + metadata { - + generation = (known after apply) - + name = (known after apply) - + resource_version = (known after apply) - + uid = (known after apply) - } - + spec { - + automount_service_account_token = true - + dns_policy = "ClusterFirst" - + enable_service_links = true - + host_ipc = false - + host_network = false - + host_pid = false - + hostname = (known after apply) - + node_name = (known after apply) - + restart_policy = "Never" - + scheduler_name = (known after apply) - + service_account_name = (known after apply) - + share_process_namespace = false - + termination_grace_period_seconds = 30 - - + affinity { - + node_affinity { - + required_during_scheduling_ignored_during_execution { - + node_selector_term { - + match_expressions { - + key = "cloud.google.com/gke-nodepool" - + operator = "In" - + values = [ - + "misc", - ] - } - } - } - } - } - - + container { - + command = (known after apply) - + image = "ghcr.io/coder/coder:latest" - + image_pull_policy = (known after 
apply) - + name = "cli" - + stdin = false - + stdin_once = false - + termination_message_path = "/dev/termination-log" - + termination_message_policy = (known after apply) - + tty = false - - + resources (known after apply) - } - - + image_pull_secrets (known after apply) - - + readiness_gate (known after apply) - } - } - } - - + timeouts { - + create = "600s" - } - } - - # kubernetes_job.create_workspaces_primary will be created - + resource "kubernetes_job" "create_workspaces_primary" { - + id = (known after apply) - + wait_for_completion = true - - + metadata { - + generation = (known after apply) - + labels = { - + "app.kubernetes.io/name" = "d17-3-create-workspaces" - } - + name = "d17-3-create-workspaces" - + namespace = "coder" - + resource_version = (known after apply) - + uid = (known after apply) - } - - + spec { - + backoff_limit = 6 - + completion_mode = (known after apply) - + completions = 1 - + parallelism = 1 - - + selector (known after apply) - - + template { - + metadata { - + generation = (known after apply) - + name = (known after apply) - + resource_version = (known after apply) - + uid = (known after apply) - } - + spec { - + automount_service_account_token = true - + dns_policy = "ClusterFirst" - + enable_service_links = true - + host_ipc = false - + host_network = false - + host_pid = false - + hostname = (known after apply) - + node_name = (known after apply) - + restart_policy = "Never" - + scheduler_name = (known after apply) - + service_account_name = (known after apply) - + share_process_namespace = false - + termination_grace_period_seconds = 30 - - + affinity { - + node_affinity { - + required_during_scheduling_ignored_during_execution { - + node_selector_term { - + match_expressions { - + key = "cloud.google.com/gke-nodepool" - + operator = "In" - + values = [ - + "misc", - ] - } - } - } - } - } - - + container { - + command = (known after apply) - + image = "ghcr.io/coder/coder:latest" - + image_pull_policy = (known after apply) - + name = "cli" - + stdin = false - + stdin_once = false - + termination_message_path = "/dev/termination-log" - + termination_message_policy = (known after apply) - + tty = false - - + resources (known after apply) - } - - + image_pull_secrets (known after apply) - - + readiness_gate (known after apply) - } - } - } - - + timeouts { - + create = "600s" - } - } - - # kubernetes_job.push_template_asia will be created - + resource "kubernetes_job" "push_template_asia" { - + id = (known after apply) - + wait_for_completion = true - - + metadata { - + generation = (known after apply) - + labels = { - + "app.kubernetes.io/name" = "d17-3-push-template" - } - + name = "d17-3-push-template" - + namespace = "coder" - + resource_version = (known after apply) - + uid = (known after apply) - } - - + spec { - + backoff_limit = 6 - + completion_mode = (known after apply) - + completions = 1 - + parallelism = 1 - - + selector (known after apply) - - + template { - + metadata { - + generation = (known after apply) - + name = (known after apply) - + resource_version = (known after apply) - + uid = (known after apply) - } - + spec { - + automount_service_account_token = true - + dns_policy = "ClusterFirst" - + enable_service_links = true - + host_ipc = false - + host_network = false - + host_pid = false - + hostname = (known after apply) - + node_name = (known after apply) - + restart_policy = "Never" - + scheduler_name = (known after apply) - + service_account_name = (known after apply) - + share_process_namespace = false - + 
termination_grace_period_seconds = 30 - - + affinity { - + node_affinity { - + required_during_scheduling_ignored_during_execution { - + node_selector_term { - + match_expressions { - + key = "cloud.google.com/gke-nodepool" - + operator = "In" - + values = [ - + "misc", - ] - } - } - } - } - } - - + container { - + command = (known after apply) - + image = "ghcr.io/coder/coder:latest" - + image_pull_policy = (known after apply) - + name = "cli" - + stdin = false - + stdin_once = false - + termination_message_path = "/dev/termination-log" - + termination_message_policy = (known after apply) - + tty = false - - + resources (known after apply) - - + volume_mount { - + mount_path = "/home/coder/template/main.tf" - + mount_propagation = "None" - + name = "coder-template" - + read_only = false - + sub_path = "main.tf" - } - } - - + image_pull_secrets (known after apply) - - + readiness_gate (known after apply) - - + volume { - + name = "coder-template" - - + config_map { - + default_mode = "0644" - + name = "coder-template" - } - } - } - } - } - } - - # kubernetes_job.push_template_europe will be created - + resource "kubernetes_job" "push_template_europe" { - + id = (known after apply) - + wait_for_completion = true - - + metadata { - + generation = (known after apply) - + labels = { - + "app.kubernetes.io/name" = "d17-3-push-template" - } - + name = "d17-3-push-template" - + namespace = "coder" - + resource_version = (known after apply) - + uid = (known after apply) - } - - + spec { - + backoff_limit = 6 - + completion_mode = (known after apply) - + completions = 1 - + parallelism = 1 - - + selector (known after apply) - - + template { - + metadata { - + generation = (known after apply) - + name = (known after apply) - + resource_version = (known after apply) - + uid = (known after apply) - } - + spec { - + automount_service_account_token = true - + dns_policy = "ClusterFirst" - + enable_service_links = true - + host_ipc = false - + host_network = false - + host_pid = false - + hostname = (known after apply) - + node_name = (known after apply) - + restart_policy = "Never" - + scheduler_name = (known after apply) - + service_account_name = (known after apply) - + share_process_namespace = false - + termination_grace_period_seconds = 30 - - + affinity { - + node_affinity { - + required_during_scheduling_ignored_during_execution { - + node_selector_term { - + match_expressions { - + key = "cloud.google.com/gke-nodepool" - + operator = "In" - + values = [ - + "misc", - ] - } - } - } - } - } - - + container { - + command = (known after apply) - + image = "ghcr.io/coder/coder:latest" - + image_pull_policy = (known after apply) - + name = "cli" - + stdin = false - + stdin_once = false - + termination_message_path = "/dev/termination-log" - + termination_message_policy = (known after apply) - + tty = false - - + resources (known after apply) - - + volume_mount { - + mount_path = "/home/coder/template/main.tf" - + mount_propagation = "None" - + name = "coder-template" - + read_only = false - + sub_path = "main.tf" - } - } - - + image_pull_secrets (known after apply) - - + readiness_gate (known after apply) - - + volume { - + name = "coder-template" - - + config_map { - + default_mode = "0644" - + name = "coder-template" - } - } - } - } - } - } - - # kubernetes_job.push_template_primary will be created - + resource "kubernetes_job" "push_template_primary" { - + id = (known after apply) - + wait_for_completion = true - - + metadata { - + generation = (known after apply) - + labels = { - + 
"app.kubernetes.io/name" = "d17-3-push-template" - } - + name = "d17-3-push-template" - + namespace = "coder" - + resource_version = (known after apply) - + uid = (known after apply) - } - - + spec { - + backoff_limit = 6 - + completion_mode = (known after apply) - + completions = 1 - + parallelism = 1 - - + selector (known after apply) - - + template { - + metadata { - + generation = (known after apply) - + name = (known after apply) - + resource_version = (known after apply) - + uid = (known after apply) - } - + spec { - + automount_service_account_token = true - + dns_policy = "ClusterFirst" - + enable_service_links = true - + host_ipc = false - + host_network = false - + host_pid = false - + hostname = (known after apply) - + node_name = (known after apply) - + restart_policy = "Never" - + scheduler_name = (known after apply) - + service_account_name = (known after apply) - + share_process_namespace = false - + termination_grace_period_seconds = 30 - - + affinity { - + node_affinity { - + required_during_scheduling_ignored_during_execution { - + node_selector_term { - + match_expressions { - + key = "cloud.google.com/gke-nodepool" - + operator = "In" - + values = [ - + "misc", - ] - } - } - } - } - } - - + container { - + command = (known after apply) - + image = "ghcr.io/coder/coder:latest" - + image_pull_policy = (known after apply) - + name = "cli" - + stdin = false - + stdin_once = false - + termination_message_path = "/dev/termination-log" - + termination_message_policy = (known after apply) - + tty = false - - + resources (known after apply) - - + volume_mount { - + mount_path = "/home/coder/template/main.tf" - + mount_propagation = "None" - + name = "coder-template" - + read_only = false - + sub_path = "main.tf" - } - } - - + image_pull_secrets (known after apply) - - + readiness_gate (known after apply) - - + volume { - + name = "coder-template" - - + config_map { - + default_mode = "0644" - + name = "coder-template" - } - } - } - } - } - } - - # kubernetes_job.workspace_traffic_asia will be created - + resource "kubernetes_job" "workspace_traffic_asia" { - + id = (known after apply) - + wait_for_completion = true - - + metadata { - + generation = (known after apply) - + labels = { - + "app.kubernetes.io/name" = "d17-3-workspace-traffic" - } - + name = "d17-3-workspace-traffic" - + namespace = "coder" - + resource_version = (known after apply) - + uid = (known after apply) - } - - + spec { - + backoff_limit = 6 - + completion_mode = (known after apply) - + completions = 1 - + parallelism = 1 - - + selector (known after apply) - - + template { - + metadata { - + generation = (known after apply) - + name = (known after apply) - + resource_version = (known after apply) - + uid = (known after apply) - } - + spec { - + automount_service_account_token = true - + dns_policy = "ClusterFirst" - + enable_service_links = true - + host_ipc = false - + host_network = false - + host_pid = false - + hostname = (known after apply) - + node_name = (known after apply) - + restart_policy = "Never" - + scheduler_name = (known after apply) - + service_account_name = (known after apply) - + share_process_namespace = false - + termination_grace_period_seconds = 30 - - + affinity { - + node_affinity { - + required_during_scheduling_ignored_during_execution { - + node_selector_term { - + match_expressions { - + key = "cloud.google.com/gke-nodepool" - + operator = "In" - + values = [ - + "misc", - ] - } - } - } - } - } - - + container { - + command = (known after apply) - + image = 
"ghcr.io/coder/coder:latest" - + image_pull_policy = (known after apply) - + name = "cli" - + stdin = false - + stdin_once = false - + termination_message_path = "/dev/termination-log" - + termination_message_policy = (known after apply) - + tty = false - - + env { - + name = "CODER_SCALETEST_JOB_TIMEOUT" - + value = "300s" - } - - + resources (known after apply) - } - - + image_pull_secrets (known after apply) - - + readiness_gate (known after apply) - } - } - } - - + timeouts { - + create = "420s" - } - } - - # kubernetes_job.workspace_traffic_europe will be created - + resource "kubernetes_job" "workspace_traffic_europe" { - + id = (known after apply) - + wait_for_completion = true - - + metadata { - + generation = (known after apply) - + labels = { - + "app.kubernetes.io/name" = "d17-3-workspace-traffic" - } - + name = "d17-3-workspace-traffic" - + namespace = "coder" - + resource_version = (known after apply) - + uid = (known after apply) - } - - + spec { - + backoff_limit = 6 - + completion_mode = (known after apply) - + completions = 1 - + parallelism = 1 - - + selector (known after apply) - - + template { - + metadata { - + generation = (known after apply) - + name = (known after apply) - + resource_version = (known after apply) - + uid = (known after apply) - } - + spec { - + automount_service_account_token = true - + dns_policy = "ClusterFirst" - + enable_service_links = true - + host_ipc = false - + host_network = false - + host_pid = false - + hostname = (known after apply) - + node_name = (known after apply) - + restart_policy = "Never" - + scheduler_name = (known after apply) - + service_account_name = (known after apply) - + share_process_namespace = false - + termination_grace_period_seconds = 30 - - + affinity { - + node_affinity { - + required_during_scheduling_ignored_during_execution { - + node_selector_term { - + match_expressions { - + key = "cloud.google.com/gke-nodepool" - + operator = "In" - + values = [ - + "misc", - ] - } - } - } - } - } - - + container { - + command = (known after apply) - + image = "ghcr.io/coder/coder:latest" - + image_pull_policy = (known after apply) - + name = "cli" - + stdin = false - + stdin_once = false - + termination_message_path = "/dev/termination-log" - + termination_message_policy = (known after apply) - + tty = false - - + env { - + name = "CODER_SCALETEST_JOB_TIMEOUT" - + value = "300s" - } - - + resources (known after apply) - } - - + image_pull_secrets (known after apply) - - + readiness_gate (known after apply) - } - } - } - - + timeouts { - + create = "420s" - } - } - - # kubernetes_job.workspace_traffic_primary will be created - + resource "kubernetes_job" "workspace_traffic_primary" { - + id = (known after apply) - + wait_for_completion = true - - + metadata { - + generation = (known after apply) - + labels = { - + "app.kubernetes.io/name" = "d17-3-workspace-traffic" - } - + name = "d17-3-workspace-traffic" - + namespace = "coder" - + resource_version = (known after apply) - + uid = (known after apply) - } - - + spec { - + backoff_limit = 6 - + completion_mode = (known after apply) - + completions = 1 - + parallelism = 1 - - + selector (known after apply) - - + template { - + metadata { - + generation = (known after apply) - + name = (known after apply) - + resource_version = (known after apply) - + uid = (known after apply) - } - + spec { - + automount_service_account_token = true - + dns_policy = "ClusterFirst" - + enable_service_links = true - + host_ipc = false - + host_network = false - + host_pid = false - + hostname 
= (known after apply) - + node_name = (known after apply) - + restart_policy = "Never" - + scheduler_name = (known after apply) - + service_account_name = (known after apply) - + share_process_namespace = false - + termination_grace_period_seconds = 30 - - + affinity { - + node_affinity { - + required_during_scheduling_ignored_during_execution { - + node_selector_term { - + match_expressions { - + key = "cloud.google.com/gke-nodepool" - + operator = "In" - + values = [ - + "misc", - ] - } - } - } - } - } - - + container { - + command = (known after apply) - + image = "ghcr.io/coder/coder:latest" - + image_pull_policy = (known after apply) - + name = "cli" - + stdin = false - + stdin_once = false - + termination_message_path = "/dev/termination-log" - + termination_message_policy = (known after apply) - + tty = false - - + env { - + name = "CODER_SCALETEST_JOB_TIMEOUT" - + value = "300s" - } - - + resources (known after apply) - } - - + image_pull_secrets (known after apply) - - + readiness_gate (known after apply) - } - } - } - - + timeouts { - + create = "420s" - } - } - - # kubernetes_secret.coder_db will be created - + resource "kubernetes_secret" "coder_db" { - + data = (sensitive value) - + id = (known after apply) - + type = "Opaque" - + wait_for_service_account_token = true - - + metadata { - + generation = (known after apply) - + name = "coder-db-url" - + namespace = "coder" - + resource_version = (known after apply) - + uid = (known after apply) - } - } - - # kubernetes_secret.proxy_token_asia will be created - + resource "kubernetes_secret" "proxy_token_asia" { - + data = (sensitive value) - + id = (known after apply) - + type = "Opaque" - + wait_for_service_account_token = true - - + metadata { - + generation = (known after apply) - + name = "coder-proxy-token" - + namespace = "coder" - + resource_version = (known after apply) - + uid = (known after apply) - } - } - - # kubernetes_secret.proxy_token_europe will be created - + resource "kubernetes_secret" "proxy_token_europe" { - + data = (sensitive value) - + id = (known after apply) - + type = "Opaque" - + wait_for_service_account_token = true - - + metadata { - + generation = (known after apply) - + name = "coder-proxy-token" - + namespace = "coder" - + resource_version = (known after apply) - + uid = (known after apply) - } - } - - # local_file.kubeconfig["asia"] must be replaced --/+ resource "local_file" "kubeconfig" { - ~ content = (sensitive value) # forces replacement - ~ content_base64sha256 = "ChsjhIZFYD1lrZUEY/t3R9IqIfitDSo0cZa+Ghi9TNw=" -> (known after apply) - ~ content_base64sha512 = "j6538ThzmjKMy2O+immgLHgisp84fXvGy6yPMclhqVioqFAq41+IX4tDkLxSljh3KgZLV0RHrFP95brgaT5kYw==" -> (known after apply) - ~ content_md5 = "252bb519143e7c3f6da69d5db4efd2c6" -> (known after apply) - ~ content_sha1 = "67e73f2cb69ccee72f709b6d867ef847d15801e2" -> (known after apply) - ~ content_sha256 = "0a1b23848645603d65ad950463fb7747d22a21f8ad0d2a347196be1a18bd4cdc" -> (known after apply) - ~ content_sha512 = "8fae77f138739a328ccb63be8a69a02c7822b29f387d7bc6cbac8f31c961a958a8a8502ae35f885f8b4390bc529638772a064b574447ac53fde5bae0693e6463" -> (known after apply) - ~ id = "67e73f2cb69ccee72f709b6d867ef847d15801e2" -> (known after apply) - # (3 unchanged attributes hidden) - } - - # local_file.kubeconfig["europe"] must be replaced --/+ resource "local_file" "kubeconfig" { - ~ content = (sensitive value) # forces replacement - ~ content_base64sha256 = "rp27dn0J7NB/0ic1w4bcTSpclDisikp+bKcqs0tulIo=" -> (known after apply) - ~ content_base64sha512 = 
"Hddg/XNvES9rSiesqQglLWvZgxhgJeRywZ1JcBx47RHwQuSsLsbWLBKArfDT/J/K5Datgi8BxwRNWDc6d1U7gQ==" -> (known after apply) - ~ content_md5 = "6adc0d8b5e215839d20e2e26771701bb" -> (known after apply) - ~ content_sha1 = "f597afb96c32a7571ba3a81cc18f9f2c9fe50204" -> (known after apply) - ~ content_sha256 = "ae9dbb767d09ecd07fd22735c386dc4d2a5c9438ac8a4a7e6ca72ab34b6e948a" -> (known after apply) - ~ content_sha512 = "1dd760fd736f112f6b4a27aca908252d6bd983186025e472c19d49701c78ed11f042e4ac2ec6d62c1280adf0d3fc9fcae436ad822f01c7044d58373a77553b81" -> (known after apply) - ~ id = "f597afb96c32a7571ba3a81cc18f9f2c9fe50204" -> (known after apply) - # (3 unchanged attributes hidden) - } - - # local_file.kubeconfig["primary"] must be replaced --/+ resource "local_file" "kubeconfig" { - ~ content = (sensitive value) # forces replacement - ~ content_base64sha256 = "lKpnFyQi6ZjtcWZ8ptAM8GTSkLDpv6HPJePk/MyM5H8=" -> (known after apply) - ~ content_base64sha512 = "v0awiiwAfQsnU446qMXxWv1ks529P/+0KEWtlsDO/1Hz+nv1lOV0r9tSFqIfIA8ka/uPOcXcGkBJ8NiRK5+kMA==" -> (known after apply) - ~ content_md5 = "fd30e86fee98342ae062c502021ed792" -> (known after apply) - ~ content_sha1 = "1caab4f9002954e4a59be717dab5487cd1096614" -> (known after apply) - ~ content_sha256 = "94aa67172422e998ed71667ca6d00cf064d290b0e9bfa1cf25e3e4fccc8ce47f" -> (known after apply) - ~ content_sha512 = "bf46b08a2c007d0b27538e3aa8c5f15afd64b39dbd3fffb42845ad96c0ceff51f3fa7bf594e574afdb5216a21f200f246bfb8f39c5dc1a4049f0d8912b9fa430" -> (known after apply) - ~ id = "1caab4f9002954e4a59be717dab5487cd1096614" -> (known after apply) - # (3 unchanged attributes hidden) - } - - # null_resource.api_key will be created - + resource "null_resource" "api_key" { - + id = (known after apply) - } - - # null_resource.asia_proxy_token will be created - + resource "null_resource" "asia_proxy_token" { - + id = (known after apply) - } - - # null_resource.destroy_workspaces["asia"] will be created - + resource "null_resource" "destroy_workspaces" { - + id = (known after apply) - } - - # null_resource.destroy_workspaces["europe"] will be created - + resource "null_resource" "destroy_workspaces" { - + id = (known after apply) - } - - # null_resource.destroy_workspaces["primary"] will be created - + resource "null_resource" "destroy_workspaces" { - + id = (known after apply) - } - - # null_resource.europe_proxy_token will be created - + resource "null_resource" "europe_proxy_token" { - + id = (known after apply) - } - - # null_resource.license will be created - + resource "null_resource" "license" { - + id = (known after apply) - } - - # null_resource.pprof["primary"] will be created - + resource "null_resource" "pprof" { - + id = (known after apply) - } - - # time_sleep.wait_baseline will be created - + resource "time_sleep" "wait_baseline" { - + create_duration = "60s" - + id = (known after apply) - } - -Plan: 30 to add, 0 to change, 3 to destroy. - -Do you want to perform these actions? - Terraform will perform the actions described above. - Only 'yes' will be accepted to approve. - - Enter a value: - -Interrupt received. -Please wait for Terraform to exit or data loss may occur. -Gracefully shutting down... 
From 619d69cb0d861932c3af55721e1b569aa14b01d5 Mon Sep 17 00:00:00 2001
From: Garrett Delfosse
Date: Wed, 18 Dec 2024 05:15:28 +0000
Subject: [PATCH 30/36] fix concurrency

---
 scaletest/terraform/action/coder_workspaces.tf | 6 +++---
 scaletest/terraform/action/vars.tf             | 5 -----
 2 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/scaletest/terraform/action/coder_workspaces.tf b/scaletest/terraform/action/coder_workspaces.tf
index 6ae7120eee0b1..37a834529d3fa 100644
--- a/scaletest/terraform/action/coder_workspaces.tf
+++ b/scaletest/terraform/action/coder_workspaces.tf
@@ -44,7 +44,7 @@ resource "kubernetes_job" "create_workspaces_primary" {
             "create-workspaces",
             "--count=${local.scenarios[var.scenario].workspaces.count_per_deployment}",
             "--template=kubernetes-primary",
-            "--concurrency=${var.workspace_create_concurrency}",
+            "--concurrency=${local.scenarios[var.scenario].provisionerd.replicas}",
             "--no-cleanup"
           ]
         }
@@ -103,7 +103,7 @@ resource "kubernetes_job" "create_workspaces_europe" {
             "create-workspaces",
             "--count=${local.scenarios[var.scenario].workspaces.count_per_deployment}",
             "--template=kubernetes-europe",
-            "--concurrency=${var.workspace_create_concurrency}",
+            "--concurrency=${local.scenarios[var.scenario].provisionerd.replicas}",
             "--no-cleanup"
           ]
         }
@@ -162,7 +162,7 @@ resource "kubernetes_job" "create_workspaces_asia" {
             "create-workspaces",
             "--count=${local.scenarios[var.scenario].workspaces.count_per_deployment}",
             "--template=kubernetes-asia",
-            "--concurrency=${var.workspace_create_concurrency}",
+            "--concurrency=${local.scenarios[var.scenario].provisionerd.replicas}",
             "--no-cleanup"
           ]
         }
diff --git a/scaletest/terraform/action/vars.tf b/scaletest/terraform/action/vars.tf
index 334cea498fd3d..264110e239845 100644
--- a/scaletest/terraform/action/vars.tf
+++ b/scaletest/terraform/action/vars.tf
@@ -85,8 +85,3 @@ variable "provisionerd_image_tag" {
   description = "Tag to use for Provisionerd image."
   default     = "latest"
 }
-
-variable "workspace_create_concurrency" {
-  description = "Number of concurrent workspace creation jobs to run."
- default = 10 -} From 4960713661275cf369528228ff7d326c5bbc77a5 Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Wed, 18 Dec 2024 05:18:12 +0000 Subject: [PATCH 31/36] scenarios --- scaletest/terraform/action/scenarios.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scaletest/terraform/action/scenarios.tf b/scaletest/terraform/action/scenarios.tf index 2c053cfddea5d..4bfd88c9bc3de 100644 --- a/scaletest/terraform/action/scenarios.tf +++ b/scaletest/terraform/action/scenarios.tf @@ -31,7 +31,7 @@ locals { machine_type = "t2d-standard-4" } cloudsql = { - tier = "db-custom-4-15360" + tier = "db-f1-micro" max_connections = 500 } } @@ -81,7 +81,7 @@ locals { mem_limit = "12Gi" } provisionerd = { - replicas = 50 + replicas = 30 cpu_request = "100m" mem_request = "256Mi" cpu_limit = "1000m" From 5529b79704a0d42cd1c3823671bc4415d0dc91a6 Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Thu, 19 Dec 2024 20:13:02 +0000 Subject: [PATCH 32/36] multi-traffic --- .../terraform/action/coder_helm_values.tftpl | 2 + scaletest/terraform/action/coder_pprof.tf | 76 --------- scaletest/terraform/action/coder_templates.tf | 154 ++++++++++-------- scaletest/terraform/action/coder_traffic.tf | 85 ++++++---- scaletest/terraform/action/scenarios.tf | 10 +- 5 files changed, 147 insertions(+), 180 deletions(-) delete mode 100644 scaletest/terraform/action/coder_pprof.tf diff --git a/scaletest/terraform/action/coder_helm_values.tftpl b/scaletest/terraform/action/coder_helm_values.tftpl index df8eada62eaa3..7d30a6fa825de 100644 --- a/scaletest/terraform/action/coder_helm_values.tftpl +++ b/scaletest/terraform/action/coder_helm_values.tftpl @@ -78,6 +78,8 @@ coder: value: "${experiments}" - name: "CODER_DANGEROUS_DISABLE_RATE_LIMITS" value: "true" + - name: "CODER_DANGEROUS_ALLOW_PATH_APP_SITE_OWNER_ACCESS" + value: "true" image: repo: ${image_repo} tag: ${image_tag} diff --git a/scaletest/terraform/action/coder_pprof.tf b/scaletest/terraform/action/coder_pprof.tf deleted file mode 100644 index 8be483d3638cf..0000000000000 --- a/scaletest/terraform/action/coder_pprof.tf +++ /dev/null @@ -1,76 +0,0 @@ -locals { - pprof_interval = "30s" - pprof_duration = "30m" - - pprof_ports = { - primary = 6061 - europe = 7061 - asia = 8061 - } -} - -resource "local_file" "kubeconfig" { - for_each = local.deployments - - content = templatefile("${path.module}/kubeconfig.tftpl", { - name = google_container_cluster.cluster[each.key].name - endpoint = "https://${google_container_cluster.cluster[each.key].endpoint}" - cluster_ca_certificate = google_container_cluster.cluster[each.key].master_auth[0].cluster_ca_certificate - access_token = data.google_client_config.default.access_token - }) - filename = "${path.module}/.coderv2/kubeconfig/${each.key}.yaml" -} - -resource "null_resource" "pprof" { - for_each = { - primary = {} - } - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = < Date: Thu, 19 Dec 2024 20:17:19 +0000 Subject: [PATCH 33/36] pd name --- scaletest/terraform/action/coder_helm_values.tftpl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scaletest/terraform/action/coder_helm_values.tftpl b/scaletest/terraform/action/coder_helm_values.tftpl index 7d30a6fa825de..be24bf61cd5e3 100644 --- a/scaletest/terraform/action/coder_helm_values.tftpl +++ b/scaletest/terraform/action/coder_helm_values.tftpl @@ -36,7 +36,9 @@ coder: - name: "CODER_PROVISIONERD_TAGS" value: "scope=organization,deployment=${deployment}" - name: "CODER_PROVISIONER_DAEMON_NAME" - 
value: "${deployment}" + valueFrom: + fieldRef: + fieldPath: metadata.name - name: "CODER_CONFIG_DIR" value: "/tmp/config" %{~ endif ~} From 97769e06d0f5103ddc7e7b8a757ba220b6547c2d Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Mon, 13 Jan 2025 16:10:27 +0000 Subject: [PATCH 34/36] fmt --- scaletest/terraform/action/coder_traffic.tf | 61 +++++++++------- .../terraform/action/coder_workspaces.tf | 6 +- scaletest/terraform/action/gcp_clusters.tf | 4 +- scaletest/terraform/action/k8s_coder_asia.tf | 12 ++-- scaletest/terraform/action/scenarios.tf | 70 ------------------- 5 files changed, 45 insertions(+), 108 deletions(-) diff --git a/scaletest/terraform/action/coder_traffic.tf b/scaletest/terraform/action/coder_traffic.tf index c0e8d87f341d2..cb23c4f5318f0 100644 --- a/scaletest/terraform/action/coder_traffic.tf +++ b/scaletest/terraform/action/coder_traffic.tf @@ -1,23 +1,27 @@ locals { - wait_baseline_duration = "5m" - workspace_traffic_job_timeout = "15m" - workspace_traffic_duration = "10m" - bytes_per_tick = 1024 - tick_interval = "100ms" + wait_baseline_duration = "5m" + bytes_per_tick = 1024 + tick_interval = "100ms" traffic_types = { ssh = { - wait_duration_minutes = "0" + wait_duration = "0m" + duration = "30m" + job_timeout = "35m" flags = [ "--ssh", ] } webterminal = { - wait_duration_minutes = "5" - flags = [] + wait_duration = "5m" + duration = "25m" + job_timeout = "30m" + flags = [] } app = { - wait_duration_minutes = "10" + wait_duration = "10m" + duration = "20m" + job_timeout = "25m" flags = [ "--app=wsec", ] @@ -31,15 +35,18 @@ resource "time_sleep" "wait_baseline" { kubernetes_job.create_workspaces_europe, kubernetes_job.create_workspaces_asia, ] - # depends_on = [ - # kubernetes_job.push_template_primary, - # kubernetes_job.push_template_europe, - # kubernetes_job.push_template_asia, - # ] create_duration = local.wait_baseline_duration } +resource "time_sleep" "wait_traffic" { + for_each = local.traffic_types + + depends_on = [time_sleep.wait_baseline] + + create_duration = "${local.traffic_types[each.key].wait_duration_minutes}m" +} + resource "kubernetes_job" "workspace_traffic_primary" { provider = kubernetes.primary @@ -52,7 +59,7 @@ resource "kubernetes_job" "workspace_traffic_primary" { } } spec { - completions = 1 + completions = 1 backoff_limit = 0 template { metadata {} @@ -86,7 +93,7 @@ resource "kubernetes_job" "workspace_traffic_primary" { "--bytes-per-tick=${local.bytes_per_tick}", "--tick-interval=${local.tick_interval}", "--scaletest-prometheus-wait=30s", - "--job-timeout=${local.workspace_traffic_duration}", + "--job-timeout=${local.traffic_types[each.key].duration}", ], local.traffic_types[each.key].flags) } restart_policy = "Never" @@ -96,16 +103,16 @@ resource "kubernetes_job" "workspace_traffic_primary" { wait_for_completion = true timeouts { - create = local.workspace_traffic_job_timeout + create = local.traffic_types[each.key].job_timeout } - depends_on = [time_sleep.wait_baseline] + depends_on = [time_sleep.wait_baseline, time_sleep.wait_traffic[each.key]] } resource "kubernetes_job" "workspace_traffic_europe" { provider = kubernetes.europe -for_each = local.traffic_types + for_each = local.traffic_types metadata { name = "${var.name}-workspace-traffic-${each.key}" namespace = kubernetes_namespace.coder_europe.metadata.0.name @@ -114,7 +121,7 @@ for_each = local.traffic_types } } spec { - completions = 1 + completions = 1 backoff_limit = 0 template { metadata {} @@ -148,7 +155,7 @@ for_each = local.traffic_types 
"--bytes-per-tick=${local.bytes_per_tick}", "--tick-interval=${local.tick_interval}", "--scaletest-prometheus-wait=30s", - "--job-timeout=${local.workspace_traffic_duration}", + "--job-timeout=${local.traffic_types[each.key].duration}", "--workspace-proxy-url=${local.deployments.europe.url}", ], local.traffic_types[each.key].flags) } @@ -159,10 +166,10 @@ for_each = local.traffic_types wait_for_completion = true timeouts { - create = local.workspace_traffic_job_timeout + create = local.traffic_types[each.key].job_timeout } - depends_on = [time_sleep.wait_baseline] + depends_on = [time_sleep.wait_baseline, time_sleep.wait_traffic[each.key]] } resource "kubernetes_job" "workspace_traffic_asia" { @@ -177,7 +184,7 @@ resource "kubernetes_job" "workspace_traffic_asia" { } } spec { - completions = 1 + completions = 1 backoff_limit = 0 template { metadata {} @@ -211,7 +218,7 @@ resource "kubernetes_job" "workspace_traffic_asia" { "--bytes-per-tick=${local.bytes_per_tick}", "--tick-interval=${local.tick_interval}", "--scaletest-prometheus-wait=30s", - "--job-timeout=${local.workspace_traffic_duration}", + "--job-timeout=${local.traffic_types[each.key].duration}", "--workspace-proxy-url=${local.deployments.asia.url}", ], local.traffic_types[each.key].flags) } @@ -222,8 +229,8 @@ resource "kubernetes_job" "workspace_traffic_asia" { wait_for_completion = true timeouts { - create = local.workspace_traffic_job_timeout + create = local.traffic_types[each.key].job_timeout } - depends_on = [time_sleep.wait_baseline] + depends_on = [time_sleep.wait_baseline, time_sleep.wait_traffic[each.key]] } diff --git a/scaletest/terraform/action/coder_workspaces.tf b/scaletest/terraform/action/coder_workspaces.tf index 37a834529d3fa..f49c1c996864f 100644 --- a/scaletest/terraform/action/coder_workspaces.tf +++ b/scaletest/terraform/action/coder_workspaces.tf @@ -13,7 +13,7 @@ resource "kubernetes_job" "create_workspaces_primary" { } } spec { - completions = 1 + completions = 1 backoff_limit = 0 template { metadata {} @@ -72,7 +72,7 @@ resource "kubernetes_job" "create_workspaces_europe" { } } spec { - completions = 1 + completions = 1 backoff_limit = 0 template { metadata {} @@ -131,7 +131,7 @@ resource "kubernetes_job" "create_workspaces_asia" { } } spec { - completions = 1 + completions = 1 backoff_limit = 0 template { metadata {} diff --git a/scaletest/terraform/action/gcp_clusters.tf b/scaletest/terraform/action/gcp_clusters.tf index 47be7299cd8da..c41d06c6c1c83 100644 --- a/scaletest/terraform/action/gcp_clusters.tf +++ b/scaletest/terraform/action/gcp_clusters.tf @@ -141,8 +141,8 @@ resource "google_container_node_pool" "node_pool" { } kubelet_config { cpu_manager_policy = "" - cpu_cfs_quota = false - pod_pids_limit = 0 + cpu_cfs_quota = false + pod_pids_limit = 0 } } lifecycle { diff --git a/scaletest/terraform/action/k8s_coder_asia.tf b/scaletest/terraform/action/k8s_coder_asia.tf index 8adab94f65cab..307a50136ec28 100644 --- a/scaletest/terraform/action/k8s_coder_asia.tf +++ b/scaletest/terraform/action/k8s_coder_asia.tf @@ -85,12 +85,12 @@ resource "helm_release" "provisionerd_asia" { version = var.provisionerd_chart_version namespace = kubernetes_namespace.coder_asia.metadata.0.name values = [templatefile("${path.module}/coder_helm_values.tftpl", { - workspace_proxy = false, - provisionerd = true, - primary_url = null, - proxy_token = null, - db_secret = null, - ip_address = null, + workspace_proxy = false, + provisionerd = true, + primary_url = null, + proxy_token = null, + db_secret = null, + ip_address = 
null, provisionerd_psk = kubernetes_secret.provisionerd_psk_asia.metadata.0.name, access_url = local.deployments.primary.url, node_pool = google_container_node_pool.node_pool["asia_coder"].name, diff --git a/scaletest/terraform/action/scenarios.tf b/scaletest/terraform/action/scenarios.tf index 3559b8ae81ae4..bd22fa7c5b54f 100644 --- a/scaletest/terraform/action/scenarios.tf +++ b/scaletest/terraform/action/scenarios.tf @@ -1,75 +1,5 @@ locals { scenarios = { - small = { - coder = { - nodepool_size = 1 - machine_type = "t2d-standard-4" - replicas = 1 - cpu_request = "1000m" - mem_request = "6Gi" - cpu_limit = "2000m" - mem_limit = "12Gi" - } - provisionerd = { - replicas = 1 - cpu_request = "100m" - mem_request = "1Gi" - cpu_limit = "1000m" - mem_limit = "1Gi" - } - workspaces = { - count_per_deployment = 10 - nodepool_size = 1 - machine_type = "t2d-standard-8" - cpu_request = "100m" - mem_request = "128Mi" - cpu_limit = "100m" - mem_limit = "128Mi" - } - misc = { - nodepool_size = 1 - machine_type = "t2d-standard-4" - } - cloudsql = { - tier = "db-f1-micro" - max_connections = 500 - } - } - medium = { - coder = { - nodepool_size = 1 - machine_type = "t2d-standard-8" - replicas = 1 - cpu_request = "3000m" - mem_request = "12Gi" - cpu_limit = "6000m" - mem_limit = "24Gi" - } - provisionerd = { - replicas = 1 - cpu_request = "100m" - mem_request = "1Gi" - cpu_limit = "1000m" - mem_limit = "1Gi" - } - workspaces = { - count_per_deployment = 10 - nodepool_size = 1 - machine_type = "t2d-standard-8" - cpu_request = "100m" - mem_request = "128Mi" - cpu_limit = "100m" - mem_limit = "128Mi" - } - misc = { - nodepool_size = 1 - machine_type = "t2d-standard-4" - } - cloudsql = { - tier = "db-custom-1-3840" - max_connections = 500 - } - } large = { coder = { nodepool_size = 3 From 390c7914db58409862edb8589723703109cbd838 Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Mon, 13 Jan 2025 16:12:37 +0000 Subject: [PATCH 35/36] fix --- scaletest/terraform/action/coder_traffic.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scaletest/terraform/action/coder_traffic.tf b/scaletest/terraform/action/coder_traffic.tf index cb23c4f5318f0..bea829427af82 100644 --- a/scaletest/terraform/action/coder_traffic.tf +++ b/scaletest/terraform/action/coder_traffic.tf @@ -44,7 +44,7 @@ resource "time_sleep" "wait_traffic" { depends_on = [time_sleep.wait_baseline] - create_duration = "${local.traffic_types[each.key].wait_duration_minutes}m" + create_duration = local.traffic_types[each.key].wait_duration } resource "kubernetes_job" "workspace_traffic_primary" { From a5d3d3dadb2ce4aadbcfc60d7859c9dd8d64a7cb Mon Sep 17 00:00:00 2001 From: Garrett Delfosse Date: Mon, 13 Jan 2025 16:53:12 +0000 Subject: [PATCH 36/36] update coder provider in template --- scaletest/terraform/action/coder_templates.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scaletest/terraform/action/coder_templates.tf b/scaletest/terraform/action/coder_templates.tf index 224839cb69ac0..d27c25844b91e 100644 --- a/scaletest/terraform/action/coder_templates.tf +++ b/scaletest/terraform/action/coder_templates.tf @@ -5,7 +5,7 @@ terraform { required_providers { coder = { source = "coder/coder" - version = "~> 0.23.0" + version = "~> 2.1.0" } kubernetes = { source = "hashicorp/kubernetes"