chore(scaletest): update scaletest terraform to match big.cdr.dev #9860

Merged
merged 7 commits on Sep 27, 2023
Changes from 1 commit
remove usage of null_resource
johnstcn committed Sep 25, 2023
commit e0abf25a4ac7c56a6cf746fdb39f62e75c9964e4
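
The pattern this commit applies throughout the module: replace null_resource blocks that shell out to kubectl with first-class resources from the hashicorp/kubernetes provider, so Terraform can plan, diff, and destroy them like anything else it manages. A condensed before/after sketch of the namespace case from the diff below (the provider configuration itself is not part of this excerpt):

# Before: the namespace is created by shelling out; Terraform only tracks trigger values.
resource "null_resource" "coder_namespace" {
  triggers = {
    namespace = local.coder_namespace
  }
  provisioner "local-exec" {
    command = "kubectl create namespace ${self.triggers.namespace}"
  }
}

# After: the namespace is managed directly by the kubernetes provider.
resource "kubernetes_namespace" "coder_namespace" {
  metadata {
    name = local.coder_namespace
  }
}

Downstream resources now reference kubernetes_namespace.coder_namespace.metadata.0.name, which gives them an implicit dependency on the namespace and replaces the explicit depends_on = [null_resource.coder_namespace].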
16 changes: 8 additions & 8 deletions scaletest/terraform/infra/gcp_cluster.tf
@@ -44,10 +44,10 @@ resource "google_container_cluster" "primary" {
}

resource "google_container_node_pool" "coder" {
name = "${var.name}-coder"
location = var.zone
project = var.project_id
cluster = google_container_cluster.primary.name
name = "${var.name}-coder"
location = var.zone
project = var.project_id
cluster = google_container_cluster.primary.name
autoscaling {
min_node_count = 1
max_node_count = var.nodepool_size_coder
@@ -80,10 +80,10 @@ }
}

resource "google_container_node_pool" "workspaces" {
name = "${var.name}-workspaces"
location = var.zone
project = var.project_id
cluster = google_container_cluster.primary.name
name = "${var.name}-workspaces"
location = var.zone
project = var.project_id
cluster = google_container_cluster.primary.name
autoscaling {
min_node_count = 0
max_node_count = var.nodepool_size_workspaces
325 changes: 137 additions & 188 deletions scaletest/terraform/k8s/coder.tf
@@ -1,53 +1,164 @@
data "google_client_config" "default" {}

locals {
- coder_helm_repo = "https://helm.coder.com/v2"
- coder_helm_chart = "coder"
- coder_release_name = var.name
- coder_namespace = "coder-${var.name}"
- coder_admin_email = "admin@coder.com"
- coder_admin_user = "coder"
- coder_access_url = "http://${var.coder_address}"
+ coder_access_url = "http://${var.coder_address}"
+ coder_admin_email = "admin@coder.com"
+ coder_admin_user = "coder"
+ coder_helm_repo = "https://helm.coder.com/v2"
+ coder_helm_chart = "coder"
+ coder_namespace = "coder-${var.name}"
+ coder_release_name = var.name
+ provisionerd_helm_chart = "coder-provisioner"
+ provisionerd_release_name = "${var.name}-provisionerd"
}

resource "null_resource" "coder_namespace" {
triggers = {
namespace = local.coder_namespace
kubeconfig_path = var.kubernetes_kubeconfig_path
}
provisioner "local-exec" {
when = create
command = <<EOF
KUBECONFIG=${self.triggers.kubeconfig_path} kubectl create namespace ${self.triggers.namespace}
EOF
}
provisioner "local-exec" {
when = destroy
command = "true"
resource "kubernetes_namespace" "coder_namespace" {
metadata {
name = local.coder_namespace
}
}
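
The kubernetes_namespace and kubernetes_secret resources in this file assume a configured kubernetes provider, and the helm_release resources a helm provider; neither provider block appears in this excerpt. A minimal sketch, assuming the module keeps using the kubeconfig path variable that the removed null_resource relied on (the google_client_config data source at the top of the file suggests token-based GKE auth as the alternative):

# Hypothetical provider wiring for this module; not taken from the diff.
provider "kubernetes" {
  config_path = var.kubernetes_kubeconfig_path
}

provider "helm" {
  kubernetes {
    config_path = var.kubernetes_kubeconfig_path
  }
}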

resource "random_password" "provisionerd_psk" {
length = 26
}

resource "kubernetes_secret" "coder-db" {
type = "Opaque"
metadata {
name = "coder-db-url"
- namespace = local.coder_namespace
+ namespace = kubernetes_namespace.coder_namespace.metadata.0.name
}
- depends_on = [null_resource.coder_namespace]
data = {
url = var.coder_db_url
}
}

resource "kubernetes_secret" "provisionerd_psk" {
type = "Opaque"
metadata {
name = "coder-provisioner-psk"
namespace = kubernetes_namespace.coder_namespace.metadata.0.name
}
data = {
psk = random_password.provisionerd_psk.result
}
}
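
This PSK is what lets external provisioner daemons authenticate to coderd: the coder-chart release consumes it via CODER_PROVISIONER_DAEMON_PSK in the values below, and the coder-provisioner release would read the same secret from the other side. Its values are truncated in this diff, so the following is only a sketch of that side, assuming the coder-provisioner chart accepts a coder.env list the same way the main chart does:

# Hypothetical excerpt of the provisionerd_chart values; field names are assumptions.
coder:
  env:
    - name: "CODER_URL"
      value: "${local.coder_access_url}"
    - name: "CODER_PROVISIONER_DAEMON_PSK"
      valueFrom:
        secretKeyRef:
          name: "${kubernetes_secret.provisionerd_psk.metadata.0.name}"
          key: psk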

# OIDC secret needs to be manually provisioned for now.
data "kubernetes_secret" "coder_oidc" {
metadata {
namespace = kubernetes_namespace.coder_namespace.metadata.0.name
name = "coder-oidc"
}
}

# TLS needs to be provisioned manually for now.
data "kubernetes_secret" "coder_tls" {
metadata {
namespace = kubernetes_namespace.coder_namespace.metadata.0.name
name = "${var.name}-tls"
}
}
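
Both data sources above only read secrets that an operator created out of band; how they feed into the release is not visible in this excerpt. A hedged sketch of the likely wiring inside the Helm values, mirroring the secretKeyRef pattern used for the database URL and PSK below (the secret key names client-id and client-secret and the coder.tls.secretNames field are assumptions, not taken from the diff):

# Hypothetical additions to the coder-chart values; key and field names are assumed.
coder:
  env:
    - name: "CODER_OIDC_CLIENT_ID"
      valueFrom:
        secretKeyRef:
          name: "${data.kubernetes_secret.coder_oidc.metadata.0.name}"
          key: client-id
    - name: "CODER_OIDC_CLIENT_SECRET"
      valueFrom:
        secretKeyRef:
          name: "${data.kubernetes_secret.coder_oidc.metadata.0.name}"
          key: client-secret
  tls:
    secretNames:
      - "${data.kubernetes_secret.coder_tls.metadata.0.name}"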

resource "helm_release" "coder-chart" {
repository = local.coder_helm_repo
chart = local.coder_helm_chart
name = local.coder_release_name
version = var.coder_chart_version
- namespace = local.coder_namespace
- depends_on = [
- null_resource.coder_namespace
- ]
+ namespace = kubernetes_namespace.coder_namespace
values = [<<EOF
coder:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: "cloud.google.com/gke-nodepool"
operator: "In"
values: ["${var.kubernetes_nodepool_coder}"]
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
topologyKey: "kubernetes.io/hostname"
labelSelector:
matchExpressions:
- key: "app.kubernetes.io/instance"
operator: "In"
values: ["${local.coder_release_name}"]
env:
- name: "CODER_ACCESS_URL"
value: "${local.coder_access_url}"
- name: "CODER_CACHE_DIRECTORY"
value: "/tmp/coder"
- name: "CODER_ENABLE_TELEMETRY"
value: "false"
- name: "CODER_LOGGING_HUMAN"
value: "/dev/null"
- name: "CODER_LOGGING_STACKDRIVER"
value: "/dev/stderr"
- name: "CODER_PG_CONNECTION_URL"
valueFrom:
secretKeyRef:
name: "${kubernetes_secret.coder-db.metadata.0.name}"
key: url
- name: "CODER_PPROF_ENABLE"
value: "true"
- name: "CODER_PROMETHEUS_ENABLE"
value: "true"
- name: "CODER_PROMETHEUS_COLLECT_AGENT_STATS"
value: "true"
- name: "CODER_PROMETHEUS_COLLECT_DB_METRICS"
value: "true"
- name: "CODER_VERBOSE"
value: "true"
- name: "CODER_EXPERIMENTS"
value: "${var.coder_experiments}"
- name: "CODER_DANGEROUS_DISABLE_RATE_LIMITS"
value: "true"
# Disabling built-in provisioner daemons
- name: "CODER_PROVISIONER_DAEMONS"
value: "0"
- name: CODER_PROVISIONER_DAEMON_PSK
valueFrom:
secretKeyRef:
key: psk
name: "${kubernetes_secret.provisionerd_psk.metadata.0.name}"
image:
repo: ${var.coder_image_repo}
tag: ${var.coder_image_tag}
replicaCount: "${var.coder_replicas}"
resources:
requests:
cpu: "${var.coder_cpu_request}"
memory: "${var.coder_mem_request}"
limits:
cpu: "${var.coder_cpu_limit}"
memory: "${var.coder_mem_limit}"
securityContext:
readOnlyRootFilesystem: true
service:
enable: true
sessionAffinity: None
loadBalancerIP: "${var.coder_address}"
volumeMounts:
- mountPath: "/tmp"
name: cache
readOnly: false
volumes:
- emptyDir:
sizeLimit: 1024Mi
name: cache
EOF
]
}

resource "helm_release" "provisionerd_chart" {
repository = local.coder_helm_repo
chart = local.provisionerd_helm_chart
name = local.provisionerd_release_name
version = var.provisionerd_chart_version
namespace = kubernetes_namespace.coder_namespace
values = [<<EOF
coder:
affinity:
@@ -218,168 +329,6 @@ resource "local_file" "kubernetes_template" {
EOF
}

# TODO(cian): Remove this when we have support in the Helm chart.
# Ref: https://github.com/coder/coder/issues/8243
resource "local_file" "provisionerd_deployment" {
filename = "${path.module}/../.coderv2/provisionerd-deployment.yaml"
content = <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/instance: ${var.name}
app.kubernetes.io/name: provisionerd
name: provisionerd
namespace: ${local.coder_namespace}
spec:
replicas: ${var.provisionerd_replicas}
selector:
matchLabels:
app.kubernetes.io/instance: ${var.name}
app.kubernetes.io/name: provisionerd
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/instance: ${var.name}
app.kubernetes.io/name: provisionerd
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: cloud.google.com/gke-nodepool
operator: In
values:
- ${var.kubernetes_nodepool_coder}
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/instance
operator: In
values:
- ${var.name}
topologyKey: kubernetes.io/hostname
weight: 1
containers:
- args:
- server
command:
- /opt/coder
env:
- name: CODER_HTTP_ADDRESS
value: 0.0.0.0:8080
- name: CODER_PROMETHEUS_ADDRESS
value: 0.0.0.0:2112
- name: CODER_ACCESS_URL
value: ${local.coder_access_url}
- name: CODER_CACHE_DIRECTORY
value: /tmp/coder
- name: CODER_ENABLE_TELEMETRY
value: "false"
- name: CODER_LOGGING_HUMAN
value: /dev/null
- name: CODER_LOGGING_STACKDRIVER
value: /dev/stderr
- name: CODER_PG_CONNECTION_URL
valueFrom:
secretKeyRef:
key: url
name: coder-db-url
- name: CODER_PPROF_ENABLE
value: "true"
- name: CODER_PROMETHEUS_ENABLE
value: "true"
- name: CODER_PROMETHEUS_COLLECT_AGENT_STATS
value: "true"
- name: CODER_PROMETHEUS_COLLECT_DB_METRICS
value: "true"
- name: CODER_VERBOSE
value: "true"
- name: CODER_PROVISIONER_DAEMONS
value: "${var.provisionerd_concurrency}"
image: "${var.coder_image_repo}:${var.coder_image_tag}"
imagePullPolicy: IfNotPresent
lifecycle: {}
livenessProbe:
failureThreshold: 3
httpGet:
path: /api/v2/buildinfo
port: http
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: provisionerd
ports:
- containerPort: 8080
name: http
protocol: TCP
- containerPort: 2112
name: prometheus-http
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /api/v2/buildinfo
port: http
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
limits:
cpu: "${var.provisionerd_cpu_limit}"
memory: "${var.provisionerd_mem_limit}"
requests:
cpu: "${var.provisionerd_cpu_request}"
memory: "${var.provisionerd_mem_request}"
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /tmp
name: cache
dnsPolicy: ClusterFirst
restartPolicy: Always
serviceAccount: coder
serviceAccountName: coder
terminationGracePeriodSeconds: 60
volumes:
- emptyDir:
sizeLimit: 10Gi
name: cache
EOF
}

resource "null_resource" "provisionerd_deployment_apply" {
depends_on = [helm_release.coder-chart, local_file.provisionerd_deployment]
triggers = {
kubeconfig_path = var.kubernetes_kubeconfig_path
manifest_path = local_file.provisionerd_deployment.filename
}
provisioner "local-exec" {
command = <<EOF
KUBECONFIG=${self.triggers.kubeconfig_path} kubectl apply -f ${self.triggers.manifest_path}
EOF
}
}

resource "local_file" "output_vars" {
filename = "${path.module}/../../.coderv2/url"
content = local.coder_access_url