
Commit e0abf25

remove usage of null_resource
1 parent c5d0d16 commit e0abf25
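
In outline, the commit replaces the imperative null_resource + local-exec kubectl pattern with resources managed by the kubernetes provider, and moves the hand-rolled provisionerd Deployment into a second helm_release. A condensed before/after sketch of the namespace handling, trimmed from the full diff below (the destroy-time provisioner and exact formatting are omitted):

resource "null_resource" "coder_namespace" {
  # Before: Terraform only tracked a marker resource and shelled out to kubectl,
  # so the namespace itself was invisible to the plan/apply lifecycle.
  triggers = {
    namespace       = local.coder_namespace
    kubeconfig_path = var.kubernetes_kubeconfig_path
  }
  provisioner "local-exec" {
    command = "KUBECONFIG=${self.triggers.kubeconfig_path} kubectl create namespace ${self.triggers.namespace}"
  }
}

resource "kubernetes_namespace" "coder_namespace" {
  # After: the namespace is a first-class resource, so secrets and charts can
  # reference it directly and Terraform handles create/destroy ordering.
  metadata {
    name = local.coder_namespace
  }
}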

File tree

4 files changed: +179 -248 lines


scaletest/terraform/infra/gcp_cluster.tf

Lines changed: 8 additions & 8 deletions
@@ -44,10 +44,10 @@ resource "google_container_cluster" "primary" {
 }
 
 resource "google_container_node_pool" "coder" {
-  name     = "${var.name}-coder"
-  location = var.zone
-  project  = var.project_id
-  cluster  = google_container_cluster.primary.name
+  name     = "${var.name}-coder"
+  location = var.zone
+  project  = var.project_id
+  cluster  = google_container_cluster.primary.name
   autoscaling {
     min_node_count = 1
     max_node_count = var.nodepool_size_coder
@@ -80,10 +80,10 @@ resource "google_container_node_pool" "coder" {
 }
 
 resource "google_container_node_pool" "workspaces" {
-  name     = "${var.name}-workspaces"
-  location = var.zone
-  project  = var.project_id
-  cluster  = google_container_cluster.primary.name
+  name     = "${var.name}-workspaces"
+  location = var.zone
+  project  = var.project_id
+  cluster  = google_container_cluster.primary.name
   autoscaling {
     min_node_count = 0
     max_node_count = var.nodepool_size_workspaces

scaletest/terraform/k8s/coder.tf

Lines changed: 137 additions & 188 deletions
@@ -1,53 +1,164 @@
 data "google_client_config" "default" {}
 
 locals {
-  coder_helm_repo    = "https://helm.coder.com/v2"
-  coder_helm_chart   = "coder"
-  coder_release_name = var.name
-  coder_namespace    = "coder-${var.name}"
-  coder_admin_email  = "admin@coder.com"
-  coder_admin_user   = "coder"
-  coder_access_url   = "http://${var.coder_address}"
+  coder_access_url          = "http://${var.coder_address}"
+  coder_admin_email         = "admin@coder.com"
+  coder_admin_user          = "coder"
+  coder_helm_repo           = "https://helm.coder.com/v2"
+  coder_helm_chart          = "coder"
+  coder_namespace           = "coder-${var.name}"
+  coder_release_name        = var.name
+  provisionerd_helm_chart   = "coder-provisioner"
+  provisionerd_release_name = "${var.name}-provisionerd"
 }
 
-resource "null_resource" "coder_namespace" {
-  triggers = {
-    namespace       = local.coder_namespace
-    kubeconfig_path = var.kubernetes_kubeconfig_path
-  }
-  provisioner "local-exec" {
-    when    = create
-    command = <<EOF
-      KUBECONFIG=${self.triggers.kubeconfig_path} kubectl create namespace ${self.triggers.namespace}
-    EOF
-  }
-  provisioner "local-exec" {
-    when    = destroy
-    command = "true"
+resource "kubernetes_namespace" "coder_namespace" {
+  metadata {
+    name = local.coder_namespace
   }
 }
 
+resource "random_password" "provisionerd_psk" {
+  length = 26
+}
+
 resource "kubernetes_secret" "coder-db" {
   type = "Opaque"
   metadata {
     name      = "coder-db-url"
-    namespace = local.coder_namespace
+    namespace = kubernetes_namespace.coder_namespace.metadata.0.name
   }
-  depends_on = [null_resource.coder_namespace]
   data = {
     url = var.coder_db_url
   }
 }
 
+resource "kubernetes_secret" "provisionerd_psk" {
+  type = "Opaque"
+  metadata {
+    name      = "coder-provisioner-psk"
+    namespace = kubernetes_namespace.coder_namespace.metadata.0.name
+  }
+  data = {
+    psk = random_password.provisionerd_psk.result
+  }
+}
+
+# OIDC secret needs to be manually provisioned for now.
+data "kubernetes_secret" "coder_oidc" {
+  metadata {
+    namespace = kubernetes_namespace.coder_namespace.metadata.0.name
+    name      = "coder-oidc"
+  }
+}
+
+# TLS needs to be provisioned manually for now.
+data "kubernetes_secret" "coder_tls" {
+  metadata {
+    namespace = kubernetes_namespace.coder_namespace.metadata.0.name
+    name      = "${var.name}-tls"
+  }
+}
+
 resource "helm_release" "coder-chart" {
   repository = local.coder_helm_repo
   chart      = local.coder_helm_chart
   name       = local.coder_release_name
   version    = var.coder_chart_version
-  namespace  = local.coder_namespace
-  depends_on = [
-    null_resource.coder_namespace
+  namespace  = kubernetes_namespace.coder_namespace
+  values = [<<EOF
+coder:
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+        - matchExpressions:
+          - key: "cloud.google.com/gke-nodepool"
+            operator: "In"
+            values: ["${var.kubernetes_nodepool_coder}"]
+    podAntiAffinity:
+      preferredDuringSchedulingIgnoredDuringExecution:
+      - weight: 1
+        podAffinityTerm:
+          topologyKey: "kubernetes.io/hostname"
+          labelSelector:
+            matchExpressions:
+            - key: "app.kubernetes.io/instance"
+              operator: "In"
+              values: ["${local.coder_release_name}"]
+  env:
+    - name: "CODER_ACCESS_URL"
+      value: "${local.coder_access_url}"
+    - name: "CODER_CACHE_DIRECTORY"
+      value: "/tmp/coder"
+    - name: "CODER_ENABLE_TELEMETRY"
+      value: "false"
+    - name: "CODER_LOGGING_HUMAN"
+      value: "/dev/null"
+    - name: "CODER_LOGGING_STACKDRIVER"
+      value: "/dev/stderr"
+    - name: "CODER_PG_CONNECTION_URL"
+      valueFrom:
+        secretKeyRef:
+          name: "${kubernetes_secret.coder-db.metadata.0.name}"
+          key: url
+    - name: "CODER_PPROF_ENABLE"
+      value: "true"
+    - name: "CODER_PROMETHEUS_ENABLE"
+      value: "true"
+    - name: "CODER_PROMETHEUS_COLLECT_AGENT_STATS"
+      value: "true"
+    - name: "CODER_PROMETHEUS_COLLECT_DB_METRICS"
+      value: "true"
+    - name: "CODER_VERBOSE"
+      value: "true"
+    - name: "CODER_EXPERIMENTS"
+      value: "${var.coder_experiments}"
+    - name: "CODER_DANGEROUS_DISABLE_RATE_LIMITS"
+      value: "true"
+    # Disabling built-in provisioner daemons
+    - name: "CODER_PROVISIONER_DAEMONS"
+      value: "0"
+    - name: CODER_PROVISIONER_DAEMON_PSK
+      valueFrom:
+        secretKeyRef:
+          key: psk
+          name: "${kubernetes_secret.provisionerd_psk.metadata.0.name}"
+  image:
+    repo: ${var.coder_image_repo}
+    tag: ${var.coder_image_tag}
+  replicaCount: "${var.coder_replicas}"
+  resources:
+    requests:
+      cpu: "${var.coder_cpu_request}"
+      memory: "${var.coder_mem_request}"
+    limits:
+      cpu: "${var.coder_cpu_limit}"
+      memory: "${var.coder_mem_limit}"
+  securityContext:
+    readOnlyRootFilesystem: true
+  service:
+    enable: true
+    sessionAffinity: None
+    loadBalancerIP: "${var.coder_address}"
+  volumeMounts:
+  - mountPath: "/tmp"
+    name: cache
+    readOnly: false
+  volumes:
+  - emptyDir:
+      sizeLimit: 1024Mi
+    name: cache
+EOF
   ]
+}
+
+resource "helm_release" "provisionerd_chart" {
+  repository = local.coder_helm_repo
+  chart      = local.provisionerd_helm_chart
+  name       = local.provisionerd_release_name
+  version    = var.provisionerd_chart_version
+  namespace  = kubernetes_namespace.coder_namespace
   values = [<<EOF
 coder:
   affinity:
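
The hunk above also sets up the provisioner PSK hand-off: the primary chart disables its built-in daemons (CODER_PROVISIONER_DAEMONS=0) and reads CODER_PROVISIONER_DAEMON_PSK from the generated secret, which the coder-provisioner release (its values are truncated in this hunk) is presumably wired to consume. A hypothetical convenience output, not part of the commit, that surfaces the secret name for inspection:

output "provisionerd_psk_secret_name" {
  # Hypothetical debugging aid only; references the secret created in the hunk above.
  value = kubernetes_secret.provisionerd_psk.metadata.0.name
}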
@@ -218,168 +329,6 @@ resource "local_file" "kubernetes_template" {
 EOF
 }
 
-# TODO(cian): Remove this when we have support in the Helm chart.
-# Ref: https://github.com/coder/coder/issues/8243
-resource "local_file" "provisionerd_deployment" {
-  filename = "${path.module}/../.coderv2/provisionerd-deployment.yaml"
-  content  = <<EOF
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  labels:
-    app.kubernetes.io/instance: ${var.name}
-    app.kubernetes.io/name: provisionerd
-  name: provisionerd
-  namespace: ${local.coder_namespace}
-spec:
-  replicas: ${var.provisionerd_replicas}
-  selector:
-    matchLabels:
-      app.kubernetes.io/instance: ${var.name}
-      app.kubernetes.io/name: provisionerd
-  strategy:
-    rollingUpdate:
-      maxSurge: 25%
-      maxUnavailable: 25%
-    type: RollingUpdate
-  template:
-    metadata:
-      creationTimestamp: null
-      labels:
-        app.kubernetes.io/instance: ${var.name}
-        app.kubernetes.io/name: provisionerd
-    spec:
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: cloud.google.com/gke-nodepool
-                operator: In
-                values:
-                - ${var.kubernetes_nodepool_coder}
-        podAntiAffinity:
-          preferredDuringSchedulingIgnoredDuringExecution:
-          - podAffinityTerm:
-              labelSelector:
-                matchExpressions:
-                - key: app.kubernetes.io/instance
-                  operator: In
-                  values:
-                  - ${var.name}
-              topologyKey: kubernetes.io/hostname
-            weight: 1
-      containers:
-      - args:
-        - server
-        command:
-        - /opt/coder
-        env:
-        - name: CODER_HTTP_ADDRESS
-          value: 0.0.0.0:8080
-        - name: CODER_PROMETHEUS_ADDRESS
-          value: 0.0.0.0:2112
-        - name: CODER_ACCESS_URL
-          value: ${local.coder_access_url}
-        - name: CODER_CACHE_DIRECTORY
-          value: /tmp/coder
-        - name: CODER_ENABLE_TELEMETRY
-          value: "false"
-        - name: CODER_LOGGING_HUMAN
-          value: /dev/null
-        - name: CODER_LOGGING_STACKDRIVER
-          value: /dev/stderr
-        - name: CODER_PG_CONNECTION_URL
-          valueFrom:
-            secretKeyRef:
-              key: url
-              name: coder-db-url
-        - name: CODER_PPROF_ENABLE
-          value: "true"
-        - name: CODER_PROMETHEUS_ENABLE
-          value: "true"
-        - name: CODER_PROMETHEUS_COLLECT_AGENT_STATS
-          value: "true"
-        - name: CODER_PROMETHEUS_COLLECT_DB_METRICS
-          value: "true"
-        - name: CODER_VERBOSE
-          value: "true"
-        - name: CODER_PROVISIONER_DAEMONS
-          value: "${var.provisionerd_concurrency}"
-        image: "${var.coder_image_repo}:${var.coder_image_tag}"
-        imagePullPolicy: IfNotPresent
-        lifecycle: {}
-        livenessProbe:
-          failureThreshold: 3
-          httpGet:
-            path: /api/v2/buildinfo
-            port: http
-            scheme: HTTP
-          periodSeconds: 10
-          successThreshold: 1
-          timeoutSeconds: 1
-        name: provisionerd
-        ports:
-        - containerPort: 8080
-          name: http
-          protocol: TCP
-        - containerPort: 2112
-          name: prometheus-http
-          protocol: TCP
-        readinessProbe:
-          failureThreshold: 3
-          httpGet:
-            path: /api/v2/buildinfo
-            port: http
-            scheme: HTTP
-          periodSeconds: 10
-          successThreshold: 1
-          timeoutSeconds: 1
-        resources:
-          limits:
-            cpu: "${var.provisionerd_cpu_limit}"
-            memory: "${var.provisionerd_mem_limit}"
-          requests:
-            cpu: "${var.provisionerd_cpu_request}"
-            memory: "${var.provisionerd_mem_request}"
-        securityContext:
-          allowPrivilegeEscalation: false
-          readOnlyRootFilesystem: true
-          runAsGroup: 1000
-          runAsNonRoot: true
-          runAsUser: 1000
-          seccompProfile:
-            type: RuntimeDefault
-        terminationMessagePath: /dev/termination-log
-        terminationMessagePolicy: File
-        volumeMounts:
-        - mountPath: /tmp
-          name: cache
-      dnsPolicy: ClusterFirst
-      restartPolicy: Always
-      serviceAccount: coder
-      serviceAccountName: coder
-      terminationGracePeriodSeconds: 60
-      volumes:
-      - emptyDir:
-          sizeLimit: 10Gi
-        name: cache
-EOF
-}
-
-resource "null_resource" "provisionerd_deployment_apply" {
-  depends_on = [helm_release.coder-chart, local_file.provisionerd_deployment]
-  triggers = {
-    kubeconfig_path = var.kubernetes_kubeconfig_path
-    manifest_path   = local_file.provisionerd_deployment.filename
-  }
-  provisioner "local-exec" {
-    command = <<EOF
-KUBECONFIG=${self.triggers.kubeconfig_path} kubectl apply -f ${self.triggers.manifest_path}
-EOF
-  }
-}
-
 resource "local_file" "output_vars" {
   filename = "${path.module}/../../.coderv2/url"
   content  = local.coder_access_url
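
With the null_resource shell-outs gone, nothing in this file invokes kubectl directly; the new kubernetes_* and helm_release resources instead depend on the kubernetes and helm providers being pointed at the cluster. A minimal sketch of that assumed provider wiring, which would live elsewhere in the k8s module and is not part of this diff:

provider "kubernetes" {
  # Assumed: reuse the kubeconfig that the removed local-exec kubectl calls used.
  config_path = var.kubernetes_kubeconfig_path
}

provider "helm" {
  kubernetes {
    config_path = var.kubernetes_kubeconfig_path
  }
}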
