Commit 0598aec

bpmct and johnstcn authored
chore: add cherry-picks for Coder v2.17.1 (#15454)
Co-authored-by: Cian Johnston <cian@coder.com>
1 parent 5a6d23a commit 0598aec

14 files changed: +230 -76 lines changed

coderd/provisionerdserver/acquirer_test.go (+7 -3)

@@ -523,8 +523,8 @@ func TestAcquirer_MatchTags(t *testing.T) {
 	// Generate a table that can be copy-pasted into docs/admin/provisioners.md
 	lines := []string{
 		"\n",
-		"| Provisioner Tags | Job Tags | Can Run Job? |",
-		"|------------------|----------|--------------|",
+		"| Provisioner Tags | Job Tags | Same Org | Can Run Job? |",
+		"|------------------|----------|----------|--------------|",
 	}
 	// turn the JSON map into k=v for readability
 	kvs := func(m map[string]string) string {
@@ -539,10 +539,14 @@ func TestAcquirer_MatchTags(t *testing.T) {
 	}
 	for _, tt := range testCases {
 		acquire := "✅"
+		sameOrg := "✅"
 		if !tt.expectAcquire {
 			acquire = "❌"
 		}
-		s := fmt.Sprintf("| %s | %s | %s |", kvs(tt.acquireJobTags), kvs(tt.provisionerJobTags), acquire)
+		if tt.unmatchedOrg {
+			sameOrg = "❌"
+		}
+		s := fmt.Sprintf("| %s | %s | %s | %s |", kvs(tt.acquireJobTags), kvs(tt.provisionerJobTags), sameOrg, acquire)
 		lines = append(lines, s)
 	}
 	t.Logf("You can paste this into docs/admin/provisioners.md")
docs/admin/provisioners.md (+37 -49)

@@ -178,15 +178,17 @@ A provisioner can run a given build job if one of the below is true:
 1. If a job has any explicit tags, it can only run on a provisioner with those
    explicit tags (the provisioner could have additional tags).

-The external provisioner in the above example can run build jobs with tags:
+The external provisioner in the above example can run build jobs in the same
+organization with tags:

 - `environment=on_prem`
 - `datacenter=chicago`
 - `environment=on_prem datacenter=chicago`

 However, it will not pick up any build jobs that do not have either of the
 `environment` or `datacenter` tags set. It will also not pick up any build jobs
-from templates with the tag `scope=user` set.
+from templates with the tag `scope=user` set, or build jobs from templates in
+different organizations.

 > [!NOTE] If you only run tagged provisioners, you will need to specify a set of
 > tags that matches at least one provisioner for _all_ template import jobs and
@@ -198,34 +198,35 @@ from templates with the tag `scope=user` set.

 This is illustrated in the below table:

-| Provisioner Tags | Job Tags | Can Run Job? |
-| ----------------------------------------------------------------- | ---------------------------------------------------------------- | ------------ |
-| scope=organization owner= | scope=organization owner= ||
-| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem ||
-| scope=organization owner= environment=on-prem datacenter=chicago | scope=organization owner= environment=on-prem ||
-| scope=organization owner= environment=on-prem datacenter=chicago | scope=organization owner= environment=on-prem datacenter=chicago ||
-| scope=user owner=aaa | scope=user owner=aaa ||
-| scope=user owner=aaa environment=on-prem | scope=user owner=aaa ||
-| scope=user owner=aaa environment=on-prem | scope=user owner=aaa environment=on-prem ||
-| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem ||
-| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem datacenter=chicago ||
-| scope=organization owner= | scope=organization owner= environment=on-prem ||
-| scope=organization owner= environment=on-prem | scope=organization owner= ||
-| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem datacenter=chicago ||
-| scope=organization owner= environment=on-prem datacenter=new_york | scope=organization owner= environment=on-prem datacenter=chicago ||
-| scope=user owner=aaa | scope=organization owner= ||
-| scope=user owner=aaa | scope=user owner=bbb ||
-| scope=organization owner= | scope=user owner=aaa ||
-| scope=organization owner= | scope=user owner=aaa environment=on-prem ||
-| scope=user owner=aaa | scope=user owner=aaa environment=on-prem ||
-| scope=user owner=aaa environment=on-prem | scope=user owner=aaa environment=on-prem datacenter=chicago ||
-| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem datacenter=new_york ||
+| Provisioner Tags | Job Tags | Same Org | Can Run Job? |
+| ----------------------------------------------------------------- | ---------------------------------------------------------------- | -------- | ------------ |
+| scope=organization owner= | scope=organization owner= |||
+| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem |||
+| scope=organization owner= environment=on-prem datacenter=chicago | scope=organization owner= environment=on-prem |||
+| scope=organization owner= environment=on-prem datacenter=chicago | scope=organization owner= environment=on-prem datacenter=chicago |||
+| scope=user owner=aaa | scope=user owner=aaa |||
+| scope=user owner=aaa environment=on-prem | scope=user owner=aaa |||
+| scope=user owner=aaa environment=on-prem | scope=user owner=aaa environment=on-prem |||
+| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem |||
+| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem datacenter=chicago |||
+| scope=organization owner= | scope=organization owner= environment=on-prem |||
+| scope=organization owner= environment=on-prem | scope=organization owner= |||
+| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem datacenter=chicago |||
+| scope=organization owner= environment=on-prem datacenter=new_york | scope=organization owner= environment=on-prem datacenter=chicago |||
+| scope=user owner=aaa | scope=organization owner= |||
+| scope=user owner=aaa | scope=user owner=bbb |||
+| scope=organization owner= | scope=user owner=aaa |||
+| scope=organization owner= | scope=user owner=aaa environment=on-prem |||
+| scope=user owner=aaa | scope=user owner=aaa environment=on-prem |||
+| scope=user owner=aaa environment=on-prem | scope=user owner=aaa environment=on-prem datacenter=chicago |||
+| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem datacenter=new_york |||
+| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem |||

 > **Note to maintainers:** to generate this table, run the following command and
 > copy the output:
 >
 > ```
-> go test -v -count=1 ./coderd/provisionerserver/ -test.run='^TestAcquirer_MatchTags/GenTable$'
+> go test -v -count=1 ./coderd/provisionerdserver/ -test.run='^TestAcquirer_MatchTags/GenTable$'
 > ```

 ## Types of provisioners
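
As a reading aid for the table, the rule it illustrates can be sketched as a same-organization check plus a per-tag equality check. This is only a rough approximation of the documented behaviour: it does not try to reproduce every user-scoped edge case in the table, and it is not the implementation in `coderd/provisionerdserver`.

```go
// canRunJob approximates the rule illustrated above. Assumptions: tags are
// plain string maps that already include the implicit scope/owner tags, and
// sameOrg reflects the new "Same Org" column.
func canRunJob(sameOrg bool, provisionerTags, jobTags map[string]string) bool {
	// Jobs from a different organization are never picked up.
	if !sameOrg {
		return false
	}
	// Every tag on the job must be present on the provisioner with the same value.
	for k, v := range jobTags {
		if provisionerTags[k] != v {
			return false
		}
	}
	// A provisioner with explicit (non scope/owner) tags does not pick up jobs
	// that carry none of them, per the environment/datacenter example above.
	explicit := func(tags map[string]string) int {
		n := 0
		for k := range tags {
			if k != "scope" && k != "owner" {
				n++
			}
		}
		return n
	}
	if explicit(provisionerTags) > 0 && explicit(jobTags) == 0 {
		return false
	}
	return true
}
```

For the authoritative behaviour, the maintainer note above regenerates the table directly from the test cases in `coderd/provisionerdserver/acquirer_test.go`.
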
@@ -288,8 +291,7 @@ will use in concert with the Helm chart for deploying the Coder server.
    ```sh
    coder provisioner keys create my-cool-key --org default
    # Optionally, you can specify tags for the provisioner key:
-   # coder provisioner keys create my-cool-key --org default --tags location=auh kind=k8s
-   ```
+   # coder provisioner keys create my-cool-key --org default --tag location=auh --tag kind=k8s

    Successfully created provisioner key kubernetes-key! Save this authentication
    token, it will not be shown again.
@@ -300,25 +302,7 @@ will use in concert with the Helm chart for deploying the Coder server.
 1. Store the key in a kubernetes secret:

    ```sh
-   kubectl create secret generic coder-provisioner-psk --from-literal=key1=`<key omitted>`
-   ```
-
-1. Modify your Coder `values.yaml` to include
-
-   ```yaml
-   provisionerDaemon:
-     keySecretName: "coder-provisioner-keys"
-     keySecretKey: "key1"
-   ```
-
-1. Redeploy Coder with the new `values.yaml` to roll out the PSK. You can omit
-   `--version <your version>` to also upgrade Coder to the latest version.
-
-   ```sh
-   helm upgrade coder coder-v2/coder \
-     --namespace coder \
-     --version <your version> \
-     --values values.yaml
+   kubectl create secret generic coder-provisioner-psk --from-literal=my-cool-key=`<key omitted>`
    ```

 1. Create a `provisioner-values.yaml` file for the provisioner daemons Helm
@@ -331,13 +315,17 @@ will use in concert with the Helm chart for deploying the Coder server.
        value: "https://coder.example.com"
    replicaCount: 10
    provisionerDaemon:
+     # NOTE: in older versions of the Helm chart (2.17.0 and below), it is required to set this to an empty string.
+     pskSecretName: ""
      keySecretName: "coder-provisioner-keys"
-     keySecretKey: "key1"
+     keySecretKey: "my-cool-key"
    ```

    This example creates a deployment of 10 provisioner daemons (for 10
-   concurrent builds) with the listed tags. For generic provisioners, remove the
-   tags.
+   concurrent builds) authenticating using the above key. The daemons will
+   authenticate using the provisioner key created in the previous step and
+   acquire jobs matching the tags specified when the provisioner key was
+   created. The set of tags is inferred automatically from the provisioner key.

 > Refer to the
 > [values.yaml](https://github.com/coder/coder/blob/main/helm/provisioner/values.yaml)
helm/provisioner/templates/_coder.tpl (+12 -11)

@@ -34,22 +34,23 @@ env:
   value: "0.0.0.0:2112"
 {{- if and (empty .Values.provisionerDaemon.pskSecretName) (empty .Values.provisionerDaemon.keySecretName) }}
 {{ fail "Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified." }}
-{{- else if and (.Values.provisionerDaemon.pskSecretName) (.Values.provisionerDaemon.keySecretName) }}
-{{ fail "Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified, but not both." }}
-{{- end }}
-{{- if .Values.provisionerDaemon.pskSecretName }}
-- name: CODER_PROVISIONER_DAEMON_PSK
-  valueFrom:
-    secretKeyRef:
-      name: {{ .Values.provisionerDaemon.pskSecretName | quote }}
-      key: psk
-{{- end }}
-{{- if and .Values.provisionerDaemon.keySecretName .Values.provisionerDaemon.keySecretKey }}
+{{- else if and .Values.provisionerDaemon.keySecretName .Values.provisionerDaemon.keySecretKey }}
+{{- if and (not (empty .Values.provisionerDaemon.pskSecretName)) (ne .Values.provisionerDaemon.pskSecretName "coder-provisioner-psk") }}
+{{ fail "Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified, but not both." }}
+{{- else if .Values.provisionerDaemon.tags }}
+{{ fail "provisionerDaemon.tags may not be specified with provisionerDaemon.keySecretName." }}
+{{- end }}
 - name: CODER_PROVISIONER_DAEMON_KEY
   valueFrom:
     secretKeyRef:
       name: {{ .Values.provisionerDaemon.keySecretName | quote }}
       key: {{ .Values.provisionerDaemon.keySecretKey | quote }}
+{{- else }}
+- name: CODER_PROVISIONER_DAEMON_PSK
+  valueFrom:
+    secretKeyRef:
+      name: {{ .Values.provisionerDaemon.pskSecretName | quote }}
+      key: psk
 {{- end }}
 {{- if include "provisioner.tags" . }}
 - name: CODER_PROVISIONERD_TAGS
helm/provisioner/tests/chart_test.go (+10)

@@ -56,6 +56,12 @@ var testCases = []testCase{
 		name: "provisionerd_key",
 		expectedError: "",
 	},
+	// Test explicitly for the workaround where setting provisionerDaemon.pskSecretName=""
+	// was required to use provisioner keys.
+	{
+		name: "provisionerd_key_psk_empty_workaround",
+		expectedError: "",
+	},
 	{
 		name: "provisionerd_psk_and_key",
 		expectedError: `Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified, but not both.`,
@@ -64,6 +70,10 @@ var testCases = []testCase{
 		name: "provisionerd_no_psk_or_key",
 		expectedError: `Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified.`,
 	},
+	{
+		name: "provisionerd_key_tags",
+		expectedError: `provisionerDaemon.tags may not be specified with provisionerDaemon.keySecretName.`,
+	},
 	{
 		name: "extra_templates",
 		expectedError: "",

helm/provisioner/tests/testdata/provisionerd_key.golden (-2)

@@ -112,8 +112,6 @@ spec:
                 secretKeyRef:
                   key: provisionerd-key
                   name: coder-provisionerd-key
-            - name: CODER_PROVISIONERD_TAGS
-              value: clusterType=k8s,location=auh
             - name: CODER_URL
               value: http://coder.default.svc.cluster.local
           image: ghcr.io/coder/coder:latest

helm/provisioner/tests/testdata/provisionerd_key.yaml (-4)

@@ -2,9 +2,5 @@ coder:
   image:
     tag: latest
 provisionerDaemon:
-  pskSecretName: ""
   keySecretName: "coder-provisionerd-key"
   keySecretKey: "provisionerd-key"
-  tags:
-    location: auh
-    clusterType: k8s
New file (+135)

@@ -0,0 +1,135 @@
+---
+# Source: coder-provisioner/templates/coder.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  annotations: {}
+  labels:
+    app.kubernetes.io/instance: release-name
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: coder-provisioner
+    app.kubernetes.io/part-of: coder-provisioner
+    app.kubernetes.io/version: 0.1.0
+    helm.sh/chart: coder-provisioner-0.1.0
+  name: coder-provisioner
+---
+# Source: coder-provisioner/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: coder-provisioner-workspace-perms
+rules:
+  - apiGroups: [""]
+    resources: ["pods"]
+    verbs:
+      - create
+      - delete
+      - deletecollection
+      - get
+      - list
+      - patch
+      - update
+      - watch
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs:
+      - create
+      - delete
+      - deletecollection
+      - get
+      - list
+      - patch
+      - update
+      - watch
+  - apiGroups:
+      - apps
+    resources:
+      - deployments
+    verbs:
+      - create
+      - delete
+      - deletecollection
+      - get
+      - list
+      - patch
+      - update
+      - watch
+---
+# Source: coder-provisioner/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: "coder-provisioner"
+subjects:
+  - kind: ServiceAccount
+    name: "coder-provisioner"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: coder-provisioner-workspace-perms
+---
+# Source: coder-provisioner/templates/coder.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations: {}
+  labels:
+    app.kubernetes.io/instance: release-name
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: coder-provisioner
+    app.kubernetes.io/part-of: coder-provisioner
+    app.kubernetes.io/version: 0.1.0
+    helm.sh/chart: coder-provisioner-0.1.0
+  name: coder-provisioner
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/instance: release-name
+      app.kubernetes.io/name: coder-provisioner
+  template:
+    metadata:
+      annotations: {}
+      labels:
+        app.kubernetes.io/instance: release-name
+        app.kubernetes.io/managed-by: Helm
+        app.kubernetes.io/name: coder-provisioner
+        app.kubernetes.io/part-of: coder-provisioner
+        app.kubernetes.io/version: 0.1.0
+        helm.sh/chart: coder-provisioner-0.1.0
+    spec:
+      containers:
+        - args:
+            - provisionerd
+            - start
+          command:
+            - /opt/coder
+          env:
+            - name: CODER_PROMETHEUS_ADDRESS
+              value: 0.0.0.0:2112
+            - name: CODER_PROVISIONER_DAEMON_KEY
+              valueFrom:
+                secretKeyRef:
+                  key: provisionerd-key
+                  name: coder-provisionerd-key
+            - name: CODER_URL
+              value: http://coder.default.svc.cluster.local
+          image: ghcr.io/coder/coder:latest
+          imagePullPolicy: IfNotPresent
+          lifecycle: {}
+          name: coder
+          ports: null
+          resources: {}
+          securityContext:
+            allowPrivilegeEscalation: false
+            readOnlyRootFilesystem: null
+            runAsGroup: 1000
+            runAsNonRoot: true
+            runAsUser: 1000
+            seccompProfile:
+              type: RuntimeDefault
+          volumeMounts: []
+      restartPolicy: Always
+      serviceAccountName: coder-provisioner
+      terminationGracePeriodSeconds: 600
+      volumes: []
New file (+7)

@@ -0,0 +1,7 @@
+coder:
+  image:
+    tag: latest
+provisionerDaemon:
+  pskSecretName: ""
+  keySecretName: "coder-provisionerd-key"
+  keySecretKey: "provisionerd-key"
New file (+9)

@@ -0,0 +1,9 @@
+coder:
+  image:
+    tag: latest
+provisionerDaemon:
+  keySecretName: "coder-provisionerd-key"
+  keySecretKey: "provisionerd-key"
+  tags:
+    location: auh
+    clusterType: k8s
