
Commit e6215c0

Merge remote-tracking branch 'origin/main' into experiment-visual-2/kira-pilot
2 parents: c120794 + 8efa123

29 files changed: +562 −53 lines changed

cli/exp_scaletest.go

Lines changed: 2 additions & 2 deletions
@@ -1241,7 +1241,7 @@ func (r *runnableTraceWrapper) Run(ctx context.Context, id string, logs io.Write
 	return r.runner.Run(ctx2, id, logs)
 }
 
-func (r *runnableTraceWrapper) Cleanup(ctx context.Context, id string) error {
+func (r *runnableTraceWrapper) Cleanup(ctx context.Context, id string, logs io.Writer) error {
 	c, ok := r.runner.(harness.Cleanable)
 	if !ok {
 		return nil
@@ -1253,7 +1253,7 @@ func (r *runnableTraceWrapper) Cleanup(ctx context.Context, id string) error {
 	ctx, span := r.tracer.Start(ctx, r.spanName+" cleanup")
 	defer span.End()
 
-	return c.Cleanup(ctx, id)
+	return c.Cleanup(ctx, id, logs)
 }
 
 // newScaleTestUser returns a random username and email address that can be used
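For reference, this is the interface shape the change implies for cleanup runners. A minimal sketch assuming only what the diff shows; the real harness.Cleanable lives in the scaletest harness package, and the doc comments here are ours:

package harness

import (
	"context"
	"io"
)

// Cleanable is implemented by runners that need a cleanup phase after a run.
// With this change, Cleanup also receives a log writer so cleanup output can
// be captured the same way run output is.
type Cleanable interface {
	Cleanup(ctx context.Context, id string, logs io.Writer) error
}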

coderd/coderd.go

Lines changed: 1 addition & 0 deletions
@@ -1108,6 +1108,7 @@ func (api *API) CreateInMemoryProvisionerDaemon(ctx context.Context) (client pro
 	logger := api.Logger.Named(fmt.Sprintf("inmem-provisionerd-%s", name))
 	logger.Info(ctx, "starting in-memory provisioner daemon")
 	srv, err := provisionerdserver.NewServer(
+		api.ctx,
 		api.AccessURL,
 		uuid.New(),
 		logger,

coderd/provisionerdserver/provisionerdserver.go

Lines changed: 23 additions & 9 deletions
@@ -58,6 +58,10 @@ type Options struct {
 }
 
 type server struct {
+	// lifecycleCtx must be tied to the API server's lifecycle
+	// as when the API server shuts down, we want to cancel any
+	// long-running operations.
+	lifecycleCtx context.Context
 	AccessURL    *url.URL
 	ID           uuid.UUID
 	Logger       slog.Logger
@@ -107,6 +111,7 @@ func (t Tags) Valid() error {
 }
 
 func NewServer(
+	lifecycleCtx context.Context,
 	accessURL *url.URL,
 	id uuid.UUID,
 	logger slog.Logger,
@@ -124,7 +129,10 @@ func NewServer(
 	deploymentValues *codersdk.DeploymentValues,
 	options Options,
 ) (proto.DRPCProvisionerDaemonServer, error) {
-	// Panic early if pointers are nil
+	// Fail-fast if pointers are nil
+	if lifecycleCtx == nil {
+		return nil, xerrors.New("ctx is nil")
+	}
 	if quotaCommitter == nil {
 		return nil, xerrors.New("quotaCommitter is nil")
 	}
@@ -153,6 +161,7 @@ func NewServer(
 		options.AcquireJobLongPollDur = DefaultAcquireJobLongPollDur
 	}
 	return &server{
+		lifecycleCtx: lifecycleCtx,
 		AccessURL:    accessURL,
 		ID:           id,
 		Logger:       logger,
@@ -1184,16 +1193,21 @@ func (s *server) CompleteJob(ctx context.Context, completed *proto.CompletedJob)
 	}
 	go func() {
 		for _, wait := range updates {
-			// Wait for the next potential timeout to occur. Note that we
-			// can't listen on the context here because we will hang around
-			// after this function has returned. The s also doesn't
-			// have a shutdown signal we can listen to.
-			<-wait
-			if err := s.Pubsub.Publish(codersdk.WorkspaceNotifyChannel(workspaceBuild.WorkspaceID), []byte{}); err != nil {
-				s.Logger.Error(ctx, "workspace notification after agent timeout failed",
+			select {
+			case <-s.lifecycleCtx.Done():
+				// If the server is shutting down, we don't want to wait around.
+				s.Logger.Debug(ctx, "stopping notifications due to server shutdown",
 					slog.F("workspace_build_id", workspaceBuild.ID),
-					slog.Error(err),
 				)
+				return
+			case <-wait:
+				// Wait for the next potential timeout to occur.
+				if err := s.Pubsub.Publish(codersdk.WorkspaceNotifyChannel(workspaceBuild.WorkspaceID), []byte{}); err != nil {
+					s.Logger.Error(ctx, "workspace notification after agent timeout failed",
+						slog.F("workspace_build_id", workspaceBuild.ID),
+						slog.Error(err),
+					)
+				}
 			}
 		}
 	}()
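The behavioral core of this file's change is the shutdown-aware wait loop: instead of blocking on <-wait unconditionally, the goroutine now also selects on the server's lifecycle context. A self-contained sketch of the same pattern, with illustrative names (waitAndNotify, notify) that are not part of the repository:

package main

import (
	"context"
	"fmt"
	"time"
)

// waitAndNotify waits on each timer channel in turn, but returns as soon as
// the lifecycle context is cancelled, so the goroutine cannot outlive the
// server that spawned it.
func waitAndNotify(lifecycleCtx context.Context, waits []<-chan time.Time, notify func() error) {
	for _, wait := range waits {
		select {
		case <-lifecycleCtx.Done():
			// Server is shutting down; stop waiting around.
			return
		case <-wait:
			if err := notify(); err != nil {
				fmt.Println("notify failed:", err)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	waits := []<-chan time.Time{time.After(10 * time.Millisecond), time.After(time.Hour)}
	// Cancel shortly after the first wait fires; without the select on the
	// lifecycle context this call would block for an hour on the second wait.
	time.AfterFunc(50*time.Millisecond, cancel)
	waitAndNotify(ctx, waits, func() error { return nil })
	fmt.Println("returned promptly after cancellation")
}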

coderd/provisionerdserver/provisionerdserver_test.go

Lines changed: 1 addition & 0 deletions
@@ -1733,6 +1733,7 @@ func setup(t *testing.T, ignoreLogErrors bool, ov *overrides) (proto.DRPCProvisi
 	}
 
 	srv, err := provisionerdserver.NewServer(
+		ctx,
 		&url.URL{},
 		srvID,
 		slogtest.Make(t, &slogtest.Options{IgnoreErrors: ignoreLogErrors}),

enterprise/coderd/provisionerdaemons.go

Lines changed: 1 addition & 0 deletions
@@ -243,6 +243,7 @@ func (api *API) provisionerDaemonServe(rw http.ResponseWriter, r *http.Request)
 	logger := api.Logger.Named(fmt.Sprintf("ext-provisionerd-%s", name))
 	logger.Info(ctx, "starting external provisioner daemon")
 	srv, err := provisionerdserver.NewServer(
+		api.ctx,
 		api.AccessURL,
 		uuid.New(),
 		logger,

helm/coder/tests/chart_test.go

Lines changed: 4 additions & 0 deletions
@@ -76,6 +76,10 @@ var testCases = []testCase{
 		name:          "env_from",
 		expectedError: "",
 	},
+	{
+		name:          "extra_templates",
+		expectedError: "",
+	},
 }
 
 type testCase struct {
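The new "extra_templates" case is exercised by the chart test harness against the golden output and values files added below. A simplified stand-in for how such a table is typically consumed; this is not the repository's actual harness, and the package name, testdata layout, chart path, and direct helm invocation are assumptions:

package tests

import (
	"os"
	"os/exec"
	"path/filepath"
	"testing"
)

// TestRenderGolden renders the chart once per test case using a per-case
// values file and compares the output against a golden file of the same name.
func TestRenderGolden(t *testing.T) {
	t.Parallel()
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			valuesPath := filepath.Join("testdata", tc.name+".yaml")   // hypothetical layout
			goldenPath := filepath.Join("testdata", tc.name+".golden") // hypothetical layout

			// Render the chart (assumed to live one directory up) with the
			// per-case values file.
			out, err := exec.Command("helm", "template", "..", "--values", valuesPath).CombinedOutput()
			if tc.expectedError != "" {
				if err == nil {
					t.Fatalf("expected error containing %q, got none", tc.expectedError)
				}
				return
			}
			if err != nil {
				t.Fatalf("helm template failed: %v\n%s", err, out)
			}

			want, err := os.ReadFile(goldenPath)
			if err != nil {
				t.Fatalf("read golden file: %v", err)
			}
			if string(out) != string(want) {
				t.Fatalf("rendered chart does not match %s", goldenPath)
			}
		})
	}
}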
Lines changed: 199 additions & 0 deletions
@@ -0,0 +1,199 @@
+---
+# Source: coder/templates/coder.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  annotations: {}
+  labels:
+    app.kubernetes.io/instance: release-name
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: coder
+    app.kubernetes.io/part-of: coder
+    app.kubernetes.io/version: 0.1.0
+    helm.sh/chart: coder-0.1.0
+  name: coder
+---
+# Source: coder/templates/extra-templates.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: some-config
+  namespace: default
+data:
+  key: some-value
+---
+# Source: coder/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: coder-workspace-perms
+rules:
+  - apiGroups: [""]
+    resources: ["pods"]
+    verbs:
+      - create
+      - delete
+      - deletecollection
+      - get
+      - list
+      - patch
+      - update
+      - watch
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs:
+      - create
+      - delete
+      - deletecollection
+      - get
+      - list
+      - patch
+      - update
+      - watch
+  - apiGroups:
+      - apps
+    resources:
+      - deployments
+    verbs:
+      - create
+      - delete
+      - deletecollection
+      - get
+      - list
+      - patch
+      - update
+      - watch
+---
+# Source: coder/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: "coder"
+subjects:
+  - kind: ServiceAccount
+    name: "coder"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: coder-workspace-perms
+---
+# Source: coder/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: coder
+  labels:
+    helm.sh/chart: coder-0.1.0
+    app.kubernetes.io/name: coder
+    app.kubernetes.io/instance: release-name
+    app.kubernetes.io/part-of: coder
+    app.kubernetes.io/version: "0.1.0"
+    app.kubernetes.io/managed-by: Helm
+  annotations:
+    {}
+spec:
+  type: LoadBalancer
+  sessionAffinity: None
+  ports:
+    - name: "http"
+      port: 80
+      targetPort: "http"
+      protocol: TCP
+
+  externalTrafficPolicy: "Cluster"
+  selector:
+    app.kubernetes.io/name: coder
+    app.kubernetes.io/instance: release-name
+---
+# Source: coder/templates/coder.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations: {}
+  labels:
+    app.kubernetes.io/instance: release-name
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: coder
+    app.kubernetes.io/part-of: coder
+    app.kubernetes.io/version: 0.1.0
+    helm.sh/chart: coder-0.1.0
+  name: coder
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/instance: release-name
+      app.kubernetes.io/name: coder
+  template:
+    metadata:
+      annotations: {}
+      labels:
+        app.kubernetes.io/instance: release-name
+        app.kubernetes.io/managed-by: Helm
+        app.kubernetes.io/name: coder
+        app.kubernetes.io/part-of: coder
+        app.kubernetes.io/version: 0.1.0
+        helm.sh/chart: coder-0.1.0
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+            - podAffinityTerm:
+                labelSelector:
+                  matchExpressions:
+                    - key: app.kubernetes.io/instance
+                      operator: In
+                      values:
+                        - coder
+                topologyKey: kubernetes.io/hostname
+              weight: 1
+      containers:
+        - args:
+            - server
+          command:
+            - /opt/coder
+          env:
+            - name: CODER_HTTP_ADDRESS
+              value: 0.0.0.0:8080
+            - name: CODER_PROMETHEUS_ADDRESS
+              value: 0.0.0.0:2112
+            - name: CODER_ACCESS_URL
+              value: http://coder.default.svc.cluster.local
+            - name: KUBE_POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: CODER_DERP_SERVER_RELAY_URL
+              value: http://$(KUBE_POD_IP):8080
+          image: ghcr.io/coder/coder:latest
+          imagePullPolicy: IfNotPresent
+          lifecycle: {}
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: http
+              scheme: HTTP
+          name: coder
+          ports:
+            - containerPort: 8080
+              name: http
+              protocol: TCP
+          readinessProbe:
+            httpGet:
+              path: /healthz
+              port: http
+              scheme: HTTP
+          resources: {}
+          securityContext:
+            allowPrivilegeEscalation: false
+            readOnlyRootFilesystem: null
+            runAsGroup: 1000
+            runAsNonRoot: true
+            runAsUser: 1000
+            seccompProfile:
+              type: RuntimeDefault
+          volumeMounts: []
+      restartPolicy: Always
+      serviceAccountName: coder
+      terminationGracePeriodSeconds: 60
+      volumes: []
Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
+coder:
+  image:
+    tag: latest
+  extraTemplates:
+    - |
+      apiVersion: v1
+      kind: ConfigMap
+      metadata:
+        name: some-config
+        namespace: {{ .Release.Namespace }}
+      data:
+        key: some-value
Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
+{{- range .Values.extraTemplates }}
+---
+{{ include "coder.renderTemplate" (dict "value" . "context" $) }}
+{{- end }}

helm/provisioner/tests/chart_test.go

Lines changed: 4 additions & 0 deletions
@@ -52,6 +52,10 @@ var testCases = []testCase{
 		name:          "provisionerd_psk",
 		expectedError: "",
 	},
+	{
+		name:          "extra_templates",
+		expectedError: "",
+	},
 }
 
 type testCase struct {
