@@ -223,6 +223,12 @@ func (a *agent) collectMetadata(ctx context.Context, md codersdk.WorkspaceAgentM
 	var out bytes.Buffer
 
 	result := &codersdk.WorkspaceAgentMetadataResult{
+		// CollectedAt is set here for testing purposes and overridden by
+		// the server to the time the server received the result to protect
+		// against clock skew.
+		//
+		// In the future, the server may accept the timestamp from the agent
+		// if it is certain the clocks are in sync.
 		CollectedAt: time.Now(),
 	}
 	cmd, err := a.createCommand(ctx, md.Script, nil)
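The new comment pins down who owns CollectedAt: the agent fills it (handy for tests), but the server replaces it with its own receive time so a skewed agent clock cannot distort the data. A minimal sketch of that server-side override, using an illustrative handler name and a local struct standing in for codersdk.WorkspaceAgentMetadataResult (not coderd's actual handler):

package main

import (
	"fmt"
	"time"
)

// metadataResult stands in for codersdk.WorkspaceAgentMetadataResult.
type metadataResult struct {
	CollectedAt time.Time
	Value       string
}

// acceptResult is a hypothetical server-side step: whatever timestamp the
// agent sent is discarded and replaced with the server's receive time,
// which is the clock-skew protection the diff comment describes.
func acceptResult(res metadataResult, receivedAt time.Time) metadataResult {
	res.CollectedAt = receivedAt
	return res
}

func main() {
	fromAgent := metadataResult{
		CollectedAt: time.Now().Add(-3 * time.Minute), // skewed agent clock
		Value:       "load: 0.42",
	}
	stored := acceptResult(fromAgent, time.Now())
	fmt.Println(stored.CollectedAt.Sub(fromAgent.CollectedAt)) // roughly 3m0s
}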
@@ -237,7 +243,7 @@ func (a *agent) collectMetadata(ctx context.Context, md codersdk.WorkspaceAgentM
 	// The error isn't mutually exclusive with useful output.
 	err = cmd.Run()
 
-	const bufLimit = 10 << 14
+	const bufLimit = 10 << 10
 	if out.Len() > bufLimit {
 		err = errors.Join(
 			err,
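The new limit is much smaller than it may look: 10 << 14 is 163,840 bytes (160 KiB), while 10 << 10 is 10,240 bytes (10 KiB), so the cap on a metadata script's output drops by a factor of 16. A quick check of the arithmetic:

package main

import "fmt"

func main() {
	const oldLimit = 10 << 14 // 10 * 2^14 = 163,840 bytes (160 KiB)
	const newLimit = 10 << 10 // 10 * 2^10 = 10,240 bytes (10 KiB)
	fmt.Println(oldLimit, newLimit, oldLimit/newLimit) // 163840 10240 16
}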
@@ -293,7 +299,7 @@ func (a *agent) reportMetadataLoop(ctx context.Context) {
 			// If we're backpressured on sending back results, we risk
 			// runaway goroutine growth and/or overloading coderd. So,
 			// we just skip the collection. Since we never update
-			// "lastCollectedAt" for this key, we'll retry the collection
+			// the collections map, we'll retry the collection
 			// on the next tick.
 			a.logger.Debug(
 				ctx, "metadata collection backpressured",
@@ -317,8 +323,9 @@ func (a *agent) reportMetadataLoop(ctx context.Context) {
 		}
 	}
 
-	// Spawn a goroutine for each metadata collection, and use channels
-	// to synchronize the results and avoid messy mutex logic.
+	// Spawn a goroutine for each metadata collection, and use a
+	// channel to synchronize the results and avoid both messy
+	// mutex logic and overloading the API.
 	for _, md := range manifest.Metadata {
 		collectedAt, ok := lastCollectedAts[md.Key]
 		if ok {
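The reworded comment describes a fan-out/fan-in shape: one short-lived goroutine per metadata key, all funnelling into a single buffered results channel that the loop drains, so shared state needs no mutex and the reporting rate stays bounded. A compact sketch of that shape with illustrative names (not the agent's real types):

package main

import (
	"fmt"
	"strings"
)

type result struct {
	key   string
	value string
}

func main() {
	keys := []string{"cpu", "memory", "disk"}
	results := make(chan result, len(keys))

	// Fan out: one goroutine per metadata key, standing in for running
	// each key's collection script.
	for _, key := range keys {
		go func(k string) {
			results <- result{key: k, value: strings.ToUpper(k)}
		}(key)
	}

	// Fan in: a single reader drains the channel, so no mutex guards the
	// collected values and results arrive at a bounded rate.
	for range keys {
		r := <-results
		fmt.Println(r.key, "=>", r.value)
	}
}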