Skip to content

Commit 1a8c658

Browse files
committed
comments
1 parent 0f056e8 commit 1a8c658

File tree

5 files changed

+33
-18
lines changed

5 files changed

+33
-18
lines changed

coderd/provisionerdserver/provisionerdserver.go

Lines changed: 16 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1334,6 +1334,20 @@ UploadFileStream:
13341334
}
13351335

13361336
switch typed := msg.Type.(type) {
1337+
case *proto.UploadFileRequest_DataUpload:
1338+
if file != nil {
1339+
return xerrors.New("unexpected file upload while waiting for file completion")
1340+
}
1341+
1342+
file, err = sdkproto.NewDataBuilder(&sdkproto.DataUpload{
1343+
UploadType: sdkproto.DataUploadType(typed.DataUpload.UploadType),
1344+
DataHash: typed.DataUpload.DataHash,
1345+
FileSize: typed.DataUpload.FileSize,
1346+
Chunks: typed.DataUpload.Chunks,
1347+
})
1348+
if err != nil {
1349+
return xerrors.Errorf("unable to create file upload: %w", err)
1350+
}
13371351
case *proto.UploadFileRequest_ChunkPiece:
13381352
if file == nil {
13391353
return xerrors.New("unexpected chunk piece while waiting for file upload")
@@ -1351,20 +1365,6 @@ UploadFileStream:
13511365
if done {
13521366
break UploadFileStream
13531367
}
1354-
case *proto.UploadFileRequest_DataUpload:
1355-
if file != nil {
1356-
return xerrors.New("unexpected file upload while waiting for file completion")
1357-
}
1358-
1359-
file, err = sdkproto.NewDataBuilder(&sdkproto.DataUpload{
1360-
UploadType: sdkproto.DataUploadType(typed.DataUpload.UploadType),
1361-
DataHash: typed.DataUpload.DataHash,
1362-
FileSize: typed.DataUpload.FileSize,
1363-
Chunks: typed.DataUpload.Chunks,
1364-
})
1365-
if err != nil {
1366-
return xerrors.Errorf("unable to create file upload: %w", err)
1367-
}
13681368
}
13691369
}
13701370

@@ -1405,6 +1405,8 @@ UploadFileStream:
14051405
slog.F("type", file.Type.String()),
14061406
slog.F("hash", hash),
14071407
slog.F("size", len(fileData)),
1408+
// new_insert indicates whether the file was newly inserted or already existed.
1409+
slog.F("new_insert", err == nil),
14081410
)
14091411

14101412
return nil

provisioner/terraform/executor.go

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -315,7 +315,9 @@ func (e *executor) plan(ctx, killCtx context.Context, env, vars []string, logr l
315315
graphTimings.ingest(createGraphTimingsEvent(timingGraphComplete))
316316

317317
var moduleFiles []byte
318-
// Skipping modules archiving is useful if the caller does not need it, eg during a workspace build.
318+
// Skipping modules archiving is useful if the caller does not need it, e.g. during
319+
// a workspace build. This removes some added costs of sending the modules
320+
// payload back to coderd if coderd is just going to ignore it.
319321
if !req.OmitModuleFiles {
320322
moduleFiles, err = GetModulesArchive(os.DirFS(e.workdir))
321323
if err != nil {

provisioner/terraform/modules.go

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,8 +87,11 @@ func GetModulesArchive(root fs.FS) ([]byte, error) {
8787
empty := true
8888
var b bytes.Buffer
8989

90-
// Limit to 20MB for now.
90+
// Limit to 20MB for now. This is the total size of a modules archive.
9191
// TODO: Determine what a reasonable limit is for modules.
92+
// If we start hitting this limit, we might want to consider adding
93+
// configurable filters? Files like images could blow up the size of a
94+
// module.
9295
lw := xio.NewLimitWriter(&b, 20<<20)
9396
w := tar.NewWriter(lw)
9497

provisionerd/provisionerd.go

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -518,9 +518,14 @@ func (p *Server) FailJob(ctx context.Context, in *proto.FailedJob) error {
518518
return err
519519
}
520520

521+
// UploadModuleFiles will insert a file into the database of coderd.
521522
func (p *Server) UploadModuleFiles(ctx context.Context, moduleFiles []byte) error {
522523
// Send the files separately if the message size is too large.
523524
_, err := clientDoWithRetries(ctx, p.client, func(ctx context.Context, client proto.DRPCProvisionerDaemonClient) (*proto.Empty, error) {
525+
// Add some timeout to prevent the stream from hanging indefinitely.
526+
ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
527+
defer cancel()
528+
524529
stream, err := client.UploadFile(ctx)
525530
if err != nil {
526531
return nil, xerrors.Errorf("failed to start CompleteJobWithFiles stream: %w", err)

provisionersdk/session.go

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -166,8 +166,11 @@ func (s *Session) handleRequests() error {
166166
resp.Type = &proto.Response_Plan{Plan: complete}
167167

168168
if protobuf.Size(resp) > drpcsdk.MaxMessageSize {
169-
// Send the modules over as a stream
170-
s.Logger.Info(s.Context(), "plan response too large, sending modules as stream")
169+
// It is likely the modules that are pushing the message size over the limit.
170+
// Send the modules over a stream of messages instead.
171+
s.Logger.Info(s.Context(), "plan response too large, sending modules as stream",
172+
slog.F("size_bytes", len(complete.ModuleFiles)),
173+
)
171174
dataUp, chunks := proto.BytesToDataUpload(proto.DataUploadType_UPLOAD_TYPE_MODULE_FILES, complete.ModuleFiles)
172175

173176
complete.ModuleFiles = nil // sent over the stream

0 commit comments

Comments
 (0)