From bb1a5d0990006c3898de01cd39cce5ade27bb3a7 Mon Sep 17 00:00:00 2001
From: Kyle Carberry
Date: Mon, 27 Mar 2023 19:49:22 +0000
Subject: [PATCH] fix: check if logs are completed before publishing

---
 coderd/provisionerjobs.go | 72 ++++++++++++++++++++++++++-------------
 1 file changed, 49 insertions(+), 23 deletions(-)

diff --git a/coderd/provisionerjobs.go b/coderd/provisionerjobs.go
index 40089b32bd450..e03f5b9ffd28d 100644
--- a/coderd/provisionerjobs.go
+++ b/coderd/provisionerjobs.go
@@ -54,6 +54,47 @@ func (api *API) provisionerJobLogs(rw http.ResponseWriter, r *http.Request, job
 		}
 	}
 
+	if !follow {
+		logs, err := api.Database.GetProvisionerLogsAfterID(ctx, database.GetProvisionerLogsAfterIDParams{
+			JobID:        job.ID,
+			CreatedAfter: after,
+		})
+		if errors.Is(err, sql.ErrNoRows) {
+			err = nil
+		}
+		if err != nil {
+			httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
+				Message: "Internal error fetching provisioner logs.",
+				Detail:  err.Error(),
+			})
+			return
+		}
+		if logs == nil {
+			logs = []database.ProvisionerJobLog{}
+		}
+
+		logger.Debug(ctx, "Finished non-follow job logs")
+		httpapi.Write(ctx, rw, http.StatusOK, convertProvisionerJobLogs(logs))
+		return
+	}
+
+	// if we are following logs, start the subscription before we query the database, so that we don't miss any logs
+	// between the end of our query and the start of the subscription. We might get duplicates, so we'll keep track
+	// of processed IDs.
+	var bufferedLogs <-chan *database.ProvisionerJobLog
+	if follow {
+		bl, closeFollow, err := api.followProvisionerJobLogs(actor, job.ID)
+		if err != nil {
+			httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
+				Message: "Internal error watching provisioner logs.",
+				Detail:  err.Error(),
+			})
+			return
+		}
+		defer closeFollow()
+		bufferedLogs = bl
+	}
+
 	logs, err := api.Database.GetProvisionerLogsAfterID(ctx, database.GetProvisionerLogsAfterIDParams{
 		JobID:        job.ID,
 		CreatedAfter: after,
@@ -72,12 +113,6 @@ func (api *API) provisionerJobLogs(rw http.ResponseWriter, r *http.Request, job
 		logs = []database.ProvisionerJobLog{}
 	}
 
-	if !follow {
-		logger.Debug(ctx, "Finished non-follow job logs")
-		httpapi.Write(ctx, rw, http.StatusOK, convertProvisionerJobLogs(logs))
-		return
-	}
-
 	api.WebsocketWaitMutex.Lock()
 	api.WebsocketWaitGroup.Add(1)
 	api.WebsocketWaitMutex.Unlock()
@@ -106,28 +141,19 @@ func (api *API) provisionerJobLogs(rw http.ResponseWriter, r *http.Request, job
 			return
 		}
 	}
+	job, err = api.Database.GetProvisionerJobByID(ctx, job.ID)
+	if err != nil {
+		httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
+			Message: "Internal error fetching provisioner job.",
+			Detail:  err.Error(),
+		})
+		return
+	}
 	if job.CompletedAt.Valid {
 		// job was complete before we queried the database for historical logs
 		return
 	}
 
-	// if we are following logs, start the subscription before we query the database, so that we don't miss any logs
-	// between the end of our query and the start of the subscription. We might get duplicates, so we'll keep track
-	// of processed IDs.
-	var bufferedLogs <-chan *database.ProvisionerJobLog
-	if follow {
-		bl, closeFollow, err := api.followProvisionerJobLogs(actor, job.ID)
-		if err != nil {
-			httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
-				Message: "Internal error watching provisioner logs.",
-				Detail:  err.Error(),
-			})
-			return
-		}
-		defer closeFollow()
-		bufferedLogs = bl
-	}
-
 	for {
 		select {
 		case <-ctx.Done():
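
Note on the ordering above: the patch subscribes to the live log feed before running the
historical query, then re-fetches the job row so a job that completed in between does not
leave the websocket waiting forever. Below is a minimal, self-contained sketch of that
subscribe-first / query-second / dedupe-by-ID flow; it is not code from this patch, and
Log, streamLogs, and send are made-up names used only for illustration.

	package main

	import "fmt"

	// Log stands in for database.ProvisionerJobLog; only the ID matters for deduplication.
	type Log struct {
		ID   int64
		Line string
	}

	// streamLogs assumes the live feed was subscribed to *before* the historical
	// snapshot was queried, so the two sources may overlap but cannot have a gap.
	func streamLogs(history []Log, feed <-chan Log, jobCompleted bool, send func(Log)) {
		seen := map[int64]bool{}

		// 1. Catch up from the database snapshot.
		for _, l := range history {
			seen[l.ID] = true
			send(l)
		}

		// 2. If the job finished before the snapshot was taken, there is nothing left
		//    to follow; returning here avoids blocking on a feed that never ends.
		if jobCompleted {
			return
		}

		// 3. Drain the live feed, skipping IDs already sent during catch-up.
		for l := range feed {
			if seen[l.ID] {
				continue
			}
			seen[l.ID] = true
			send(l)
		}
	}

	func main() {
		feed := make(chan Log, 2)
		feed <- Log{ID: 2, Line: "also in history"} // expected duplicate, will be skipped
		feed <- Log{ID: 3, Line: "only on the feed"}
		close(feed)

		history := []Log{{ID: 1, Line: "from the database"}, {ID: 2, Line: "also in history"}}
		streamLogs(history, feed, false, func(l Log) { fmt.Println(l.ID, l.Line) })
	}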