[pull] main from coder:main #61

Merged: 14 commits, May 27, 2025

2 changes: 1 addition & 1 deletion agent/agent_test.go
@@ -2226,7 +2226,7 @@ func TestAgent_DevcontainerRecreate(t *testing.T) {
// devcontainer, we do it in a goroutine so we can process logs
// concurrently.
go func(container codersdk.WorkspaceAgentContainer) {
- err := conn.RecreateDevcontainer(ctx, container.ID)
+ _, err := conn.RecreateDevcontainer(ctx, container.ID)
assert.NoError(t, err, "recreate devcontainer should succeed")
}(container)

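The only change here is at the call site: RecreateDevcontainer now returns a value in addition to the error, and the test discards it. A minimal, self-contained sketch of the implied signature change; the Response type below is a stand-in assumption, since the real return type is not shown in this diff:

```go
package main

import (
	"context"
	"fmt"
)

// Response is a hypothetical stand-in for whatever the method now
// returns first; the diff only shows callers discarding it.
type Response struct{ Message string }

// Before: func recreateDevcontainer(ctx context.Context, id string) error
// After:  the method also returns a response payload.
func recreateDevcontainer(_ context.Context, id string) (Response, error) {
	return Response{Message: "recreation of " + id + " started"}, nil
}

func main() {
	// Callers that only care about success/failure use the blank
	// identifier, exactly as the updated test does.
	_, err := recreateDevcontainer(context.Background(), "container-1")
	fmt.Println(err == nil) // true
}
```
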
26 changes: 22 additions & 4 deletions agent/agentcontainers/api.go
@@ -403,6 +403,7 @@ func (api *API) processUpdatedContainersLocked(ctx context.Context, updated code
// Check if the container is running and update the known devcontainers.
for i := range updated.Containers {
container := &updated.Containers[i] // Grab a reference to the container to allow mutating it.
+ container.DevcontainerStatus = "" // Reset the status for the container (updated later).
container.DevcontainerDirty = false // Reset dirty state for the container (updated later).

workspaceFolder := container.Labels[DevcontainerLocalFolderLabel]
@@ -465,16 +466,25 @@ func (api *API) processUpdatedContainersLocked(ctx context.Context, updated code
for _, dc := range api.knownDevcontainers {
switch {
case dc.Status == codersdk.WorkspaceAgentDevcontainerStatusStarting:
+ if dc.Container != nil {
+ dc.Container.DevcontainerStatus = dc.Status
+ dc.Container.DevcontainerDirty = dc.Dirty
+ }
continue // This state is handled by the recreation routine.

case dc.Status == codersdk.WorkspaceAgentDevcontainerStatusError && (dc.Container == nil || dc.Container.CreatedAt.Before(api.recreateErrorTimes[dc.WorkspaceFolder])):
+ if dc.Container != nil {
+ dc.Container.DevcontainerStatus = dc.Status
+ dc.Container.DevcontainerDirty = dc.Dirty
+ }
continue // The devcontainer needs to be recreated.

case dc.Container != nil:
dc.Status = codersdk.WorkspaceAgentDevcontainerStatusStopped
if dc.Container.Running {
dc.Status = codersdk.WorkspaceAgentDevcontainerStatusRunning
}
+ dc.Container.DevcontainerStatus = dc.Status

dc.Dirty = false
if lastModified, hasModTime := api.configFileModifiedTimes[dc.ConfigPath]; hasModTime && dc.Container.CreatedAt.Before(lastModified) {
@@ -608,6 +618,9 @@ func (api *API) handleDevcontainerRecreate(w http.ResponseWriter, r *http.Reques
// Update the status so that we don't try to recreate the
// devcontainer multiple times in parallel.
dc.Status = codersdk.WorkspaceAgentDevcontainerStatusStarting
+ if dc.Container != nil {
+ dc.Container.DevcontainerStatus = dc.Status
+ }
api.knownDevcontainers[dc.WorkspaceFolder] = dc
api.recreateWg.Add(1)
go api.recreateDevcontainer(dc, configPath)
@@ -680,6 +693,9 @@ func (api *API) recreateDevcontainer(dc codersdk.WorkspaceAgentDevcontainer, con
api.mu.Lock()
dc = api.knownDevcontainers[dc.WorkspaceFolder]
dc.Status = codersdk.WorkspaceAgentDevcontainerStatusError
+ if dc.Container != nil {
+ dc.Container.DevcontainerStatus = dc.Status
+ }
api.knownDevcontainers[dc.WorkspaceFolder] = dc
api.recreateErrorTimes[dc.WorkspaceFolder] = api.clock.Now("recreate", "errorTimes")
api.mu.Unlock()
@@ -695,10 +711,12 @@ func (api *API) recreateDevcontainer(dc codersdk.WorkspaceAgentDevcontainer, con
// allows the update routine to update the devcontainer status, but
// to minimize the time between API consistency, we guess the status
// based on the container state.
- if dc.Container != nil && dc.Container.Running {
- dc.Status = codersdk.WorkspaceAgentDevcontainerStatusRunning
- } else {
- dc.Status = codersdk.WorkspaceAgentDevcontainerStatusStopped
+ dc.Status = codersdk.WorkspaceAgentDevcontainerStatusStopped
+ if dc.Container != nil {
+ if dc.Container.Running {
+ dc.Status = codersdk.WorkspaceAgentDevcontainerStatusRunning
+ }
+ dc.Container.DevcontainerStatus = dc.Status
}
dc.Dirty = false
api.recreateSuccessTimes[dc.WorkspaceFolder] = api.clock.Now("recreate", "successTimes")
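
Taken together, these hunks apply one pattern: compute the devcontainer's status first, then mirror it onto the container reference whenever one exists, so the container and devcontainer API views report the same state. A trimmed sketch of that pattern, using simplified stand-in types rather than the actual codersdk definitions:

```go
package main

import "fmt"

type Status string

const (
	StatusRunning Status = "running"
	StatusStopped Status = "stopped"
)

type Container struct {
	Running            bool
	DevcontainerStatus Status // mirrored copy, reset on every update pass
}

type Devcontainer struct {
	Status    Status
	Container *Container // may be nil if no container matched the folder
}

func updateStatus(dc *Devcontainer) {
	// Default to stopped; upgrade to running from the container state.
	dc.Status = StatusStopped
	if dc.Container != nil {
		if dc.Container.Running {
			dc.Status = StatusRunning
		}
		// Mirror the devcontainer status onto the container reference.
		dc.Container.DevcontainerStatus = dc.Status
	}
}

func main() {
	dc := &Devcontainer{Container: &Container{Running: true}}
	updateStatus(dc)
	fmt.Println(dc.Status, dc.Container.DevcontainerStatus) // running running
}
```
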
48 changes: 37 additions & 11 deletions agent/agentcontainers/api_test.go
@@ -257,7 +257,7 @@ func TestAPI(t *testing.T) {

// Make sure the ticker function has been registered
// before advancing the clock.
- tickerTrap.MustWait(ctx).Release()
+ tickerTrap.MustWait(ctx).MustRelease(ctx)
tickerTrap.Close()

// Initial request returns the initial data.
@@ -432,7 +432,7 @@ func TestAPI(t *testing.T) {

// Make sure the ticker function has been registered
// before advancing the clock.
- tickerTrap.MustWait(ctx).Release()
+ tickerTrap.MustWait(ctx).MustRelease(ctx)
tickerTrap.Close()

for i := range tt.wantStatus {
@@ -477,6 +477,8 @@ func TestAPI(t *testing.T) {
require.NoError(t, err, "unmarshal response failed")
require.Len(t, resp.Devcontainers, 1, "expected one devcontainer in response")
assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusStarting, resp.Devcontainers[0].Status, "devcontainer is not starting")
+ require.NotNil(t, resp.Devcontainers[0].Container, "devcontainer should have container reference")
+ assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusStarting, resp.Devcontainers[0].Container.DevcontainerStatus, "container dc status is not starting")

// Allow the devcontainer CLI to continue the up process.
close(tt.devcontainerCLI.continueUp)
@@ -486,7 +488,7 @@ func TestAPI(t *testing.T) {
nowRecreateSuccessTrap.Close()
// The timestamp for the error will be stored, which gives
// us a good anchor point to know when to do our request.
- nowRecreateErrorTrap.MustWait(ctx).Release()
+ nowRecreateErrorTrap.MustWait(ctx).MustRelease(ctx)
nowRecreateErrorTrap.Close()

// Advance the clock to run the devcontainer state update routine.
@@ -503,11 +505,13 @@ func TestAPI(t *testing.T) {
require.NoError(t, err, "unmarshal response failed after error")
require.Len(t, resp.Devcontainers, 1, "expected one devcontainer in response after error")
assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusError, resp.Devcontainers[0].Status, "devcontainer is not in an error state after up failure")
+ require.NotNil(t, resp.Devcontainers[0].Container, "devcontainer should have container reference after up failure")
+ assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusError, resp.Devcontainers[0].Container.DevcontainerStatus, "container dc status is not error after up failure")
return
}

// Ensure the devcontainer ends up in success state.
- nowRecreateSuccessTrap.MustWait(ctx).Release()
+ nowRecreateSuccessTrap.MustWait(ctx).MustRelease(ctx)
nowRecreateSuccessTrap.Close()

// Advance the clock to run the devcontainer state update routine.
@@ -525,7 +529,9 @@ func TestAPI(t *testing.T) {
err = json.NewDecoder(rec.Body).Decode(&resp)
require.NoError(t, err, "unmarshal response failed after recreation")
require.Len(t, resp.Devcontainers, 1, "expected one devcontainer in response after recreation")
- assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusRunning, resp.Devcontainers[0].Status, "devcontainer is not stopped after recreation")
+ assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusRunning, resp.Devcontainers[0].Status, "devcontainer is not running after recreation")
+ require.NotNil(t, resp.Devcontainers[0].Container, "devcontainer should have container reference after recreation")
+ assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusRunning, resp.Devcontainers[0].Container.DevcontainerStatus, "container dc status is not running after recreation")
})
}
})
@@ -620,6 +626,7 @@ func TestAPI(t *testing.T) {
assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusRunning, dc.Status)
require.NotNil(t, dc.Container)
assert.Equal(t, "runtime-container-1", dc.Container.ID)
+ assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusRunning, dc.Container.DevcontainerStatus)
},
},
{
@@ -660,12 +667,14 @@ func TestAPI(t *testing.T) {
assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusStopped, known2.Status)
assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusRunning, runtime1.Status)

- require.NotNil(t, known1.Container)
assert.Nil(t, known2.Container)
- require.NotNil(t, runtime1.Container)

+ require.NotNil(t, known1.Container)
assert.Equal(t, "known-container-1", known1.Container.ID)
+ assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusRunning, known1.Container.DevcontainerStatus)
+ require.NotNil(t, runtime1.Container)
assert.Equal(t, "runtime-container-1", runtime1.Container.ID)
+ assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusRunning, runtime1.Container.DevcontainerStatus)
},
},
{
@@ -704,10 +713,12 @@ func TestAPI(t *testing.T) {
assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusStopped, nonRunning.Status)

require.NotNil(t, running.Container, "running container should have container reference")
- require.NotNil(t, nonRunning.Container, "non-running container should have container reference")
-
assert.Equal(t, "running-container", running.Container.ID)
+ assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusRunning, running.Container.DevcontainerStatus)
+
+ require.NotNil(t, nonRunning.Container, "non-running container should have container reference")
assert.Equal(t, "non-running-container", nonRunning.Container.ID)
+ assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusStopped, nonRunning.Container.DevcontainerStatus)
},
},
{
@@ -743,6 +754,7 @@ func TestAPI(t *testing.T) {
assert.NotEmpty(t, dc2.ConfigPath)
require.NotNil(t, dc2.Container)
assert.Equal(t, "known-container-2", dc2.Container.ID)
+ assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusRunning, dc2.Container.DevcontainerStatus)
},
},
{
@@ -811,9 +823,14 @@ func TestAPI(t *testing.T) {

logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)

+ mClock := quartz.NewMock(t)
+ mClock.Set(time.Now()).MustWait(testutil.Context(t, testutil.WaitShort))
+ tickerTrap := mClock.Trap().TickerFunc("updaterLoop")
+
// Setup router with the handler under test.
r := chi.NewRouter()
apiOptions := []agentcontainers.Option{
+ agentcontainers.WithClock(mClock),
agentcontainers.WithLister(tt.lister),
agentcontainers.WithWatcher(watcher.NewNoop()),
}
@@ -838,6 +855,15 @@ func TestAPI(t *testing.T) {

ctx := testutil.Context(t, testutil.WaitShort)

+ // Make sure the ticker function has been registered
+ // before advancing the clock.
+ tickerTrap.MustWait(ctx).MustRelease(ctx)
+ tickerTrap.Close()
+
+ // Advance the clock to run the updater loop.
+ _, aw := mClock.AdvanceNext()
+ aw.MustWait(ctx)
+
req := httptest.NewRequest(http.MethodGet, "/devcontainers", nil).
WithContext(ctx)
rec := httptest.NewRecorder()
@@ -911,7 +937,7 @@ func TestAPI(t *testing.T) {

// Make sure the ticker function has been registered
// before advancing any use of mClock.Advance.
- tickerTrap.MustWait(ctx).Release()
+ tickerTrap.MustWait(ctx).MustRelease(ctx)
tickerTrap.Close()

// Make sure the start loop has been called.
@@ -1007,7 +1033,7 @@ func TestAPI(t *testing.T) {

// Make sure the ticker function has been registered
// before advancing any use of mClock.Advance.
- tickerTrap.MustWait(ctx).Release()
+ tickerTrap.MustWait(ctx).MustRelease(ctx)
tickerTrap.Close()

// Call the list endpoint first to ensure config files are
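
The recurring Release() → MustRelease(ctx) change in this file and the test files below migrates to quartz's context-aware trap release, which fails the test if the context expires instead of hanging forever. A rough sketch of the trap-and-advance choreography these tests follow, assuming github.com/coder/quartz; the ticker body and timeout are illustrative, not taken from the PR:

```go
package example_test

import (
	"context"
	"testing"
	"time"

	"github.com/coder/quartz"
)

func TestTickerPattern(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	mClock := quartz.NewMock(t)
	mClock.Set(time.Now()).MustWait(ctx)
	tickerTrap := mClock.Trap().TickerFunc("updaterLoop")
	defer tickerTrap.Close()

	ticks := make(chan time.Time, 1)
	go func() {
		// Stand-in for the code under test: it registers a ticker
		// tagged "updaterLoop", which the trap above intercepts.
		_ = mClock.TickerFunc(ctx, time.Second, func() error {
			ticks <- mClock.Now()
			return nil
		}, "updaterLoop")
	}()

	// Block until the ticker registration hits the trap, then let it
	// proceed; MustRelease fails the test if ctx expires first.
	tickerTrap.MustWait(ctx).MustRelease(ctx)

	// Fire the next scheduled tick without sleeping in real time.
	_, aw := mClock.AdvanceNext()
	aw.MustWait(ctx)

	select {
	case <-ticks:
	case <-ctx.Done():
		t.Fatal("ticker never fired")
	}
}
```
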
18 changes: 9 additions & 9 deletions agent/apphealth_test.go
@@ -78,7 +78,7 @@ func TestAppHealth_Healthy(t *testing.T) {
healthchecksStarted := make([]string, 2)
for i := 0; i < 2; i++ {
c := healthcheckTrap.MustWait(ctx)
- c.Release()
+ c.MustRelease(ctx)
healthchecksStarted[i] = c.Tags[1]
}
slices.Sort(healthchecksStarted)
@@ -87,7 +87,7 @@ func TestAppHealth_Healthy(t *testing.T) {
// advance the clock 1ms before the report ticker starts, so that it's not
// simultaneous with the checks.
mClock.Advance(time.Millisecond).MustWait(ctx)
- reportTrap.MustWait(ctx).Release()
+ reportTrap.MustWait(ctx).MustRelease(ctx)

mClock.Advance(999 * time.Millisecond).MustWait(ctx) // app2 is now healthy

@@ -143,11 +143,11 @@ func TestAppHealth_500(t *testing.T) {

fakeAPI, closeFn := setupAppReporter(ctx, t, slices.Clone(apps), handlers, mClock)
defer closeFn()
- healthcheckTrap.MustWait(ctx).Release()
+ healthcheckTrap.MustWait(ctx).MustRelease(ctx)
// advance the clock 1ms before the report ticker starts, so that it's not
// simultaneous with the checks.
mClock.Advance(time.Millisecond).MustWait(ctx)
- reportTrap.MustWait(ctx).Release()
+ reportTrap.MustWait(ctx).MustRelease(ctx)

mClock.Advance(999 * time.Millisecond).MustWait(ctx) // check gets triggered
mClock.Advance(time.Millisecond).MustWait(ctx) // report gets triggered, but unsent since we are at the threshold
@@ -202,25 +202,25 @@ func TestAppHealth_Timeout(t *testing.T) {

fakeAPI, closeFn := setupAppReporter(ctx, t, apps, handlers, mClock)
defer closeFn()
- healthcheckTrap.MustWait(ctx).Release()
+ healthcheckTrap.MustWait(ctx).MustRelease(ctx)
// advance the clock 1ms before the report ticker starts, so that it's not
// simultaneous with the checks.
mClock.Set(ms(1)).MustWait(ctx)
- reportTrap.MustWait(ctx).Release()
+ reportTrap.MustWait(ctx).MustRelease(ctx)

w := mClock.Set(ms(1000)) // 1st check starts
- timeoutTrap.MustWait(ctx).Release()
+ timeoutTrap.MustWait(ctx).MustRelease(ctx)
mClock.Set(ms(1001)).MustWait(ctx) // report tick, no change
mClock.Set(ms(1999)) // timeout pops
w.MustWait(ctx) // 1st check finished
w = mClock.Set(ms(2000)) // 2nd check starts
- timeoutTrap.MustWait(ctx).Release()
+ timeoutTrap.MustWait(ctx).MustRelease(ctx)
mClock.Set(ms(2001)).MustWait(ctx) // report tick, no change
mClock.Set(ms(2999)) // timeout pops
w.MustWait(ctx) // 2nd check finished
// app is now unhealthy after 2 timeouts
mClock.Set(ms(3000)) // 3rd check starts
- timeoutTrap.MustWait(ctx).Release()
+ timeoutTrap.MustWait(ctx).MustRelease(ctx)
mClock.Set(ms(3001)).MustWait(ctx) // report tick, sends changes

update := testutil.TryReceive(ctx, t, fakeAPI.AppHealthCh())
2 changes: 1 addition & 1 deletion cli/ssh_internal_test.go
@@ -206,7 +206,7 @@ func TestCloserStack_Timeout(t *testing.T) {
defer close(closed)
uut.close(nil)
}()
- trap.MustWait(ctx).Release()
+ trap.MustWait(ctx).MustRelease(ctx)
// top starts right away, but it hangs
testutil.TryReceive(ctx, t, ac[2].started)
// timer pops and we start the middle one
2 changes: 1 addition & 1 deletion cli/testdata/coder_list_--output_json.golden
@@ -23,7 +23,7 @@
"workspace_id": "===========[workspace ID]===========",
"workspace_name": "test-workspace",
"workspace_owner_id": "==========[first user ID]===========",
"workspace_owner_name": "testuser",
"workspace_owner_username": "testuser",
"template_version_id": "============[version ID]============",
"template_version_name": "===========[version name]===========",
"build_number": 1,
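
This golden-file update captures a rename of the serialized field from workspace_owner_name to workspace_owner_username. A hypothetical, trimmed illustration of the corresponding Go-side change; the real codersdk struct has many more fields:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// WorkspaceBuild is a trimmed stand-in for the codersdk type behind
// this golden file.
type WorkspaceBuild struct {
	WorkspaceName string `json:"workspace_name"`
	// Renamed key; previously `json:"workspace_owner_name"`.
	WorkspaceOwnerUsername string `json:"workspace_owner_username"`
}

func main() {
	out, _ := json.Marshal(WorkspaceBuild{
		WorkspaceName:          "test-workspace",
		WorkspaceOwnerUsername: "testuser",
	})
	fmt.Println(string(out))
	// {"workspace_name":"test-workspace","workspace_owner_username":"testuser"}
}
```
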
33 changes: 31 additions & 2 deletions coderd/apidoc/docs.go

Some generated files are not rendered by default.
