Commit 6e67082

Merge branch 'main' into dm-pr-feedback-oops
2 parents: 22eb5c9 + 8b27983

9 files changed: 61 additions, 27 deletions

agent/agentssh/agentssh.go

Lines changed: 2 additions & 2 deletions
@@ -454,7 +454,7 @@ func (s *Server) sessionHandler(session ssh.Session) {
 
 	x11, hasX11 := session.X11()
 	if hasX11 {
-		display, handled := s.x11Handler(session.Context(), x11)
+		display, handled := s.x11Handler(ctx, x11)
 		if !handled {
 			logger.Error(ctx, "x11 handler failed")
 			closeCause("x11 handler failed")
@@ -973,7 +973,7 @@ func (s *Server) handleConn(l net.Listener, c net.Conn) {
 		return
 	}
 	defer s.trackConn(l, c, false)
-	logger.Info(context.Background(), "started serving connection")
+	logger.Info(context.Background(), "started serving ssh connection")
 	// note: srv.ConnectionCompleteCallback logs completion of the connection
 	s.srv.HandleConn(c)
 }

cli/server.go

Lines changed: 5 additions & 3 deletions
@@ -2360,10 +2360,12 @@ func ConnectToPostgres(ctx context.Context, logger slog.Logger, driver string, d
 		return nil, xerrors.Errorf("get postgres version: %w", err)
 	}
 	defer version.Close()
-	if version.Err() != nil {
-		return nil, xerrors.Errorf("version select: %w", version.Err())
-	}
 	if !version.Next() {
+		// it's critical we assign to the err variable, otherwise the defer statement
+		// that runs db.Close() will not execute it
+		if err = version.Err(); err != nil {
+			return nil, xerrors.Errorf("no rows returned for version select: %w", err)
+		}
 		return nil, xerrors.Errorf("no rows returned for version select")
 	}
 	var versionNum int
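
This change follows the database/sql iteration contract: rows.Err() only reports anything useful once Next() has returned false, and assigning the result to the function's err variable matters when deferred cleanup inspects it. A minimal sketch of that pattern under those assumptions; queryVersion and its query string are illustrative, not the Coder code:

package dbcheck

import (
	"context"
	"database/sql"
	"fmt"
)

// queryVersion shows the Next/Err ordering: check Err() only after Next()
// returns false, and assign to the named return so deferred cleanup that
// inspects err can observe the failure.
func queryVersion(ctx context.Context, db *sql.DB) (version string, err error) {
	rows, err := db.QueryContext(ctx, "SHOW server_version;")
	if err != nil {
		return "", fmt.Errorf("get postgres version: %w", err)
	}
	defer rows.Close()

	if !rows.Next() {
		// No row came back: distinguish an iteration error from a genuinely empty result.
		if err = rows.Err(); err != nil {
			return "", fmt.Errorf("no rows returned for version select: %w", err)
		}
		return "", fmt.Errorf("no rows returned for version select")
	}
	if err = rows.Scan(&version); err != nil {
		return "", fmt.Errorf("scan version: %w", err)
	}
	return version, rows.Err()
}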

coderd/database/dbmem/dbmem.go

Lines changed: 1 addition & 1 deletion
@@ -8500,7 +8500,7 @@ func (q *FakeQuerier) HasTemplateVersionsWithAITask(_ context.Context) (bool, er
 	defer q.mutex.RUnlock()
 
 	for _, templateVersion := range q.templateVersions {
-		if templateVersion.HasAITask {
+		if templateVersion.HasAITask.Valid && templateVersion.HasAITask.Bool {
 			return true, nil
 		}
 	}
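
The in-memory fake now treats HasAITask as the nullable boolean it is: Valid only says a value was stored, so both fields must be checked. A standalone illustration of sql.NullBool semantics (toy program, not Coder code):

package main

import (
	"database/sql"
	"fmt"
)

func main() {
	unset := sql.NullBool{}                                 // NULL in the database
	explicitFalse := sql.NullBool{Bool: false, Valid: true} // stored false
	explicitTrue := sql.NullBool{Bool: true, Valid: true}   // stored true

	for _, v := range []sql.NullBool{unset, explicitFalse, explicitTrue} {
		// Only an explicit true counts; NULL and explicit false are both "no AI task".
		fmt.Println(v.Valid && v.Bool)
	}
	// Prints: false, false, true (one per line)
}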

coderd/files/cache.go

Lines changed: 31 additions & 7 deletions
@@ -140,20 +140,33 @@ type cacheEntry struct {
 
 type fetcher func(context.Context, uuid.UUID) (CacheEntryValue, error)
 
+var _ fs.FS = (*CloseFS)(nil)
+
+// CloseFS is a wrapper around fs.FS that implements io.Closer. The Close()
+// method tells the cache to release the fileID. Once all open references are
+// closed, the file is removed from the cache.
+type CloseFS struct {
+	fs.FS
+
+	close func()
+}
+
+func (f *CloseFS) Close() { f.close() }
+
 // Acquire will load the fs.FS for the given file. It guarantees that parallel
 // calls for the same fileID will only result in one fetch, and that parallel
 // calls for distinct fileIDs will fetch in parallel.
 //
 // Safety: Every call to Acquire that does not return an error must have a
 // matching call to Release.
-func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (fs.FS, error) {
-	// It's important that this `Load` call occurs outside of `prepare`, after the
+func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (*CloseFS, error) {
+	// It's important that this `Load` call occurs outside `prepare`, after the
 	// mutex has been released, or we would continue to hold the lock until the
 	// entire file has been fetched, which may be slow, and would prevent other
 	// files from being fetched in parallel.
 	it, err := c.prepare(ctx, fileID).Load()
 	if err != nil {
-		c.Release(fileID)
+		c.release(fileID)
 		return nil, err
 	}
 
@@ -163,11 +176,19 @@ func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (fs.FS, error) {
 	}
 	// Always check the caller can actually read the file.
 	if err := c.authz.Authorize(ctx, subject, policy.ActionRead, it.Object); err != nil {
-		c.Release(fileID)
+		c.release(fileID)
 		return nil, err
 	}
 
-	return it.FS, err
+	var once sync.Once
+	return &CloseFS{
+		FS: it.FS,
+		close: func() {
+			// sync.Once makes the Close() idempotent, so we can call it
+			// multiple times without worrying about double-releasing.
+			once.Do(func() { c.release(fileID) })
+		},
+	}, nil
 }
 
 func (c *Cache) prepare(ctx context.Context, fileID uuid.UUID) *lazy.ValueWithError[CacheEntryValue] {
@@ -203,9 +224,12 @@ func (c *Cache) prepare(ctx context.Context, fileID uuid.UUID) *lazy.ValueWithEr
 	return entry.value
 }
 
-// Release decrements the reference count for the given fileID, and frees the
+// release decrements the reference count for the given fileID, and frees the
 // backing data if there are no further references being held.
-func (c *Cache) Release(fileID uuid.UUID) {
+//
+// release should only be called after a successful call to Acquire using the Release()
+// method on the returned *CloseFS.
+func (c *Cache) release(fileID uuid.UUID) {
 	c.lock.Lock()
 	defer c.lock.Unlock()
 
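
The new Acquire/Close shape ties every successful acquire to exactly one release via the returned closer, with sync.Once turning a second Close into a no-op. A minimal standalone sketch of that reference-counting idiom; refCache is a toy type, not the coderd/files implementation:

package main

import (
	"fmt"
	"sync"
)

// refCache is a toy reference-counted cache keyed by string.
type refCache struct {
	mu   sync.Mutex
	refs map[string]int
}

// acquire increments the reference count and returns an idempotent release func.
func (c *refCache) acquire(key string) (release func()) {
	c.mu.Lock()
	c.refs[key]++
	c.mu.Unlock()

	var once sync.Once
	return func() {
		// once.Do makes a double call a no-op instead of a double-decrement.
		once.Do(func() {
			c.mu.Lock()
			defer c.mu.Unlock()
			if c.refs[key]--; c.refs[key] == 0 {
				delete(c.refs, key) // last reference gone: evict
			}
		})
	}
}

func main() {
	c := &refCache{refs: map[string]int{}}
	release := c.acquire("file-1")
	release()
	release()                // safe: second call is ignored
	fmt.Println(len(c.refs)) // 0
}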

coderd/files/cache_test.go

Lines changed: 9 additions & 6 deletions
@@ -75,7 +75,7 @@ func TestCacheRBAC(t *testing.T) {
 	require.Equal(t, 0, cache.Count())
 
 	// Read the file with a file reader to put it into the cache.
-	_, err := cache.Acquire(cacheReader, file.ID)
+	a, err := cache.Acquire(cacheReader, file.ID)
 	require.NoError(t, err)
 	require.Equal(t, 1, cache.Count())
 
@@ -86,12 +86,12 @@
 	require.Equal(t, 1, cache.Count())
 
 	// UserReader can
-	_, err = cache.Acquire(userReader, file.ID)
+	b, err := cache.Acquire(userReader, file.ID)
 	require.NoError(t, err)
 	require.Equal(t, 1, cache.Count())
 
-	cache.Release(file.ID)
-	cache.Release(file.ID)
+	a.Close()
+	b.Close()
 	require.Equal(t, 0, cache.Count())
 
 	rec.AssertActorID(t, nobodyID.String(), rec.Pair(policy.ActionRead, file))
@@ -179,13 +179,15 @@ func TestRelease(t *testing.T) {
 		ids = append(ids, uuid.New())
 	}
 
+	releases := make(map[uuid.UUID][]func(), 0)
 	// Acquire a bunch of references
 	batchSize := 10
 	for openedIdx, id := range ids {
 		for batchIdx := range batchSize {
 			it, err := c.Acquire(ctx, id)
 			require.NoError(t, err)
-			require.Equal(t, emptyFS, it)
+			require.Equal(t, emptyFS, it.FS)
+			releases[id] = append(releases[id], it.Close)
 
 			// Each time a new file is opened, the metrics should be updated as so:
 			opened := openedIdx + 1
@@ -206,7 +208,8 @@
 	for closedIdx, id := range ids {
 		stillOpen := len(ids) - closedIdx
 		for closingIdx := range batchSize {
-			c.Release(id)
+			releases[id][0]()
+			releases[id] = releases[id][1:]
 
 			// Each time a file is released, the metrics should decrement the file refs
 			require.Equal(t, (stillOpen*batchSize)-(closingIdx+1), promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil))

coderd/parameters.go

Lines changed: 9 additions & 4 deletions
@@ -4,6 +4,7 @@ import (
 	"context"
 	"database/sql"
 	"encoding/json"
+	"io/fs"
 	"net/http"
 	"time"
 
@@ -144,15 +145,19 @@ func (api *API) handleDynamicParameters(listen bool, rw http.ResponseWriter, r *
 	}
 
 	// Add the file first. Calling `Release` if it fails is a no-op, so this is safe.
-	templateFS, err := api.FileCache.Acquire(fileCtx, fileID)
+	var templateFS fs.FS
+	closeableTemplateFS, err := api.FileCache.Acquire(fileCtx, fileID)
 	if err != nil {
 		httpapi.Write(ctx, rw, http.StatusNotFound, codersdk.Response{
 			Message: "Internal error fetching template version Terraform.",
 			Detail:  err.Error(),
 		})
 		return
 	}
-	defer api.FileCache.Release(fileID)
+	defer closeableTemplateFS.Close()
+	// templateFS does not implement the Close method. For it to be later merged with
+	// the module files, we need to convert it to an OverlayFS.
+	templateFS = closeableTemplateFS
 
 	// Having the Terraform plan available for the evaluation engine is helpful
 	// for populating values from data blocks, but isn't strictly required. If
@@ -171,9 +176,9 @@ func (api *API) handleDynamicParameters(listen bool, rw http.ResponseWriter, r *
 			})
 			return
 		}
-		defer api.FileCache.Release(tf.CachedModuleFiles.UUID)
+		defer moduleFilesFS.Close()
 
-		templateFS = files.NewOverlayFS(templateFS, []files.Overlay{{Path: ".terraform/modules", FS: moduleFilesFS}})
+		templateFS = files.NewOverlayFS(closeableTemplateFS, []files.Overlay{{Path: ".terraform/modules", FS: moduleFilesFS}})
 	}
 
 	owner, err := getWorkspaceOwnerData(ctx, api.Database, apikey.UserID, templateVersion.OrganizationID)
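
Because the file cache now hands back a closer-wrapping filesystem, the handler keeps the concrete value around for defer Close() while assigning it to a plain fs.FS for the overlay merge. A small sketch of why that assignment is enough; closeFS and the MapFS contents are illustrative stand-ins, not the coderd types:

package main

import (
	"fmt"
	"io/fs"
	"testing/fstest"
)

// closeFS mirrors the shape of the cache's wrapper: an fs.FS plus a release hook.
type closeFS struct {
	fs.FS
	close func()
}

func (f *closeFS) Close() { f.close() }

func main() {
	inner := fstest.MapFS{"main.tf": {Data: []byte("{}")}}
	cfs := &closeFS{FS: inner, close: func() { fmt.Println("released") }}
	defer cfs.Close() // the concrete type keeps the release hook

	// Assigning to a plain fs.FS works because the embedded FS satisfies the
	// interface, and downstream code (e.g. an overlay merge) only needs fs.FS.
	var templateFS fs.FS = cfs
	data, err := fs.ReadFile(templateFS, "main.tf")
	fmt.Println(string(data), err)
}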

coderd/provisionerdserver/provisionerdserver.go

Lines changed: 2 additions & 2 deletions
@@ -321,7 +321,7 @@ func (s *server) AcquireJob(ctx context.Context, _ *proto.Empty) (*proto.Acquire
 	acqCtx, acqCancel := context.WithTimeout(ctx, s.acquireJobLongPollDur)
 	defer acqCancel()
 	job, err := s.Acquirer.AcquireJob(acqCtx, s.OrganizationID, s.ID, s.Provisioners, s.Tags)
-	if xerrors.Is(err, context.DeadlineExceeded) {
+	if database.IsQueryCanceledError(err) {
 		s.Logger.Debug(ctx, "successful cancel")
 		return &proto.AcquiredJob{}, nil
 	}
@@ -368,7 +368,7 @@ func (s *server) AcquireJobWithCancel(stream proto.DRPCProvisionerDaemon_Acquire
 		je = <-jec
 	case je = <-jec:
 	}
-	if xerrors.Is(je.err, context.Canceled) {
+	if database.IsQueryCanceledError(je.err) {
 		s.Logger.Debug(streamCtx, "successful cancel")
 		err := stream.Send(&proto.AcquiredJob{})
 		if err != nil {
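
The switch to database.IsQueryCanceledError reflects that a canceled acquire can surface either as a context error or as a driver-level cancellation rather than only as context.Canceled/DeadlineExceeded. A hedged sketch of how such a check is commonly written against lib/pq; isQueryCanceled is a hypothetical helper, not the actual coderd/database implementation:

package example

import (
	"context"
	"errors"

	"github.com/lib/pq"
)

// isQueryCanceled treats both context cancellation and the Postgres
// "query_canceled" SQLSTATE (57014) from the driver as a cancellation.
func isQueryCanceled(err error) bool {
	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
		return true
	}
	var pqErr *pq.Error
	return errors.As(err, &pqErr) && pqErr.Code == "57014" // query_canceled
}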

site/src/pages/CreateWorkspacePage/CreateWorkspacePageViewExperimental.tsx

Lines changed: 1 addition & 1 deletion
@@ -393,7 +393,7 @@ export const CreateWorkspacePageViewExperimental: FC<
 				</TooltipProvider>
 			</span>
 			<FeatureStageBadge
-				contentType={"early_access"}
+				contentType={"beta"}
 				size="sm"
 				labelText="Dynamic parameters"
 			/>

site/src/pages/WorkspaceSettingsPage/WorkspaceParametersPage/WorkspaceParametersPageExperimental.tsx

Lines changed: 1 addition & 1 deletion
@@ -237,7 +237,7 @@ const WorkspaceParametersPageExperimental: FC = () => {
 				</span>
 			</span>
 			<FeatureStageBadge
-				contentType={"early_access"}
+				contentType={"beta"}
 				size="sm"
 				labelText="Dynamic parameters"
 			/>
