diff --git a/coderd/files/cache.go b/coderd/files/cache.go
index 484507d2ac5b0..8127c3989129b 100644
--- a/coderd/files/cache.go
+++ b/coderd/files/cache.go
@@ -140,20 +140,33 @@ type cacheEntry struct {
 
 type fetcher func(context.Context, uuid.UUID) (CacheEntryValue, error)
 
+var _ fs.FS = (*CloseFS)(nil)
+
+// CloseFS is a wrapper around fs.FS that adds a Close() method. Close() tells
+// the cache to release the fileID. Once all open references are closed, the
+// file is removed from the cache.
+type CloseFS struct {
+	fs.FS
+
+	close func()
+}
+
+func (f *CloseFS) Close() { f.close() }
+
 // Acquire will load the fs.FS for the given file. It guarantees that parallel
 // calls for the same fileID will only result in one fetch, and that parallel
 // calls for distinct fileIDs will fetch in parallel.
 //
 // Safety: Every call to Acquire that does not return an error must have a
 // matching call to Release.
-func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (fs.FS, error) {
-	// It's important that this `Load` call occurs outside of `prepare`, after the
+func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (*CloseFS, error) {
+	// It's important that this `Load` call occurs outside `prepare`, after the
 	// mutex has been released, or we would continue to hold the lock until the
 	// entire file has been fetched, which may be slow, and would prevent other
 	// files from being fetched in parallel.
 	it, err := c.prepare(ctx, fileID).Load()
 	if err != nil {
-		c.Release(fileID)
+		c.release(fileID)
 		return nil, err
 	}
 
@@ -163,11 +176,19 @@ func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (fs.FS, error) {
 	}
 	// Always check the caller can actually read the file.
 	if err := c.authz.Authorize(ctx, subject, policy.ActionRead, it.Object); err != nil {
-		c.Release(fileID)
+		c.release(fileID)
 		return nil, err
 	}
 
-	return it.FS, err
+	var once sync.Once
+	return &CloseFS{
+		FS: it.FS,
+		close: func() {
+			// sync.Once makes Close() idempotent, so we can call it
+			// multiple times without worrying about double-releasing.
+			once.Do(func() { c.release(fileID) })
+		},
+	}, nil
 }
 
 func (c *Cache) prepare(ctx context.Context, fileID uuid.UUID) *lazy.ValueWithError[CacheEntryValue] {
@@ -203,9 +224,12 @@ func (c *Cache) prepare(ctx context.Context, fileID uuid.UUID) *lazy.ValueWithEr
 	return entry.value
 }
 
-// Release decrements the reference count for the given fileID, and frees the
+// release decrements the reference count for the given fileID, and frees the
 // backing data if there are no further references being held.
-func (c *Cache) Release(fileID uuid.UUID) {
+//
+// release should only be called after a successful call to Acquire, via the
+// Close() method on the returned *CloseFS.
+func (c *Cache) release(fileID uuid.UUID) {
 	c.lock.Lock()
 	defer c.lock.Unlock()
 
diff --git a/coderd/files/cache_test.go b/coderd/files/cache_test.go
index 469520b4139fe..5efb4ba19be28 100644
--- a/coderd/files/cache_test.go
+++ b/coderd/files/cache_test.go
@@ -75,7 +75,7 @@ func TestCacheRBAC(t *testing.T) {
 
 	require.Equal(t, 0, cache.Count())
 	// Read the file with a file reader to put it into the cache.
-	_, err := cache.Acquire(cacheReader, file.ID)
+	a, err := cache.Acquire(cacheReader, file.ID)
 	require.NoError(t, err)
 	require.Equal(t, 1, cache.Count())
 
@@ -86,12 +86,12 @@ func TestCacheRBAC(t *testing.T) {
 	require.Equal(t, 1, cache.Count())
 
 	// UserReader can
-	_, err = cache.Acquire(userReader, file.ID)
+	b, err := cache.Acquire(userReader, file.ID)
 	require.NoError(t, err)
 	require.Equal(t, 1, cache.Count())
 
-	cache.Release(file.ID)
-	cache.Release(file.ID)
+	a.Close()
+	b.Close()
 	require.Equal(t, 0, cache.Count())
 
 	rec.AssertActorID(t, nobodyID.String(), rec.Pair(policy.ActionRead, file))
@@ -179,13 +179,15 @@ func TestRelease(t *testing.T) {
 		ids = append(ids, uuid.New())
 	}
 
+	releases := make(map[uuid.UUID][]func(), 0)
 	// Acquire a bunch of references
 	batchSize := 10
 	for openedIdx, id := range ids {
 		for batchIdx := range batchSize {
 			it, err := c.Acquire(ctx, id)
 			require.NoError(t, err)
-			require.Equal(t, emptyFS, it)
+			require.Equal(t, emptyFS, it.FS)
+			releases[id] = append(releases[id], it.Close)
 
 			// Each time a new file is opened, the metrics should be updated as so:
 			opened := openedIdx + 1
@@ -206,7 +208,8 @@ func TestRelease(t *testing.T) {
 	for closedIdx, id := range ids {
 		stillOpen := len(ids) - closedIdx
 		for closingIdx := range batchSize {
-			c.Release(id)
+			releases[id][0]()
+			releases[id] = releases[id][1:]
 
 			// Each time a file is released, the metrics should decrement the file refs
 			require.Equal(t, (stillOpen*batchSize)-(closingIdx+1), promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil))
diff --git a/coderd/parameters.go b/coderd/parameters.go
index c88199956392d..dacd8de812ab8 100644
--- a/coderd/parameters.go
+++ b/coderd/parameters.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"database/sql"
 	"encoding/json"
+	"io/fs"
 	"net/http"
 	"time"
 
@@ -144,7 +145,8 @@ func (api *API) handleDynamicParameters(listen bool, rw http.ResponseWriter, r *
 	}
 
 	// Add the file first. Calling `Release` if it fails is a no-op, so this is safe.
-	templateFS, err := api.FileCache.Acquire(fileCtx, fileID)
+	var templateFS fs.FS
+	closeableTemplateFS, err := api.FileCache.Acquire(fileCtx, fileID)
 	if err != nil {
 		httpapi.Write(ctx, rw, http.StatusNotFound, codersdk.Response{
 			Message: "Internal error fetching template version Terraform.",
@@ -152,7 +154,10 @@ func (api *API) handleDynamicParameters(listen bool, rw http.ResponseWriter, r *
 		})
 		return
 	}
-	defer api.FileCache.Release(fileID)
+	defer closeableTemplateFS.Close()
+	// templateFS is a plain fs.FS without the Close method; keep it in a separate
+	// variable so it can later be merged with the module files via an OverlayFS.
+	templateFS = closeableTemplateFS
 
 	// Having the Terraform plan available for the evaluation engine is helpful
 	// for populating values from data blocks, but isn't strictly required. If
@@ -171,9 +176,9 @@ func (api *API) handleDynamicParameters(listen bool, rw http.ResponseWriter, r *
 			})
 			return
 		}
-		defer api.FileCache.Release(tf.CachedModuleFiles.UUID)
+		defer moduleFilesFS.Close()
 
-		templateFS = files.NewOverlayFS(templateFS, []files.Overlay{{Path: ".terraform/modules", FS: moduleFilesFS}})
+		templateFS = files.NewOverlayFS(closeableTemplateFS, []files.Overlay{{Path: ".terraform/modules", FS: moduleFilesFS}})
 	}
 
 	owner, err := getWorkspaceOwnerData(ctx, api.Database, apikey.UserID, templateVersion.OrganizationID)
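
Usage sketch (not part of the diff): with this change, callers receive a *files.CloseFS from Acquire and drop their cache reference with Close() instead of calling Release(fileID). The snippet below is a minimal, hypothetical caller written against the API shown above; the function name readTemplateFile, the import path github.com/coder/coder/v2/coderd/files, and the file name "main.tf" are illustrative assumptions only.

	package example

	import (
		"context"
		"io/fs"

		"github.com/google/uuid"

		"github.com/coder/coder/v2/coderd/files"
	)

	// readTemplateFile is a hypothetical caller illustrating the new
	// Acquire/Close contract introduced by this diff.
	func readTemplateFile(ctx context.Context, cache *files.Cache, fileID uuid.UUID) ([]byte, error) {
		// Acquire now returns a *files.CloseFS instead of a bare fs.FS.
		templateFS, err := cache.Acquire(ctx, fileID)
		if err != nil {
			return nil, err
		}
		// Close is idempotent (guarded by sync.Once); it decrements the cache's
		// reference count, and the entry is freed once all references are closed.
		defer templateFS.Close()

		// CloseFS embeds fs.FS, so it can be used wherever an fs.FS is expected,
		// e.g. wrapped by files.NewOverlayFS as parameters.go does above.
		// "main.tf" is an assumed file name for illustration.
		return fs.ReadFile(templateFS, "main.tf")
	}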