
Commit 038bb57

fix import loop, but had to make the file cache entry exported 😢
1 parent bc25afa commit 038bb57
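
A note on the mechanics behind the commit message: moving the test file from package files to package files_test breaks the import loop, because an external _test package sits outside the import graph of the package under test, so it can depend on helpers such as coderdtest even when those transitively import coderd/files. The trade-off is visibility: the test can now only touch exported identifiers, which is why cacheEntryValue has to become CacheEntryValue. A minimal sketch of the pattern, with a hypothetical test body that is not part of this diff:

package files_test

import (
	"testing"

	"github.com/coder/coder/v2/coderd/files"
)

// TestExportedSurface is illustrative only: it shows that an external
// test package compiles against exported names alone.
func TestExportedSurface(t *testing.T) {
	// The unexported cacheEntryValue would be invisible from here; the
	// exported CacheEntryValue is what makes this package layout build.
	entry := files.CacheEntryValue{}
	if entry.Size != 0 {
		t.Fatal("zero value should have Size 0")
	}
}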

2 files changed: +25 -24 lines changed


coderd/files/cache.go

Lines changed: 16 additions & 16 deletions
@@ -22,20 +22,20 @@ import (
 // NewFromStore returns a file cache that will fetch files from the provided
 // database.
 func NewFromStore(store database.Store, registerer prometheus.Registerer, authz rbac.Authorizer) *Cache {
-	fetch := func(ctx context.Context, fileID uuid.UUID) (cacheEntryValue, error) {
+	fetch := func(ctx context.Context, fileID uuid.UUID) (CacheEntryValue, error) {
 		// Make sure the read does not fail due to authorization issues.
 		// Authz is checked on the Acquire call, so this is safe.
 		//nolint:gocritic
 		file, err := store.GetFileByID(dbauthz.AsFileReader(ctx), fileID)
 		if err != nil {
-			return cacheEntryValue{}, xerrors.Errorf("failed to read file from database: %w", err)
+			return CacheEntryValue{}, xerrors.Errorf("failed to read file from database: %w", err)
 		}
 
 		content := bytes.NewBuffer(file.Data)
-		return cacheEntryValue{
-			object: file.RBACObject(),
+		return CacheEntryValue{
+			Object: file.RBACObject(),
 			FS:     archivefs.FromTarReader(content),
-			size:   int64(content.Len()),
+			Size:   int64(content.Len()),
 		}, nil
 	}
 
@@ -126,19 +126,19 @@ type cacheMetrics struct {
 	totalCacheSize prometheus.Counter
 }
 
-type cacheEntryValue struct {
-	object rbac.Object
+type CacheEntryValue struct {
+	Object rbac.Object
 	fs.FS
-	size int64
+	Size int64
 }
 
 type cacheEntry struct {
 	// refCount must only be accessed while the Cache lock is held.
 	refCount int
-	value    *lazy.ValueWithError[cacheEntryValue]
+	value    *lazy.ValueWithError[CacheEntryValue]
 }
 
-type fetcher func(context.Context, uuid.UUID) (cacheEntryValue, error)
+type fetcher func(context.Context, uuid.UUID) (CacheEntryValue, error)
 
 // Acquire will load the fs.FS for the given file. It guarantees that parallel
 // calls for the same fileID will only result in one fetch, and that parallel
@@ -162,27 +162,27 @@ func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (fs.FS, error) {
 		return nil, dbauthz.ErrNoActor
 	}
 	// Always check the caller can actually read the file.
-	if err := c.authz.Authorize(ctx, subject, policy.ActionRead, it.object); err != nil {
+	if err := c.authz.Authorize(ctx, subject, policy.ActionRead, it.Object); err != nil {
 		c.Release(fileID)
 		return nil, err
 	}
 
 	return it.FS, err
 }
 
-func (c *Cache) prepare(ctx context.Context, fileID uuid.UUID) *lazy.ValueWithError[cacheEntryValue] {
+func (c *Cache) prepare(ctx context.Context, fileID uuid.UUID) *lazy.ValueWithError[CacheEntryValue] {
 	c.lock.Lock()
 	defer c.lock.Unlock()
 
 	entry, ok := c.data[fileID]
 	if !ok {
-		value := lazy.NewWithError(func() (cacheEntryValue, error) {
+		value := lazy.NewWithError(func() (CacheEntryValue, error) {
 			val, err := c.fetcher(ctx, fileID)
 
 			// Always add to the cache size the bytes of the file loaded.
 			if err == nil {
-				c.currentCacheSize.Add(float64(val.size))
-				c.totalCacheSize.Add(float64(val.size))
+				c.currentCacheSize.Add(float64(val.Size))
+				c.totalCacheSize.Add(float64(val.Size))
 			}
 
 			return val, err
@@ -227,7 +227,7 @@ func (c *Cache) Release(fileID uuid.UUID) {
 
 	ev, err := entry.value.Load()
 	if err == nil {
-		c.currentCacheSize.Add(-1 * float64(ev.size))
+		c.currentCacheSize.Add(-1 * float64(ev.Size))
 	}
 
 	delete(c.data, fileID)
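
Taken together, Acquire and Release implement a reference-counting contract: Acquire lazily fetches the file (deduplicating parallel fetches for the same fileID via lazy.ValueWithError), checks the caller against the entry's RBAC object, and pins the entry; every successful Acquire must then be paired with exactly one Release, or refCount and the currentCacheSize gauge never come back down. A hypothetical caller, sketched under the assumption that the rest of the Cache API matches what this diff shows:

package example

import (
	"context"
	"io/fs"

	"github.com/google/uuid"

	"github.com/coder/coder/v2/coderd/files"
)

// readTemplateFile is illustrative, not from the commit: it pins the
// file's fs.FS for the duration of one read, then releases it so the
// cache can drop the entry once the last reference is gone.
func readTemplateFile(ctx context.Context, cache *files.Cache, fileID uuid.UUID, name string) ([]byte, error) {
	fsys, err := cache.Acquire(ctx, fileID) // authz is enforced here
	if err != nil {
		return nil, err
	}
	defer cache.Release(fileID) // balance the reference taken by Acquire

	return fs.ReadFile(fsys, name)
}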

coderd/files/cache_internal_test.go

Lines changed: 9 additions & 8 deletions
@@ -1,4 +1,4 @@
-package files
+package files_test
 
 import (
 	"context"
@@ -15,6 +15,7 @@ import (
 	"github.com/coder/coder/v2/coderd/coderdtest"
 	"github.com/coder/coder/v2/coderd/coderdtest/promhelp"
 	"github.com/coder/coder/v2/coderd/database/dbauthz"
+	"github.com/coder/coder/v2/coderd/files"
 	"github.com/coder/coder/v2/testutil"
 )
 
@@ -31,12 +32,12 @@ func TestConcurrency(t *testing.T) {
 	emptyFS := afero.NewIOFS(afero.NewReadOnlyFs(afero.NewMemMapFs()))
 	var fetches atomic.Int64
 	reg := prometheus.NewRegistry()
-	c := New(func(_ context.Context, _ uuid.UUID) (cacheEntryValue, error) {
+	c := files.New(func(_ context.Context, _ uuid.UUID) (files.CacheEntryValue, error) {
 		fetches.Add(1)
 		// Wait long enough before returning to make sure that all of the goroutines
 		// will be waiting in line, ensuring that no one duplicated a fetch.
 		time.Sleep(testutil.IntervalMedium)
-		return cacheEntryValue{FS: emptyFS, size: fileSize}, nil
+		return files.CacheEntryValue{FS: emptyFS, Size: fileSize}, nil
 	}, reg, &coderdtest.FakeAuthorizer{})
 
 	batches := 1000
@@ -84,10 +85,10 @@ func TestRelease(t *testing.T) {
 	const fileSize = 10
 	emptyFS := afero.NewIOFS(afero.NewReadOnlyFs(afero.NewMemMapFs()))
 	reg := prometheus.NewRegistry()
-	c := New(func(_ context.Context, _ uuid.UUID) (cacheEntryValue, error) {
-		return cacheEntryValue{
+	c := files.New(func(_ context.Context, _ uuid.UUID) (files.CacheEntryValue, error) {
+		return files.CacheEntryValue{
 			FS:   emptyFS,
-			size: fileSize,
+			Size: fileSize,
 		}, nil
 	}, reg, &coderdtest.FakeAuthorizer{})
 
@@ -118,7 +119,7 @@ func TestRelease(t *testing.T) {
 	}
 
 	// Make sure cache is fully loaded
-	require.Equal(t, len(c.data), batches)
+	require.Equal(t, c.Count(), batches)
 
 	// Now release all of the references
 	for closedIdx, id := range ids {
@@ -142,7 +143,7 @@ func TestRelease(t *testing.T) {
 	}
 
 	// ...and make sure that the cache has emptied itself.
-	require.Equal(t, len(c.data), 0)
+	require.Equal(t, c.Count(), 0)
 
 	// Verify all the counts & metrics are correct.
 	// All existing files are closed
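
One more consequence of leaving package files: the test can no longer reach into len(c.data), so it asserts against a Count() accessor instead. That method is not part of this diff; given the rule in cache.go that the map is only touched under the Cache lock, it presumably looks something like this sketch (an assumption, not the committed code):

func (c *Cache) Count() int {
	c.lock.Lock()
	defer c.lock.Unlock()

	return len(c.data)
}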
