Commit 3b73371

move test
1 parent 038bb57 commit 3b73371

File tree

2 files changed: +145 -159 lines changed

coderd/files/cache_internal_test.go

Lines changed: 0 additions & 159 deletions
This file was deleted.

coderd/files/cache_test.go

Lines changed: 145 additions & 0 deletions
@@ -1,14 +1,20 @@
 package files_test
 
 import (
+	"context"
+	"sync/atomic"
 	"testing"
+	"time"
 
 	"github.com/google/uuid"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/spf13/afero"
 	"github.com/stretchr/testify/require"
+	"golang.org/x/sync/errgroup"
 
 	"cdr.dev/slog/sloggers/slogtest"
 	"github.com/coder/coder/v2/coderd/coderdtest"
+	"github.com/coder/coder/v2/coderd/coderdtest/promhelp"
 	"github.com/coder/coder/v2/coderd/database"
 	"github.com/coder/coder/v2/coderd/database/dbauthz"
 	"github.com/coder/coder/v2/coderd/database/dbgen"
@@ -94,6 +100,145 @@ func TestCacheRBAC(t *testing.T) {
 	})
 }
 
+func cachePromMetricName(metric string) string {
+	return "coderd_file_cache_" + metric
+}
+
+func TestConcurrency(t *testing.T) {
+	t.Parallel()
+	//nolint:gocritic // Unit testing
+	ctx := dbauthz.AsFileReader(t.Context())
+
+	const fileSize = 10
+	emptyFS := afero.NewIOFS(afero.NewReadOnlyFs(afero.NewMemMapFs()))
+	var fetches atomic.Int64
+	reg := prometheus.NewRegistry()
+	c := files.New(func(_ context.Context, _ uuid.UUID) (files.CacheEntryValue, error) {
+		fetches.Add(1)
+		// Wait long enough before returning to make sure that all of the goroutines
+		// will be waiting in line, ensuring that no one duplicated a fetch.
+		time.Sleep(testutil.IntervalMedium)
+		return files.CacheEntryValue{FS: emptyFS, Size: fileSize}, nil
+	}, reg, &coderdtest.FakeAuthorizer{})
+
+	batches := 1000
+	groups := make([]*errgroup.Group, 0, batches)
+	for range batches {
+		groups = append(groups, new(errgroup.Group))
+	}
+
+	// Call Acquire with a unique ID per batch, many times per batch, with many
+	// batches all in parallel. This is pretty much the worst-case scenario:
+	// thousands of concurrent reads, with both warm and cold loads happening.
+	batchSize := 10
+	for _, g := range groups {
+		id := uuid.New()
+		for range batchSize {
+			g.Go(func() error {
+				// We don't bother to Release these references because the Cache will be
+				// released at the end of the test anyway.
+				_, err := c.Acquire(ctx, id)
+				return err
+			})
+		}
+	}
+
+	for _, g := range groups {
+		require.NoError(t, g.Wait())
+	}
+	require.Equal(t, int64(batches), fetches.Load())
+
+	// Verify all the counts & metrics are correct.
+	require.Equal(t, batches, c.Count())
+	require.Equal(t, batches*fileSize, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_size_bytes_current"), nil))
+	require.Equal(t, batches*fileSize, promhelp.CounterValue(t, reg, cachePromMetricName("open_files_size_bytes_total"), nil))
+	require.Equal(t, batches, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_current"), nil))
+	require.Equal(t, batches, promhelp.CounterValue(t, reg, cachePromMetricName("open_files_total"), nil))
+	require.Equal(t, batches*batchSize, promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil))
+	require.Equal(t, batches*batchSize, promhelp.CounterValue(t, reg, cachePromMetricName("open_file_refs_total"), nil))
+}
+
+func TestRelease(t *testing.T) {
+	t.Parallel()
+	//nolint:gocritic // Unit testing
+	ctx := dbauthz.AsFileReader(t.Context())
+
+	const fileSize = 10
+	emptyFS := afero.NewIOFS(afero.NewReadOnlyFs(afero.NewMemMapFs()))
+	reg := prometheus.NewRegistry()
+	c := files.New(func(_ context.Context, _ uuid.UUID) (files.CacheEntryValue, error) {
+		return files.CacheEntryValue{
+			FS:   emptyFS,
+			Size: fileSize,
+		}, nil
+	}, reg, &coderdtest.FakeAuthorizer{})
+
+	batches := 100
+	ids := make([]uuid.UUID, 0, batches)
+	for range batches {
+		ids = append(ids, uuid.New())
+	}
+
+	// Acquire a bunch of references
+	batchSize := 10
+	for openedIdx, id := range ids {
+		for batchIdx := range batchSize {
+			it, err := c.Acquire(ctx, id)
+			require.NoError(t, err)
+			require.Equal(t, emptyFS, it)
+
+			// Each time a new file is opened, the metrics should be updated as so:
+			opened := openedIdx + 1
+			// Number of unique files opened is equal to the idx of the ids.
+			require.Equal(t, opened, c.Count())
+			require.Equal(t, opened, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_current"), nil))
+			// Current file size is unique files * file size.
+			require.Equal(t, opened*fileSize, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_size_bytes_current"), nil))
+			// The number of refs is the current iteration of both loops.
+			require.Equal(t, ((opened-1)*batchSize)+(batchIdx+1), promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil))
+		}
+	}
+
+	// Make sure cache is fully loaded
+	require.Equal(t, c.Count(), batches)
+
+	// Now release all of the references
+	for closedIdx, id := range ids {
+		stillOpen := len(ids) - closedIdx
+		for closingIdx := range batchSize {
+			c.Release(id)
+
+			// Each time a file is released, the metrics should decrement the file refs
+			require.Equal(t, (stillOpen*batchSize)-(closingIdx+1), promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil))
+
+			closed := closingIdx+1 == batchSize
+			if closed {
+				continue
+			}
+
+			// File ref still exists, so the counts should not change yet.
+			require.Equal(t, stillOpen, c.Count())
+			require.Equal(t, stillOpen, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_current"), nil))
+			require.Equal(t, stillOpen*fileSize, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_size_bytes_current"), nil))
+		}
+	}
+
+	// ...and make sure that the cache has emptied itself.
+	require.Equal(t, c.Count(), 0)
+
+	// Verify all the counts & metrics are correct.
+	// All existing files are closed
+	require.Equal(t, 0, c.Count())
+	require.Equal(t, 0, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_size_bytes_current"), nil))
+	require.Equal(t, 0, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_current"), nil))
+	require.Equal(t, 0, promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil))
+
+	// Total counts remain
+	require.Equal(t, batches*fileSize, promhelp.CounterValue(t, reg, cachePromMetricName("open_files_size_bytes_total"), nil))
+	require.Equal(t, batches, promhelp.CounterValue(t, reg, cachePromMetricName("open_files_total"), nil))
+	require.Equal(t, batches*batchSize, promhelp.CounterValue(t, reg, cachePromMetricName("open_file_refs_total"), nil))
+}
+
 func cacheAuthzSetup(t *testing.T) (database.Store, *files.Cache, *coderdtest.RecordingAuthorizer) {
 	t.Helper()
 
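Note: the two tests moved here pin down the cache's reference-counting contract: concurrent Acquire calls for the same file ID share a single fetch, every Acquire adds a reference, and an entry (together with its open_files_* gauges) is dropped only after its last Release. Below is a minimal usage sketch assuming only the identifiers visible in this diff (files.New, files.CacheEntryValue, Cache.Acquire, Cache.Release, Cache.Count); the package name, function name, fetch body, and fileID are hypothetical placeholders, not part of the commit.

package files_example

import (
	"context"

	"github.com/google/uuid"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/spf13/afero"

	"github.com/coder/coder/v2/coderd/coderdtest"
	"github.com/coder/coder/v2/coderd/files"
)

// useCachedFile sketches the Acquire/Release pairing the tests above verify.
func useCachedFile(ctx context.Context, fileID uuid.UUID) error {
	reg := prometheus.NewRegistry()

	// The fetch callback runs at most once per file ID, even when many
	// goroutines Acquire the same ID concurrently (see TestConcurrency).
	c := files.New(func(_ context.Context, _ uuid.UUID) (files.CacheEntryValue, error) {
		// Hypothetical fetch: a real caller would load the file from the
		// database here instead of returning an empty in-memory filesystem.
		fs := afero.NewIOFS(afero.NewReadOnlyFs(afero.NewMemMapFs()))
		return files.CacheEntryValue{FS: fs, Size: 0}, nil
	}, reg, &coderdtest.FakeAuthorizer{})

	fs, err := c.Acquire(ctx, fileID)
	if err != nil {
		return err
	}
	// Each Acquire must be paired with a Release; the entry and its
	// open_files_* gauges are dropped only after the last reference is
	// released (see TestRelease).
	defer c.Release(fileID)

	_ = fs        // read from the filesystem while the reference is held
	_ = c.Count() // number of distinct files currently cached
	return nil
}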