Skip to content
Merged
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
chore: add unit test to ensure metrics are correct
  • Loading branch information
Emyrk committed May 28, 2025
commit 1bf620cc70bca80c91d0fd78b446178dbf1525f4
76 changes: 63 additions & 13 deletions coderd/files/cache_internal_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,21 +13,28 @@ import (
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"

"github.com/coder/coder/v2/coderd/coderdtest/promhelp"
"github.com/coder/coder/v2/testutil"
)

// cachePromMetricName builds the fully-qualified Prometheus metric name for
// the file cache by prepending the shared "coderd_file_cache_" subsystem
// prefix to the given metric suffix.
func cachePromMetricName(metric string) string {
	const prefix = "coderd_file_cache_"
	return prefix + metric
}

func TestConcurrency(t *testing.T) {
t.Parallel()

const fileSize = 10
emptyFS := afero.NewIOFS(afero.NewReadOnlyFs(afero.NewMemMapFs()))
var fetches atomic.Int64
c := newTestCache(func(_ context.Context, _ uuid.UUID) (fs.FS, int64, error) {
reg := prometheus.NewRegistry()
c := New(func(_ context.Context, _ uuid.UUID) (fs.FS, int64, error) {
fetches.Add(1)
// Wait long enough before returning to make sure that all of the goroutines
// will be waiting in line, ensuring that no one duplicated a fetch.
time.Sleep(testutil.IntervalMedium)
return emptyFS, 0, nil
})
return emptyFS, fileSize, nil
}, reg)

batches := 1000
groups := make([]*errgroup.Group, 0, batches)
Expand Down Expand Up @@ -55,15 +62,26 @@ func TestConcurrency(t *testing.T) {
require.NoError(t, g.Wait())
}
require.Equal(t, int64(batches), fetches.Load())

// Verify all the counts & metrics are correct.
require.Equal(t, batches, c.Count())
require.Equal(t, batches*fileSize, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_size_current"), nil))
require.Equal(t, batches*fileSize, promhelp.CounterValue(t, reg, cachePromMetricName("open_files_size_total"), nil))
require.Equal(t, batches, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_current"), nil))
require.Equal(t, batches, promhelp.CounterValue(t, reg, cachePromMetricName("open_files_total"), nil))
require.Equal(t, batches*batchSize, promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil))
require.Equal(t, batches*batchSize, promhelp.CounterValue(t, reg, cachePromMetricName("open_file_refs_total"), nil))
}

func TestRelease(t *testing.T) {
t.Parallel()

const fileSize = 10
emptyFS := afero.NewIOFS(afero.NewReadOnlyFs(afero.NewMemMapFs()))
c := newTestCache(func(_ context.Context, _ uuid.UUID) (fs.FS, int64, error) {
return emptyFS, 0, nil
})
reg := prometheus.NewRegistry()
c := New(func(_ context.Context, _ uuid.UUID) (fs.FS, int64, error) {
return emptyFS, fileSize, nil
}, reg)

batches := 100
ids := make([]uuid.UUID, 0, batches)
Expand All @@ -73,28 +91,60 @@ func TestRelease(t *testing.T) {

// Acquire a bunch of references
batchSize := 10
for _, id := range ids {
for range batchSize {
for openedIdx, id := range ids {
for batchIdx := range batchSize {
it, err := c.Acquire(t.Context(), id)
require.NoError(t, err)
require.Equal(t, emptyFS, it)

// Each time a new file is opened, the metrics should be updated as so:
opened := openedIdx + 1
// Number of unique files opened is equal to the idx of the ids.
require.Equal(t, opened, c.Count())
require.Equal(t, opened, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_current"), nil))
// Current file size is unique files * file size.
require.Equal(t, opened*fileSize, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_size_current"), nil))
// The number of refs is the current iteration of both loops.
require.Equal(t, ((opened-1)*batchSize)+(batchIdx+1), promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil))
}
}

// Make sure cache is fully loaded
require.Equal(t, len(c.data), batches)

// Now release all of the references
for _, id := range ids {
for range batchSize {
for closedIdx, id := range ids {
stillOpen := len(ids) - closedIdx
for closingIdx := range batchSize {
c.Release(id)

// Each time a file is released, the metrics should decrement the file refs
require.Equal(t, (stillOpen*batchSize)-(closingIdx+1), promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil))

closed := closingIdx+1 == batchSize
if closed {
continue
}

// File ref still exists, so the counts should not change yet.
require.Equal(t, stillOpen, c.Count())
require.Equal(t, stillOpen, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_current"), nil))
require.Equal(t, stillOpen*fileSize, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_size_current"), nil))
}
}

// ...and make sure that the cache has emptied itself.
require.Equal(t, len(c.data), 0)
}

func newTestCache(fetcher func(context.Context, uuid.UUID) (fs.FS, int64, error)) *Cache {
return New(fetcher, prometheus.NewRegistry())
// Verify all the counts & metrics are correct.
// All existing files are closed
require.Equal(t, 0, c.Count())
require.Equal(t, 0, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_size_current"), nil))
require.Equal(t, 0, promhelp.GaugeValue(t, reg, cachePromMetricName("open_files_current"), nil))
require.Equal(t, 0, promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil))

// Total counts remain
require.Equal(t, batches*fileSize, promhelp.CounterValue(t, reg, cachePromMetricName("open_files_size_total"), nil))
require.Equal(t, batches, promhelp.CounterValue(t, reg, cachePromMetricName("open_files_total"), nil))
require.Equal(t, batches*batchSize, promhelp.CounterValue(t, reg, cachePromMetricName("open_file_refs_total"), nil))
}
Loading