@@ -13,21 +13,28 @@ import (
13
13
"github.com/stretchr/testify/require"
14
14
"golang.org/x/sync/errgroup"
15
15
16
+ "github.com/coder/coder/v2/coderd/coderdtest/promhelp"
16
17
"github.com/coder/coder/v2/testutil"
17
18
)
18
19
20
// cachePromMetricName returns the fully-qualified Prometheus metric name
// used by the file cache, i.e. the given metric suffix with the
// "coderd_file_cache_" namespace prefix applied.
func cachePromMetricName(metric string) string {
	const prefix = "coderd_file_cache_"
	return prefix + metric
}
23
+
19
24
func TestConcurrency (t * testing.T ) {
20
25
t .Parallel ()
21
26
27
+ const fileSize = 10
22
28
emptyFS := afero .NewIOFS (afero .NewReadOnlyFs (afero .NewMemMapFs ()))
23
29
var fetches atomic.Int64
24
- c := newTestCache (func (_ context.Context , _ uuid.UUID ) (fs.FS , int64 , error ) {
30
+ reg := prometheus .NewRegistry ()
31
+ c := New (func (_ context.Context , _ uuid.UUID ) (fs.FS , int64 , error ) {
25
32
fetches .Add (1 )
26
33
// Wait long enough before returning to make sure that all of the goroutines
27
34
// will be waiting in line, ensuring that no one duplicated a fetch.
28
35
time .Sleep (testutil .IntervalMedium )
29
- return emptyFS , 0 , nil
30
- })
36
+ return emptyFS , fileSize , nil
37
+ }, reg )
31
38
32
39
batches := 1000
33
40
groups := make ([]* errgroup.Group , 0 , batches )
@@ -55,15 +62,26 @@ func TestConcurrency(t *testing.T) {
55
62
require .NoError (t , g .Wait ())
56
63
}
57
64
require .Equal (t , int64 (batches ), fetches .Load ())
65
+
66
+ // Verify all the counts & metrics are correct.
67
+ require .Equal (t , batches , c .Count ())
68
+ require .Equal (t , batches * fileSize , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_files_size_current" ), nil ))
69
+ require .Equal (t , batches * fileSize , promhelp .CounterValue (t , reg , cachePromMetricName ("open_files_size_total" ), nil ))
70
+ require .Equal (t , batches , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_files_current" ), nil ))
71
+ require .Equal (t , batches , promhelp .CounterValue (t , reg , cachePromMetricName ("open_files_total" ), nil ))
72
+ require .Equal (t , batches * batchSize , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_file_refs_current" ), nil ))
73
+ require .Equal (t , batches * batchSize , promhelp .CounterValue (t , reg , cachePromMetricName ("open_file_refs_total" ), nil ))
58
74
}
59
75
60
76
func TestRelease (t * testing.T ) {
61
77
t .Parallel ()
62
78
79
+ const fileSize = 10
63
80
emptyFS := afero .NewIOFS (afero .NewReadOnlyFs (afero .NewMemMapFs ()))
64
- c := newTestCache (func (_ context.Context , _ uuid.UUID ) (fs.FS , int64 , error ) {
65
- return emptyFS , 0 , nil
66
- })
81
+ reg := prometheus .NewRegistry ()
82
+ c := New (func (_ context.Context , _ uuid.UUID ) (fs.FS , int64 , error ) {
83
+ return emptyFS , fileSize , nil
84
+ }, reg )
67
85
68
86
batches := 100
69
87
ids := make ([]uuid.UUID , 0 , batches )
@@ -73,28 +91,60 @@ func TestRelease(t *testing.T) {
73
91
74
92
// Acquire a bunch of references
75
93
batchSize := 10
76
- for _ , id := range ids {
77
- for range batchSize {
94
+ for openedIdx , id := range ids {
95
+ for batchIdx := range batchSize {
78
96
it , err := c .Acquire (t .Context (), id )
79
97
require .NoError (t , err )
80
98
require .Equal (t , emptyFS , it )
99
+
100
+ // Each time a new file is opened, the metrics should be updated as so:
101
+ opened := openedIdx + 1
102
+ // Number of unique files opened is equal to the idx of the ids.
103
+ require .Equal (t , opened , c .Count ())
104
+ require .Equal (t , opened , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_files_current" ), nil ))
105
+ // Current file size is unique files * file size.
106
+ require .Equal (t , opened * fileSize , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_files_size_current" ), nil ))
107
+ // The number of refs is the current iteration of both loops.
108
+ require .Equal (t , ((opened - 1 )* batchSize )+ (batchIdx + 1 ), promhelp .GaugeValue (t , reg , cachePromMetricName ("open_file_refs_current" ), nil ))
81
109
}
82
110
}
83
111
84
112
// Make sure cache is fully loaded
85
113
require .Equal (t , len (c .data ), batches )
86
114
87
115
// Now release all of the references
88
- for _ , id := range ids {
89
- for range batchSize {
116
+ for closedIdx , id := range ids {
117
+ stillOpen := len (ids ) - closedIdx
118
+ for closingIdx := range batchSize {
90
119
c .Release (id )
120
+
121
+ // Each time a file is released, the metrics should decrement the file refs
122
+ require .Equal (t , (stillOpen * batchSize )- (closingIdx + 1 ), promhelp .GaugeValue (t , reg , cachePromMetricName ("open_file_refs_current" ), nil ))
123
+
124
+ closed := closingIdx + 1 == batchSize
125
+ if closed {
126
+ continue
127
+ }
128
+
129
+ // File ref still exists, so the counts should not change yet.
130
+ require .Equal (t , stillOpen , c .Count ())
131
+ require .Equal (t , stillOpen , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_files_current" ), nil ))
132
+ require .Equal (t , stillOpen * fileSize , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_files_size_current" ), nil ))
91
133
}
92
134
}
93
135
94
136
// ...and make sure that the cache has emptied itself.
95
137
require .Equal (t , len (c .data ), 0 )
96
- }
97
138
98
- func newTestCache (fetcher func (context.Context , uuid.UUID ) (fs.FS , int64 , error )) * Cache {
99
- return New (fetcher , prometheus .NewRegistry ())
139
+ // Verify all the counts & metrics are correct.
140
+ // All existing files are closed
141
+ require .Equal (t , 0 , c .Count ())
142
+ require .Equal (t , 0 , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_files_size_current" ), nil ))
143
+ require .Equal (t , 0 , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_files_current" ), nil ))
144
+ require .Equal (t , 0 , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_file_refs_current" ), nil ))
145
+
146
+ // Total counts remain
147
+ require .Equal (t , batches * fileSize , promhelp .CounterValue (t , reg , cachePromMetricName ("open_files_size_total" ), nil ))
148
+ require .Equal (t , batches , promhelp .CounterValue (t , reg , cachePromMetricName ("open_files_total" ), nil ))
149
+ require .Equal (t , batches * batchSize , promhelp .CounterValue (t , reg , cachePromMetricName ("open_file_refs_total" ), nil ))
100
150
}
0 commit comments