7
7
"sync"
8
8
9
9
"github.com/google/uuid"
10
+ "github.com/prometheus/client_golang/prometheus"
11
+ "github.com/prometheus/client_golang/prometheus/promauto"
10
12
"golang.org/x/xerrors"
11
13
12
14
archivefs "github.com/coder/coder/v2/archive/fs"
@@ -16,22 +18,75 @@ import (
16
18
17
19
// NewFromStore returns a file cache that will fetch files from the provided
18
20
// database.
19
- func NewFromStore (store database.Store ) * Cache {
20
- fetcher := func (ctx context.Context , fileID uuid.UUID ) (fs.FS , error ) {
21
+ func NewFromStore (store database.Store , registerer prometheus. Registerer ) * Cache {
22
+ fetch := func (ctx context.Context , fileID uuid.UUID ) (fs.FS , int64 , error ) {
21
23
file , err := store .GetFileByID (ctx , fileID )
22
24
if err != nil {
23
- return nil , xerrors .Errorf ("failed to read file from database: %w" , err )
25
+ return nil , 0 , xerrors .Errorf ("failed to read file from database: %w" , err )
24
26
}
25
27
26
28
content := bytes .NewBuffer (file .Data )
27
- return archivefs .FromTarReader (content ), nil
29
+ return archivefs .FromTarReader (content ), int64 ( content . Len ()), nil
28
30
}
29
31
30
- return & Cache {
32
+ return New (fetch , registerer )
33
+ }
34
+
35
+ func New (fetch fetcher , registerer prometheus.Registerer ) * Cache {
36
+ return (& Cache {
31
37
lock : sync.Mutex {},
32
38
data : make (map [uuid.UUID ]* cacheEntry ),
33
- fetcher : fetcher ,
34
- }
39
+ fetcher : fetch ,
40
+ }).registerMetrics (registerer )
41
+ }
42
+
43
+ func (c * Cache ) registerMetrics (registerer prometheus.Registerer ) * Cache {
44
+ subsystem := "file_cache"
45
+ f := promauto .With (registerer )
46
+
47
+ c .currentCacheSize = f .NewGauge (prometheus.GaugeOpts {
48
+ Namespace : "coderd" ,
49
+ Subsystem : subsystem ,
50
+ Name : "open_files_size_current" ,
51
+ Help : "The current size of all files currently open in the file cache." ,
52
+ })
53
+
54
+ c .totalCacheSize = f .NewCounter (prometheus.CounterOpts {
55
+ Namespace : "coderd" ,
56
+ Subsystem : subsystem ,
57
+ Name : "open_files_size_total" ,
58
+ Help : "The total size of all files opened in the file cache." ,
59
+ })
60
+
61
+ c .currentOpenFiles = f .NewGauge (prometheus.GaugeOpts {
62
+ Namespace : "coderd" ,
63
+ Subsystem : subsystem ,
64
+ Name : "open_files_current" ,
65
+ Help : "The number of unique files currently open in the file cache." ,
66
+ })
67
+
68
+ c .totalOpenedFiles = f .NewCounter (prometheus.CounterOpts {
69
+ Namespace : "coderd" ,
70
+ Subsystem : subsystem ,
71
+ Name : "open_files_total" ,
72
+ Help : "The number of unique files opened in the file cache." ,
73
+ })
74
+
75
+ c .currentOpenFileReferences = f .NewGauge (prometheus.GaugeOpts {
76
+ Namespace : "coderd" ,
77
+ Subsystem : subsystem ,
78
+ Name : "open_file_refs_current" ,
79
+ Help : "The number of file references currently open in the file cache." ,
80
+ })
81
+
82
+ c .totalOpenFileReferences = f .NewCounter (prometheus.CounterOpts {
83
+ Namespace : "coderd" ,
84
+ Subsystem : subsystem ,
85
+ Name : "open_file_refs_total" ,
86
+ Help : "The number of file references currently open in the file cache." ,
87
+ })
88
+
89
+ return c
35
90
}
36
91
37
92
// Cache persists the files for template versions, and is used by dynamic
type Cache struct {
	// lock guards data and every entry's refCount.
	lock sync.Mutex
	// data maps a file ID to its (possibly still loading) cache entry.
	data map[uuid.UUID]*cacheEntry
	// fetcher loads a file that is not yet present in data.
	fetcher

	// metrics

	// Reference-level metrics: one reference per Acquire that has not yet
	// been Released.
	currentOpenFileReferences prometheus.Gauge
	totalOpenFileReferences   prometheus.Counter

	// File-level metrics: one per unique fileID held in data.
	currentOpenFiles prometheus.Gauge
	totalOpenedFiles prometheus.Counter

	// Byte-size metrics, maintained from the size the fetcher reports.
	currentCacheSize prometheus.Gauge
	totalCacheSize   prometheus.Counter
}
112
+
113
// cacheEntryValue is the result of a fetch: the file's filesystem view and
// its size in bytes, kept so Release can decrement the cache-size gauge.
type cacheEntryValue struct {
	dir  fs.FS
	size int64
}
47
117
48
118
// cacheEntry tracks a single cached file and how many callers currently hold
// a reference to it.
type cacheEntry struct {
	// refCount must only be accessed while the Cache lock is held.
	refCount int
	// value performs the fetch at most once; both the result and any error
	// are cached for subsequent Load calls.
	value *lazy.ValueWithError[cacheEntryValue]
}
53
123
54
- type fetcher func (context.Context , uuid.UUID ) (fs.FS , error )
124
// fetcher loads the file identified by the UUID, returning its contents as an
// fs.FS together with the file's size in bytes.
type fetcher func(context.Context, uuid.UUID) (dir fs.FS, size int64, err error)
55
125
56
126
// Acquire will load the fs.FS for the given file. It guarantees that parallel
57
127
// calls for the same fileID will only result in one fetch, and that parallel
@@ -67,26 +137,41 @@ func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (fs.FS, error) {
67
137
if err != nil {
68
138
c .Release (fileID )
69
139
}
70
- return it , err
140
+ return it . dir , err
71
141
}
72
142
73
- func (c * Cache ) prepare (ctx context.Context , fileID uuid.UUID ) * lazy.ValueWithError [fs. FS ] {
143
// prepare returns the lazy value for fileID, creating the cache entry on
// first use and incrementing its reference count. Callers must balance every
// prepare with a Release (Acquire does this itself when the load fails).
func (c *Cache) prepare(ctx context.Context, fileID uuid.UUID) *lazy.ValueWithError[cacheEntryValue] {
	c.lock.Lock()
	defer c.lock.Unlock()

	entry, ok := c.data[fileID]
	if !ok {
		// NOTE(review): the closure captures the first caller's ctx. If that
		// ctx is canceled before the value loads, the cached error could be
		// served to later callers of the same fileID — confirm this is
		// acceptable or wire in a detached context.
		value := lazy.NewWithError(func() (cacheEntryValue, error) {
			dir, size, err := c.fetcher(ctx, fileID)

			// Always add to the cache size the bytes of the file loaded.
			if err == nil {
				c.currentCacheSize.Add(float64(size))
				c.totalCacheSize.Add(float64(size))
			}

			return cacheEntryValue{
				dir:  dir,
				size: size,
			}, err
		})

		entry = &cacheEntry{
			value:    value,
			refCount: 0,
		}
		c.data[fileID] = entry
		// A brand-new entry counts as one more unique open file.
		c.currentOpenFiles.Inc()
		c.totalOpenedFiles.Inc()
	}

	// Every prepare call is one more outstanding reference, whether or not
	// the entry already existed.
	c.currentOpenFileReferences.Inc()
	c.totalOpenFileReferences.Inc()
	entry.refCount++
	return entry.value
}
@@ -105,11 +190,19 @@ func (c *Cache) Release(fileID uuid.UUID) {
105
190
return
106
191
}
107
192
193
+ c .currentOpenFileReferences .Dec ()
108
194
entry .refCount --
109
195
if entry .refCount > 0 {
110
196
return
111
197
}
112
198
199
+ c .currentOpenFiles .Dec ()
200
+
201
+ ev , err := entry .value .Load ()
202
+ if err == nil {
203
+ c .currentCacheSize .Add (- 1 * float64 (ev .size ))
204
+ }
205
+
113
206
delete (c .data , fileID )
114
207
}
115
208
0 commit comments