@@ -28,21 +28,26 @@ const (
 
 func NewMetrics(reg prometheus.Registerer) *Metrics {
 	return &Metrics{
-		DispatchedCount: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{Name: "dispatched_count", Namespace: ns, Subsystem: subsystem,
+		DispatchedCount: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+			Name: "dispatched_count", Namespace: ns, Subsystem: subsystem,
 			Help: "The count of notifications successfully dispatched.",
 		}, []string{LabelMethod, LabelTemplateID}),
-		TempFailureCount: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{Name: "temporary_failures_count", Namespace: ns, Subsystem: subsystem,
+		TempFailureCount: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+			Name: "temporary_failures_count", Namespace: ns, Subsystem: subsystem,
 			Help: "The count of notifications which failed but have retry attempts remaining.",
 		}, []string{LabelMethod, LabelTemplateID}),
-		PermFailureCount: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{Name: "permanent_failures_count", Namespace: ns, Subsystem: subsystem,
+		PermFailureCount: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+			Name: "permanent_failures_count", Namespace: ns, Subsystem: subsystem,
 			Help: "The count of notifications which failed and have exceeded their retry attempts.",
 		}, []string{LabelMethod, LabelTemplateID}),
-		RetryCount: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{Name: "retry_count", Namespace: ns, Subsystem: subsystem,
+		RetryCount: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+			Name: "retry_count", Namespace: ns, Subsystem: subsystem,
 			Help: "The count of notification dispatch retry attempts.",
 		}, []string{LabelMethod, LabelTemplateID}),
 
 		// Aggregating on LabelTemplateID as well would cause a cardinality explosion.
-		QueuedSeconds: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{Name: "queued_seconds", Namespace: ns, Subsystem: subsystem,
+		QueuedSeconds: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
+			Name: "queued_seconds", Namespace: ns, Subsystem: subsystem,
 			Buckets: []float64{0.1, 1, 5, 15, 30, 60, 120, 300, 600, 3600, 86400},
 			Help: "The time elapsed between a notification being enqueued in the store and retrieved for processing " +
 				"(measures the latency of the notifications system). This should generally be within CODER_NOTIFICATIONS_FETCH_INTERVAL " +
@@ -51,13 +56,15 @@ func NewMetrics(reg prometheus.Registerer) *Metrics {
 		}, []string{LabelMethod}),
 
 		// Aggregating on LabelTemplateID as well would cause a cardinality explosion.
-		DispatcherSendSeconds: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{Name: "dispatcher_send_seconds", Namespace: ns, Subsystem: subsystem,
+		DispatcherSendSeconds: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
+			Name: "dispatcher_send_seconds", Namespace: ns, Subsystem: subsystem,
 			Buckets: []float64{0.001, 0.05, 0.1, 0.5, 1, 2, 5, 10, 15, 30, 60, 120},
 			Help: "The time taken to dispatch notifications.",
 		}, []string{LabelMethod}),
 
 		// Currently no requirement to discriminate between success and failure updates which are pending.
-		PendingUpdates: promauto.With(reg).NewGauge(prometheus.GaugeOpts{Name: "pending_updates", Namespace: ns, Subsystem: subsystem,
+		PendingUpdates: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
+			Name: "pending_updates", Namespace: ns, Subsystem: subsystem,
 			Help: "The number of updates waiting to be flushed to the store.",
 		}),
 	}
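
For context, a minimal usage sketch of the constructor touched by this diff. It is not part of the change: the registry, label values, and observed numbers below are illustrative assumptions, and the snippet assumes it lives in the same package as NewMetrics so the Metrics fields and label constants are in scope.

	// Minimal usage sketch; "smtp" and "some-template-id" are placeholder label values.
	reg := prometheus.NewRegistry()
	m := NewMetrics(reg)

	// Counters are labeled by method and template ID.
	m.DispatchedCount.WithLabelValues("smtp", "some-template-id").Inc()

	// Histograms are labeled by method only, to avoid a cardinality explosion.
	m.QueuedSeconds.WithLabelValues("smtp").Observe(1.5)

	// The pending-updates gauge carries no labels.
	m.PendingUpdates.Set(3)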