From 477fef95040c029d0b2945fb995e9b6b997b48be Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Mon, 28 Nov 2022 17:58:22 +0100 Subject: [PATCH 01/19] docs: Prometheus metrics --- docs/admin/prometheus.md | 50 +++++++++++++++++++++++++++++++++++++ docs/images/icons/speed.svg | 1 + docs/manifest.json | 6 +++++ 3 files changed, 57 insertions(+) create mode 100644 docs/admin/prometheus.md create mode 100644 docs/images/icons/speed.svg diff --git a/docs/admin/prometheus.md b/docs/admin/prometheus.md new file mode 100644 index 0000000000000..c46d1ae4da6e9 --- /dev/null +++ b/docs/admin/prometheus.md @@ -0,0 +1,50 @@ +# Prometheus + +Coder has support for Prometheus metrics using the dedicated [Go client library](github.com/prometheus/client_golang). The library exposes various [metrics types](https://prometheus.io/docs/concepts/metric_types/), such as gauges, histograms, and timers, that give insight into the live Coder deployment. + +Feel free to browse through the [Getting started](https://prometheus.io/docs/prometheus/latest/getting_started/) guide, if you don't have an installation of the Prometheus server. + +## Enable Prometheus metrics + +Coder server exports metrics via the HTTP endpoint, which can be enabled using either the environment variable `CODER_PROMETHEUS_ENABLE` or the flag` --prometheus-enable`. + +Use either the environment variable `CODER_PROMETHEUS_ADDRESS` or the flag ` --prometheus-address :` to select a custom endpoint. + +Once the `code server --prometheus-enable` is started, you can preview the metrics endpoint: http://localhost:2112/ (default endpoint). + +``` +# HELP coderd_api_active_users_duration_hour The number of users that have been active within the last hour. +# TYPE coderd_api_active_users_duration_hour gauge +coderd_api_active_users_duration_hour 0 +# HELP coderd_api_concurrent_requests The number of concurrent API requests +# TYPE coderd_api_concurrent_requests gauge +coderd_api_concurrent_requests 2 +# HELP coderd_api_concurrent_websockets The total number of concurrent API websockets +# TYPE coderd_api_concurrent_websockets gauge +coderd_api_concurrent_websockets 1 +# HELP coderd_api_request_latencies_ms Latency distribution of requests in milliseconds +# TYPE coderd_api_request_latencies_ms histogram +coderd_api_request_latencies_ms_bucket{method="GET",path="",le="1"} 10 +coderd_api_request_latencies_ms_bucket{method="GET",path="",le="5"} 13 +coderd_api_request_latencies_ms_bucket{method="GET",path="",le="10"} 14 +coderd_api_request_latencies_ms_bucket{method="GET",path="",le="25"} 15 +... +``` + +## Explore collected metrics + +### Coderd + +[Coderd](../about/architecture.md#coderd) is the service responsible for managing workspaces, provisioners, and users. Coder resources are controlled using the authorized HTTP API - Coderd API. + +The Prometheus collector tracks and exposes activity statistics for [platform users](https://github.com/coder/coder/blob/main/coderd/prometheusmetrics/prometheusmetrics.go#L15-L54) and [workspace](https://github.com/coder/coder/blob/main/coderd/prometheusmetrics/prometheusmetrics.go#L57-L108). + +It also exposes [operational metrics](https://github.com/coder/coder/blob/main/coderd/httpmw/prometheus.go#L21-L61) for HTTP requests and WebSocket connections, including a total number of calls, HTTP status, active WebSockets, request duration, etc. + +### Provisionerd + +[Provisionerd](../about/architecture.md#provisionerd) is the execution context for infrastructure providers. 
The runner exposes [statistics for executed jobs](https://github.com/coder/coder/blob/main/provisionerd/provisionerd.go#L133-L154) - a number of jobs currently running, and execution timings. + +### Go runtime, process stats + +[Common collectors](https://github.com/coder/coder/blob/main/cli/server.go#L555-L556) monitor the Go runtime - memory usage, garbage collection, active threads, goroutines, etc. Additionally, on Linux and on Windows, they collect CPU stats, memory, file descriptors, and process uptime. diff --git a/docs/images/icons/speed.svg b/docs/images/icons/speed.svg new file mode 100644 index 0000000000000..a7e07f36c030a --- /dev/null +++ b/docs/images/icons/speed.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/manifest.json b/docs/manifest.json index bac69202bbf1a..433571a3cb668 100644 --- a/docs/manifest.json +++ b/docs/manifest.json @@ -274,6 +274,12 @@ "path": "./admin/high-availability.md", "state": "enterprise" }, + { + "title": "Prometheus", + "description": "Learn how to collect Prometheus metrics", + "icon_path": "./images/icons/speed.svg", + "path": "./admin/prometheus.md" + }, { "title": "Telemetry", "description": "Learn what usage telemetry Coder collects", From 887ca4eeef92544ece55fa38a61297771dea388f Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Mon, 28 Nov 2022 18:04:18 +0100 Subject: [PATCH 02/19] Fix --- docs/admin/prometheus.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/admin/prometheus.md b/docs/admin/prometheus.md index c46d1ae4da6e9..4373005137fa9 100644 --- a/docs/admin/prometheus.md +++ b/docs/admin/prometheus.md @@ -31,7 +31,7 @@ coderd_api_request_latencies_ms_bucket{method="GET",path="",le="25"} 15 ... ``` -## Explore collected metrics +## Available collectors ### Coderd From 663d2e0f64a2fd7373d5a422e1adc0d26c99cfb3 Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Mon, 28 Nov 2022 18:05:54 +0100 Subject: [PATCH 03/19] Typo --- docs/admin/prometheus.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/admin/prometheus.md b/docs/admin/prometheus.md index 4373005137fa9..32732d862e9f3 100644 --- a/docs/admin/prometheus.md +++ b/docs/admin/prometheus.md @@ -10,7 +10,7 @@ Coder server exports metrics via the HTTP endpoint, which can be enabled using e Use either the environment variable `CODER_PROMETHEUS_ADDRESS` or the flag ` --prometheus-address :` to select a custom endpoint. -Once the `code server --prometheus-enable` is started, you can preview the metrics endpoint: http://localhost:2112/ (default endpoint). +For `code server --prometheus-enable` is started locally, you can preview the metrics endpoint: http://localhost:2112/ (default endpoint). ``` # HELP coderd_api_active_users_duration_hour The number of users that have been active within the last hour. From ce7a623f2b680135f95d26f207fa0e64cced9291 Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Mon, 28 Nov 2022 18:06:12 +0100 Subject: [PATCH 04/19] Typo --- docs/admin/prometheus.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/admin/prometheus.md b/docs/admin/prometheus.md index 32732d862e9f3..fe8efe6ada113 100644 --- a/docs/admin/prometheus.md +++ b/docs/admin/prometheus.md @@ -10,7 +10,7 @@ Coder server exports metrics via the HTTP endpoint, which can be enabled using e Use either the environment variable `CODER_PROMETHEUS_ADDRESS` or the flag ` --prometheus-address :` to select a custom endpoint. 
-For `code server --prometheus-enable` is started locally, you can preview the metrics endpoint: http://localhost:2112/ (default endpoint). +For `coder server --prometheus-enable` is started locally, you can preview the metrics endpoint: http://localhost:2112/ (default endpoint). ``` # HELP coderd_api_active_users_duration_hour The number of users that have been active within the last hour. From d93b2fdcd0f7a134116c73e25bc6a340c725e05e Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Mon, 28 Nov 2022 18:06:38 +0100 Subject: [PATCH 05/19] Typo --- docs/admin/prometheus.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/admin/prometheus.md b/docs/admin/prometheus.md index fe8efe6ada113..30b8c317e9951 100644 --- a/docs/admin/prometheus.md +++ b/docs/admin/prometheus.md @@ -10,7 +10,7 @@ Coder server exports metrics via the HTTP endpoint, which can be enabled using e Use either the environment variable `CODER_PROMETHEUS_ADDRESS` or the flag ` --prometheus-address :` to select a custom endpoint. -For `coder server --prometheus-enable` is started locally, you can preview the metrics endpoint: http://localhost:2112/ (default endpoint). +If `coder server --prometheus-enable` is started locally, you can preview the metrics endpoint: http://localhost:2112/ (default endpoint). ``` # HELP coderd_api_active_users_duration_hour The number of users that have been active within the last hour. From 08edfc5518f39cd064bb3e3584919e9b85423c6c Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Mon, 28 Nov 2022 18:10:40 +0100 Subject: [PATCH 06/19] Fix: link --- docs/admin/prometheus.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/admin/prometheus.md b/docs/admin/prometheus.md index 30b8c317e9951..a321fb8f0c0f2 100644 --- a/docs/admin/prometheus.md +++ b/docs/admin/prometheus.md @@ -1,6 +1,6 @@ # Prometheus -Coder has support for Prometheus metrics using the dedicated [Go client library](github.com/prometheus/client_golang). The library exposes various [metrics types](https://prometheus.io/docs/concepts/metric_types/), such as gauges, histograms, and timers, that give insight into the live Coder deployment. +Coder has support for Prometheus metrics using the dedicated [Go client library](https://github.com/prometheus/client_golang). The library exposes various [metrics types](https://prometheus.io/docs/concepts/metric_types/), such as gauges, histograms, and timers, that give insight into the live Coder deployment. Feel free to browse through the [Getting started](https://prometheus.io/docs/prometheus/latest/getting_started/) guide, if you don't have an installation of the Prometheus server. From a7b903f02d2b3e4d69a6727cc6b0c0f911c00ab9 Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Mon, 28 Nov 2022 18:32:02 +0100 Subject: [PATCH 07/19] Update docs/admin/prometheus.md Co-authored-by: Dean Sheather --- docs/admin/prometheus.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/admin/prometheus.md b/docs/admin/prometheus.md index a321fb8f0c0f2..5457109eb3f3a 100644 --- a/docs/admin/prometheus.md +++ b/docs/admin/prometheus.md @@ -6,7 +6,7 @@ Feel free to browse through the [Getting started](https://prometheus.io/docs/pro ## Enable Prometheus metrics -Coder server exports metrics via the HTTP endpoint, which can be enabled using either the environment variable `CODER_PROMETHEUS_ENABLE` or the flag` --prometheus-enable`. 
+Coder server exports metrics via the HTTP endpoint, which can be enabled using either the environment variable `CODER_PROMETHEUS_ENABLE` or the flag `--prometheus-enable`. Use either the environment variable `CODER_PROMETHEUS_ADDRESS` or the flag ` --prometheus-address :` to select a custom endpoint. From 553c799d7a427b4ca6ba920a52fc704c144016a9 Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Mon, 28 Nov 2022 18:32:19 +0100 Subject: [PATCH 08/19] Update docs/admin/prometheus.md Co-authored-by: Dean Sheather --- docs/admin/prometheus.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/admin/prometheus.md b/docs/admin/prometheus.md index 5457109eb3f3a..bc6de85962e33 100644 --- a/docs/admin/prometheus.md +++ b/docs/admin/prometheus.md @@ -1,6 +1,6 @@ # Prometheus -Coder has support for Prometheus metrics using the dedicated [Go client library](https://github.com/prometheus/client_golang). The library exposes various [metrics types](https://prometheus.io/docs/concepts/metric_types/), such as gauges, histograms, and timers, that give insight into the live Coder deployment. +Coder exposes many metrics which can be consumed by a Prometheus server, and give insight into the current state of a live Coder deployment. Feel free to browse through the [Getting started](https://prometheus.io/docs/prometheus/latest/getting_started/) guide, if you don't have an installation of the Prometheus server. From 483f9bc39dc1f8ad50882b27bced5b4f109b01a7 Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Mon, 28 Nov 2022 18:32:38 +0100 Subject: [PATCH 09/19] Update docs/admin/prometheus.md Co-authored-by: Dean Sheather --- docs/admin/prometheus.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/admin/prometheus.md b/docs/admin/prometheus.md index bc6de85962e33..5ac2efbb55187 100644 --- a/docs/admin/prometheus.md +++ b/docs/admin/prometheus.md @@ -8,7 +8,7 @@ Feel free to browse through the [Getting started](https://prometheus.io/docs/pro Coder server exports metrics via the HTTP endpoint, which can be enabled using either the environment variable `CODER_PROMETHEUS_ENABLE` or the flag `--prometheus-enable`. -Use either the environment variable `CODER_PROMETHEUS_ADDRESS` or the flag ` --prometheus-address :` to select a custom endpoint. +The Prometheus endpoint address is `http://localhost:2112/` by default. You can use either the environment variable `CODER_PROMETHEUS_ADDRESS` or the flag ` --prometheus-address :` to select a different listen address. If `coder server --prometheus-enable` is started locally, you can preview the metrics endpoint: http://localhost:2112/ (default endpoint). From 77d9feccbea31d9f0a6d5dbec318cf4179ef9c6d Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Mon, 28 Nov 2022 18:33:00 +0100 Subject: [PATCH 10/19] Update docs/admin/prometheus.md Co-authored-by: Dean Sheather --- docs/admin/prometheus.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/admin/prometheus.md b/docs/admin/prometheus.md index 5ac2efbb55187..8846d57ff9dd6 100644 --- a/docs/admin/prometheus.md +++ b/docs/admin/prometheus.md @@ -10,7 +10,7 @@ Coder server exports metrics via the HTTP endpoint, which can be enabled using e The Prometheus endpoint address is `http://localhost:2112/` by default. You can use either the environment variable `CODER_PROMETHEUS_ADDRESS` or the flag ` --prometheus-address :` to select a different listen address. 
-If `coder server --prometheus-enable` is started locally, you can preview the metrics endpoint: http://localhost:2112/ (default endpoint). +If `coder server --prometheus-enable` is started locally, you can preview the metrics endpoint in your browser or by using curl: http://localhost:2112/. ``` # HELP coderd_api_active_users_duration_hour The number of users that have been active within the last hour. From 22908567251fb14fe40cf0afd4ec0ef0cc643d86 Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Mon, 28 Nov 2022 18:33:15 +0100 Subject: [PATCH 11/19] Update docs/admin/prometheus.md Co-authored-by: Dean Sheather --- docs/admin/prometheus.md | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/docs/admin/prometheus.md b/docs/admin/prometheus.md index 8846d57ff9dd6..8b5324525adb8 100644 --- a/docs/admin/prometheus.md +++ b/docs/admin/prometheus.md @@ -13,21 +13,10 @@ The Prometheus endpoint address is `http://localhost:2112/` by default. You can If `coder server --prometheus-enable` is started locally, you can preview the metrics endpoint in your browser or by using curl: http://localhost:2112/. ``` +$ curl http://localhost:2112/ # HELP coderd_api_active_users_duration_hour The number of users that have been active within the last hour. # TYPE coderd_api_active_users_duration_hour gauge coderd_api_active_users_duration_hour 0 -# HELP coderd_api_concurrent_requests The number of concurrent API requests -# TYPE coderd_api_concurrent_requests gauge -coderd_api_concurrent_requests 2 -# HELP coderd_api_concurrent_websockets The total number of concurrent API websockets -# TYPE coderd_api_concurrent_websockets gauge -coderd_api_concurrent_websockets 1 -# HELP coderd_api_request_latencies_ms Latency distribution of requests in milliseconds -# TYPE coderd_api_request_latencies_ms histogram -coderd_api_request_latencies_ms_bucket{method="GET",path="",le="1"} 10 -coderd_api_request_latencies_ms_bucket{method="GET",path="",le="5"} 13 -coderd_api_request_latencies_ms_bucket{method="GET",path="",le="10"} 14 -coderd_api_request_latencies_ms_bucket{method="GET",path="",le="25"} 15 ... ``` From 8d89937a4edb8b2a91ffc274015dbe5955fb5dc7 Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Mon, 28 Nov 2022 18:34:14 +0100 Subject: [PATCH 12/19] Rephrase --- docs/admin/prometheus.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/admin/prometheus.md b/docs/admin/prometheus.md index 8b5324525adb8..317ee683c6892 100644 --- a/docs/admin/prometheus.md +++ b/docs/admin/prometheus.md @@ -2,7 +2,7 @@ Coder exposes many metrics which can be consumed by a Prometheus server, and give insight into the current state of a live Coder deployment. -Feel free to browse through the [Getting started](https://prometheus.io/docs/prometheus/latest/getting_started/) guide, if you don't have an installation of the Prometheus server. +If you don't have an Prometheus server installed, you can follow the Prometheus [Getting started](https://prometheus.io/docs/prometheus/latest/getting_started/) guide. 
## Enable Prometheus metrics From 69d97a62421a8af5bdc0f8baea61552514082ea0 Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Mon, 28 Nov 2022 22:16:00 +0100 Subject: [PATCH 13/19] notice --- docs/admin/prometheus.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/admin/prometheus.md b/docs/admin/prometheus.md index 317ee683c6892..2526032f22b54 100644 --- a/docs/admin/prometheus.md +++ b/docs/admin/prometheus.md @@ -10,6 +10,8 @@ Coder server exports metrics via the HTTP endpoint, which can be enabled using e The Prometheus endpoint address is `http://localhost:2112/` by default. You can use either the environment variable `CODER_PROMETHEUS_ADDRESS` or the flag ` --prometheus-address :` to select a different listen address. +__Notice__: Prometheus endpoint is not supported by the official Coder Helm chart yet. + If `coder server --prometheus-enable` is started locally, you can preview the metrics endpoint in your browser or by using curl: http://localhost:2112/. ``` From a29dfae05b04a05afe0d43510d52915c23e6931d Mon Sep 17 00:00:00 2001 From: Geoffrey Huntley Date: Tue, 29 Nov 2022 11:34:40 +1000 Subject: [PATCH 14/19] use ```shell --- docs/admin/prometheus.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/admin/prometheus.md b/docs/admin/prometheus.md index 2526032f22b54..f771190438911 100644 --- a/docs/admin/prometheus.md +++ b/docs/admin/prometheus.md @@ -14,7 +14,7 @@ __Notice__: Prometheus endpoint is not supported by the official Coder Helm char If `coder server --prometheus-enable` is started locally, you can preview the metrics endpoint in your browser or by using curl: http://localhost:2112/. -``` +```shell $ curl http://localhost:2112/ # HELP coderd_api_active_users_duration_hour The number of users that have been active within the last hour. # TYPE coderd_api_active_users_duration_hour gauge From 15d58a30b63122ce8b6af6a93cb380260773af74 Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Tue, 29 Nov 2022 15:16:00 +0100 Subject: [PATCH 15/19] Generator --- Makefile | 9 +- docs/admin/prometheus.md | 63 +++- scripts/metricsdocgen/main.go | 160 ++++++++++ scripts/metricsdocgen/metrics | 576 ++++++++++++++++++++++++++++++++++ 4 files changed, 789 insertions(+), 19 deletions(-) create mode 100644 scripts/metricsdocgen/main.go create mode 100644 scripts/metricsdocgen/metrics diff --git a/Makefile b/Makefile index 02d71810c9165..6971b138bcda8 100644 --- a/Makefile +++ b/Makefile @@ -399,13 +399,14 @@ gen: \ coderd/database/querier.go \ provisionersdk/proto/provisioner.pb.go \ provisionerd/proto/provisionerd.pb.go \ - site/src/api/typesGenerated.ts + site/src/api/typesGenerated.ts \ + docs/admin/prometheus.md .PHONY: gen # Mark all generated files as fresh so make thinks they're up-to-date. This is # used during releases so we don't run generation scripts. gen/mark-fresh: - files="coderd/database/dump.sql coderd/database/querier.go provisionersdk/proto/provisioner.pb.go provisionerd/proto/provisionerd.pb.go site/src/api/typesGenerated.ts" + files="coderd/database/dump.sql coderd/database/querier.go provisionersdk/proto/provisioner.pb.go provisionerd/proto/provisionerd.pb.go site/src/api/typesGenerated.ts docs/admin/prometheus.md" for file in $$files; do echo "$$file" if [ ! 
-f "$$file" ]; then @@ -448,6 +449,10 @@ site/src/api/typesGenerated.ts: scripts/apitypings/main.go $(shell find codersdk cd site yarn run format:types +docs/admin/prometheus.md: scripts/metricsdocgen/main.go + go run scripts/metricsdocgen/main.go +.PHONY: docs/admin/prometheus.md # As the .md file can be edited manually and the generator works in-place, we need to use .PHONY. + update-golden-files: cli/testdata/.gen-golden .PHONY: update-golden-files diff --git a/docs/admin/prometheus.md b/docs/admin/prometheus.md index f771190438911..76f48e07c4909 100644 --- a/docs/admin/prometheus.md +++ b/docs/admin/prometheus.md @@ -22,20 +22,49 @@ coderd_api_active_users_duration_hour 0 ... ``` -## Available collectors - -### Coderd - -[Coderd](../about/architecture.md#coderd) is the service responsible for managing workspaces, provisioners, and users. Coder resources are controlled using the authorized HTTP API - Coderd API. - -The Prometheus collector tracks and exposes activity statistics for [platform users](https://github.com/coder/coder/blob/main/coderd/prometheusmetrics/prometheusmetrics.go#L15-L54) and [workspace](https://github.com/coder/coder/blob/main/coderd/prometheusmetrics/prometheusmetrics.go#L57-L108). - -It also exposes [operational metrics](https://github.com/coder/coder/blob/main/coderd/httpmw/prometheus.go#L21-L61) for HTTP requests and WebSocket connections, including a total number of calls, HTTP status, active WebSockets, request duration, etc. - -### Provisionerd - -[Provisionerd](../about/architecture.md#provisionerd) is the execution context for infrastructure providers. The runner exposes [statistics for executed jobs](https://github.com/coder/coder/blob/main/provisionerd/provisionerd.go#L133-L154) - a number of jobs currently running, and execution timings. - -### Go runtime, process stats - -[Common collectors](https://github.com/coder/coder/blob/main/cli/server.go#L555-L556) monitor the Go runtime - memory usage, garbage collection, active threads, goroutines, etc. Additionally, on Linux and on Windows, they collect CPU stats, memory, file descriptors, and process uptime. +## Available metrics + + + +| Name | Type | Description | Labels | +| - | - | - | - | +| `coderd_api_active_users_duration_hour` | gauge | The number of users that have been active within the last hour. | | +| `coderd_api_concurrent_requests` | gauge | The number of concurrent API requests | | +| `coderd_api_concurrent_websockets` | gauge | The total number of concurrent API websockets | | +| `coderd_api_request_latencies_ms` | histogram | Latency distribution of requests in milliseconds | `method` `path` | +| `coderd_api_requests_processed_total` | counter | The total number of processed API requests | `code` `method` `path` | +| `coderd_api_websocket_durations_ms` | histogram | Websocket duration distribution of requests in milliseconds | `path` | +| `coderd_api_workspace_latest_build_total` | gauge | The latest workspace builds with a status. | `status` | +| `coderd_provisionerd_job_timings_ms` | histogram | | `provisioner` `status` | +| `coderd_provisionerd_jobs_current` | gauge | | `provisioner` | +| `go_gc_duration_seconds` | summary | A summary of the pause duration of garbage collection cycles. | | +| `go_goroutines` | gauge | Number of goroutines that currently exist. | | +| `go_info` | gauge | Information about the Go environment. | `version` | +| `go_memstats_alloc_bytes` | gauge | Number of bytes allocated and still in use. 
| | +| `go_memstats_alloc_bytes_total` | counter | Total number of bytes allocated, even if freed. | | +| `go_memstats_buck_hash_sys_bytes` | gauge | Number of bytes used by the profiling bucket hash table. | | +| `go_memstats_frees_total` | counter | Total number of frees. | | +| `go_memstats_gc_sys_bytes` | gauge | Number of bytes used for garbage collection system metadata. | | +| `go_memstats_heap_alloc_bytes` | gauge | Number of heap bytes allocated and still in use. | | +| `go_memstats_heap_idle_bytes` | gauge | Number of heap bytes waiting to be used. | | +| `go_memstats_heap_inuse_bytes` | gauge | Number of heap bytes that are in use. | | +| `go_memstats_heap_objects` | gauge | Number of allocated objects. | | +| `go_memstats_heap_released_bytes` | gauge | Number of heap bytes released to OS. | | +| `go_memstats_heap_sys_bytes` | gauge | Number of heap bytes obtained from system. | | +| `go_memstats_last_gc_time_seconds` | gauge | Number of seconds since 1970 of last garbage collection. | | +| `go_memstats_lookups_total` | counter | Total number of pointer lookups. | | +| `go_memstats_mallocs_total` | counter | Total number of mallocs. | | +| `go_memstats_mcache_inuse_bytes` | gauge | Number of bytes in use by mcache structures. | | +| `go_memstats_mcache_sys_bytes` | gauge | Number of bytes used for mcache structures obtained from system. | | +| `go_memstats_mspan_inuse_bytes` | gauge | Number of bytes in use by mspan structures. | | +| `go_memstats_mspan_sys_bytes` | gauge | Number of bytes used for mspan structures obtained from system. | | +| `go_memstats_next_gc_bytes` | gauge | Number of heap bytes when next garbage collection will take place. | | +| `go_memstats_other_sys_bytes` | gauge | Number of bytes used for other system allocations. | | +| `go_memstats_stack_inuse_bytes` | gauge | Number of bytes in use by the stack allocator. | | +| `go_memstats_stack_sys_bytes` | gauge | Number of bytes obtained from system for stack allocator. | | +| `go_memstats_sys_bytes` | gauge | Number of bytes obtained from system. | | +| `go_threads` | gauge | Number of OS threads created. | | +| `promhttp_metric_handler_requests_in_flight` | gauge | Current number of scrapes being served. | | +| `promhttp_metric_handler_requests_total` | counter | Total number of scrapes by HTTP status code. 
| `code` | + + diff --git a/scripts/metricsdocgen/main.go b/scripts/metricsdocgen/main.go new file mode 100644 index 0000000000000..b9318f4afaba9 --- /dev/null +++ b/scripts/metricsdocgen/main.go @@ -0,0 +1,160 @@ +package main + +import ( + "bytes" + "errors" + "flag" + "io" + "log" + "os" + "sort" + "strings" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + "golang.org/x/xerrors" +) + +var ( + metricsFile string + prometheusDocFile string + dryRun bool + + generatorPrefix = []byte("") + generatorSuffix = []byte("") +) + +func init() { + flag.StringVar(&metricsFile, "metrics-file", "scripts/metricsdocgen/metrics", "Path to Prometheus metrics file") + flag.StringVar(&prometheusDocFile, "prometheus-doc-file", "docs/admin/prometheus.md", "Path to prometheus doc file") + flag.BoolVar(&dryRun, "dry-run", false, "Dry run") + flag.Parse() +} + +func main() { + metrics, err := readMetrics() + if err != nil { + log.Fatal("can't read metrics: ", err) + } + + doc, err := readPrometheusDoc() + if err != nil { + log.Fatal("can't read Prometheus doc: ", err) + } + + doc, err = updatePrometheusDoc(doc, metrics) + if err != nil { + log.Fatal("can't update Prometheus doc: ", err) + } + + if dryRun { + log.Println(string(doc)) + return + } + + err = writePrometheusDoc(doc) + if err != nil { + log.Fatal("can't write updated Prometheus doc: ", err) + } +} + +func readMetrics() ([]dto.MetricFamily, error) { + f, err := os.Open(metricsFile) + if err != nil { + log.Fatalf("can't open metrics file: %s", metricsFile) + } + + var metrics []dto.MetricFamily + + decoder := expfmt.NewDecoder(f, expfmt.FmtProtoText) + for { + var m dto.MetricFamily + err = decoder.Decode(&m) + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + sort.Slice(metrics, func(i, j int) bool { + return sort.StringsAreSorted([]string{*metrics[i].Name, *metrics[j].Name}) + }) + return metrics, nil +} + +func readPrometheusDoc() ([]byte, error) { + doc, err := os.ReadFile(prometheusDocFile) + if err != nil { + return nil, err + } + return doc, nil +} + +func updatePrometheusDoc(doc []byte, metricFamilies []dto.MetricFamily) ([]byte, error) { + i := bytes.Index(doc, generatorPrefix) + if i < 0 { + return nil, xerrors.New("generator prefix tag not found") + } + tableStartIndex := i + len(generatorPrefix) + 1 + + j := bytes.Index(doc[tableStartIndex:], generatorSuffix) + if j < 0 { + return nil, xerrors.New("generator suffix tag not found") + } + tableEndIndex := tableStartIndex + j + + var buffer bytes.Buffer + buffer.Write(doc[:tableStartIndex]) + buffer.WriteByte('\n') + + buffer.WriteString("| Name | Type | Description | Labels |\n") + buffer.WriteString("| - | - | - | - |\n") + for _, mf := range metricFamilies { + buffer.WriteString("| ") + buffer.Write([]byte("`" + *mf.Name + "`")) + buffer.WriteString(" | ") + buffer.Write([]byte(strings.ToLower(mf.Type.String()))) + buffer.WriteString(" | ") + if mf.Help != nil { + buffer.Write([]byte(*mf.Help)) + } + buffer.WriteString(" | ") + + labels := map[string]struct{}{} + metrics := mf.GetMetric() + for _, m := range metrics { + for _, label := range m.Label { + labels["`"+*label.Name+"`"] = struct{}{} + } + } + + if len(labels) > 0 { + buffer.WriteString(strings.Join(sortedKeys(labels), " ")) + } + + buffer.WriteString(" |\n") + } + + buffer.WriteByte('\n') + buffer.Write(doc[tableEndIndex:]) + return buffer.Bytes(), nil +} + +func writePrometheusDoc(doc []byte) error { + err := 
os.WriteFile(prometheusDocFile, doc, 0644) + if err != nil { + return err + } + return nil +} + +func sortedKeys(m map[string]struct{}) []string { + var keys []string + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} diff --git a/scripts/metricsdocgen/metrics b/scripts/metricsdocgen/metrics new file mode 100644 index 0000000000000..439533ce45820 --- /dev/null +++ b/scripts/metricsdocgen/metrics @@ -0,0 +1,576 @@ +# HELP coderd_api_active_users_duration_hour The number of users that have been active within the last hour. +# TYPE coderd_api_active_users_duration_hour gauge +coderd_api_active_users_duration_hour 0 +# HELP coderd_api_concurrent_requests The number of concurrent API requests +# TYPE coderd_api_concurrent_requests gauge +coderd_api_concurrent_requests 1 +# HELP coderd_api_concurrent_websockets The total number of concurrent API websockets +# TYPE coderd_api_concurrent_websockets gauge +coderd_api_concurrent_websockets 1 +# HELP coderd_api_request_latencies_ms Latency distribution of requests in milliseconds +# TYPE coderd_api_request_latencies_ms histogram +coderd_api_request_latencies_ms_bucket{method="GET",path="",le="1"} 10 +coderd_api_request_latencies_ms_bucket{method="GET",path="",le="5"} 13 +coderd_api_request_latencies_ms_bucket{method="GET",path="",le="10"} 14 +coderd_api_request_latencies_ms_bucket{method="GET",path="",le="25"} 15 +coderd_api_request_latencies_ms_bucket{method="GET",path="",le="50"} 15 +coderd_api_request_latencies_ms_bucket{method="GET",path="",le="100"} 15 +coderd_api_request_latencies_ms_bucket{method="GET",path="",le="500"} 15 +coderd_api_request_latencies_ms_bucket{method="GET",path="",le="1000"} 15 +coderd_api_request_latencies_ms_bucket{method="GET",path="",le="5000"} 15 +coderd_api_request_latencies_ms_bucket{method="GET",path="",le="10000"} 15 +coderd_api_request_latencies_ms_bucket{method="GET",path="",le="30000"} 16 +coderd_api_request_latencies_ms_bucket{method="GET",path="",le="+Inf"} 16 +coderd_api_request_latencies_ms_sum{method="GET",path=""} 14914.475126 +coderd_api_request_latencies_ms_count{method="GET",path=""} 16 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/applications/host/",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/applications/host/",le="5"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/applications/host/",le="10"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/applications/host/",le="25"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/applications/host/",le="50"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/applications/host/",le="100"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/applications/host/",le="500"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/applications/host/",le="1000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/applications/host/",le="5000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/applications/host/",le="10000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/applications/host/",le="30000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/applications/host/",le="+Inf"} 1 +coderd_api_request_latencies_ms_sum{method="GET",path="/api/v2/applications/host/"} 1.403417 +coderd_api_request_latencies_ms_count{method="GET",path="/api/v2/applications/host/"} 1 
+coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/buildinfo/",le="1"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/buildinfo/",le="5"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/buildinfo/",le="10"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/buildinfo/",le="25"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/buildinfo/",le="50"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/buildinfo/",le="100"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/buildinfo/",le="500"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/buildinfo/",le="1000"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/buildinfo/",le="5000"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/buildinfo/",le="10000"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/buildinfo/",le="30000"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/buildinfo/",le="+Inf"} 5 +coderd_api_request_latencies_ms_sum{method="GET",path="/api/v2/buildinfo/"} 0.579417 +coderd_api_request_latencies_ms_count{method="GET",path="/api/v2/buildinfo/"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/entitlements",le="1"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/entitlements",le="5"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/entitlements",le="10"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/entitlements",le="25"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/entitlements",le="50"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/entitlements",le="100"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/entitlements",le="500"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/entitlements",le="1000"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/entitlements",le="5000"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/entitlements",le="10000"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/entitlements",le="30000"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/entitlements",le="+Inf"} 5 +coderd_api_request_latencies_ms_sum{method="GET",path="/api/v2/entitlements"} 0.479 +coderd_api_request_latencies_ms_count{method="GET",path="/api/v2/entitlements"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/organizations/*",le="1"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/organizations/*",le="5"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/organizations/*",le="10"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/organizations/*",le="25"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/organizations/*",le="50"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/organizations/*",le="100"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/organizations/*",le="500"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/organizations/*",le="1000"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/organizations/*",le="5000"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/organizations/*",le="10000"} 2 
+coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/organizations/*",le="30000"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/organizations/*",le="+Inf"} 2 +coderd_api_request_latencies_ms_sum{method="GET",path="/api/v2/organizations/*"} 0.9122079999999999 +coderd_api_request_latencies_ms_count{method="GET",path="/api/v2/organizations/*"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/templates/{template}/",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/templates/{template}/",le="5"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/templates/{template}/",le="10"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/templates/{template}/",le="25"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/templates/{template}/",le="50"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/templates/{template}/",le="100"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/templates/{template}/",le="500"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/templates/{template}/",le="1000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/templates/{template}/",le="5000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/templates/{template}/",le="10000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/templates/{template}/",le="30000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/templates/{template}/",le="+Inf"} 1 +coderd_api_request_latencies_ms_sum{method="GET",path="/api/v2/templates/{template}/"} 5.077833 +coderd_api_request_latencies_ms_count{method="GET",path="/api/v2/templates/{template}/"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/authmethods",le="1"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/authmethods",le="5"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/authmethods",le="10"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/authmethods",le="25"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/authmethods",le="50"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/authmethods",le="100"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/authmethods",le="500"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/authmethods",le="1000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/authmethods",le="5000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/authmethods",le="10000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/authmethods",le="30000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/authmethods",le="+Inf"} 1 +coderd_api_request_latencies_ms_sum{method="GET",path="/api/v2/users/authmethods"} 0.191791 +coderd_api_request_latencies_ms_count{method="GET",path="/api/v2/users/authmethods"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/first",le="1"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/first",le="5"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/first",le="10"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/first",le="25"} 2 
+coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/first",le="50"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/first",le="100"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/first",le="500"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/first",le="1000"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/first",le="5000"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/first",le="10000"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/first",le="30000"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/first",le="+Inf"} 2 +coderd_api_request_latencies_ms_sum{method="GET",path="/api/v2/users/first"} 2.0617080000000003 +coderd_api_request_latencies_ms_count{method="GET",path="/api/v2/users/first"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}",le="5"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}",le="10"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}",le="25"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}",le="50"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}",le="100"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}",le="500"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}",le="1000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}",le="5000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}",le="10000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}",le="30000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}",le="+Inf"} 1 +coderd_api_request_latencies_ms_sum{method="GET",path="/api/v2/users/{user}"} 2.014417 +coderd_api_request_latencies_ms_count{method="GET",path="/api/v2/users/{user}"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/",le="5"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/",le="10"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/",le="25"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/",le="50"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/",le="100"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/",le="500"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/",le="1000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/",le="5000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/",le="10000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/",le="30000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/",le="+Inf"} 1 +coderd_api_request_latencies_ms_sum{method="GET",path="/api/v2/users/{user}/"} 9.146291 +coderd_api_request_latencies_ms_count{method="GET",path="/api/v2/users/{user}/"} 1 
+coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/*",le="1"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/*",le="5"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/*",le="10"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/*",le="25"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/*",le="50"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/*",le="100"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/*",le="500"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/*",le="1000"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/*",le="5000"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/*",le="10000"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/*",le="30000"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/*",le="+Inf"} 2 +coderd_api_request_latencies_ms_sum{method="GET",path="/api/v2/users/{user}/*"} 2.014167 +coderd_api_request_latencies_ms_count{method="GET",path="/api/v2/users/{user}/*"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="5"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="10"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="25"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="50"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="100"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="500"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="1000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="5000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="10000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="30000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="+Inf"} 1 +coderd_api_request_latencies_ms_sum{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/"} 22.11575 +coderd_api_request_latencies_ms_count{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="5"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="10"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="25"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="50"} 1 
+coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="100"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="500"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="1000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="5000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="10000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="30000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="+Inf"} 1 +coderd_api_request_latencies_ms_sum{method="GET",path="/api/v2/workspace-quota/{user}/"} 17.540958 +coderd_api_request_latencies_ms_count{method="GET",path="/api/v2/workspace-quota/{user}/"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="5"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="10"} 8 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="25"} 13 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="50"} 13 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="100"} 13 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="500"} 13 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="1000"} 13 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="5000"} 13 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="10000"} 13 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="30000"} 13 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="+Inf"} 13 +coderd_api_request_latencies_ms_sum{method="GET",path="/api/v2/workspaceagents/me/metadata"} 128.53212800000003 +coderd_api_request_latencies_ms_count{method="GET",path="/api/v2/workspaceagents/me/metadata"} 13 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces",le="5"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces",le="10"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces",le="25"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces",le="50"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces",le="100"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces",le="500"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces",le="1000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces",le="5000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces",le="10000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces",le="30000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces",le="+Inf"} 1 
+coderd_api_request_latencies_ms_sum{method="GET",path="/api/v2/workspaces"} 1.406459 +coderd_api_request_latencies_ms_count{method="GET",path="/api/v2/workspaces"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/",le="5"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/",le="10"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/",le="25"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/",le="50"} 3 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/",le="100"} 3 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/",le="500"} 3 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/",le="1000"} 3 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/",le="5000"} 3 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/",le="10000"} 3 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/",le="30000"} 3 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/",le="+Inf"} 3 +coderd_api_request_latencies_ms_sum{method="GET",path="/api/v2/workspaces/"} 72.264375 +coderd_api_request_latencies_ms_count{method="GET",path="/api/v2/workspaces/"} 3 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="5"} 2 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="10"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="25"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="50"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="100"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="500"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="1000"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="5000"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="10000"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="30000"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="+Inf"} 5 +coderd_api_request_latencies_ms_sum{method="GET",path="/api/v2/workspaces/{workspace}/builds/"} 32.382791999999995 +coderd_api_request_latencies_ms_count{method="GET",path="/api/v2/workspaces/{workspace}/builds/"} 5 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/watch",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/watch",le="5"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/watch",le="10"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/watch",le="25"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/watch",le="50"} 0 
+coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/watch",le="100"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/watch",le="500"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/watch",le="1000"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/watch",le="5000"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/watch",le="10000"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/watch",le="30000"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/api/v2/workspaces/{workspace}/watch",le="+Inf"} 1 +coderd_api_request_latencies_ms_sum{method="GET",path="/api/v2/workspaces/{workspace}/watch"} 4.690710009291e+06 +coderd_api_request_latencies_ms_count{method="GET",path="/api/v2/workspaces/{workspace}/watch"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/",le="5"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/",le="10"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/",le="25"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/",le="50"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/",le="100"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/",le="500"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/",le="1000"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/",le="5000"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/",le="10000"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/",le="30000"} 0 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/",le="+Inf"} 15 +coderd_api_request_latencies_ms_sum{method="GET",path="/derp/"} 3.2032344336794e+07 +coderd_api_request_latencies_ms_count{method="GET",path="/derp/"} 15 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/latency-check",le="1"} 4 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/latency-check",le="5"} 4 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/latency-check",le="10"} 4 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/latency-check",le="25"} 4 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/latency-check",le="50"} 4 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/latency-check",le="100"} 4 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/latency-check",le="500"} 4 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/latency-check",le="1000"} 4 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/latency-check",le="5000"} 4 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/latency-check",le="10000"} 4 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/latency-check",le="30000"} 4 +coderd_api_request_latencies_ms_bucket{method="GET",path="/derp/latency-check",le="+Inf"} 4 +coderd_api_request_latencies_ms_sum{method="GET",path="/derp/latency-check"} 0.905 +coderd_api_request_latencies_ms_count{method="GET",path="/derp/latency-check"} 4 +coderd_api_request_latencies_ms_bucket{method="GET",path="/healthz",le="1"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/healthz",le="5"} 1 
+coderd_api_request_latencies_ms_bucket{method="GET",path="/healthz",le="10"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/healthz",le="25"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/healthz",le="50"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/healthz",le="100"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/healthz",le="500"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/healthz",le="1000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/healthz",le="5000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/healthz",le="10000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/healthz",le="30000"} 1 +coderd_api_request_latencies_ms_bucket{method="GET",path="/healthz",le="+Inf"} 1 +coderd_api_request_latencies_ms_sum{method="GET",path="/healthz"} 0.181458 +coderd_api_request_latencies_ms_count{method="GET",path="/healthz"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/authcheck/",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/authcheck/",le="5"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/authcheck/",le="10"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/authcheck/",le="25"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/authcheck/",le="50"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/authcheck/",le="100"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/authcheck/",le="500"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/authcheck/",le="1000"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/authcheck/",le="5000"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/authcheck/",le="10000"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/authcheck/",le="30000"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/authcheck/",le="+Inf"} 2 +coderd_api_request_latencies_ms_sum{method="POST",path="/api/v2/authcheck/"} 7.37975 +coderd_api_request_latencies_ms_count{method="POST",path="/api/v2/authcheck/"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/csp/reports",le="1"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/csp/reports",le="5"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/csp/reports",le="10"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/csp/reports",le="25"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/csp/reports",le="50"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/csp/reports",le="100"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/csp/reports",le="500"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/csp/reports",le="1000"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/csp/reports",le="5000"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/csp/reports",le="10000"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/csp/reports",le="30000"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/csp/reports",le="+Inf"} 1 +coderd_api_request_latencies_ms_sum{method="POST",path="/api/v2/csp/reports"} 0.427333 +coderd_api_request_latencies_ms_count{method="POST",path="/api/v2/csp/reports"} 1 
+coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/files",le="1"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/files",le="5"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/files",le="10"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/files",le="25"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/files",le="50"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/files",le="100"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/files",le="500"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/files",le="1000"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/files",le="5000"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/files",le="10000"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/files",le="30000"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/files",le="+Inf"} 1 +coderd_api_request_latencies_ms_sum{method="POST",path="/api/v2/files"} 0.382875 +coderd_api_request_latencies_ms_count{method="POST",path="/api/v2/files"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/users/login",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/users/login",le="5"} 0 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/users/login",le="10"} 0 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/users/login",le="25"} 0 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/users/login",le="50"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/users/login",le="100"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/users/login",le="500"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/users/login",le="1000"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/users/login",le="5000"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/users/login",le="10000"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/users/login",le="30000"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/users/login",le="+Inf"} 1 +coderd_api_request_latencies_ms_sum{method="POST",path="/api/v2/users/login"} 48.730291 +coderd_api_request_latencies_ms_count{method="POST",path="/api/v2/users/login"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/app-health",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/app-health",le="5"} 0 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/app-health",le="10"} 0 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/app-health",le="25"} 0 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/app-health",le="50"} 0 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/app-health",le="100"} 0 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/app-health",le="500"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/app-health",le="1000"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/app-health",le="5000"} 1 
+coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/app-health",le="10000"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/app-health",le="30000"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/app-health",le="+Inf"} 1 +coderd_api_request_latencies_ms_sum{method="POST",path="/api/v2/workspaceagents/me/app-health"} 146.071041 +coderd_api_request_latencies_ms_count{method="POST",path="/api/v2/workspaceagents/me/app-health"} 1 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="5"} 55 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="10"} 111 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="25"} 112 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="50"} 112 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="100"} 112 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="500"} 113 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="1000"} 113 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="5000"} 113 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="10000"} 113 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="30000"} 113 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="+Inf"} 113 +coderd_api_request_latencies_ms_sum{method="POST",path="/api/v2/workspaceagents/me/report-stats"} 690.1916720000005 +coderd_api_request_latencies_ms_count{method="POST",path="/api/v2/workspaceagents/me/report-stats"} 113 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="5"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="10"} 12 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="25"} 19 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="50"} 19 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="100"} 19 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="500"} 19 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="1000"} 19 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="5000"} 19 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="10000"} 19 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="30000"} 19 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="+Inf"} 19 +coderd_api_request_latencies_ms_sum{method="POST",path="/api/v2/workspaceagents/me/version"} 
175.97979099999998 +coderd_api_request_latencies_ms_count{method="POST",path="/api/v2/workspaceagents/me/version"} 19 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaces/{workspace}/builds/",le="1"} 0 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaces/{workspace}/builds/",le="5"} 0 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaces/{workspace}/builds/",le="10"} 0 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaces/{workspace}/builds/",le="25"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaces/{workspace}/builds/",le="50"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaces/{workspace}/builds/",le="100"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaces/{workspace}/builds/",le="500"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaces/{workspace}/builds/",le="1000"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaces/{workspace}/builds/",le="5000"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaces/{workspace}/builds/",le="10000"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaces/{workspace}/builds/",le="30000"} 2 +coderd_api_request_latencies_ms_bucket{method="POST",path="/api/v2/workspaces/{workspace}/builds/",le="+Inf"} 2 +coderd_api_request_latencies_ms_sum{method="POST",path="/api/v2/workspaces/{workspace}/builds/"} 39.575375 +coderd_api_request_latencies_ms_count{method="POST",path="/api/v2/workspaces/{workspace}/builds/"} 2 +# HELP coderd_api_requests_processed_total The total number of processed API requests +# TYPE coderd_api_requests_processed_total counter +coderd_api_requests_processed_total{code="0",method="GET",path="/derp/"} 15 +coderd_api_requests_processed_total{code="101",method="GET",path="/api/v2/workspaceagents/me/coordinate"} 12 +coderd_api_requests_processed_total{code="200",method="GET",path=""} 16 +coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/applications/host/"} 1 +coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/buildinfo/"} 5 +coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/entitlements"} 5 +coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/templates/{template}/"} 1 +coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/users/authmethods"} 1 +coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/users/first"} 2 +coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/users/{user}/"} 1 +coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/"} 1 +coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/workspace-quota/{user}/"} 1 +coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/workspaceagents/me/metadata"} 13 +coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/workspaces/"} 3 +coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/workspaces/{workspace}/builds/"} 5 +coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/workspaces/{workspace}/watch"} 1 +coderd_api_requests_processed_total{code="200",method="GET",path="/derp/latency-check"} 4 +coderd_api_requests_processed_total{code="200",method="GET",path="/healthz"} 1 
+coderd_api_requests_processed_total{code="200",method="POST",path="/api/v2/authcheck/"} 2 +coderd_api_requests_processed_total{code="200",method="POST",path="/api/v2/csp/reports"} 1 +coderd_api_requests_processed_total{code="200",method="POST",path="/api/v2/workspaceagents/me/app-health"} 1 +coderd_api_requests_processed_total{code="200",method="POST",path="/api/v2/workspaceagents/me/report-stats"} 113 +coderd_api_requests_processed_total{code="200",method="POST",path="/api/v2/workspaceagents/me/version"} 13 +coderd_api_requests_processed_total{code="201",method="POST",path="/api/v2/users/login"} 1 +coderd_api_requests_processed_total{code="201",method="POST",path="/api/v2/workspaces/{workspace}/builds/"} 2 +coderd_api_requests_processed_total{code="401",method="GET",path="/api/v2/organizations/*"} 2 +coderd_api_requests_processed_total{code="401",method="GET",path="/api/v2/users/{user}"} 1 +coderd_api_requests_processed_total{code="401",method="GET",path="/api/v2/users/{user}/*"} 2 +coderd_api_requests_processed_total{code="401",method="GET",path="/api/v2/workspaces"} 1 +coderd_api_requests_processed_total{code="401",method="POST",path="/api/v2/files"} 1 +coderd_api_requests_processed_total{code="500",method="POST",path="/api/v2/workspaceagents/me/version"} 6 +# HELP coderd_api_websocket_durations_ms Websocket duration distribution of requests in milliseconds +# TYPE coderd_api_websocket_durations_ms histogram +coderd_api_websocket_durations_ms_bucket{path="/api/v2/workspaceagents/me/coordinate",le="1"} 0 +coderd_api_websocket_durations_ms_bucket{path="/api/v2/workspaceagents/me/coordinate",le="1000"} 0 +coderd_api_websocket_durations_ms_bucket{path="/api/v2/workspaceagents/me/coordinate",le="60000"} 3 +coderd_api_websocket_durations_ms_bucket{path="/api/v2/workspaceagents/me/coordinate",le="3.6e+06"} 10 +coderd_api_websocket_durations_ms_bucket{path="/api/v2/workspaceagents/me/coordinate",le="5.4e+07"} 12 +coderd_api_websocket_durations_ms_bucket{path="/api/v2/workspaceagents/me/coordinate",le="1.08e+08"} 12 +coderd_api_websocket_durations_ms_bucket{path="/api/v2/workspaceagents/me/coordinate",le="+Inf"} 12 +coderd_api_websocket_durations_ms_sum{path="/api/v2/workspaceagents/me/coordinate"} 3.1344549662249e+07 +coderd_api_websocket_durations_ms_count{path="/api/v2/workspaceagents/me/coordinate"} 12 +# HELP coderd_api_workspace_latest_build_total The latest workspace builds with a status. 
+# TYPE coderd_api_workspace_latest_build_total gauge
+coderd_api_workspace_latest_build_total{status="failed"} 1
+coderd_api_workspace_latest_build_total{status="succeeded"} 5
+# HELP coderd_provisionerd_job_timings_ms
+# TYPE coderd_provisionerd_job_timings_ms histogram
+coderd_provisionerd_job_timings_ms_bucket{provisioner="terraform",status="success",le="1000"} 0
+coderd_provisionerd_job_timings_ms_bucket{provisioner="terraform",status="success",le="10000"} 1
+coderd_provisionerd_job_timings_ms_bucket{provisioner="terraform",status="success",le="30000"} 2
+coderd_provisionerd_job_timings_ms_bucket{provisioner="terraform",status="success",le="60000"} 2
+coderd_provisionerd_job_timings_ms_bucket{provisioner="terraform",status="success",le="300000"} 2
+coderd_provisionerd_job_timings_ms_bucket{provisioner="terraform",status="success",le="600000"} 2
+coderd_provisionerd_job_timings_ms_bucket{provisioner="terraform",status="success",le="1.8e+06"} 2
+coderd_provisionerd_job_timings_ms_bucket{provisioner="terraform",status="success",le="3.6e+06"} 2
+coderd_provisionerd_job_timings_ms_bucket{provisioner="terraform",status="success",le="+Inf"} 2
+coderd_provisionerd_job_timings_ms_sum{provisioner="terraform",status="success"} 21600
+coderd_provisionerd_job_timings_ms_count{provisioner="terraform",status="success"} 2
+# HELP coderd_provisionerd_jobs_current
+# TYPE coderd_provisionerd_jobs_current gauge
+coderd_provisionerd_jobs_current{provisioner="terraform"} 0
+# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 8.0708e-05
+go_gc_duration_seconds{quantile="0.25"} 0.000212166
+go_gc_duration_seconds{quantile="0.5"} 0.00025625
+go_gc_duration_seconds{quantile="0.75"} 0.000320875
+go_gc_duration_seconds{quantile="1"} 0.008116959
+go_gc_duration_seconds_sum 3.244371009
+go_gc_duration_seconds_count 9054
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 97
+# HELP go_info Information about the Go environment.
+# TYPE go_info gauge
+go_info{version="go1.19"} 1
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 1.3981856e+07
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 6.5972069512e+10
+# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 1.650133e+06
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 4.1306426e+07
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 2.1621904e+07
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 1.3981856e+07
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 4.50527232e+08
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 2.1659648e+07
+# HELP go_memstats_heap_objects Number of allocated objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 66703
+# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes gauge
+go_memstats_heap_released_bytes 4.47094784e+08
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 4.7218688e+08
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.6697085143449228e+09
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 0
+# HELP go_memstats_mallocs_total Total number of mallocs.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 4.1373129e+07
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 9600
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 15600
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 274312
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 1.0608e+06
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 1.5806984e+07
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 1.348195e+06
+# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 1.769472e+06
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 1.769472e+06
+# HELP go_memstats_sys_bytes Number of bytes obtained from system.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 4.99652984e+08
+# HELP go_threads Number of OS threads created.
+# TYPE go_threads gauge
+go_threads 14
+# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
+# TYPE promhttp_metric_handler_requests_in_flight gauge
+promhttp_metric_handler_requests_in_flight 1
+# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
+# TYPE promhttp_metric_handler_requests_total counter
+promhttp_metric_handler_requests_total{code="200"} 79
+promhttp_metric_handler_requests_total{code="500"} 0
+promhttp_metric_handler_requests_total{code="503"} 0

From 30fd2d139f51531d5971b27b1c8bf4a19ec1f058 Mon Sep 17 00:00:00 2001
From: Marcin Tojek
Date: Tue, 29 Nov 2022 15:20:40 +0100
Subject: [PATCH 16/19] gosec

---
 scripts/metricsdocgen/main.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/scripts/metricsdocgen/main.go b/scripts/metricsdocgen/main.go
index b9318f4afaba9..0c6a29f7b2aa6 100644
--- a/scripts/metricsdocgen/main.go
+++ b/scripts/metricsdocgen/main.go
@@ -143,6 +143,8 @@ func updatePrometheusDoc(doc []byte, metricFamilies []dto.MetricFamily) ([]byte,
 }
 
 func writePrometheusDoc(doc []byte) error {
+	// G306: Expect WriteFile permissions to be 0600 or less
+	/* #nosec G306 */
 	err := os.WriteFile(prometheusDocFile, doc, 0644)
 	if err != nil {
 		return err

From 7ce111c0e2771ae05afd9415f6e087387a97dba6 Mon Sep 17 00:00:00 2001
From: Marcin Tojek
Date: Tue, 29 Nov 2022 15:25:56 +0100
Subject: [PATCH 17/19] fix: lint

---
 scripts/metricsdocgen/main.go | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/scripts/metricsdocgen/main.go b/scripts/metricsdocgen/main.go
index 0c6a29f7b2aa6..3b36f2677e4f0 100644
--- a/scripts/metricsdocgen/main.go
+++ b/scripts/metricsdocgen/main.go
@@ -24,14 +24,12 @@ var (
 	generatorSuffix = []byte("")
 )
 
-func init() {
+func main() {
 	flag.StringVar(&metricsFile, "metrics-file", "scripts/metricsdocgen/metrics", "Path to Prometheus metrics file")
 	flag.StringVar(&prometheusDocFile, "prometheus-doc-file", "docs/admin/prometheus.md", "Path to prometheus doc file")
 	flag.BoolVar(&dryRun, "dry-run", false, "Dry run")
 	flag.Parse()
-}
 
-func main() {
 	metrics, err := readMetrics()
 	if err != nil {
 		log.Fatal("can't read metrics: ", err)
@@ -61,7 +59,7 @@ func main() {
 func readMetrics() ([]dto.MetricFamily, error) {
 	f, err := os.Open(metricsFile)
 	if err != nil {
-		log.Fatalf("can't open metrics file: %s", metricsFile)
+		return nil, xerrors.New("can't open metrics file")
 	}
 
 	var metrics []dto.MetricFamily

From 207711d16e5a199d84fb86d9d413facef242acff Mon Sep 17 00:00:00 2001
From: Marcin Tojek
Date: Tue, 29 Nov 2022 17:30:38 +0100
Subject: [PATCH 18/19] PR comments

---
 Makefile | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 6971b138bcda8..5548bc2a59338 100644
--- a/Makefile
+++ b/Makefile
@@ -449,8 +449,14 @@ site/src/api/typesGenerated.ts: scripts/apitypings/main.go $(shell find codersdk
 	cd site
 	yarn run format:types
 
-docs/admin/prometheus.md: scripts/metricsdocgen/main.go
+docs/admin/prometheus.md: scripts/metricsdocgen/main.go scripts/metricsdocgen/metrics
 	go run scripts/metricsdocgen/main.go
+	cd site
+ifdef CI
+	yarn run format:check
+else
+	yarn run format:write
+endif
 .PHONY: docs/admin/prometheus.md # As the .md file can be edited manually and the generator works in-place, we need to use .PHONY.
 
 update-golden-files: cli/testdata/.gen-golden

From e112fc485ac7be957fbc28310eb98f859c1e8e97 Mon Sep 17 00:00:00 2001
From: Marcin Tojek
Date: Tue, 29 Nov 2022 18:37:01 +0100
Subject: [PATCH 19/19] not needed anymore

---
 Makefile | 1 -
 1 file changed, 1 deletion(-)

diff --git a/Makefile b/Makefile
index 5548bc2a59338..74ef2743bb001 100644
--- a/Makefile
+++ b/Makefile
@@ -457,7 +457,6 @@ ifdef CI
 else
 	yarn run format:write
 endif
-.PHONY: docs/admin/prometheus.md # As the .md file can be edited manually and the generator works in-place, we need to use .PHONY.
 
 update-golden-files: cli/testdata/.gen-golden
 .PHONY: update-golden-files