chore: add prometheus monitoring of workspace traffic generation #7583
Changes from 1 commit
```diff
@@ -15,6 +15,7 @@ import (
 
 	"github.com/google/uuid"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"go.opentelemetry.io/otel/trace"
 	"golang.org/x/xerrors"
 
```
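For context on the new `promhttp` import: the PR serves a dedicated `prometheus.Registry` through `promhttp.HandlerFor` (see the hunk at line 923 below), which keeps the traffic generator's metrics separate from anything registered on the default registry. A minimal standalone sketch of that pattern; the counter here is illustrative only, not a metric from this PR:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A dedicated registry keeps the traffic-generation metrics separate
	// from anything registered on prometheus.DefaultRegisterer.
	reg := prometheus.NewRegistry()

	// Illustrative metric only; the PR's real metrics live in the
	// workspacetraffic package and carry the username, workspace_name,
	// and agent_name labels, as the diff below shows.
	bytesWritten := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "scaletest_bytes_written_total",
		Help: "Bytes written by the traffic generator.",
	}, []string{"username", "workspace_name", "agent_name"})
	reg.MustRegister(bytesWritten)

	// HandlerFor exposes exactly the collectors registered on reg.
	handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{})
	_ = http.ListenAndServe("0.0.0.0:21112", handler)
}
```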
```diff
@@ -900,14 +901,14 @@ func (r *RootCmd) scaletestCreateWorkspaces() *clibase.Cmd {
 
 func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
 	var (
-		tickInterval      time.Duration
-		bytesPerTick      int64
-		prometheusAddress string
-		client            = &codersdk.Client{}
-		tracingFlags      = &scaletestTracingFlags{}
-		strategy          = &scaletestStrategyFlags{}
-		cleanupStrategy   = &scaletestStrategyFlags{cleanup: true}
-		output            = &scaletestOutputFlags{}
+		tickInterval               time.Duration
+		bytesPerTick               int64
+		scaletestPrometheusAddress string
+		client                     = &codersdk.Client{}
+		tracingFlags               = &scaletestTracingFlags{}
+		strategy                   = &scaletestStrategyFlags{}
+		cleanupStrategy            = &scaletestStrategyFlags{cleanup: true}
+		output                     = &scaletestOutputFlags{}
 	)
 
 	cmd := &clibase.Cmd{
```
```diff
@@ -922,7 +923,7 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
 			metrics := workspacetraffic.NewMetrics(reg, "username", "workspace_name", "agent_name")
 
 			logger := slog.Make(sloghuman.Sink(io.Discard))
-			prometheusSrvClose := ServeHandler(ctx, logger, prometheusMetricsHandler(), prometheusAddress, "prometheus")
+			prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), scaletestPrometheusAddress, "prometheus")
 			defer prometheusSrvClose()
 
 			// Bypass rate limiting
```
**Review comment** (on `defer prometheusSrvClose()`): I'm wondering if we need to add some grace period before closing, to make sure that all relevant metrics are scraped before the tool goes down.

**Reply:** Yeah, this would be heavily dependent on the Prometheus scrape interval. Simplest is probably to expose it as a parameter to be set by the test operator.
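A minimal sketch of the grace period discussed above, assuming it is simply a duration the operator can set (a `--scaletest-prometheus-wait`-style flag is hypothetical and not part of this PR):

```go
package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	srv := &http.Server{
		Addr:    "0.0.0.0:21112", // the PR's new default address
		Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}),
	}
	go func() { _ = srv.ListenAndServe() }()

	// ... run traffic generation, updating collectors registered on reg ...

	// Grace period before shutdown so Prometheus can take one final
	// scrape. The duration is hypothetical and, per the review thread,
	// would need to be at least the operator's scrape interval.
	const scrapeWait = 15 * time.Second
	time.Sleep(scrapeWait)
	_ = srv.Close()
}
```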
```diff
@@ -1052,11 +1053,11 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
 			Value: clibase.DurationOf(&tickInterval),
 		},
 		{
-			Flag:        "prometheus-address",
+			Flag:        "scaletest-prometheus-address",
 			Env:         "CODER_SCALETEST_PROMETHEUS_ADDRESS",
-			Default:     "0.0.0.0:2112",
-			Description: "Address on which to expose prometheus metrics.",
-			Value:       clibase.StringOf(&prometheusAddress),
+			Default:     "0.0.0.0:21112",
+			Description: "Address on which to expose scaletest prometheus metrics.",
+			Value:       clibase.StringOf(&scaletestPrometheusAddress),
 		},
 	}
 
```
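The default port also moves from 2112 to 21112, presumably so the scaletest endpoint does not collide with a coderd instance exposing its own metrics on 2112 on the same host. A quick one-off check that the endpoint is serving while the generator runs; the `/metrics` path is an assumption, since the diff does not show where `ServeHandler` mounts the handler:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Scrape the scaletest endpoint once, at the PR's new default
	// address. The /metrics path is an assumption; the handler may be
	// mounted at the server root instead.
	resp, err := http.Get("http://127.0.0.1:21112/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", body)
}
```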