@@ -30,6 +30,7 @@ import (
 	"github.com/coder/coder/cryptorand"
 	"github.com/coder/coder/scaletest/agentconn"
 	"github.com/coder/coder/scaletest/createworkspaces"
+	"github.com/coder/coder/scaletest/dashboard"
 	"github.com/coder/coder/scaletest/harness"
 	"github.com/coder/coder/scaletest/reconnectingpty"
 	"github.com/coder/coder/scaletest/workspacebuild"
@@ -47,6 +48,7 @@ func (r *RootCmd) scaletestCmd() *clibase.Cmd {
 		},
 		Children: []*clibase.Cmd{
 			r.scaletestCleanup(),
+			r.scaletestDashboard(),
 			r.scaletestCreateWorkspaces(),
 			r.scaletestWorkspaceTraffic(),
 		},
@@ -317,6 +319,30 @@ func (s *scaletestOutputFlags) parse() ([]scaleTestOutput, error) {
 	return out, nil
 }
 
+type scaletestPrometheusFlags struct {
+	Address string
+	Wait    time.Duration
+}
+
+func (s *scaletestPrometheusFlags) attach(opts *clibase.OptionSet) {
+	*opts = append(*opts,
+		clibase.Option{
+			Flag:        "scaletest-prometheus-address",
+			Env:         "CODER_SCALETEST_PROMETHEUS_ADDRESS",
+			Default:     "0.0.0.0:21112",
+			Description: "Address on which to expose scaletest Prometheus metrics.",
+			Value:       clibase.StringOf(&s.Address),
+		},
+		clibase.Option{
+			Flag:        "scaletest-prometheus-wait",
+			Env:         "CODER_SCALETEST_PROMETHEUS_WAIT",
+			Default:     "15s",
+			Description: "How long to wait before exiting in order to allow Prometheus metrics to be scraped.",
+			Value:       clibase.DurationOf(&s.Wait),
+		},
+	)
+}
+
 func requireAdmin(ctx context.Context, client *codersdk.Client) (codersdk.User, error) {
 	me, err := client.User(ctx, codersdk.Me)
 	if err != nil {
@@ -846,17 +872,16 @@ func (r *RootCmd) scaletestCreateWorkspaces() *clibase.Cmd {
 
 func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
 	var (
-		tickInterval               time.Duration
-		bytesPerTick               int64
-		ssh                        bool
-		scaletestPrometheusAddress string
-		scaletestPrometheusWait    time.Duration
+		tickInterval time.Duration
+		bytesPerTick int64
+		ssh          bool
 
 		client          = &codersdk.Client{}
 		tracingFlags    = &scaletestTracingFlags{}
 		strategy        = &scaletestStrategyFlags{}
 		cleanupStrategy = &scaletestStrategyFlags{cleanup: true}
 		output          = &scaletestOutputFlags{}
+		prometheusFlags = &scaletestPrometheusFlags{}
 	)
 
 	cmd := &clibase.Cmd{
@@ -871,7 +896,7 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
 			metrics := workspacetraffic.NewMetrics(reg, "username", "workspace_name", "agent_name")
 
 			logger := slog.Make(sloghuman.Sink(io.Discard))
-			prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), scaletestPrometheusAddress, "prometheus")
+			prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus")
 			defer prometheusSrvClose()
 
 			// Bypass rate limiting
@@ -905,8 +930,8 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
 					_, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err)
 				}
 				// Wait for prometheus metrics to be scraped
-				_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", scaletestPrometheusWait)
-				<-time.After(scaletestPrometheusWait)
+				_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait)
+				<-time.After(prometheusFlags.Wait)
 			}()
 			tracer := tracerProvider.Tracer(scaletestTracerName)
 
@@ -1009,26 +1034,143 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
 			Description: "Send traffic over SSH.",
 			Value:       clibase.BoolOf(&ssh),
 		},
+	}
+
+	tracingFlags.attach(&cmd.Options)
+	strategy.attach(&cmd.Options)
+	cleanupStrategy.attach(&cmd.Options)
+	output.attach(&cmd.Options)
+	prometheusFlags.attach(&cmd.Options)
+
+	return cmd
+}
+
+func (r *RootCmd) scaletestDashboard() *clibase.Cmd {
+	var (
+		count   int64
+		minWait time.Duration
+		maxWait time.Duration
+
+		client          = &codersdk.Client{}
+		tracingFlags    = &scaletestTracingFlags{}
+		strategy        = &scaletestStrategyFlags{}
+		cleanupStrategy = &scaletestStrategyFlags{cleanup: true}
+		output          = &scaletestOutputFlags{}
+		prometheusFlags = &scaletestPrometheusFlags{}
+	)
+
+	cmd := &clibase.Cmd{
+		Use:   "dashboard",
+		Short: "Generate traffic to the HTTP API to simulate use of the dashboard.",
+		Middleware: clibase.Chain(
+			r.InitClient(client),
+		),
+		Handler: func(inv *clibase.Invocation) error {
+			ctx := inv.Context()
+			logger := slog.Make(sloghuman.Sink(inv.Stdout)).Leveled(slog.LevelInfo)
+			tracerProvider, closeTracing, tracingEnabled, err := tracingFlags.provider(ctx)
+			if err != nil {
+				return xerrors.Errorf("create tracer provider: %w", err)
+			}
+			defer func() {
+				// Allow time for traces to flush even if command context is
+				// canceled. This is a no-op if tracing is not enabled.
+				_, _ = fmt.Fprintln(inv.Stderr, "\nUploading traces...")
+				if err := closeTracing(ctx); err != nil {
+					_, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err)
+				}
+				// Wait for prometheus metrics to be scraped
+				_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait)
+				<-time.After(prometheusFlags.Wait)
+			}()
+			tracer := tracerProvider.Tracer(scaletestTracerName)
+			outputs, err := output.parse()
+			if err != nil {
+				return xerrors.Errorf("could not parse --output flags")
+			}
+			reg := prometheus.NewRegistry()
+			prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus")
+			defer prometheusSrvClose()
+			metrics := dashboard.NewMetrics(reg)
+
+			th := harness.NewTestHarness(strategy.toStrategy(), cleanupStrategy.toStrategy())
+
+			for i := int64(0); i < count; i++ {
+				name := fmt.Sprintf("dashboard-%d", i)
+				config := dashboard.Config{
+					MinWait:   minWait,
+					MaxWait:   maxWait,
+					Trace:     tracingEnabled,
+					Logger:    logger.Named(name),
+					RollTable: dashboard.DefaultActions,
+				}
+				if err := config.Validate(); err != nil {
+					return err
+				}
+				var runner harness.Runnable = dashboard.NewRunner(client, metrics, config)
+				if tracingEnabled {
+					runner = &runnableTraceWrapper{
+						tracer:   tracer,
+						spanName: name,
+						runner:   runner,
+					}
+				}
+				th.AddRun("dashboard", name, runner)
+			}
+
+			_, _ = fmt.Fprintln(inv.Stderr, "Running load test...")
+			testCtx, testCancel := strategy.toContext(ctx)
+			defer testCancel()
+			err = th.Run(testCtx)
+			if err != nil {
+				return xerrors.Errorf("run test harness (harness failure, not a test failure): %w", err)
+			}
+
+			res := th.Results()
+			for _, o := range outputs {
+				err = o.write(res, inv.Stdout)
+				if err != nil {
+					return xerrors.Errorf("write output %q to %q: %w", o.format, o.path, err)
+				}
+			}
+
+			if res.TotalFail > 0 {
+				return xerrors.New("load test failed, see above for more details")
+			}
+
+			return nil
+		},
+	}
+
+	cmd.Options = []clibase.Option{
 		{
-			Flag:        "scaletest-prometheus-address",
-			Env:         "CODER_SCALETEST_PROMETHEUS_ADDRESS",
-			Default:     "0.0.0.0:21112",
-			Description: "Address on which to expose scaletest Prometheus metrics.",
-			Value:       clibase.StringOf(&scaletestPrometheusAddress),
+			Flag:        "count",
+			Env:         "CODER_SCALETEST_DASHBOARD_COUNT",
+			Default:     "1",
+			Description: "Number of concurrent workers.",
+			Value:       clibase.Int64Of(&count),
 		},
 		{
-			Flag:        "scaletest-prometheus-wait",
-			Env:         "CODER_SCALETEST_PROMETHEUS_WAIT",
-			Default:     "5s",
-			Description: "How long to wait before exiting in order to allow Prometheus metrics to be scraped.",
-			Value:       clibase.DurationOf(&scaletestPrometheusWait),
+			Flag:        "min-wait",
+			Env:         "CODER_SCALETEST_DASHBOARD_MIN_WAIT",
+			Default:     "100ms",
+			Description: "Minimum wait between fetches.",
+			Value:       clibase.DurationOf(&minWait),
+		},
+		{
+			Flag:        "max-wait",
+			Env:         "CODER_SCALETEST_DASHBOARD_MAX_WAIT",
+			Default:     "1s",
+			Description: "Maximum wait between fetches.",
+			Value:       clibase.DurationOf(&maxWait),
 		},
 	}
 
 	tracingFlags.attach(&cmd.Options)
 	strategy.attach(&cmd.Options)
 	cleanupStrategy.attach(&cmd.Options)
 	output.attach(&cmd.Options)
+	prometheusFlags.attach(&cmd.Options)
 
 	return cmd
 }
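
Example invocation of the new subcommand, for illustration only: the flag names and defaults are taken from the diff above, while the worker count and wait values shown here are arbitrary placeholders.

    coder scaletest dashboard --count 10 --min-wait 100ms --max-wait 1s \
        --scaletest-prometheus-address 0.0.0.0:21112 --scaletest-prometheus-wait 15s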