From 02ec880bb6fb10663d19052e38f9dab757fb428b Mon Sep 17 00:00:00 2001 From: Dean Sheather Date: Tue, 1 Nov 2022 13:22:05 +0000 Subject: [PATCH 1/7] feat: add load testing harness --- loadtest/harness/harness.go | 101 +++++++++++ loadtest/harness/harness_test.go | 262 ++++++++++++++++++++++++++++ loadtest/harness/results.go | 72 ++++++++ loadtest/harness/run.go | 125 +++++++++++++ loadtest/harness/run_test.go | 134 ++++++++++++++ loadtest/harness/strategies.go | 156 +++++++++++++++++ loadtest/harness/strategies_test.go | 186 ++++++++++++++++++++ 7 files changed, 1036 insertions(+) create mode 100644 loadtest/harness/harness.go create mode 100644 loadtest/harness/harness_test.go create mode 100644 loadtest/harness/results.go create mode 100644 loadtest/harness/run.go create mode 100644 loadtest/harness/run_test.go create mode 100644 loadtest/harness/strategies.go create mode 100644 loadtest/harness/strategies_test.go diff --git a/loadtest/harness/harness.go b/loadtest/harness/harness.go new file mode 100644 index 0000000000000..8c9fc62936659 --- /dev/null +++ b/loadtest/harness/harness.go @@ -0,0 +1,101 @@ +package harness + +import ( + "context" + "sync" + + "github.com/hashicorp/go-multierror" + "golang.org/x/xerrors" +) + +// ExecutionStrategy defines how a TestHarness should execute a set of runs. It +// essentially defines the concurrency model for a given testing session. +type ExecutionStrategy interface { + // Execute runs the given runs in whatever way the strategy wants. An error + // may only be returned if the strategy has a failure itself, not if any of + // the runs fail. + Execute(ctx context.Context, runs []*TestRun) error +} + +// TestHarness runs a bunch of registered test runs using the given +// ExecutionStrategy. +type TestHarness struct { + execStrat ExecutionStrategy + + mut *sync.Mutex + runIDs map[string]struct{} + runs []*TestRun + started bool + done chan struct{} +} + +// NewTestHarness creates a new TestHarness with the given ExecutionStrategy. +func NewTestHarness(execStrat ExecutionStrategy) *TestHarness { + return &TestHarness{ + execStrat: execStrat, + mut: new(sync.Mutex), + runIDs: map[string]struct{}{}, + runs: []*TestRun{}, + done: make(chan struct{}), + } +} + +// Run runs the registered tests using the given ExecutionStrategy. The provided +// context can be used to cancel or set a deadline for the test run. Blocks +// until the tests have finished and returns the test execution error (not +// individual run errors). +// +// Panics if called more than once. +func (h *TestHarness) Run(ctx context.Context) (err error) { + h.mut.Lock() + if h.started { + h.mut.Unlock() + panic("harness is already started") + } + h.started = true + h.mut.Unlock() + + defer close(h.done) + defer func() { + e := recover() + if e != nil { + err = xerrors.Errorf("execution strategy panicked: %w", e) + } + }() + + err = h.execStrat.Execute(ctx, h.runs) + //nolint:revive // we use named returns because we mutate it in a defer + return +} + +// Cleanup should be called after the test run has finished and results have +// been collected. 
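+// Cleanup calls the Cleanup method of every registered run and aggregates
+// any errors (and recovered panics) into a single error. It panics if the
+// harness has not started or has not finished.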
+func (h *TestHarness) Cleanup(ctx context.Context) (err error) { + h.mut.Lock() + defer h.mut.Unlock() + if !h.started { + panic("harness has not started") + } + select { + case <-h.done: + default: + panic("harness has not finished") + } + + defer func() { + e := recover() + if e != nil { + err = multierror.Append(err, xerrors.Errorf("panic in cleanup: %w", e)) + } + }() + + for _, run := range h.runs { + e := run.Cleanup(ctx) + if e != nil { + err = multierror.Append(err, xerrors.Errorf("cleanup for %s failed: %w", run.FullID(), e)) + } + } + + //nolint:revive // we use named returns because we mutate it in a defer + return +} diff --git a/loadtest/harness/harness_test.go b/loadtest/harness/harness_test.go new file mode 100644 index 0000000000000..8be66f7bf2d87 --- /dev/null +++ b/loadtest/harness/harness_test.go @@ -0,0 +1,262 @@ +package harness_test + +import ( + "context" + "io" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" + + "github.com/coder/coder/loadtest/harness" +) + +const testPanicMessage = "expected test panic" + +type panickingExecutionStrategy struct{} + +var _ harness.ExecutionStrategy = panickingExecutionStrategy{} + +func (panickingExecutionStrategy) Execute(_ context.Context, _ []*harness.TestRun) error { + panic(testPanicMessage) +} + +type erroringExecutionStrategy struct { + err error +} + +var _ harness.ExecutionStrategy = erroringExecutionStrategy{} + +func (e erroringExecutionStrategy) Execute(_ context.Context, _ []*harness.TestRun) error { + return e.err +} + +func Test_TestHarness(t *testing.T) { + t.Parallel() + + t.Run("OK", func(t *testing.T) { + t.Parallel() + + expectedErr := xerrors.New("expected error") + + h := harness.NewTestHarness(harness.LinearExecutionStrategy{}) + r1 := h.AddRun("test", "1", fakeTestFns(nil, nil)) + r2 := h.AddRun("test", "2", fakeTestFns(expectedErr, nil)) + + err := h.Run(context.Background()) + require.NoError(t, err) + + res := h.Results() + require.Equal(t, 2, res.TotalRuns) + require.Equal(t, 1, res.TotalPass) + require.Equal(t, 1, res.TotalFail) + require.Equal(t, map[string]harness.RunResult{ + r1.FullID(): r1.Result(), + r2.FullID(): r2.Result(), + }, res.Runs) + + err = h.Cleanup(context.Background()) + require.NoError(t, err) + }) + + t.Run("CatchesExecutionError", func(t *testing.T) { + t.Parallel() + + expectedErr := xerrors.New("expected error") + + h := harness.NewTestHarness(erroringExecutionStrategy{err: expectedErr}) + _ = h.AddRun("test", "1", fakeTestFns(nil, nil)) + + err := h.Run(context.Background()) + require.Error(t, err) + require.ErrorIs(t, err, expectedErr) + }) + + t.Run("CatchesExecutionPanic", func(t *testing.T) { + t.Parallel() + + h := harness.NewTestHarness(panickingExecutionStrategy{}) + _ = h.AddRun("test", "1", fakeTestFns(nil, nil)) + + err := h.Run(context.Background()) + require.Error(t, err) + require.ErrorContains(t, err, "panic") + require.ErrorContains(t, err, testPanicMessage) + }) + + t.Run("Cleanup", func(t *testing.T) { + t.Parallel() + + t.Run("Error", func(t *testing.T) { + t.Parallel() + + expectedErr := xerrors.New("expected error") + + h := harness.NewTestHarness(harness.LinearExecutionStrategy{}) + _ = h.AddRun("test", "1", fakeTestFns(nil, expectedErr)) + + err := h.Run(context.Background()) + require.NoError(t, err) + + err = h.Cleanup(context.Background()) + require.Error(t, err) + require.ErrorContains(t, err, expectedErr.Error()) + }) + + t.Run("Panic", func(t *testing.T) { + t.Parallel() + 
+			h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
+			_ = h.AddRun("test", "1", testFns{
+				RunFn: func(_ context.Context, _ string, _ io.Writer) error {
+					return nil
+				},
+				CleanupFn: func(_ context.Context, _ string) error {
+					panic(testPanicMessage)
+				},
+			})
+
+			err := h.Run(context.Background())
+			require.NoError(t, err)
+
+			err = h.Cleanup(context.Background())
+			require.Error(t, err)
+			require.ErrorContains(t, err, "panic")
+			require.ErrorContains(t, err, testPanicMessage)
+		})
+	})
+
+	t.Run("Panics", func(t *testing.T) {
+		t.Parallel()
+
+		t.Run("RegisterAfterStart", func(t *testing.T) {
+			t.Parallel()
+
+			h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
+			_ = h.Run(context.Background())
+
+			require.Panics(t, func() {
+				_ = h.AddRun("test", "1", fakeTestFns(nil, nil))
+			})
+		})
+
+		t.Run("DuplicateTestID", func(t *testing.T) {
+			t.Parallel()
+
+			h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
+
+			name, id := "test", "1"
+			_ = h.AddRun(name, id, fakeTestFns(nil, nil))
+
+			require.Panics(t, func() {
+				_ = h.AddRun(name, id, fakeTestFns(nil, nil))
+			})
+		})
+
+		t.Run("StartedTwice", func(t *testing.T) {
+			t.Parallel()
+
+			h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
+			h.Run(context.Background())
+
+			require.Panics(t, func() {
+				h.Run(context.Background())
+			})
+		})
+
+		t.Run("ResultsBeforeStart", func(t *testing.T) {
+			t.Parallel()
+
+			h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
+
+			require.Panics(t, func() {
+				h.Results()
+			})
+		})
+
+		t.Run("ResultsBeforeFinish", func(t *testing.T) {
+			t.Parallel()
+
+			var (
+				endRun     = make(chan struct{})
+				testsEnded = make(chan struct{})
+			)
+			h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
+			_ = h.AddRun("test", "1", testFns{
+				RunFn: func(_ context.Context, _ string, _ io.Writer) error {
+					<-endRun
+					return nil
+				},
+			})
+			go func() {
+				defer close(testsEnded)
+				err := h.Run(context.Background())
+				assert.NoError(t, err)
+			}()
+
+			time.Sleep(100 * time.Millisecond)
+			require.Panics(t, func() {
+				h.Results()
+			})
+
+			close(endRun)
+			<-testsEnded
+			_ = h.Results()
+		})
+
+		t.Run("CleanupBeforeStart", func(t *testing.T) {
+			t.Parallel()
+
+			h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
+
+			require.Panics(t, func() {
+				h.Cleanup(context.Background())
+			})
+		})
+
+		t.Run("CleanupBeforeFinish", func(t *testing.T) {
+			t.Parallel()
+
+			var (
+				endRun     = make(chan struct{})
+				testsEnded = make(chan struct{})
+			)
+			h := harness.NewTestHarness(harness.LinearExecutionStrategy{})
+			_ = h.AddRun("test", "1", testFns{
+				RunFn: func(_ context.Context, _ string, _ io.Writer) error {
+					<-endRun
+					return nil
+				},
+			})
+			go func() {
+				defer close(testsEnded)
+				err := h.Run(context.Background())
+				assert.NoError(t, err)
+			}()
+
+			time.Sleep(100 * time.Millisecond)
+			require.Panics(t, func() {
+				h.Cleanup(context.Background())
+			})
+
+			close(endRun)
+			<-testsEnded
+
+			err := h.Cleanup(context.Background())
+			require.NoError(t, err)
+		})
+	})
+}
+
+func fakeTestFns(err, cleanupErr error) testFns {
+	return testFns{
+		RunFn: func(_ context.Context, _ string, _ io.Writer) error {
+			return err
+		},
+		CleanupFn: func(_ context.Context, _ string) error {
+			return cleanupErr
+		},
+	}
+}
diff --git a/loadtest/harness/results.go b/loadtest/harness/results.go
new file mode 100644
index 0000000000000..615b6861634a6
--- /dev/null
+++ b/loadtest/harness/results.go
@@ -0,0 +1,72 @@
+package harness
+
+import "time"
+
+// Results is the full compiled results for a set of test runs.
+type Results struct {
+	TotalRuns int
+	TotalPass int
+	TotalFail int
+
+	Runs map[string]RunResult
+}
+
+// RunResult is the result of a single test run.
+type RunResult struct {
+	FullID    string
+	TestName  string
+	ID        string
+	Logs      []byte
+	Error     error
+	StartedAt time.Time
+	Duration  time.Duration
+}
+
+// Result returns the result of the test run. Panics if the test run is not
+// done yet.
+func (r *TestRun) Result() RunResult {
+	select {
+	case <-r.done:
+	default:
+		panic("cannot get results of a test run that is not done yet")
+	}
+
+	return RunResult{
+		FullID:    r.FullID(),
+		TestName:  r.testName,
+		ID:        r.id,
+		Logs:      r.logs.Bytes(),
+		Error:     r.err,
+		StartedAt: r.started,
+		Duration:  r.duration,
+	}
+}
+
+// Results collates the results of all the test runs and returns them.
+func (h *TestHarness) Results() Results {
+	if !h.started {
+		panic("harness has not started")
+	}
+	select {
+	case <-h.done:
+	default:
+		panic("harness has not finished")
+	}
+
+	results := Results{
+		TotalRuns: len(h.runs),
+		Runs:      make(map[string]RunResult, len(h.runs)),
+	}
+	for _, run := range h.runs {
+		runRes := run.Result()
+		results.Runs[runRes.FullID] = runRes
+
+		if runRes.Error == nil {
+			results.TotalPass++
+		} else {
+			results.TotalFail++
+		}
+	}
+
+	return results
+}
diff --git a/loadtest/harness/run.go b/loadtest/harness/run.go
new file mode 100644
index 0000000000000..7c523fd62d092
--- /dev/null
+++ b/loadtest/harness/run.go
@@ -0,0 +1,125 @@
+package harness
+
+import (
+	"bytes"
+	"context"
+	"io"
+	"time"
+
+	"golang.org/x/xerrors"
+)
+
+// Runnable is a test interface that can be executed by a TestHarness.
+type Runnable interface {
+	// Run should use the passed context to handle cancellation and deadlines
+	// properly, and should only return once the test has been fully completed
+	// (no lingering goroutines, unless they are cleaned up by the accompanying
+	// cleanup function).
+	//
+	// The test ID (part after the slash) is passed for identification if
+	// necessary, and the provided logs writer should be used for writing
+	// whatever may be necessary for debugging the test.
+	Run(ctx context.Context, id string, logs io.Writer) error
+}
+
+// Cleanable is an optional extension to Runnable that allows for post-test
+// cleanup.
+type Cleanable interface {
+	Runnable
+	// Cleanup should clean up any lingering resources from the test.
+	Cleanup(ctx context.Context, id string) error
+}
+
+// AddRun creates a new *TestRun with the given name, ID and Runnable, adds it
+// to the harness and returns it. Panics if the harness has been started, or a
+// test with the given run.FullID() is already registered.
+//
+// This is a convenience method that calls NewTestRun() and h.RegisterRun().
+func (h *TestHarness) AddRun(testName string, id string, runner Runnable) *TestRun {
+	run := NewTestRun(testName, id, runner)
+	h.RegisterRun(run)
+
+	return run
+}
+
+// RegisterRun registers the given *TestRun with the harness. Panics if the
+// harness has been started, or a test with the given run.FullID() is already
+// registered.
+func (h *TestHarness) RegisterRun(run *TestRun) {
+	h.mut.Lock()
+	defer h.mut.Unlock()
+	if h.started {
+		panic("cannot add a run after the harness has started")
+	}
+
+	if _, ok := h.runIDs[run.FullID()]; ok {
+		panic("cannot add test with duplicate full ID: " + run.FullID())
+	}
+	h.runIDs[run.FullID()] = struct{}{}
+	h.runs = append(h.runs, run)
+}
+
+// TestRun is a single test run and its accompanying state.
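+//
+// A rough lifecycle sketch (assuming a Runnable value named runner):
+//
+//	run := NewTestRun("example", "1", runner)
+//	err := run.Run(ctx)  // records logs, duration and any error or panic
+//	res := run.Result()  // only valid once Run has returned
+//
+// Runs are usually created through TestHarness.AddRun rather than directly.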
+type TestRun struct { + testName string + id string + runner Runnable + + logs *bytes.Buffer + done chan struct{} + started time.Time + duration time.Duration + err error +} + +func NewTestRun(testName string, id string, runner Runnable) *TestRun { + return &TestRun{ + testName: testName, + id: id, + runner: runner, + } +} + +func (r *TestRun) FullID() string { + return r.testName + "/" + r.id +} + +// Run executes the Run function with a self-managed log writer, panic handler, +// error recording and duration recording. The test error is returned. +func (r *TestRun) Run(ctx context.Context) (err error) { + r.logs = new(bytes.Buffer) + r.done = make(chan struct{}) + defer close(r.done) + + r.started = time.Now() + defer func() { + r.duration = time.Since(r.started) + r.err = err + }() + defer func() { + e := recover() + if e != nil { + err = xerrors.Errorf("panic: %v", e) + } + }() + + err = r.runner.Run(ctx, r.id, r.logs) + //nolint:revive // we use named returns because we mutate it in a defer + return +} + +func (r *TestRun) Cleanup(ctx context.Context) error { + c, ok := r.runner.(Cleanable) + if !ok { + return nil + } + + select { + case <-r.done: + default: + // Test wasn't executed, so we don't need to clean up. + return nil + } + + return c.Cleanup(ctx, r.id) +} diff --git a/loadtest/harness/run_test.go b/loadtest/harness/run_test.go new file mode 100644 index 0000000000000..fb8fc42a35bcf --- /dev/null +++ b/loadtest/harness/run_test.go @@ -0,0 +1,134 @@ +package harness_test + +import ( + "context" + "fmt" + "io" + "sync/atomic" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/coder/coder/loadtest/harness" +) + +// testFns implements Runnable and Cleanable. +type testFns struct { + RunFn func(ctx context.Context, id string, logs io.Writer) error + // CleanupFn is optional if no cleanup is required. + CleanupFn func(ctx context.Context, id string) error +} + +// Run implements Runnable. +func (fns testFns) Run(ctx context.Context, id string, logs io.Writer) error { + return fns.RunFn(ctx, id, logs) +} + +// Cleanup implements Cleanable. 
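+// A nil CleanupFn makes Cleanup a no-op, so tests only set it when needed.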
+func (fns testFns) Cleanup(ctx context.Context, id string) error { + if fns.CleanupFn == nil { + return nil + } + + return fns.CleanupFn(ctx, id) +} + +func Test_TestRun(t *testing.T) { + t.Parallel() + + t.Run("OK", func(t *testing.T) { + t.Parallel() + + var ( + name, id = "test", "1" + runCalled int64 + cleanupCalled int64 + + testFns = testFns{ + RunFn: func(ctx context.Context, id string, logs io.Writer) error { + atomic.AddInt64(&runCalled, 1) + return nil + }, + CleanupFn: func(ctx context.Context, id string) error { + atomic.AddInt64(&cleanupCalled, 1) + return nil + }, + } + ) + + run := harness.NewTestRun(name, id, testFns) + require.Equal(t, fmt.Sprintf("%s/%s", name, id), run.FullID()) + + err := run.Run(context.Background()) + require.NoError(t, err) + require.EqualValues(t, 1, atomic.LoadInt64(&runCalled)) + + err = run.Cleanup(context.Background()) + require.NoError(t, err) + require.EqualValues(t, 1, atomic.LoadInt64(&cleanupCalled)) + }) + + t.Run("Cleanup", func(t *testing.T) { + t.Parallel() + + t.Run("NoFn", func(t *testing.T) { + t.Parallel() + + run := harness.NewTestRun("test", "1", testFns{ + RunFn: func(ctx context.Context, id string, logs io.Writer) error { + return nil + }, + CleanupFn: nil, + }) + + err := run.Cleanup(context.Background()) + require.NoError(t, err) + }) + + t.Run("NotDone", func(t *testing.T) { + t.Parallel() + + var cleanupCalled int64 + run := harness.NewTestRun("test", "1", testFns{ + RunFn: func(ctx context.Context, id string, logs io.Writer) error { + return nil + }, + CleanupFn: func(ctx context.Context, id string) error { + atomic.AddInt64(&cleanupCalled, 1) + return nil + }, + }) + + err := run.Cleanup(context.Background()) + require.NoError(t, err) + require.EqualValues(t, 0, atomic.LoadInt64(&cleanupCalled)) + }) + }) + + t.Run("CatchesRunPanic", func(t *testing.T) { + t.Parallel() + + testFns := testFns{ + RunFn: func(ctx context.Context, id string, logs io.Writer) error { + panic(testPanicMessage) + }, + } + + run := harness.NewTestRun("test", "1", testFns) + + err := run.Run(context.Background()) + require.Error(t, err) + require.ErrorContains(t, err, "panic") + require.ErrorContains(t, err, testPanicMessage) + }) + + t.Run("ResultPanicsWhenNotDone", func(t *testing.T) { + t.Parallel() + + run := harness.NewTestRun("test", "1", testFns{}) + + require.Panics(t, func() { + _ = run.Result() + }) + }) +} diff --git a/loadtest/harness/strategies.go b/loadtest/harness/strategies.go new file mode 100644 index 0000000000000..e23501630a79a --- /dev/null +++ b/loadtest/harness/strategies.go @@ -0,0 +1,156 @@ +package harness + +import ( + "context" + cryptorand "crypto/rand" + "encoding/binary" + "io" + "math/rand" + "sync" + "time" +) + +// LinearExecutionStrategy executes all test runs in a linear fashion, one after +// the other. +type LinearExecutionStrategy struct{} + +var _ ExecutionStrategy = LinearExecutionStrategy{} + +// Execute implements ExecutionStrategy. +func (LinearExecutionStrategy) Execute(ctx context.Context, runs []*TestRun) error { + for _, run := range runs { + _ = run.Run(ctx) + } + + return nil +} + +// ConcurrentExecutionStrategy executes all test runs concurrently without any +// regard for parallelism. +type ConcurrentExecutionStrategy struct{} + +var _ ExecutionStrategy = ConcurrentExecutionStrategy{} + +// Execute implements ExecutionStrategy. 
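+// Each run is started in its own goroutine, and Execute blocks until all of
+// them have returned.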
+func (ConcurrentExecutionStrategy) Execute(ctx context.Context, runs []*TestRun) error { + var wg sync.WaitGroup + for _, run := range runs { + run := run + + wg.Add(1) + go func() { + defer wg.Done() + _ = run.Run(ctx) + }() + } + + wg.Wait() + return nil +} + +// ParallelExecutionStrategy executes all test runs concurrently, but limits the +// number of concurrent runs to the given limit. +type ParallelExecutionStrategy struct { + Limit int +} + +var _ ExecutionStrategy = ParallelExecutionStrategy{} + +// Execute implements ExecutionStrategy. +func (p ParallelExecutionStrategy) Execute(ctx context.Context, runs []*TestRun) error { + var wg sync.WaitGroup + sem := make(chan struct{}, p.Limit) + + for _, run := range runs { + run := run + + wg.Add(1) + go func() { + defer wg.Done() + defer func() { + <-sem + }() + sem <- struct{}{} + _ = run.Run(ctx) + }() + } + + wg.Wait() + return nil +} + +// TimeoutExecutionStrategyWrapper is an ExecutionStrategy that wraps another +// ExecutionStrategy and applies a timeout to each test run's context. +type TimeoutExecutionStrategyWrapper struct { + Timeout time.Duration + Inner ExecutionStrategy +} + +var _ ExecutionStrategy = &TimeoutExecutionStrategyWrapper{} + +type timeoutRunnerWrapper struct { + timeout time.Duration + inner Runnable +} + +var _ Runnable = timeoutRunnerWrapper{} + +func (t timeoutRunnerWrapper) Run(ctx context.Context, id string, logs io.Writer) error { + ctx, cancel := context.WithTimeout(ctx, t.timeout) + defer cancel() + + return t.inner.Run(ctx, id, logs) +} + +// Execute implements ExecutionStrategy. +func (t *TimeoutExecutionStrategyWrapper) Execute(ctx context.Context, runs []*TestRun) error { + for _, run := range runs { + oldRunner := run.runner + run.runner = timeoutRunnerWrapper{ + timeout: t.Timeout, + inner: oldRunner, + } + } + + return t.Inner.Execute(ctx, runs) +} + +// ShuffleExecutionStrategyWrapper is an ExecutionStrategy that wraps another +// ExecutionStrategy and shuffles the order of the test runs before executing. +type ShuffleExecutionStrategyWrapper struct { + Inner ExecutionStrategy +} + +var _ ExecutionStrategy = &ShuffleExecutionStrategyWrapper{} + +type cryptoRandSource struct{} + +var _ rand.Source = cryptoRandSource{} + +func (cryptoRandSource) Int63() int64 { + var b [8]byte + _, err := cryptorand.Read(b[:]) + if err != nil { + panic(err) + } + + // mask off sign bit to ensure positive number + return int64(binary.LittleEndian.Uint64(b[:]) & (1<<63 - 1)) +} + +func (cryptoRandSource) Seed(_ int64) {} + +// Execute implements ExecutionStrategy. +func (s *ShuffleExecutionStrategyWrapper) Execute(ctx context.Context, runs []*TestRun) error { + shuffledRuns := make([]*TestRun, len(runs)) + copy(shuffledRuns, runs) + + //nolint:gosec // gosec thinks we're using an insecure RNG, but we're not. 
+ src := rand.New(cryptoRandSource{}) + for i := range shuffledRuns { + j := src.Intn(i + 1) + shuffledRuns[i], shuffledRuns[j] = shuffledRuns[j], shuffledRuns[i] + } + + return s.Inner.Execute(ctx, shuffledRuns) +} diff --git a/loadtest/harness/strategies_test.go b/loadtest/harness/strategies_test.go new file mode 100644 index 0000000000000..88968313e92e8 --- /dev/null +++ b/loadtest/harness/strategies_test.go @@ -0,0 +1,186 @@ +package harness_test + +import ( + "context" + "io" + "sort" + "strconv" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" + + "github.com/coder/coder/loadtest/harness" +) + +func Test_LinearExecutionStrategy(t *testing.T) { + t.Parallel() + + var ( + lastSeenI int64 = -1 + count int64 + ) + runs := strategyTestData(100, func(_ context.Context, i int, _ io.Writer) error { + atomic.AddInt64(&count, 1) + swapped := atomic.CompareAndSwapInt64(&lastSeenI, int64(i-1), int64(i)) + assert.True(t, swapped) + return nil + }) + strategy := harness.LinearExecutionStrategy{} + err := strategy.Execute(context.Background(), runs) + require.NoError(t, err) + require.EqualValues(t, 100, atomic.LoadInt64(&count)) + + lastStartTime := time.Time{} + for _, run := range runs { + startTime := run.Result().StartedAt + require.True(t, startTime.After(lastStartTime)) + lastStartTime = startTime + } +} + +func Test_ConcurrentExecutionStrategy(t *testing.T) { + t.Parallel() + + runs := strategyTestData(10, func(_ context.Context, i int, _ io.Writer) error { + time.Sleep(1 * time.Second) + return nil + }) + strategy := harness.ConcurrentExecutionStrategy{} + + startTime := time.Now() + err := strategy.Execute(context.Background(), runs) + require.NoError(t, err) + + // Should've taken at least 900ms to run but less than 5 seconds. + require.True(t, time.Since(startTime) > 900*time.Millisecond) + require.True(t, time.Since(startTime) < 5*time.Second) + + // All tests should've started within 500 ms of the start time. + endTime := startTime.Add(500 * time.Millisecond) + for _, run := range runs { + runStartTime := run.Result().StartedAt + require.WithinRange(t, runStartTime, startTime, endTime) + } +} + +func Test_ParallelExecutionStrategy(t *testing.T) { + t.Parallel() + + runs := strategyTestData(10, func(_ context.Context, _ int, _ io.Writer) error { + time.Sleep(1 * time.Second) + return nil + }) + strategy := harness.ParallelExecutionStrategy{ + Limit: 5, + } + + startTime := time.Now() + err := strategy.Execute(context.Background(), runs) + require.NoError(t, err) + + // Should've taken at least 1900ms to run but less than 8 seconds. + require.True(t, time.Since(startTime) > 1900*time.Millisecond) + require.True(t, time.Since(startTime) < 8*time.Second) + + // Any five of the tests should've started within 500 ms of the start time. + endTime := startTime.Add(500 * time.Millisecond) + withinRange := 0 + for _, run := range runs { + runStartTime := run.Result().StartedAt + if runStartTime.After(startTime) && runStartTime.Before(endTime) { + withinRange++ + } + } + require.Equal(t, 5, withinRange) + + // The other 5 tests should've started between 900ms and 1.5s after the + // start time. 
+ startTime = startTime.Add(900 * time.Millisecond) + endTime = startTime.Add(600 * time.Millisecond) + withinRange = 0 + for _, run := range runs { + runStartTime := run.Result().StartedAt + if runStartTime.After(startTime) && runStartTime.Before(endTime) { + withinRange++ + } + } + require.Equal(t, 5, withinRange) +} + +func Test_TimeoutExecutionStrategy(t *testing.T) { + t.Parallel() + + runs := strategyTestData(10, func(ctx context.Context, _ int, _ io.Writer) error { + ticker := time.NewTicker(500 * time.Millisecond) + defer ticker.Stop() + + select { + case <-ctx.Done(): + return nil + case <-ticker.C: + return xerrors.New("context wasn't canceled") + } + }) + strategy := harness.TimeoutExecutionStrategyWrapper{ + Timeout: 100 * time.Millisecond, + Inner: harness.ConcurrentExecutionStrategy{}, + } + + err := strategy.Execute(context.Background(), runs) + require.NoError(t, err) + + for _, run := range runs { + require.NoError(t, run.Result().Error) + } +} + +func Test_ShuffleExecutionStrategyWrapper(t *testing.T) { + t.Parallel() + + runs := strategyTestData(100000, func(_ context.Context, i int, _ io.Writer) error { + // t.Logf("run %d", i) + return nil + }) + strategy := harness.ShuffleExecutionStrategyWrapper{ + Inner: harness.LinearExecutionStrategy{}, + } + + err := strategy.Execute(context.Background(), runs) + require.NoError(t, err) + + // Ensure not in order by sorting the start time of each run. + unsortedTimes := make([]time.Time, len(runs)) + for i, run := range runs { + unsortedTimes[i] = run.Result().StartedAt + } + + sortedTimes := make([]time.Time, len(runs)) + copy(sortedTimes, unsortedTimes) + sort.Slice(sortedTimes, func(i, j int) bool { + return sortedTimes[i].Before(sortedTimes[j]) + }) + + require.NotEqual(t, unsortedTimes, sortedTimes) +} + +func strategyTestData(count int, runFn func(ctx context.Context, i int, logs io.Writer) error) []*harness.TestRun { + out := make([]*harness.TestRun, count) + for i := 0; i < count; i++ { + i := i + + out[i] = harness.NewTestRun("test", strconv.Itoa(i), testFns{ + RunFn: func(ctx context.Context, id string, logs io.Writer) error { + if runFn != nil { + return runFn(ctx, i, logs) + } + return nil + }, + }) + } + + return out +} From b63d7e8a422d038b6235864771fdf8d77469c43a Mon Sep 17 00:00:00 2001 From: Dean Sheather Date: Tue, 1 Nov 2022 15:11:20 +0000 Subject: [PATCH 2/7] feat: add workspacebuild load test runner --- loadtest/workspacebuild/run.go | 202 ++++++++++++++++++++++++++ loadtest/workspacebuild/run_test.go | 216 ++++++++++++++++++++++++++++ 2 files changed, 418 insertions(+) create mode 100644 loadtest/workspacebuild/run.go create mode 100644 loadtest/workspacebuild/run_test.go diff --git a/loadtest/workspacebuild/run.go b/loadtest/workspacebuild/run.go new file mode 100644 index 0000000000000..de1d0988eccf0 --- /dev/null +++ b/loadtest/workspacebuild/run.go @@ -0,0 +1,202 @@ +package workspacebuild + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/google/uuid" + "golang.org/x/xerrors" + + "github.com/coder/coder/codersdk" + "github.com/coder/coder/cryptorand" + "github.com/coder/coder/loadtest/harness" +) + +type Runner struct { + client *codersdk.Client + orgID uuid.UUID + userID string + req codersdk.CreateWorkspaceRequest + + workspaceID uuid.UUID +} + +var _ harness.Runnable = &Runner{} +var _ harness.Cleanable = &Runner{} + +// NewRunner creates a new workspace build loadtest Runner. The provided request +// will be used verbatim, but the name will be set to a random value if unset. 
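+// Run waits for the workspace build to complete and for every agent in the
+// workspace to report as connected before returning.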
+// +// The Cleanup method will delete the workspace if it was successfully created. +func NewRunner(client *codersdk.Client, orgID uuid.UUID, userID string, req codersdk.CreateWorkspaceRequest) *Runner { + return &Runner{ + client: client, + orgID: orgID, + userID: userID, + req: req, + } +} + +// Run implements Runnable. +func (r *Runner) Run(ctx context.Context, _ string, logs io.Writer) error { + req := r.req + if req.Name == "" { + randName, err := cryptorand.HexString(8) + if err != nil { + return xerrors.Errorf("generate random name for workspace: %w", err) + } + req.Name = "test-" + randName + } + + after := time.Now() + workspace, err := r.client.CreateWorkspace(ctx, r.orgID, r.userID, req) + if err != nil { + return xerrors.Errorf("create workspace: %w", err) + } + r.workspaceID = workspace.ID + + err = waitForBuild(ctx, logs, r.client, workspace.LatestBuild.ID, after) + if err != nil { + return xerrors.Errorf("wait for build: %w", err) + } + + _, _ = fmt.Fprintln(logs, "") + err = waitForAgents(ctx, logs, r.client, workspace.ID) + if err != nil { + return xerrors.Errorf("wait for agent: %w", err) + } + + return nil +} + +// Cleanup implements Cleanable. +func (r *Runner) Cleanup(ctx context.Context, _ string) error { + if r.workspaceID == uuid.Nil { + return nil + } + + after := time.Now() + build, err := r.client.CreateWorkspaceBuild(ctx, r.workspaceID, codersdk.CreateWorkspaceBuildRequest{ + Transition: codersdk.WorkspaceTransitionDelete, + }) + if err != nil { + return xerrors.Errorf("delete workspace: %w", err) + } + + // TODO: capture these logs + logs := io.Discard + err = waitForBuild(ctx, logs, r.client, build.ID, after) + if err != nil { + return xerrors.Errorf("wait for build: %w", err) + } + + return nil +} + +func waitForBuild(ctx context.Context, w io.Writer, client *codersdk.Client, buildID uuid.UUID, after time.Time) error { + _, _ = fmt.Fprint(w, "Build is currently queued...") + + // Wait for build to start. + for { + build, err := client.WorkspaceBuild(ctx, buildID) + if err != nil { + return xerrors.Errorf("fetch build: %w", err) + } + + if build.Job.Status != codersdk.ProvisionerJobPending { + break + } + + _, _ = fmt.Fprint(w, ".") + time.Sleep(500 * time.Millisecond) + } + + _, _ = fmt.Fprintln(w, "\nBuild started! 
Streaming logs below:") + + logs, closer, err := client.WorkspaceBuildLogsAfter(ctx, buildID, after) + if err != nil { + return xerrors.Errorf("start streaming build logs: %w", err) + } + defer closer.Close() + + currentStage := "" + for { + select { + case <-ctx.Done(): + return ctx.Err() + case log, ok := <-logs: + if !ok { + build, err := client.WorkspaceBuild(ctx, buildID) + if err != nil { + return xerrors.Errorf("fetch build: %w", err) + } + + _, _ = fmt.Fprintln(w, "") + switch build.Job.Status { + case codersdk.ProvisionerJobSucceeded: + _, _ = fmt.Fprintln(w, "\nBuild succeeded!") + return nil + case codersdk.ProvisionerJobFailed: + _, _ = fmt.Fprintf(w, "\nBuild failed with error %q.\nSee logs above for more details.\n", build.Job.Error) + return xerrors.Errorf("build failed with status %q: %s", build.Job.Status, build.Job.Error) + case codersdk.ProvisionerJobCanceled: + _, _ = fmt.Fprintln(w, "\nBuild canceled.") + return xerrors.New("build canceled") + default: + _, _ = fmt.Fprintf(w, "\nLogs disconnected with unexpected job status %q and error %q.\n", build.Job.Status, build.Job.Error) + return xerrors.Errorf("logs disconnected with unexpected job status %q and error %q", build.Job.Status, build.Job.Error) + } + } + + if log.Stage != currentStage { + currentStage = log.Stage + _, _ = fmt.Fprintf(w, "\n%s\n", currentStage) + } + + level := "unknown" + if log.Level != "" { + level = string(log.Level) + } + _, _ = fmt.Fprintf(w, "\t%s:\t%s\n", level, log.Output) + } + } +} + +func waitForAgents(ctx context.Context, w io.Writer, client *codersdk.Client, workspaceID uuid.UUID) error { + _, _ = fmt.Fprint(w, "Waiting for agents to connect...\n\n") + + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + workspace, err := client.Workspace(ctx, workspaceID) + if err != nil { + return xerrors.Errorf("fetch workspace: %w", err) + } + + ok := true + for _, res := range workspace.LatestBuild.Resources { + for _, agent := range res.Agents { + if agent.Status != codersdk.WorkspaceAgentConnected { + ok = false + } + + _, _ = fmt.Fprintf(w, "\tAgent %q is %s\n", agent.Name, agent.Status) + } + } + if ok { + break + } + + _, _ = fmt.Fprintln(w, "") + time.Sleep(1 * time.Second) + } + + _, _ = fmt.Fprint(w, "\nAgents connected!\n\n") + return nil +} diff --git a/loadtest/workspacebuild/run_test.go b/loadtest/workspacebuild/run_test.go new file mode 100644 index 0000000000000..15b822de192ca --- /dev/null +++ b/loadtest/workspacebuild/run_test.go @@ -0,0 +1,216 @@ +package workspacebuild_test + +import ( + "bytes" + "context" + "fmt" + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "cdr.dev/slog" + "cdr.dev/slog/sloggers/slogtest" + "github.com/coder/coder/agent" + "github.com/coder/coder/coderd/coderdtest" + "github.com/coder/coder/codersdk" + "github.com/coder/coder/loadtest/workspacebuild" + "github.com/coder/coder/provisioner/echo" + "github.com/coder/coder/provisionersdk/proto" + "github.com/coder/coder/testutil" +) + +func Test_Runner(t *testing.T) { + t.Parallel() + + t.Run("OK", func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) + defer cancel() + + client := coderdtest.New(t, &coderdtest.Options{ + IncludeProvisionerDaemon: true, + }) + user := coderdtest.CreateFirstUser(t, client) + + authToken1 := uuid.NewString() + authToken2 := uuid.NewString() + authToken3 := uuid.NewString() + version := 
coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
+			Parse:           echo.ParseComplete,
+			ProvisionDryRun: echo.ProvisionComplete,
+			Provision: []*proto.Provision_Response{
+				{
+					Type: &proto.Provision_Response_Log{
+						Log: &proto.Log{
+							Level:  proto.LogLevel_INFO,
+							Output: "hello from logs",
+						},
+					},
+				},
+				{
+					Type: &proto.Provision_Response_Complete{
+						Complete: &proto.Provision_Complete{
+							Resources: []*proto.Resource{
+								{
+									Name: "example1",
+									Type: "aws_instance",
+									Agents: []*proto.Agent{
+										{
+											Id:   uuid.NewString(),
+											Name: "agent1",
+											Auth: &proto.Agent_Token{
+												Token: authToken1,
+											},
+											Apps: []*proto.App{},
+										},
+										{
+											Id:   uuid.NewString(),
+											Name: "agent2",
+											Auth: &proto.Agent_Token{
+												Token: authToken2,
+											},
+											Apps: []*proto.App{},
+										},
+									},
+								},
+								{
+									Name: "example2",
+									Type: "aws_instance",
+									Agents: []*proto.Agent{
+										{
+											Id:   uuid.NewString(),
+											Name: "agent3",
+											Auth: &proto.Agent_Token{
+												Token: authToken3,
+											},
+											Apps: []*proto.App{},
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		})
+
+		template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
+		coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
+
+		// Since the runner creates the workspace on its own, we have to keep
+		// listing workspaces until we find it, then wait for the build to
+		// finish, then start the agents.
+		go func() {
+			var workspace codersdk.Workspace
+			for {
+				workspaces, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{
+					Owner: codersdk.Me,
+				})
+				if !assert.NoError(t, err) {
+					return
+				}
+
+				if len(workspaces) == 1 {
+					workspace = workspaces[0]
+					break
+				}
+
+				time.Sleep(100 * time.Millisecond)
+			}
+
+			coderdtest.AwaitWorkspaceBuildJob(t, client, workspace.LatestBuild.ID)
+
+			// Start the three agents.
+			for i, authToken := range []string{authToken1, authToken2, authToken3} {
+				i := i + 1
+
+				agentClient := codersdk.New(client.URL)
+				agentClient.SessionToken = authToken
+				agentCloser := agent.New(agent.Options{
+					Client: agentClient,
+					Logger: slogtest.Make(t, nil).
+						Named(fmt.Sprintf("agent%d", i)).
+						Leveled(slog.LevelWarn),
+				})
+				t.Cleanup(func() {
+					_ = agentCloser.Close()
+				})
+			}
+
+			coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
+		}()
+
+		runner := workspacebuild.NewRunner(client, user.OrganizationID, codersdk.Me, codersdk.CreateWorkspaceRequest{
+			TemplateID: template.ID,
+		})
+
+		logs := bytes.NewBuffer(nil)
+		err := runner.Run(ctx, "1", logs)
+		logsStr := logs.String()
+		t.Log("Runner logs:\n\n" + logsStr)
+		require.NoError(t, err)
+
+		// Look for strings in the logs.
+		require.Contains(t, logsStr, "hello from logs")
+		require.Contains(t, logsStr, `"agent1" is connected`)
+		require.Contains(t, logsStr, `"agent2" is connected`)
+		require.Contains(t, logsStr, `"agent3" is connected`)
+
+		// Find the workspace.
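+		// The build and agents are awaited here so that Cleanup below runs
+		// against a fully built workspace.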
+ workspaces, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{ + Owner: codersdk.Me, + }) + require.NoError(t, err) + require.Len(t, workspaces, 1) + + coderdtest.AwaitWorkspaceBuildJob(t, client, workspaces[0].LatestBuild.ID) + coderdtest.AwaitWorkspaceAgents(t, client, workspaces[0].ID) + + err = runner.Cleanup(ctx, "1") + require.NoError(t, err) + }) + + t.Run("FailedBuild", func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong) + defer cancel() + + client := coderdtest.New(t, &coderdtest.Options{ + IncludeProvisionerDaemon: true, + }) + user := coderdtest.CreateFirstUser(t, client) + + version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{ + Parse: echo.ParseComplete, + ProvisionDryRun: echo.ProvisionComplete, + Provision: []*proto.Provision_Response{ + { + Type: &proto.Provision_Response_Complete{ + Complete: &proto.Provision_Complete{ + Error: "test error", + }, + }, + }, + }, + }) + + template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID) + coderdtest.AwaitTemplateVersionJob(t, client, version.ID) + + runner := workspacebuild.NewRunner(client, user.OrganizationID, codersdk.Me, codersdk.CreateWorkspaceRequest{ + TemplateID: template.ID, + }) + + logs := bytes.NewBuffer(nil) + err := runner.Run(ctx, "1", logs) + logsStr := logs.String() + t.Log("Runner logs:\n\n" + logsStr) + require.Error(t, err) + require.ErrorContains(t, err, "test error") + }) +} From 4839db76d8aa0d7ff2f4d9ee6e9ce975be3523a8 Mon Sep 17 00:00:00 2001 From: Dean Sheather Date: Wed, 2 Nov 2022 13:00:39 +0000 Subject: [PATCH 3/7] feat: add coder loadtest command --- cli/loadtest.go | 180 +++++++++++++++++++++++++ cli/loadtest_test.go | 135 +++++++++++++++++++ cli/loadtestconfig.go | 180 +++++++++++++++++++++++++ cli/root.go | 8 +- coderd/provisionerjobs.go | 19 ++- codersdk/client.go | 7 + loadtest/harness/strategies.go | 8 +- loadtest/placebo/config.go | 31 +++++ loadtest/placebo/config_test.go | 82 +++++++++++ loadtest/placebo/run.go | 51 +++++++ loadtest/placebo/run_test.go | 64 +++++++++ loadtest/workspacebuild/config.go | 39 ++++++ loadtest/workspacebuild/config_test.go | 94 +++++++++++++ loadtest/workspacebuild/run.go | 21 +-- loadtest/workspacebuild/run_test.go | 16 ++- 15 files changed, 906 insertions(+), 29 deletions(-) create mode 100644 cli/loadtest.go create mode 100644 cli/loadtest_test.go create mode 100644 cli/loadtestconfig.go create mode 100644 loadtest/placebo/config.go create mode 100644 loadtest/placebo/config_test.go create mode 100644 loadtest/placebo/run.go create mode 100644 loadtest/placebo/run_test.go create mode 100644 loadtest/workspacebuild/config.go create mode 100644 loadtest/workspacebuild/config_test.go diff --git a/cli/loadtest.go b/cli/loadtest.go new file mode 100644 index 0000000000000..04b8ba9eef10b --- /dev/null +++ b/cli/loadtest.go @@ -0,0 +1,180 @@ +package cli + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "os" + "strconv" + "time" + + "github.com/spf13/cobra" + "golang.org/x/xerrors" + + "github.com/coder/coder/cli/cliflag" + "github.com/coder/coder/codersdk" + "github.com/coder/coder/loadtest/harness" +) + +func loadtest() *cobra.Command { + var ( + configPath string + ) + cmd := &cobra.Command{ + Use: "loadtest --config ", + Short: "Load test the Coder API", + Long: "", + Args: cobra.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + if configPath == "" { + return xerrors.New("config is 
required") + } + + var ( + configReader io.Reader + configCloser io.Closer + ) + if configPath == "-" { + configReader = cmd.InOrStdin() + } else { + f, err := os.Open(configPath) + if err != nil { + return xerrors.Errorf("open config file %q: %w", configPath, err) + } + configReader = f + configCloser = f + } + + var config LoadTestConfig + err := json.NewDecoder(configReader).Decode(&config) + if configCloser != nil { + _ = configCloser.Close() + } + if err != nil { + return xerrors.Errorf("read config file %q: %w", configPath, err) + } + + err = config.Validate() + if err != nil { + return xerrors.Errorf("validate config: %w", err) + } + + client, err := CreateClient(cmd) + if err != nil { + return err + } + + me, err := client.User(cmd.Context(), codersdk.Me) + if err != nil { + return xerrors.Errorf("fetch current user: %w", err) + } + + // Only owners can do loadtests. This isn't a very strong check but + // there's not much else we can do. Ratelimits are enforced for + // non-owners so hopefully that limits the damage if someone + // disables this check and runs it against a non-owner account. + ok := false + for _, role := range me.Roles { + if role.Name == "owner" { + ok = true + break + } + } + if !ok { + return xerrors.Errorf("Not logged in as site owner. Load testing is only available to the site owner.") + } + + // Disable ratelimits for future requests. + client.BypassRatelimits = true + + // Prepare the test. + strategy := config.Strategy.ExecutionStrategy() + th := harness.NewTestHarness(strategy) + + for i, t := range config.Tests { + name := fmt.Sprintf("%s-%d", t.Type, i) + + for i := 0; i < t.Count; i++ { + id := strconv.Itoa(i) + runner, err := t.NewRunner(client) + if err != nil { + return xerrors.Errorf("create %q runner for %s/%s: %w", t.Type, name, id, err) + } + + th.AddRun(name, id, runner) + } + } + + _, _ = fmt.Fprintln(cmd.ErrOrStderr(), "Running load test...") + + testCtx := cmd.Context() + if config.Timeout > 0 { + var cancel func() + testCtx, cancel = context.WithTimeout(testCtx, config.Timeout) + defer cancel() + } + + // TODO: live progress output + start := time.Now() + err = th.Run(testCtx) + if err != nil { + return xerrors.Errorf("run test harness (harness failure, not a test failure): %w", err) + } + elapsed := time.Since(start) + + // Print the results. + // TODO: better result printing + // TODO: move result printing to the loadtest package, add multiple + // output formats (like HTML, JSON) + res := th.Results() + var totalDuration time.Duration + for _, run := range res.Runs { + totalDuration += run.Duration + if run.Error == nil { + continue + } + + _, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\n== FAIL: %s\n\n", run.FullID) + _, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\tError: %s\n\n", run.Error) + + // Print log lines indented. 
+ _, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\tLog:\n") + rd := bufio.NewReader(bytes.NewBuffer(run.Logs)) + for { + line, err := rd.ReadBytes('\n') + if err == io.EOF { + break + } + if err != nil { + _, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\n\tLOG PRINT ERROR: %+v\n", err) + } + + _, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\t\t%s", line) + } + } + + _, _ = fmt.Fprintln(cmd.ErrOrStderr(), "\n\nTest results:") + _, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\tPass: %d\n", res.TotalPass) + _, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\tFail: %d\n", res.TotalFail) + _, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\tTotal: %d\n", res.TotalRuns) + _, _ = fmt.Fprintln(cmd.ErrOrStderr(), "") + _, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\tTotal duration: %s\n", elapsed) + _, _ = fmt.Fprintf(cmd.ErrOrStderr(), "\tAvg. duration: %s\n", totalDuration/time.Duration(res.TotalRuns)) + + // Cleanup. + _, _ = fmt.Fprintln(cmd.ErrOrStderr(), "\nCleaning up...") + err = th.Cleanup(cmd.Context()) + if err != nil { + return xerrors.Errorf("cleanup tests: %w", err) + } + + return nil + }, + } + + cliflag.StringVarP(cmd.Flags(), &configPath, "config", "", "CODER_LOADTEST_CONFIG_PATH", "", "Path to the load test configuration file, or - to read from stdin.") + return cmd +} diff --git a/cli/loadtest_test.go b/cli/loadtest_test.go new file mode 100644 index 0000000000000..bfc8adaade9c3 --- /dev/null +++ b/cli/loadtest_test.go @@ -0,0 +1,135 @@ +package cli_test + +import ( + "bytes" + "context" + "encoding/json" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/coder/coder/cli" + "github.com/coder/coder/cli/clitest" + "github.com/coder/coder/coderd/coderdtest" + "github.com/coder/coder/codersdk" + "github.com/coder/coder/loadtest/placebo" + "github.com/coder/coder/loadtest/workspacebuild" + "github.com/coder/coder/pty/ptytest" + "github.com/coder/coder/testutil" +) + +func TestLoadTest(t *testing.T) { + t.Parallel() + + t.Run("PlaceboFromStdin", func(t *testing.T) { + t.Parallel() + + client := coderdtest.New(t, nil) + _ = coderdtest.CreateFirstUser(t, client) + + config := cli.LoadTestConfig{ + Strategy: cli.LoadTestStrategy{ + Type: cli.LoadTestStrategyTypeLinear, + }, + Tests: []cli.LoadTest{ + { + Type: cli.LoadTestTypePlacebo, + Count: 10, + Placebo: &placebo.Config{ + Sleep: 10 * time.Millisecond, + }, + }, + }, + Timeout: 1 * time.Second, + } + + configBytes, err := json.Marshal(config) + require.NoError(t, err) + + cmd, root := clitest.New(t, "loadtest", "--config", "-") + clitest.SetupConfig(t, client, root) + pty := ptytest.New(t) + cmd.SetIn(bytes.NewReader(configBytes)) + cmd.SetOut(pty.Output()) + cmd.SetErr(pty.Output()) + + ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitLong) + defer cancelFunc() + + done := make(chan any) + go func() { + errC := cmd.ExecuteContext(ctx) + assert.NoError(t, errC) + close(done) + }() + pty.ExpectMatch("Test results:") + pty.ExpectMatch("Pass: 10") + cancelFunc() + <-done + }) + + t.Run("WorkspaceBuildFromFile", func(t *testing.T) { + t.Parallel() + + client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true}) + user := coderdtest.CreateFirstUser(t, client) + version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil) + coderdtest.AwaitTemplateVersionJob(t, client, version.ID) + template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID) + + config := cli.LoadTestConfig{ + Strategy: cli.LoadTestStrategy{ + Type: 
cli.LoadTestStrategyTypeConcurrent,
+				ConcurrencyLimit: 2,
+			},
+			Tests: []cli.LoadTest{
+				{
+					Type:  cli.LoadTestTypeWorkspaceBuild,
+					Count: 2,
+					WorkspaceBuild: &workspacebuild.Config{
+						OrganizationID: user.OrganizationID,
+						UserID:         user.UserID.String(),
+						Request: codersdk.CreateWorkspaceRequest{
+							TemplateID: template.ID,
+						},
+					},
+				},
+			},
+			Timeout: 10 * time.Second,
+		}
+
+		d := t.TempDir()
+		configPath := filepath.Join(d, "/config.loadtest.json")
+		f, err := os.Create(configPath)
+		require.NoError(t, err)
+		defer f.Close()
+		err = json.NewEncoder(f).Encode(config)
+		require.NoError(t, err)
+		_ = f.Close()
+
+		cmd, root := clitest.New(t, "loadtest", "--config", configPath)
+		clitest.SetupConfig(t, client, root)
+		pty := ptytest.New(t)
+		cmd.SetIn(pty.Input())
+		cmd.SetOut(pty.Output())
+		cmd.SetErr(pty.Output())
+
+		ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitLong)
+		defer cancelFunc()
+
+		done := make(chan any)
+		go func() {
+			errC := cmd.ExecuteContext(ctx)
+			assert.NoError(t, errC)
+			close(done)
+		}()
+		pty.ExpectMatch("Test results:")
+		pty.ExpectMatch("Pass: 2")
+		<-done
+		cancelFunc()
+	})
+}
diff --git a/cli/loadtestconfig.go b/cli/loadtestconfig.go
new file mode 100644
index 0000000000000..57d70e4f81969
--- /dev/null
+++ b/cli/loadtestconfig.go
@@ -0,0 +1,180 @@
+package cli
+
+import (
+	"time"
+
+	"golang.org/x/xerrors"
+
+	"github.com/coder/coder/codersdk"
+	"github.com/coder/coder/loadtest/harness"
+	"github.com/coder/coder/loadtest/placebo"
+	"github.com/coder/coder/loadtest/workspacebuild"
+)
+
+// LoadTestConfig is the overall configuration for a call to `coder loadtest`.
+type LoadTestConfig struct {
+	Strategy LoadTestStrategy `json:"strategy"`
+	Tests    []LoadTest       `json:"tests"`
+	// Timeout sets a timeout for the entire test run. To control the timeout
+	// of each individual run, use strategy.timeout.
+	Timeout time.Duration `json:"timeout"`
+}
+
+type LoadTestStrategyType string
+
+const (
+	LoadTestStrategyTypeLinear     LoadTestStrategyType = "linear"
+	LoadTestStrategyTypeConcurrent LoadTestStrategyType = "concurrent"
+)
+
+type LoadTestStrategy struct {
+	// Type is the type of load test strategy to use. Strategies determine how
+	// to run tests concurrently.
+	Type LoadTestStrategyType `json:"type"`
+
+	// ConcurrencyLimit is the maximum number of concurrent runs. This only
+	// applies if type == "concurrent". Negative values disable the concurrency
+	// limit and cause all runs to be performed concurrently. The default value
+	// is 100.
+	ConcurrencyLimit int `json:"concurrency_limit"`
+
+	// Shuffle determines whether or not to shuffle the test runs before
+	// executing them.
+	Shuffle bool `json:"shuffle"`
+	// Timeout is the maximum amount of time to run each test for. This is
+	// independent of the timeout specified in the test run. A timeout of 0
+	// disables the timeout.
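+	// The timeout is applied by wrapping each run's context, so runs must
+	// honor context cancellation for it to take effect.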
+	Timeout time.Duration `json:"timeout"`
+}
+
+func (s LoadTestStrategy) ExecutionStrategy() harness.ExecutionStrategy {
+	var strategy harness.ExecutionStrategy
+	switch s.Type {
+	case LoadTestStrategyTypeLinear:
+		strategy = harness.LinearExecutionStrategy{}
+	case LoadTestStrategyTypeConcurrent:
+		limit := s.ConcurrencyLimit
+		if limit == 0 {
+			limit = 100
+		}
+		if limit < 0 {
+			// Negative values disable the limit. Assign (rather than return)
+			// so the timeout and shuffle wrappers below still apply.
+			strategy = harness.ConcurrentExecutionStrategy{}
+		} else {
+			strategy = harness.ParallelExecutionStrategy{
+				Limit: limit,
+			}
+		}
+	default:
+		panic("unreachable, unknown strategy type " + s.Type)
+	}
+
+	if s.Timeout > 0 {
+		strategy = harness.TimeoutExecutionStrategyWrapper{
+			Timeout: s.Timeout,
+			Inner:   strategy,
+		}
+	}
+	if s.Shuffle {
+		strategy = harness.ShuffleExecutionStrategyWrapper{
+			Inner: strategy,
+		}
+	}
+
+	return strategy
+}
+
+type LoadTestType string
+
+const (
+	LoadTestTypePlacebo        LoadTestType = "placebo"
+	LoadTestTypeWorkspaceBuild LoadTestType = "workspacebuild"
+)
+
+type LoadTest struct {
+	// Type is the type of load test to run.
+	Type LoadTestType `json:"type"`
+	// Count is the number of test runs to execute with this configuration. If
+	// the count is 0 or negative, it defaults to 1.
+	Count int `json:"count"`
+
+	// Placebo must be set if type == "placebo".
+	Placebo *placebo.Config `json:"placebo,omitempty"`
+	// WorkspaceBuild must be set if type == "workspacebuild".
+	WorkspaceBuild *workspacebuild.Config `json:"workspacebuild,omitempty"`
+}
+
+func (t LoadTest) NewRunner(client *codersdk.Client) (harness.Runnable, error) {
+	switch t.Type {
+	case LoadTestTypePlacebo:
+		if t.Placebo == nil {
+			return nil, xerrors.New("placebo config must be set")
+		}
+
+		return placebo.NewRunner(*t.Placebo), nil
+	case LoadTestTypeWorkspaceBuild:
+		if t.WorkspaceBuild == nil {
+			return nil, xerrors.Errorf("workspacebuild config must be set")
+		}
+
+		return workspacebuild.NewRunner(client, *t.WorkspaceBuild), nil
+	default:
+		return nil, xerrors.Errorf("unknown test type %q", t.Type)
+	}
+}
+
+func (c *LoadTestConfig) Validate() error {
+	err := c.Strategy.Validate()
+	if err != nil {
+		return xerrors.Errorf("validate strategy: %w", err)
+	}
+
+	for i, test := range c.Tests {
+		err := test.Validate()
+		if err != nil {
+			return xerrors.Errorf("validate test %d: %w", i, err)
+		}
+	}
+
+	return nil
+}
+
+func (s *LoadTestStrategy) Validate() error {
+	switch s.Type {
+	case LoadTestStrategyTypeLinear:
+	case LoadTestStrategyTypeConcurrent:
+	default:
+		return xerrors.Errorf("invalid load test strategy type: %q", s.Type)
+	}
+
+	if s.Timeout < 0 {
+		return xerrors.Errorf("invalid load test strategy timeout: %q", s.Timeout)
+	}
+
+	return nil
+}
+
+func (t *LoadTest) Validate() error {
+	switch t.Type {
+	case LoadTestTypePlacebo:
+		if t.Placebo == nil {
+			return xerrors.Errorf("placebo test type must specify placebo")
+		}
+
+		err := t.Placebo.Validate()
+		if err != nil {
+			return xerrors.Errorf("validate placebo: %w", err)
+		}
+	case LoadTestTypeWorkspaceBuild:
+		if t.WorkspaceBuild == nil {
+			return xerrors.New("workspacebuild test type must specify workspacebuild")
+		}
+
+		err := t.WorkspaceBuild.Validate()
+		if err != nil {
+			return xerrors.Errorf("validate workspacebuild: %w", err)
+		}
+	default:
+		return xerrors.Errorf("invalid load test type: %q", t.Type)
+	}
+
+	return nil
+}
diff --git a/cli/root.go b/cli/root.go
index dcff07ab1f7df..b0e9ec715ddb9 100644
--- a/cli/root.go
+++ b/cli/root.go
@@ -70,6 +70,7 @@ func init() {
 }
 
 func Core() []*cobra.Command {
+	// Please re-sort this list alphabetically if
you change it! return []*cobra.Command{ configSSH(), create(), @@ -77,26 +78,27 @@ func Core() []*cobra.Command { dotfiles(), gitssh(), list(), + loadtest(), login(), logout(), parameters(), portForward(), publickey(), + rename(), resetPassword(), schedules(), show(), - ssh(), speedtest(), + ssh(), start(), state(), stop(), - rename(), templates(), + tokens(), update(), users(), versionCmd(), workspaceAgent(), - tokens(), } } diff --git a/coderd/provisionerjobs.go b/coderd/provisionerjobs.go index 04f050f0c5218..77a6589c71ac4 100644 --- a/coderd/provisionerjobs.go +++ b/coderd/provisionerjobs.go @@ -376,9 +376,20 @@ type provisionerJobLogsMessage struct { func (api *API) followLogs(jobID uuid.UUID) (<-chan database.ProvisionerJobLog, func(), error) { logger := api.Logger.With(slog.F("job_id", jobID)) - bufferedLogs := make(chan database.ProvisionerJobLog, 128) - closeSubscribe, err := api.Pubsub.Subscribe(provisionerJobLogsChannel(jobID), + + var ( + closed = make(chan struct{}) + bufferedLogs = make(chan database.ProvisionerJobLog, 128) + ) + closeSubscribe, err := api.Pubsub.Subscribe( + provisionerJobLogsChannel(jobID), func(ctx context.Context, message []byte) { + select { + case <-closed: + return + default: + } + jlMsg := provisionerJobLogsMessage{} err := json.Unmarshal(message, &jlMsg) if err != nil { @@ -399,9 +410,11 @@ func (api *API) followLogs(jobID uuid.UUID) (<-chan database.ProvisionerJobLog, } if jlMsg.EndOfLogs { logger.Debug(ctx, "got End of Logs") + close(closed) close(bufferedLogs) } - }) + }, + ) if err != nil { return nil, nil, err } diff --git a/codersdk/client.go b/codersdk/client.go index d79408ec10956..a961844a0e539 100644 --- a/codersdk/client.go +++ b/codersdk/client.go @@ -44,6 +44,10 @@ type Client struct { HTTPClient *http.Client SessionToken string URL *url.URL + + // BypassRatelimits is an optional flag that can be set by the site owner to + // disable ratelimit checks for the client. + BypassRatelimits bool } type RequestOption func(*http.Request) @@ -87,6 +91,9 @@ func (c *Client) Request(ctx context.Context, method, path string, body interfac return nil, xerrors.Errorf("create request: %w", err) } req.Header.Set(SessionCustomHeader, c.SessionToken) + if c.BypassRatelimits { + req.Header.Set(BypassRatelimitHeader, "true") + } if body != nil { req.Header.Set("Content-Type", "application/json") diff --git a/loadtest/harness/strategies.go b/loadtest/harness/strategies.go index e23501630a79a..a831f86da4623 100644 --- a/loadtest/harness/strategies.go +++ b/loadtest/harness/strategies.go @@ -86,7 +86,7 @@ type TimeoutExecutionStrategyWrapper struct { Inner ExecutionStrategy } -var _ ExecutionStrategy = &TimeoutExecutionStrategyWrapper{} +var _ ExecutionStrategy = TimeoutExecutionStrategyWrapper{} type timeoutRunnerWrapper struct { timeout time.Duration @@ -103,7 +103,7 @@ func (t timeoutRunnerWrapper) Run(ctx context.Context, id string, logs io.Writer } // Execute implements ExecutionStrategy. 
-func (t *TimeoutExecutionStrategyWrapper) Execute(ctx context.Context, runs []*TestRun) error { +func (t TimeoutExecutionStrategyWrapper) Execute(ctx context.Context, runs []*TestRun) error { for _, run := range runs { oldRunner := run.runner run.runner = timeoutRunnerWrapper{ @@ -121,7 +121,7 @@ type ShuffleExecutionStrategyWrapper struct { Inner ExecutionStrategy } -var _ ExecutionStrategy = &ShuffleExecutionStrategyWrapper{} +var _ ExecutionStrategy = ShuffleExecutionStrategyWrapper{} type cryptoRandSource struct{} @@ -141,7 +141,7 @@ func (cryptoRandSource) Int63() int64 { func (cryptoRandSource) Seed(_ int64) {} // Execute implements ExecutionStrategy. -func (s *ShuffleExecutionStrategyWrapper) Execute(ctx context.Context, runs []*TestRun) error { +func (s ShuffleExecutionStrategyWrapper) Execute(ctx context.Context, runs []*TestRun) error { shuffledRuns := make([]*TestRun, len(runs)) copy(shuffledRuns, runs) diff --git a/loadtest/placebo/config.go b/loadtest/placebo/config.go new file mode 100644 index 0000000000000..7db91194c2a8d --- /dev/null +++ b/loadtest/placebo/config.go @@ -0,0 +1,31 @@ +package placebo + +import ( + "time" + + "golang.org/x/xerrors" +) + +type Config struct { + // Sleep is how long to sleep for. If unspecified, the test run will finish + // instantly. + Sleep time.Duration `json:"sleep"` + // Jitter is the maximum amount of jitter to add to the sleep duration. The + // sleep value will be increased by a random value between 0 and jitter if + // jitter is greater than 0. + Jitter time.Duration `json:"jitter"` +} + +func (c Config) Validate() error { + if c.Sleep < 0 { + return xerrors.New("sleep must be set to a positive value") + } + if c.Jitter < 0 { + return xerrors.New("jitter must be set to a positive value") + } + if c.Jitter > 0 && c.Sleep == 0 { + return xerrors.New("jitter must be 0 if sleep is 0") + } + + return nil +} diff --git a/loadtest/placebo/config_test.go b/loadtest/placebo/config_test.go new file mode 100644 index 0000000000000..3dc7e265c2115 --- /dev/null +++ b/loadtest/placebo/config_test.go @@ -0,0 +1,82 @@ +package placebo_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/coder/coder/loadtest/placebo" +) + +func Test_Config(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + config placebo.Config + errContains string + }{ + { + name: "Empty", + config: placebo.Config{ + Sleep: 0, + Jitter: 0, + }, + }, + { + name: "Sleep", + config: placebo.Config{ + Sleep: 1 * time.Second, + Jitter: 0, + }, + }, + { + name: "SleepAndJitter", + config: placebo.Config{ + Sleep: 1 * time.Second, + Jitter: 1 * time.Second, + }, + }, + { + name: "NegativeSleep", + config: placebo.Config{ + Sleep: -1 * time.Second, + Jitter: 0, + }, + errContains: "sleep must be set to a positive value", + }, + { + name: "NegativeJitter", + config: placebo.Config{ + Sleep: 0, + Jitter: -1 * time.Second, + }, + errContains: "jitter must be set to a positive value", + }, + { + name: "JitterWithoutSleep", + config: placebo.Config{ + Sleep: 0, + Jitter: 1 * time.Second, + }, + errContains: "jitter must be 0 if sleep is 0", + }, + } + + for _, c := range cases { + c := c + + t.Run(c.name, func(t *testing.T) { + t.Parallel() + + err := c.config.Validate() + if c.errContains != "" { + require.Error(t, err) + require.Contains(t, err.Error(), c.errContains) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/loadtest/placebo/run.go b/loadtest/placebo/run.go new file mode 100644 index 
0000000000000..34ec8d4cbc9ff --- /dev/null +++ b/loadtest/placebo/run.go @@ -0,0 +1,51 @@ +package placebo + +import ( + "context" + "fmt" + "io" + "math/rand" + "time" + + "github.com/coder/coder/loadtest/harness" +) + +type Runner struct { + cfg Config +} + +var _ harness.Runnable = &Runner{} + +// NewRunner creates a new placebo loadtest Runner. The test will sleep for the +// specified duration if set, and will add a random amount of jitter between 0 +// and the specified jitter value if set. +func NewRunner(cfg Config) *Runner { + return &Runner{ + cfg: cfg, + } +} + +// Run implements Runnable. +func (r *Runner) Run(ctx context.Context, _ string, logs io.Writer) error { + sleepDur := r.cfg.Sleep + if r.cfg.Jitter > 0 { + //nolint:gosec // not used for crypto + sleepDur += time.Duration(rand.Int63n(int64(r.cfg.Jitter))) + // This makes it easier to tell if jitter was applied in tests. + sleepDur += time.Millisecond + } + + if sleepDur > 0 { + _, _ = fmt.Fprintf(logs, "sleeping for %s\n", sleepDur) + + t := time.NewTimer(sleepDur) + defer t.Stop() + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.C: + } + } + + return nil +} diff --git a/loadtest/placebo/run_test.go b/loadtest/placebo/run_test.go new file mode 100644 index 0000000000000..dcb47122d96ad --- /dev/null +++ b/loadtest/placebo/run_test.go @@ -0,0 +1,64 @@ +package placebo_test + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/coder/coder/loadtest/placebo" +) + +func Test_Runner(t *testing.T) { + t.Parallel() + + t.Run("NoSleep", func(t *testing.T) { + t.Parallel() + + r := placebo.NewRunner(placebo.Config{}) + start := time.Now() + logs := bytes.NewBuffer(nil) + err := r.Run(context.Background(), "", logs) + require.NoError(t, err) + + require.WithinDuration(t, time.Now(), start, 100*time.Millisecond) + require.Empty(t, logs.String()) + }) + + t.Run("Sleep", func(t *testing.T) { + t.Parallel() + + r := placebo.NewRunner(placebo.Config{ + Sleep: 100 * time.Millisecond, + }) + + start := time.Now() + logs := bytes.NewBuffer(nil) + err := r.Run(context.Background(), "", logs) + require.NoError(t, err) + + require.WithinRange(t, time.Now(), start.Add(90*time.Millisecond), start.Add(200*time.Millisecond)) + require.Contains(t, logs.String(), "sleeping for 100ms") + }) + + t.Run("Jitter", func(t *testing.T) { + t.Parallel() + + r := placebo.NewRunner(placebo.Config{ + Sleep: 100 * time.Millisecond, + Jitter: 100 * time.Millisecond, + }) + + start := time.Now() + logs := bytes.NewBuffer(nil) + err := r.Run(context.Background(), "", logs) + require.NoError(t, err) + + require.WithinRange(t, time.Now(), start.Add(90*time.Millisecond), start.Add(300*time.Millisecond)) + logsStr := logs.String() + require.Contains(t, logsStr, "sleeping for") + require.NotContains(t, logsStr, "sleeping for 100ms") + }) +} diff --git a/loadtest/workspacebuild/config.go b/loadtest/workspacebuild/config.go new file mode 100644 index 0000000000000..37d3f6a85eb6c --- /dev/null +++ b/loadtest/workspacebuild/config.go @@ -0,0 +1,39 @@ +package workspacebuild + +import ( + "github.com/google/uuid" + "golang.org/x/xerrors" + + "github.com/coder/coder/codersdk" +) + +type Config struct { + // OrganizationID is the ID of the organization to create the workspace in. + OrganizationID uuid.UUID `json:"organization_id"` + // UserID is the ID of the user to run the test as. + UserID string `json:"user_id"` + // Request is the request to send to the Coder API to create the workspace. 
+ // request.template_id must be set. A name will be generated if not + // specified. + Request codersdk.CreateWorkspaceRequest `json:"request"` +} + +func (c Config) Validate() error { + if c.OrganizationID == uuid.Nil { + return xerrors.New("organization_id must be set") + } + if c.UserID == "" { + return xerrors.New("user_id must be set") + } + if c.UserID != codersdk.Me { + _, err := uuid.Parse(c.UserID) + if err != nil { + return xerrors.Errorf("user_id must be %q or a valid UUID: %w", codersdk.Me, err) + } + } + if c.Request.TemplateID == uuid.Nil { + return xerrors.New("request.template_id must be set") + } + + return nil +} diff --git a/loadtest/workspacebuild/config_test.go b/loadtest/workspacebuild/config_test.go new file mode 100644 index 0000000000000..9c98d498289d8 --- /dev/null +++ b/loadtest/workspacebuild/config_test.go @@ -0,0 +1,94 @@ +package workspacebuild_test + +import ( + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/require" + + "github.com/coder/coder/codersdk" + "github.com/coder/coder/loadtest/workspacebuild" +) + +func Test_Config(t *testing.T) { + t.Parallel() + + id := uuid.Must(uuid.NewRandom()) + + cases := []struct { + name string + config workspacebuild.Config + errContains string + }{ + { + name: "NoOrganizationID", + config: workspacebuild.Config{ + OrganizationID: uuid.Nil, + UserID: id.String(), + Request: codersdk.CreateWorkspaceRequest{ + TemplateID: id, + }, + }, + errContains: "organization_id must be set", + }, + { + name: "NoUserID", + config: workspacebuild.Config{ + OrganizationID: id, + UserID: "", + Request: codersdk.CreateWorkspaceRequest{ + TemplateID: id, + }, + }, + errContains: "user_id must be set", + }, + { + name: "UserIDNotUUID", + config: workspacebuild.Config{ + OrganizationID: id, + UserID: "blah", + Request: codersdk.CreateWorkspaceRequest{ + TemplateID: id, + }, + }, + errContains: "user_id must be \"me\" or a valid UUID", + }, + { + name: "NoTemplateID", + config: workspacebuild.Config{ + OrganizationID: id, + UserID: id.String(), + Request: codersdk.CreateWorkspaceRequest{ + TemplateID: uuid.Nil, + }, + }, + errContains: "request.template_id must be set", + }, + { + name: "UserMe", + config: workspacebuild.Config{ + OrganizationID: id, + UserID: "me", + Request: codersdk.CreateWorkspaceRequest{ + TemplateID: id, + }, + }, + }, + } + + for _, c := range cases { + c := c + + t.Run(c.name, func(t *testing.T) { + t.Parallel() + + err := c.config.Validate() + if c.errContains != "" { + require.Error(t, err) + require.Contains(t, err.Error(), c.errContains) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/loadtest/workspacebuild/run.go b/loadtest/workspacebuild/run.go index de1d0988eccf0..d5ffd37596e1e 100644 --- a/loadtest/workspacebuild/run.go +++ b/loadtest/workspacebuild/run.go @@ -15,33 +15,24 @@ import ( ) type Runner struct { - client *codersdk.Client - orgID uuid.UUID - userID string - req codersdk.CreateWorkspaceRequest - + client *codersdk.Client + cfg Config workspaceID uuid.UUID } var _ harness.Runnable = &Runner{} var _ harness.Cleanable = &Runner{} -// NewRunner creates a new workspace build loadtest Runner. The provided request -// will be used verbatim, but the name will be set to a random value if unset. -// -// The Cleanup method will delete the workspace if it was successfully created. 
-func NewRunner(client *codersdk.Client, orgID uuid.UUID, userID string, req codersdk.CreateWorkspaceRequest) *Runner { +func NewRunner(client *codersdk.Client, cfg Config) *Runner { return &Runner{ client: client, - orgID: orgID, - userID: userID, - req: req, + cfg: cfg, } } // Run implements Runnable. func (r *Runner) Run(ctx context.Context, _ string, logs io.Writer) error { - req := r.req + req := r.cfg.Request if req.Name == "" { randName, err := cryptorand.HexString(8) if err != nil { @@ -51,7 +42,7 @@ func (r *Runner) Run(ctx context.Context, _ string, logs io.Writer) error { } after := time.Now() - workspace, err := r.client.CreateWorkspace(ctx, r.orgID, r.userID, req) + workspace, err := r.client.CreateWorkspace(ctx, r.cfg.OrganizationID, r.cfg.UserID, req) if err != nil { return xerrors.Errorf("create workspace: %w", err) } diff --git a/loadtest/workspacebuild/run_test.go b/loadtest/workspacebuild/run_test.go index 15b822de192ca..438c188fd83e7 100644 --- a/loadtest/workspacebuild/run_test.go +++ b/loadtest/workspacebuild/run_test.go @@ -144,8 +144,12 @@ func Test_Runner(t *testing.T) { coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID) }() - runner := workspacebuild.NewRunner(client, user.OrganizationID, codersdk.Me, codersdk.CreateWorkspaceRequest{ - TemplateID: template.ID, + runner := workspacebuild.NewRunner(client, workspacebuild.Config{ + OrganizationID: user.OrganizationID, + UserID: codersdk.Me, + Request: codersdk.CreateWorkspaceRequest{ + TemplateID: template.ID, + }, }) logs := bytes.NewBuffer(nil) @@ -202,8 +206,12 @@ func Test_Runner(t *testing.T) { template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID) coderdtest.AwaitTemplateVersionJob(t, client, version.ID) - runner := workspacebuild.NewRunner(client, user.OrganizationID, codersdk.Me, codersdk.CreateWorkspaceRequest{ - TemplateID: template.ID, + runner := workspacebuild.NewRunner(client, workspacebuild.Config{ + OrganizationID: user.OrganizationID, + UserID: codersdk.Me, + Request: codersdk.CreateWorkspaceRequest{ + TemplateID: template.ID, + }, }) logs := bytes.NewBuffer(nil) From dde7ad8b1be8dfbf86fdb12bce7408774aaf0b15 Mon Sep 17 00:00:00 2001 From: Dean Sheather Date: Wed, 2 Nov 2022 13:06:10 +0000 Subject: [PATCH 4/7] fixup! feat: add coder loadtest command --- cli/loadtest.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cli/loadtest.go b/cli/loadtest.go index 04b8ba9eef10b..a44ed66c02d97 100644 --- a/cli/loadtest.go +++ b/cli/loadtest.go @@ -26,8 +26,11 @@ func loadtest() *cobra.Command { cmd := &cobra.Command{ Use: "loadtest --config ", Short: "Load test the Coder API", - Long: "", - Args: cobra.ExactArgs(0), + // TODO: documentation and a JSON schema file + Long: "Perform load tests against the Coder server. The load tests are " + "configurable via a JSON file.", + Hidden: true, + Args: cobra.ExactArgs(0), RunE: func(cmd *cobra.Command, args []string) error { if configPath == "" { return xerrors.New("config is required") From df7866cfe0a4ab362f23efc1464ef3295a3aa006 Mon Sep 17 00:00:00 2001 From: Dean Sheather Date: Wed, 2 Nov 2022 13:54:20 +0000 Subject: [PATCH 5/7] fixup!
feat: add coder loadtest command --- cli/loadtest.go | 2 +- cli/loadtest_test.go | 7 +- cli/loadtestconfig.go | 7 +- coderd/httpapi/json.go | 45 +++++++++ coderd/httpapi/json_test.go | 168 ++++++++++++++++++++++++++++++++ loadtest/placebo/config.go | 14 ++- loadtest/placebo/config_test.go | 57 ++++++++--- loadtest/placebo/run.go | 17 +++- loadtest/placebo/run_test.go | 37 ++++++- 9 files changed, 327 insertions(+), 27 deletions(-) create mode 100644 coderd/httpapi/json.go create mode 100644 coderd/httpapi/json_test.go diff --git a/cli/loadtest.go b/cli/loadtest.go index a44ed66c02d97..a0195da7878ce 100644 --- a/cli/loadtest.go +++ b/cli/loadtest.go @@ -116,7 +116,7 @@ func loadtest() *cobra.Command { testCtx := cmd.Context() if config.Timeout > 0 { var cancel func() - testCtx, cancel = context.WithTimeout(testCtx, config.Timeout) + testCtx, cancel = context.WithTimeout(testCtx, time.Duration(config.Timeout)) defer cancel() } diff --git a/cli/loadtest_test.go b/cli/loadtest_test.go index bfc8adaade9c3..44a0cd6b69309 100644 --- a/cli/loadtest_test.go +++ b/cli/loadtest_test.go @@ -15,6 +15,7 @@ import ( "github.com/coder/coder/cli" "github.com/coder/coder/cli/clitest" "github.com/coder/coder/coderd/coderdtest" + "github.com/coder/coder/coderd/httpapi" "github.com/coder/coder/codersdk" "github.com/coder/coder/loadtest/placebo" "github.com/coder/coder/loadtest/workspacebuild" @@ -40,11 +41,11 @@ func TestLoadTest(t *testing.T) { Type: cli.LoadTestTypePlacebo, Count: 10, Placebo: &placebo.Config{ - Sleep: 10 * time.Millisecond, + Sleep: httpapi.Duration(10 * time.Millisecond), }, }, }, - Timeout: 1 * time.Second, + Timeout: httpapi.Duration(testutil.WaitShort), } configBytes, err := json.Marshal(config) @@ -99,7 +100,7 @@ func TestLoadTest(t *testing.T) { }, }, }, - Timeout: 10 * time.Second, + Timeout: httpapi.Duration(testutil.WaitLong), } d := t.TempDir() diff --git a/cli/loadtestconfig.go b/cli/loadtestconfig.go index 57d70e4f81969..0a1f865d8701a 100644 --- a/cli/loadtestconfig.go +++ b/cli/loadtestconfig.go @@ -5,6 +5,7 @@ import ( "golang.org/x/xerrors" + "github.com/coder/coder/coderd/httpapi" "github.com/coder/coder/codersdk" "github.com/coder/coder/loadtest/harness" "github.com/coder/coder/loadtest/placebo" @@ -17,7 +18,7 @@ type LoadTestConfig struct { Tests []LoadTest `json:"tests"` // Timeout sets a timeout for the entire test run, to control the timeout // for each individual run use strategy.timeout. - Timeout time.Duration `json:"timeout"` + Timeout httpapi.Duration `json:"timeout"` } type LoadTestStrategyType string @@ -44,7 +45,7 @@ type LoadTestStrategy struct { // Timeout is the maximum amount of time to run each test for. This is // independent of the timeout specified in the test run. A timeout of 0 // disables the timeout. - Timeout time.Duration `json:"timeout"` + Timeout httpapi.Duration `json:"timeout"` } func (s LoadTestStrategy) ExecutionStrategy() harness.ExecutionStrategy { @@ -69,7 +70,7 @@ func (s LoadTestStrategy) ExecutionStrategy() harness.ExecutionStrategy { if s.Timeout > 0 { strategy = harness.TimeoutExecutionStrategyWrapper{ - Timeout: s.Timeout, + Timeout: time.Duration(s.Timeout), Inner: strategy, } } diff --git a/coderd/httpapi/json.go b/coderd/httpapi/json.go new file mode 100644 index 0000000000000..bffb5c987c8a7 --- /dev/null +++ b/coderd/httpapi/json.go @@ -0,0 +1,45 @@ +package httpapi + +import ( + "encoding/json" + "time" + + "golang.org/x/xerrors" +) + +// Duration wraps time.Duration and provides better JSON marshaling and +// unmarshaling. 
+type Duration time.Duration + +var _ json.Marshaler = Duration(0) +var _ json.Unmarshaler = (*Duration)(nil) + +// MarshalJSON implements json.Marshaler. +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Duration(d).String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (d *Duration) UnmarshalJSON(b []byte) error { + var v interface{} + err := json.Unmarshal(b, &v) + if err != nil { + return xerrors.Errorf("unmarshal JSON value: %w", err) + } + + switch value := v.(type) { + case float64: + *d = Duration(time.Duration(value)) + return nil + case string: + tmp, err := time.ParseDuration(value) + if err != nil { + return xerrors.Errorf("parse duration %q: %w", value, err) + } + + *d = Duration(tmp) + return nil + } + + return xerrors.New("invalid duration") +} diff --git a/coderd/httpapi/json_test.go b/coderd/httpapi/json_test.go new file mode 100644 index 0000000000000..62e5c546a0b4b --- /dev/null +++ b/coderd/httpapi/json_test.go @@ -0,0 +1,168 @@ +package httpapi_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/coder/coder/coderd/httpapi" +) + +func TestDuration(t *testing.T) { + t.Parallel() + + t.Run("MarshalJSON", func(t *testing.T) { + t.Parallel() + + cases := []struct { + value time.Duration + expected string + }{ + { + value: 0, + expected: "0s", + }, + { + value: 1 * time.Millisecond, + expected: "1ms", + }, + { + value: 1 * time.Second, + expected: "1s", + }, + { + value: 1 * time.Minute, + expected: "1m0s", + }, + { + value: 1 * time.Hour, + expected: "1h0m0s", + }, + { + value: 1*time.Hour + 1*time.Minute + 1*time.Second + 1*time.Millisecond, + expected: "1h1m1.001s", + }, + } + + for _, c := range cases { + c := c + + t.Run(c.expected, func(t *testing.T) { + t.Parallel() + + d := httpapi.Duration(c.value) + b, err := d.MarshalJSON() + require.NoError(t, err) + require.Equal(t, `"`+c.expected+`"`, string(b)) + }) + } + }) + + t.Run("UnmarshalJSON", func(t *testing.T) { + t.Parallel() + + cases := []struct { + value string + expected time.Duration + }{ + { + value: "0ms", + expected: 0, + }, + { + value: "0s", + expected: 0, + }, + { + value: "1ms", + expected: 1 * time.Millisecond, + }, + { + value: "1s", + expected: 1 * time.Second, + }, + { + value: "1m", + expected: 1 * time.Minute, + }, + { + value: "1m0s", + expected: 1 * time.Minute, + }, + { + value: "1h", + expected: 1 * time.Hour, + }, + { + value: "1h0m0s", + expected: 1 * time.Hour, + }, + { + value: "1h1m1.001s", + expected: 1*time.Hour + 1*time.Minute + 1*time.Second + 1*time.Millisecond, + }, + { + value: "1h1m1s1ms", + expected: 1*time.Hour + 1*time.Minute + 1*time.Second + 1*time.Millisecond, + }, + } + + for _, c := range cases { + c := c + + t.Run(c.value, func(t *testing.T) { + t.Parallel() + + var d httpapi.Duration + err := d.UnmarshalJSON([]byte(`"` + c.value + `"`)) + require.NoError(t, err) + require.Equal(t, c.expected, time.Duration(d)) + }) + } + }) + + t.Run("UnmarshalJSONInt", func(t *testing.T) { + t.Parallel() + + var d httpapi.Duration + err := d.UnmarshalJSON([]byte("12345")) + require.NoError(t, err) + require.EqualValues(t, 12345, d) + }) + + t.Run("UnmarshalJSONErrors", func(t *testing.T) { + t.Parallel() + + cases := []struct { + value string + errContains string + }{ + { + value: "not valid json (no double quotes)", + errContains: "unmarshal JSON value", + }, + { + value: `"not valid duration"`, + errContains: "parse duration", + }, + { + value: "{}", + errContains: "invalid duration", + }, + } + + for 
_, c := range cases { + c := c + + t.Run(c.value, func(t *testing.T) { + t.Parallel() + + var d httpapi.Duration + err := d.UnmarshalJSON([]byte(c.value)) + require.Error(t, err) + require.Contains(t, err.Error(), c.errContains) + }) + } + }) +} diff --git a/loadtest/placebo/config.go b/loadtest/placebo/config.go index 7db91194c2a8d..501afb3961572 100644 --- a/loadtest/placebo/config.go +++ b/loadtest/placebo/config.go @@ -1,19 +1,22 @@ package placebo import ( - "time" - "golang.org/x/xerrors" + + "github.com/coder/coder/coderd/httpapi" ) type Config struct { // Sleep is how long to sleep for. If unspecified, the test run will finish // instantly. - Sleep time.Duration `json:"sleep"` + Sleep httpapi.Duration `json:"sleep"` // Jitter is the maximum amount of jitter to add to the sleep duration. The // sleep value will be increased by a random value between 0 and jitter if // jitter is greater than 0. - Jitter time.Duration `json:"jitter"` + Jitter httpapi.Duration `json:"jitter"` + // FailureChance is the chance that the test will fail. The value must be + // between 0 and 1. + FailureChance float64 `json:"failure_chance"` } func (c Config) Validate() error { @@ -26,6 +29,9 @@ func (c Config) Validate() error { if c.Jitter > 0 && c.Sleep == 0 { return xerrors.New("jitter must be 0 if sleep is 0") } + if c.FailureChance < 0 || c.FailureChance > 1 { + return xerrors.New("failure_chance must be between 0 and 1") + } return nil } diff --git a/loadtest/placebo/config_test.go b/loadtest/placebo/config_test.go index 3dc7e265c2115..927a880f11d0f 100644 --- a/loadtest/placebo/config_test.go +++ b/loadtest/placebo/config_test.go @@ -6,6 +6,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/coder/coder/coderd/httpapi" "github.com/coder/coder/loadtest/placebo" ) @@ -20,48 +21,80 @@ func Test_Config(t *testing.T) { { name: "Empty", config: placebo.Config{ - Sleep: 0, - Jitter: 0, + Sleep: 0, + Jitter: 0, + FailureChance: 0, }, }, { name: "Sleep", config: placebo.Config{ - Sleep: 1 * time.Second, - Jitter: 0, + Sleep: httpapi.Duration(1 * time.Second), + Jitter: 0, + FailureChance: 0, }, }, { name: "SleepAndJitter", config: placebo.Config{ - Sleep: 1 * time.Second, - Jitter: 1 * time.Second, + Sleep: httpapi.Duration(1 * time.Second), + Jitter: httpapi.Duration(1 * time.Second), + FailureChance: 0, + }, + }, + { + name: "FailureChance", + config: placebo.Config{ + Sleep: 0, + Jitter: 0, + FailureChance: 0.5, }, }, { name: "NegativeSleep", config: placebo.Config{ - Sleep: -1 * time.Second, - Jitter: 0, + Sleep: httpapi.Duration(-1 * time.Second), + Jitter: 0, + FailureChance: 0, }, errContains: "sleep must be set to a positive value", }, { name: "NegativeJitter", config: placebo.Config{ - Sleep: 0, - Jitter: -1 * time.Second, + Sleep: 0, + Jitter: httpapi.Duration(-1 * time.Second), + FailureChance: 0, }, errContains: "jitter must be set to a positive value", }, { name: "JitterWithoutSleep", config: placebo.Config{ - Sleep: 0, - Jitter: 1 * time.Second, + Sleep: 0, + Jitter: httpapi.Duration(1 * time.Second), + FailureChance: 0, }, errContains: "jitter must be 0 if sleep is 0", }, + { + name: "NegativeFailureChance", + config: placebo.Config{ + Sleep: 0, + Jitter: 0, + FailureChance: -0.1, + }, + errContains: "failure_chance must be between 0 and 1", + }, + { + name: "FailureChanceTooLarge", + config: placebo.Config{ + Sleep: 0, + Jitter: 0, + FailureChance: 1.1, + }, + errContains: "failure_chance must be between 0 and 1", + }, } for _, c := range cases { diff --git 
a/loadtest/placebo/run.go b/loadtest/placebo/run.go index 34ec8d4cbc9ff..5e6c15eb8ec00 100644 --- a/loadtest/placebo/run.go +++ b/loadtest/placebo/run.go @@ -7,6 +7,8 @@ import ( "math/rand" "time" + "golang.org/x/xerrors" + "github.com/coder/coder/loadtest/harness" ) @@ -27,7 +29,7 @@ func NewRunner(cfg Config) *Runner { // Run implements Runnable. func (r *Runner) Run(ctx context.Context, _ string, logs io.Writer) error { - sleepDur := r.cfg.Sleep + sleepDur := time.Duration(r.cfg.Sleep) if r.cfg.Jitter > 0 { //nolint:gosec // not used for crypto sleepDur += time.Duration(rand.Int63n(int64(r.cfg.Jitter))) @@ -47,5 +49,18 @@ func (r *Runner) Run(ctx context.Context, _ string, logs io.Writer) error { } } + if r.cfg.FailureChance > 0 { + _, _ = fmt.Fprintf(logs, "failure chance is %f\n", r.cfg.FailureChance) + _, _ = fmt.Fprintln(logs, "rolling the dice of fate...") + //nolint:gosec // not used for crypto + roll := rand.Float64() + _, _ = fmt.Fprintf(logs, "rolled: %f\n", roll) + + if roll < r.cfg.FailureChance { + _, _ = fmt.Fprintln(logs, ":(") + return xerrors.New("test failed due to configured failure chance") + } + } + return nil } diff --git a/loadtest/placebo/run_test.go b/loadtest/placebo/run_test.go index dcb47122d96ad..2ec3e00abde62 100644 --- a/loadtest/placebo/run_test.go +++ b/loadtest/placebo/run_test.go @@ -3,11 +3,13 @@ package placebo_test import ( "bytes" "context" + "io" "testing" "time" "github.com/stretchr/testify/require" + "github.com/coder/coder/coderd/httpapi" "github.com/coder/coder/loadtest/placebo" ) @@ -31,7 +33,7 @@ func Test_Runner(t *testing.T) { t.Parallel() r := placebo.NewRunner(placebo.Config{ - Sleep: 100 * time.Millisecond, + Sleep: httpapi.Duration(100 * time.Millisecond), }) start := time.Now() @@ -47,8 +49,8 @@ func Test_Runner(t *testing.T) { t.Parallel() r := placebo.NewRunner(placebo.Config{ - Sleep: 100 * time.Millisecond, - Jitter: 100 * time.Millisecond, + Sleep: httpapi.Duration(100 * time.Millisecond), + Jitter: httpapi.Duration(100 * time.Millisecond), }) start := time.Now() @@ -61,4 +63,33 @@ func Test_Runner(t *testing.T) { require.Contains(t, logsStr, "sleeping for") require.NotContains(t, logsStr, "sleeping for 100ms") }) + + t.Run("Timeout", func(t *testing.T) { + t.Parallel() + + r := placebo.NewRunner(placebo.Config{ + Sleep: httpapi.Duration(100 * time.Millisecond), + }) + + //nolint:gocritic // we're testing timeouts here so we want specific values + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + + err := r.Run(ctx, "", io.Discard) + require.Error(t, err) + require.ErrorIs(t, err, context.DeadlineExceeded) + }) + + t.Run("FailureChance", func(t *testing.T) { + t.Parallel() + + r := placebo.NewRunner(placebo.Config{ + FailureChance: 1, + }) + + logs := bytes.NewBuffer(nil) + err := r.Run(context.Background(), "", logs) + require.Error(t, err) + require.Contains(t, logs.String(), ":(") + }) } From 124f252541c0d554e14384bac3c5bb85e19aac5f Mon Sep 17 00:00:00 2001 From: Dean Sheather Date: Wed, 2 Nov 2022 16:42:10 +0000 Subject: [PATCH 6/7] fix: fix races in loadtest tests --- loadtest/harness/harness.go | 16 ++++++++-------- loadtest/harness/harness_test.go | 14 ++++++++------ loadtest/harness/strategies.go | 3 ++- loadtest/harness/strategies_test.go | 21 +++++++++------------ 4 files changed, 27 insertions(+), 27 deletions(-) diff --git a/loadtest/harness/harness.go b/loadtest/harness/harness.go index 8c9fc62936659..435a7406b8e61 100644 --- a/loadtest/harness/harness.go +++ 
b/loadtest/harness/harness.go @@ -20,7 +20,7 @@ type ExecutionStrategy interface { // TestHarness runs a bunch of registered test runs using the given // ExecutionStrategy. type TestHarness struct { - execStrat ExecutionStrategy + strategy ExecutionStrategy mut *sync.Mutex runIDs map[string]struct{} @@ -30,13 +30,13 @@ type TestHarness struct { } // NewTestHarness creates a new TestHarness with the given ExecutionStrategy. -func NewTestHarness(execStrat ExecutionStrategy) *TestHarness { +func NewTestHarness(strategy ExecutionStrategy) *TestHarness { return &TestHarness{ - execStrat: execStrat, - mut: new(sync.Mutex), - runIDs: map[string]struct{}{}, - runs: []*TestRun{}, - done: make(chan struct{}), + strategy: strategy, + mut: new(sync.Mutex), + runIDs: map[string]struct{}{}, + runs: []*TestRun{}, + done: make(chan struct{}), } } @@ -63,7 +63,7 @@ func (h *TestHarness) Run(ctx context.Context) (err error) { } }() - err = h.execStrat.Execute(ctx, h.runs) + err = h.strategy.Execute(ctx, h.runs) //nolint:revive // we use named returns because we mutate it in a defer return } diff --git a/loadtest/harness/harness_test.go b/loadtest/harness/harness_test.go index 8be66f7bf2d87..4d0e8b3158a2d 100644 --- a/loadtest/harness/harness_test.go +++ b/loadtest/harness/harness_test.go @@ -4,7 +4,6 @@ import ( "context" "io" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -180,12 +179,14 @@ func Test_TestHarness(t *testing.T) { t.Parallel() var ( + started = make(chan struct{}) endRun = make(chan struct{}) testsEnded = make(chan struct{}) ) h := harness.NewTestHarness(harness.LinearExecutionStrategy{}) _ = h.AddRun("test", "1", testFns{ RunFn: func(_ context.Context, _ string, _ io.Writer) error { + close(started) <-endRun return nil }, @@ -196,12 +197,12 @@ func Test_TestHarness(t *testing.T) { assert.NoError(t, err) }() - time.Sleep(100 * time.Millisecond) + <-started require.Panics(t, func() { h.Results() }) - close(endRun) + <-testsEnded _ = h.Results() }) @@ -220,12 +221,14 @@ func Test_TestHarness(t *testing.T) { t.Parallel() var ( + started = make(chan struct{}) endRun = make(chan struct{}) testsEnded = make(chan struct{}) ) h := harness.NewTestHarness(harness.LinearExecutionStrategy{}) _ = h.AddRun("test", "1", testFns{ RunFn: func(_ context.Context, _ string, _ io.Writer) error { + close(started) <-endRun return nil }, @@ -236,14 +239,13 @@ func Test_TestHarness(t *testing.T) { assert.NoError(t, err) }() - time.Sleep(100 * time.Millisecond) + <-started require.Panics(t, func() { h.Cleanup(context.Background()) }) - close(endRun) - <-testsEnded + <-testsEnded err := h.Cleanup(context.Background()) require.NoError(t, err) }) diff --git a/loadtest/harness/strategies.go b/loadtest/harness/strategies.go index a831f86da4623..7bc52a3b25ee9 100644 --- a/loadtest/harness/strategies.go +++ b/loadtest/harness/strategies.go @@ -60,15 +60,16 @@ var _ ExecutionStrategy = ParallelExecutionStrategy{} func (p ParallelExecutionStrategy) Execute(ctx context.Context, runs []*TestRun) error { var wg sync.WaitGroup sem := make(chan struct{}, p.Limit) + defer close(sem) for _, run := range runs { run := run wg.Add(1) go func() { - defer wg.Done() defer func() { <-sem + wg.Done() }() sem <- struct{}{} _ = run.Run(ctx) diff --git a/loadtest/harness/strategies_test.go b/loadtest/harness/strategies_test.go index 88968313e92e8..7df7ce5cf00f5 100644 --- a/loadtest/harness/strategies_test.go +++ b/loadtest/harness/strategies_test.go @@ -16,9 +16,8 @@ import ( 
"github.com/coder/coder/loadtest/harness" ) +//nolint:paralleltest // this tests uses timings to determine if it's working func Test_LinearExecutionStrategy(t *testing.T) { - t.Parallel() - var ( lastSeenI int64 = -1 count int64 @@ -27,6 +26,7 @@ func Test_LinearExecutionStrategy(t *testing.T) { atomic.AddInt64(&count, 1) swapped := atomic.CompareAndSwapInt64(&lastSeenI, int64(i-1), int64(i)) assert.True(t, swapped) + time.Sleep(2 * time.Millisecond) return nil }) strategy := harness.LinearExecutionStrategy{} @@ -42,9 +42,8 @@ func Test_LinearExecutionStrategy(t *testing.T) { } } +//nolint:paralleltest // this tests uses timings to determine if it's working func Test_ConcurrentExecutionStrategy(t *testing.T) { - t.Parallel() - runs := strategyTestData(10, func(_ context.Context, i int, _ io.Writer) error { time.Sleep(1 * time.Second) return nil @@ -67,9 +66,8 @@ func Test_ConcurrentExecutionStrategy(t *testing.T) { } } +//nolint:paralleltest // this tests uses timings to determine if it's working func Test_ParallelExecutionStrategy(t *testing.T) { - t.Parallel() - runs := strategyTestData(10, func(_ context.Context, _ int, _ io.Writer) error { time.Sleep(1 * time.Second) return nil @@ -79,6 +77,7 @@ func Test_ParallelExecutionStrategy(t *testing.T) { } startTime := time.Now() + time.Sleep(time.Millisecond) err := strategy.Execute(context.Background(), runs) require.NoError(t, err) @@ -111,10 +110,9 @@ func Test_ParallelExecutionStrategy(t *testing.T) { require.Equal(t, 5, withinRange) } +//nolint:paralleltest // this tests uses timings to determine if it's working func Test_TimeoutExecutionStrategy(t *testing.T) { - t.Parallel() - - runs := strategyTestData(10, func(ctx context.Context, _ int, _ io.Writer) error { + runs := strategyTestData(1, func(ctx context.Context, _ int, _ io.Writer) error { ticker := time.NewTicker(500 * time.Millisecond) defer ticker.Stop() @@ -127,7 +125,7 @@ func Test_TimeoutExecutionStrategy(t *testing.T) { }) strategy := harness.TimeoutExecutionStrategyWrapper{ Timeout: 100 * time.Millisecond, - Inner: harness.ConcurrentExecutionStrategy{}, + Inner: harness.LinearExecutionStrategy{}, } err := strategy.Execute(context.Background(), runs) @@ -138,9 +136,8 @@ func Test_TimeoutExecutionStrategy(t *testing.T) { } } +//nolint:paralleltest // this tests uses timings to determine if it's working func Test_ShuffleExecutionStrategyWrapper(t *testing.T) { - t.Parallel() - runs := strategyTestData(100000, func(_ context.Context, i int, _ io.Writer) error { // t.Logf("run %d", i) return nil From e5d2f97bfb2119962d14a60dcf73eb18470497f0 Mon Sep 17 00:00:00 2001 From: Dean Sheather Date: Wed, 2 Nov 2022 18:21:02 +0000 Subject: [PATCH 7/7] chore: pr comments --- cli/loadtest.go | 16 ++++++---------- coderd/httpapi/json.go | 7 ++++++- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/cli/loadtest.go b/cli/loadtest.go index a0195da7878ce..ac811366a1152 100644 --- a/cli/loadtest.go +++ b/cli/loadtest.go @@ -37,25 +37,21 @@ func loadtest() *cobra.Command { } var ( - configReader io.Reader - configCloser io.Closer + configReader io.ReadCloser ) if configPath == "-" { - configReader = cmd.InOrStdin() + configReader = io.NopCloser(cmd.InOrStdin()) } else { f, err := os.Open(configPath) if err != nil { return xerrors.Errorf("open config file %q: %w", configPath, err) } configReader = f - configCloser = f } var config LoadTestConfig err := json.NewDecoder(configReader).Decode(&config) - if configCloser != nil { - _ = configCloser.Close() - } + _ = configReader.Close() if 
err != nil { return xerrors.Errorf("read config file %q: %w", configPath, err) } @@ -87,7 +83,7 @@ func loadtest() *cobra.Command { } } if !ok { - return xerrors.Errorf("Not logged in as site owner. Load testing is only available to the site owner.") + return xerrors.Errorf("Not logged in as site owner. Load testing is only available to site owners.") } // Disable ratelimits for future requests. @@ -100,8 +96,8 @@ func loadtest() *cobra.Command { for i, t := range config.Tests { name := fmt.Sprintf("%s-%d", t.Type, i) - for i := 0; i < t.Count; i++ { - id := strconv.Itoa(i) + for j := 0; j < t.Count; j++ { + id := strconv.Itoa(j) runner, err := t.NewRunner(client) if err != nil { return xerrors.Errorf("create %q runner for %s/%s: %w", t.Type, name, id, err) diff --git a/coderd/httpapi/json.go b/coderd/httpapi/json.go index bffb5c987c8a7..77a43634ab20b 100644 --- a/coderd/httpapi/json.go +++ b/coderd/httpapi/json.go @@ -8,7 +8,12 @@ import ( ) // Duration wraps time.Duration and provides better JSON marshaling and -// unmarshaling. +// unmarshaling. The default time.Duration marshals as an integer and only +// accepts integers when unmarshaling, which is not very user friendly as users +// cannot write durations like "1h30m". +// +// This type marshals as a string like "1h30m", and unmarshals from either a +// string or an integer. type Duration time.Duration var _ json.Marshaler = Duration(0)
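
To make the Duration round-trip behavior implemented above concrete, here is a minimal, self-contained sketch. It is illustrative only: the throwaway main program is invented for the example, but it exercises nothing beyond the MarshalJSON/UnmarshalJSON implementations shown in coderd/httpapi/json.go.

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/coder/coder/coderd/httpapi"
)

func main() {
	// Marshals as a human-readable string rather than integer nanoseconds.
	b, _ := json.Marshal(httpapi.Duration(90 * time.Minute))
	fmt.Println(string(b)) // "1h30m0s"

	// Unmarshals from a duration string...
	var d httpapi.Duration
	_ = json.Unmarshal([]byte(`"1h30m"`), &d)
	fmt.Println(time.Duration(d)) // 1h30m0s

	// ...or from a JSON number, interpreted as nanoseconds.
	_ = json.Unmarshal([]byte(`5000000000`), &d)
	fmt.Println(time.Duration(d)) // 5s
}

This is what lets fields such as timeout, sleep, and jitter be written as "1h30m"-style strings in the loadtest JSON config while remaining backwards compatible with plain integers.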
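For orientation, the following sketch shows how the pieces in this series fit together: a TestHarness driven by an ExecutionStrategy, with placebo runners registered against it. The main program is invented for illustration and is not part of the patches; it assumes only the AddRun, Run, Results, and Cleanup APIs exercised by the harness tests above, and error handling is pared down.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coder/coder/coderd/httpapi"
	"github.com/coder/coder/loadtest/harness"
	"github.com/coder/coder/loadtest/placebo"
)

func main() {
	// LinearExecutionStrategy executes runs one at a time; wrappers like
	// TimeoutExecutionStrategyWrapper or ShuffleExecutionStrategyWrapper
	// could be layered on top of it instead.
	h := harness.NewTestHarness(harness.LinearExecutionStrategy{})

	// Register three placebo runs; each sleeps ~10ms plus up to 5ms jitter.
	for i := 0; i < 3; i++ {
		h.AddRun("placebo", fmt.Sprint(i), placebo.NewRunner(placebo.Config{
			Sleep:  httpapi.Duration(10 * time.Millisecond),
			Jitter: httpapi.Duration(5 * time.Millisecond),
		}))
	}

	// Run blocks until all runs finish. The returned error reflects a
	// failure of the execution strategy itself, not individual run errors.
	if err := h.Run(context.Background()); err != nil {
		panic(err)
	}

	res := h.Results()
	fmt.Printf("%d/%d runs passed\n", res.TotalPass, res.TotalRuns)

	// Cleanup may only be called once the run has fully finished.
	_ = h.Cleanup(context.Background())
}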