
Commit 2506415

johnstcn and mafredri authored
chore(scaletest/templates/scaletest-runner): fix dashboard command invocation, autoscale provisioners (#10177)
- add --retries on kubectl cp
- remove --count parameter from the scaletest dashboard command
- scale provisioners up and down

Co-authored-by: Mathias Fredriksson <mafredri@gmail.com>
1 parent: db8592f

4 files changed (+20, -1 lines)

scaletest/templates/scaletest-runner/scripts/cleanup.sh (+6)
@@ -28,6 +28,12 @@ coder exp scaletest cleanup \
 	tee "${SCALETEST_RESULTS_DIR}/cleanup-${event}.txt"
 end_phase
 
+if [[ $event != prepare ]]; then
+	start_phase "Scaling down provisioners..."
+	maybedryrun "$DRY_RUN" kubectl scale deployment/coder-provisioner --replicas 1
+	maybedryrun "$DRY_RUN" kubectl rollout status deployment/coder-provisioner
+fi
+
 if [[ $event = manual ]]; then
 	echo 'Press any key to continue...'
 	read -s -r -n 1
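Note: maybedryrun and start_phase/end_phase are helpers defined in lib.sh, not part of this diff. As a rough sketch of the behaviour these scripts assume (the real implementation may differ), maybedryrun runs the given command unless the dry-run flag is set, in which case it only logs it:

# Sketch only; not the actual lib.sh implementation.
maybedryrun() {
	local dry_run="$1"
	shift
	if [[ "${dry_run}" == 1 ]]; then
		echo "DRYRUN: $*" >&2
	else
		"$@"
	fi
}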

scaletest/templates/scaletest-runner/scripts/lib.sh (+2)
@@ -271,10 +271,12 @@ fetch_coder_full() {
 		exit 1
 	fi
 	log "Fetching full Coder binary from ${pod}"
+	# We need --retries due to https://github.com/kubernetes/kubernetes/issues/60140 :(
 	maybedryrun "${DRY_RUN}" kubectl \
 		--namespace "${namespace}" \
 		cp \
 		--container coder \
+		--retries 10 \
 		"${pod}:/opt/coder" "${SCALETEST_CODER_BINARY}"
 	maybedryrun "${DRY_RUN}" chmod +x "${SCALETEST_CODER_BINARY}"
 	log "Full Coder binary downloaded to ${SCALETEST_CODER_BINARY}"

scaletest/templates/scaletest-runner/scripts/prepare.sh (+12)
@@ -51,3 +51,15 @@ log "Cleaning up from previous runs (if applicable)..."
 "${SCRIPTS_DIR}/cleanup.sh" "prepare"
 
 log "Preparation complete!"
+
+PROVISIONER_REPLICA_COUNT="${SCALETEST_PARAM_CREATE_CONCURRENCY:-0}"
+if [[ "${PROVISIONER_REPLICA_COUNT}" -eq 0 ]]; then
+	# TODO(Cian): what is a good default value here?
+	echo "Setting PROVISIONER_REPLICA_COUNT to 10 since SCALETEST_PARAM_CREATE_CONCURRENCY is 0"
+	PROVISIONER_REPLICA_COUNT=10
+fi
+log "Scaling up provisioners to ${PROVISIONER_REPLICA_COUNT}..."
+maybedryrun "$DRY_RUN" kubectl scale deployment/coder-provisioner \
+	--replicas "${PROVISIONER_REPLICA_COUNT}"
+log "Waiting for provisioners to scale up..."
+maybedryrun "$DRY_RUN" kubectl rollout status deployment/coder-provisioner

scaletest/templates/scaletest-runner/scripts/run.sh (-1)
@@ -49,7 +49,6 @@ for scenario in "${SCALETEST_PARAM_LOAD_SCENARIOS[@]}"; do
 		;;
 	"Dashboard Traffic")
 		coder exp scaletest dashboard \
-			--count "${SCALETEST_PARAM_NUM_WORKSPACES}" \
 			--timeout "${SCALETEST_PARAM_LOAD_SCENARIO_DASHBOARD_TRAFFIC_DURATION}m" \
 			--job-timeout "${SCALETEST_PARAM_LOAD_SCENARIO_DASHBOARD_TRAFFIC_DURATION}m30s" \
 			--output json:"${SCALETEST_RESULTS_DIR}/traffic-dashboard.json" \
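With --count removed, the dashboard scenario no longer passes a workspace count. A minimal sketch of the resulting invocation, using placeholder durations and omitting the remaining flags from run.sh, is:

# Placeholder values for illustration only.
coder exp scaletest dashboard \
	--timeout 10m \
	--job-timeout 10m30s \
	--output json:"${SCALETEST_RESULTS_DIR}/traffic-dashboard.json"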
