forked from FairwindsOps/polaris
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: webhook_test.sh
executable file
·148 lines (125 loc) · 4.57 KB
/
webhook_test.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
#!/bin/bash
set -e
# Testing to ensure that the webhook starts up, allows a correct deployment to pass,
# and prevents an incorrectly formatted deployment.

# ANSI color codes for test-result output; readonly since they are constants.
readonly BLUE='\033[0;34m'
readonly GREEN='\033[0;32m'
readonly RED='\033[0;31m'
readonly NC='\033[0m' # No Color
# Print an epoch timestamp 4 minutes in the future, papering over the
# differing date(1) flags between macOS/BSD (-v) and GNU/Linux (-d).
get_timeout() {
  case "$OSTYPE" in
    darwin*)
      date -v+4M +%s
      ;;
    *)
      date -d "+4 minutes" +%s
      ;;
  esac
}
# Block until the polaris webhook pod reports 1/1 Running, polling kubectl.
# Aborts the whole script (via check_timeout) if the deadline passes first.
check_webhook_is_ready() {
  local deadline
  # Allow up to 4 more minutes for the webhook pod to become ready.
  deadline=$(get_timeout)
  echo "Waiting for webhook to be ready"
  # Poll until the pod listing shows a ready webhook; print a dot per miss.
  until kubectl get pods -n polaris | grep -E "webhook.*1/1.*Running"; do
    check_timeout "${deadline}"
    echo -n "."
  done
  check_timeout "${deadline}"
  echo "Webhook started!"
}
# Compare now against the deadline in $1; on expiry, dump diagnostics,
# clean up the cluster, and abort the script with a failure exit.
check_timeout() {
  local deadline="${1}"
  local now
  now=$(date +%s)
  # Guard clause: still within the deadline, nothing to do.
  if [[ "${now}" -lt "${deadline}" ]]; then
    return 0
  fi
  echo -e "Timeout hit waiting for readiness: exiting"
  grab_logs
  clean_up
  exit 1
}
# Remove every namespace, test resource, and webhook configuration the run
# created. Every delete is best-effort (|| true): under set -e a single
# "not found" error would otherwise abort the script — in particular the
# final clean_up call would turn a passing run into a nonzero exit.
clean_up() {
  echo -e "\n\nCleaning up (you may see some errors)...\n\n"
  kubectl delete ns scale-test || true
  kubectl delete ns polaris || true
  kubectl delete ns tests || true
  # Clean up files you've installed (helps with local testing)
  for filename in test/webhook_cases/*.yaml; do
    # || true to avoid issues when we cannot delete
    kubectl delete -f "$filename" || true
  done
  # Uninstall webhook and webhook config; tolerate failure so a partial
  # install (or a prior cleanup) does not kill the script via set -e.
  kubectl delete validatingwebhookconfigurations polaris-webhook --wait=false || true
  kubectl -n polaris delete deploy -l app=polaris --wait=false || true
  echo -e "\n\nDone cleaning up\n\n"
}
# Dump webhook pod specs, events, and container logs for debugging a failed
# run. Each command is best-effort (|| true): this runs on the failure path,
# and under set -e one failing kubectl call (e.g. a container that never
# started) must not suppress the remaining diagnostic output.
grab_logs() {
  kubectl -n polaris get pods -oyaml -l app=polaris || true
  kubectl -n polaris describe pods -l app=polaris || true
  kubectl -n polaris logs -l app=polaris -c webhook-certificate-generator || true
  kubectl -n polaris logs -l app=polaris || true
}
# sed replaces the polaris image tag in the manifest with this commit's sha
# so we are testing exactly this version.
if [ -z "${POLARIS_IMAGE}" ]; then
  POLARIS_IMAGE="quay.io/fairwinds/polaris:$CIRCLE_SHA1"
fi
echo "using image $POLARIS_IMAGE"
sed -E "s|'(quay.io/fairwinds/polaris:).+'|'${POLARIS_IMAGE}'|" ./deploy/webhook.yaml > ./deploy/webhook-test.yaml

clean_up || true
echo -e "Setting up..."
kubectl create ns scale-test
kubectl create ns polaris
kubectl create ns tests
# Install a bad deployment BEFORE the webhook exists; later we verify it can
# still scale, since admission control must not gate pre-existing objects.
kubectl apply -n scale-test -f ./test/webhook_cases/failing_test.deployment.yaml
# Install the webhook
kubectl apply -n polaris -f ./deploy/webhook-test.yaml
# Wait for the webhook to come online
check_webhook_is_ready
sleep 5
# Stream webhook logs in the background for the rest of the run.
kubectl logs -n polaris "$(kubectl get po -oname -n polaris | grep webhook)" --follow &

# Webhook started; assume success until a test case fails.
ALL_TESTS_PASSED=1

# Correctly configured objects must be admitted by the webhook.
for filename in test/webhook_cases/passing_test.*.yaml; do
  echo -e "\n\n"
  echo -e "${BLUE}TEST CASE: $filename${NC}"
  if ! kubectl apply -n tests -f "$filename"; then
    ALL_TESTS_PASSED=0
    echo -e "${RED}****Test Failed: Polaris prevented a resource with no configuration issues****${NC}"
  else
    echo -e "${GREEN}****Test Passed: Polaris correctly allowed this resource****${NC}"
  fi
  kubectl delete -n tests -f "$filename" || true
done

# Incorrectly configured objects must be rejected by the webhook.
for filename in test/webhook_cases/failing_test.*.yaml; do
  echo -e "\n\n"
  echo -e "${BLUE}TEST CASE: $filename${NC}"
  if kubectl apply -n tests -f "$filename"; then
    ALL_TESTS_PASSED=0
    echo -e "${RED}****Test Failed: Polaris should have prevented this resource due to configuration issues.****${NC}"
    kubectl logs -n polaris "$(kubectl get po -oname -n polaris | grep webhook)"
  else
    echo -e "${GREEN}****Test Passed: Polaris correctly prevented this resource****${NC}"
  fi
  kubectl delete -n tests -f "$filename" || true
done

# The pre-existing non-compliant deployment must still be able to scale
# even with the webhook installed.
kubectl -n scale-test scale deployment nginx-deployment --replicas=2
sleep 5
kubectl get po -n scale-test
pod_count=$(kubectl get po -n scale-test -oname | wc -l)
# Numeric -ne (not string !=) tolerates the whitespace padding BSD wc emits.
if [ "$pod_count" -ne 2 ]; then
  ALL_TESTS_PASSED=0
  echo "Existing deployment was unable to scale after webhook installed: found $pod_count pods"
fi

# Skip teardown when requested (useful for inspecting a failed run locally).
if [ -z "$SKIP_FINAL_CLEANUP" ]; then
  clean_up
fi

# Verify that all the tests passed; a nonzero exit marks the CI job failed.
if [ "$ALL_TESTS_PASSED" -eq 1 ]; then
  echo "Tests Passed."
else
  echo "Tests Failed."
  exit 1
fi