Skip to content

Commit 64d816c

Browse files
authored
add short sleep before redistributing pods (zalando#891)
1 parent 66f2cda commit 64d816c

File tree

1 file changed

+93
-86
lines changed

1 file changed

+93
-86
lines changed

e2e/tests/test_e2e.py

Lines changed: 93 additions & 86 deletions
Original file line numberDiff line numberDiff line change
@@ -69,6 +69,92 @@ def setUpClass(cls):
6969
print('Operator log: {}'.format(k8s.get_operator_log()))
7070
raise
7171

72+
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
73+
def test_enable_disable_connection_pool(self):
74+
'''
75+
For a database without connection pool, then turns it on, scale up,
76+
turn off and on again. Test with different ways of doing this (via
77+
enableConnectionPool or connectionPool configuration section). At the
78+
end turn the connection pool off to not interfere with other tests.
79+
'''
80+
k8s = self.k8s
81+
service_labels = {
82+
'cluster-name': 'acid-minimal-cluster',
83+
}
84+
pod_labels = dict({
85+
'connection-pool': 'acid-minimal-cluster-pooler',
86+
})
87+
88+
pod_selector = to_selector(pod_labels)
89+
service_selector = to_selector(service_labels)
90+
91+
try:
92+
# enable connection pool
93+
k8s.api.custom_objects_api.patch_namespaced_custom_object(
94+
'acid.zalan.do', 'v1', 'default',
95+
'postgresqls', 'acid-minimal-cluster',
96+
{
97+
'spec': {
98+
'enableConnectionPool': True,
99+
}
100+
})
101+
k8s.wait_for_pod_start(pod_selector)
102+
103+
pods = k8s.api.core_v1.list_namespaced_pod(
104+
'default', label_selector=pod_selector
105+
).items
106+
107+
self.assertTrue(pods, 'No connection pool pods')
108+
109+
k8s.wait_for_service(service_selector)
110+
services = k8s.api.core_v1.list_namespaced_service(
111+
'default', label_selector=service_selector
112+
).items
113+
services = [
114+
s for s in services
115+
if s.metadata.name.endswith('pooler')
116+
]
117+
118+
self.assertTrue(services, 'No connection pool service')
119+
120+
# scale up connection pool deployment
121+
k8s.api.custom_objects_api.patch_namespaced_custom_object(
122+
'acid.zalan.do', 'v1', 'default',
123+
'postgresqls', 'acid-minimal-cluster',
124+
{
125+
'spec': {
126+
'connectionPool': {
127+
'numberOfInstances': 2,
128+
},
129+
}
130+
})
131+
132+
k8s.wait_for_running_pods(pod_selector, 2)
133+
134+
# turn it off, keeping configuration section
135+
k8s.api.custom_objects_api.patch_namespaced_custom_object(
136+
'acid.zalan.do', 'v1', 'default',
137+
'postgresqls', 'acid-minimal-cluster',
138+
{
139+
'spec': {
140+
'enableConnectionPool': False,
141+
}
142+
})
143+
k8s.wait_for_pods_to_stop(pod_selector)
144+
145+
k8s.api.custom_objects_api.patch_namespaced_custom_object(
146+
'acid.zalan.do', 'v1', 'default',
147+
'postgresqls', 'acid-minimal-cluster',
148+
{
149+
'spec': {
150+
'enableConnectionPool': True,
151+
}
152+
})
153+
k8s.wait_for_pod_start(pod_selector)
154+
except timeout_decorator.TimeoutError:
155+
print('Operator log: {}'.format(k8s.get_operator_log()))
156+
raise
157+
72158
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
73159
def test_enable_load_balancer(self):
74160
'''
@@ -290,6 +376,10 @@ def test_node_readiness_label(self):
290376

291377
# patch also node where master ran before
292378
k8s.api.core_v1.patch_node(current_master_node, patch_readiness_label)
379+
380+
# wait a little before proceeding with the pod distribution test
381+
time.sleep(k8s.RETRY_TIMEOUT_SEC)
382+
293383
# toggle pod anti affinity to move replica away from master node
294384
self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)
295385

@@ -349,92 +439,6 @@ def test_service_annotations(self):
349439
}
350440
k8s.update_config(unpatch_custom_service_annotations)
351441

352-
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
353-
def test_enable_disable_connection_pool(self):
354-
'''
355-
For a database without connection pool, then turns it on, scale up,
356-
turn off and on again. Test with different ways of doing this (via
357-
enableConnectionPool or connectionPool configuration section). At the
358-
end turn the connection pool off to not interfere with other tests.
359-
'''
360-
k8s = self.k8s
361-
service_labels = {
362-
'cluster-name': 'acid-minimal-cluster',
363-
}
364-
pod_labels = dict({
365-
'connection-pool': 'acid-minimal-cluster-pooler',
366-
})
367-
368-
pod_selector = to_selector(pod_labels)
369-
service_selector = to_selector(service_labels)
370-
371-
try:
372-
# enable connection pool
373-
k8s.api.custom_objects_api.patch_namespaced_custom_object(
374-
'acid.zalan.do', 'v1', 'default',
375-
'postgresqls', 'acid-minimal-cluster',
376-
{
377-
'spec': {
378-
'enableConnectionPool': True,
379-
}
380-
})
381-
k8s.wait_for_pod_start(pod_selector)
382-
383-
pods = k8s.api.core_v1.list_namespaced_pod(
384-
'default', label_selector=pod_selector
385-
).items
386-
387-
self.assertTrue(pods, 'No connection pool pods')
388-
389-
k8s.wait_for_service(service_selector)
390-
services = k8s.api.core_v1.list_namespaced_service(
391-
'default', label_selector=service_selector
392-
).items
393-
services = [
394-
s for s in services
395-
if s.metadata.name.endswith('pooler')
396-
]
397-
398-
self.assertTrue(services, 'No connection pool service')
399-
400-
# scale up connection pool deployment
401-
k8s.api.custom_objects_api.patch_namespaced_custom_object(
402-
'acid.zalan.do', 'v1', 'default',
403-
'postgresqls', 'acid-minimal-cluster',
404-
{
405-
'spec': {
406-
'connectionPool': {
407-
'numberOfInstances': 2,
408-
},
409-
}
410-
})
411-
412-
k8s.wait_for_running_pods(pod_selector, 2)
413-
414-
# turn it off, keeping configuration section
415-
k8s.api.custom_objects_api.patch_namespaced_custom_object(
416-
'acid.zalan.do', 'v1', 'default',
417-
'postgresqls', 'acid-minimal-cluster',
418-
{
419-
'spec': {
420-
'enableConnectionPool': False,
421-
}
422-
})
423-
k8s.wait_for_pods_to_stop(pod_selector)
424-
425-
k8s.api.custom_objects_api.patch_namespaced_custom_object(
426-
'acid.zalan.do', 'v1', 'default',
427-
'postgresqls', 'acid-minimal-cluster',
428-
{
429-
'spec': {
430-
'enableConnectionPool': True,
431-
}
432-
})
433-
k8s.wait_for_pod_start(pod_selector)
434-
except timeout_decorator.TimeoutError:
435-
print('Operator log: {}'.format(k8s.get_operator_log()))
436-
raise
437-
438442
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
439443
def test_taint_based_eviction(self):
440444
'''
@@ -473,6 +477,9 @@ def test_taint_based_eviction(self):
473477
}
474478
k8s.update_config(patch_toleration_config)
475479

480+
# wait a little before proceeding with the pod distribution test
481+
time.sleep(k8s.RETRY_TIMEOUT_SEC)
482+
476483
# toggle pod anti affinity to move replica away from master node
477484
self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)
478485

0 commit comments

Comments (0)