Commit 350cf51

Maksim Milyutin committed

Add primary tests for parallel queries under partitioning

1 parent d186018 · commit 350cf51

File tree: 1 file changed, +78 -0

tests/partitioning_test.py

Lines changed: 78 additions & 0 deletions
@@ -406,6 +406,84 @@ def test_foreign_table(self):
         # Testing drop partitions (including foreign partitions)
         master.safe_psql('postgres', 'select drop_partitions(\'abc\')')
 
+    def test_parallel_nodes(self):
+        """Test parallel queries under partitions"""
+
+        # Initialize and start a postgres instance with pg_pathman preloaded
+        node = get_new_node('test')
+        node.init()
+        node.append_conf(
+            'postgresql.conf',
+            'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n')
+        node.start()
+
+        # Check the server version; parallel queries appeared in 9.6,
+        # so skip the rest of this test on older servers.
+        # execute() returns rows of strings, hence the explicit cast.
+        version = int(node.execute("postgres", "show server_version_num")[0][0])
+        if version < 90600:
+            return
+
+        # Prepare the test database
+        node.psql('postgres', 'create extension pg_pathman')
+        node.psql('postgres', 'create table range_partitioned as select generate_series(1, 1e4::integer) i')
+        node.psql('postgres', 'alter table range_partitioned alter column i set not null')
+        node.psql('postgres', 'select create_range_partitions(\'range_partitioned\', \'i\', 1, 1e3::integer)')
+        node.psql('postgres', 'vacuum analyze range_partitioned')
+
+        node.psql('postgres', 'create table hash_partitioned as select generate_series(1, 1e4::integer) i')
+        node.psql('postgres', 'alter table hash_partitioned alter column i set not null')
+        node.psql('postgres', 'select create_hash_partitions(\'hash_partitioned\', \'i\', 10)')
+        node.psql('postgres', 'vacuum analyze hash_partitioned')
+
+        # Test parallel select
+        with node.connect() as con:
+            con.execute('set max_parallel_workers_per_gather = 2')
+            con.execute('set min_parallel_relation_size = 0')
+            con.execute('set parallel_setup_cost = 0')
+            con.execute('set parallel_tuple_cost = 0')
+
+            # Check the parallel aggregate plan
+            plan = con.execute('explain (costs off) select count(*) from range_partitioned where i < 1500')
+            expected = [('Finalize Aggregate',),
+                        ('  ->  Gather',),
+                        ('        Workers Planned: 2',),
+                        ('        ->  Partial Aggregate',),
+                        ('              ->  Append',),
+                        ('                    ->  Parallel Seq Scan on range_partitioned_1',),
+                        ('                    ->  Parallel Seq Scan on range_partitioned_2',),
+                        ('                          Filter: (i < 1500)',)]
+            self.assertEqual(plan, expected)
+
+            # Check the number of returned tuples
+            count = con.execute('select count(*) from range_partitioned where i < 1500')
+            self.assertEqual(count[0][0], 1499)
+
+            # Check a simple parallel seq scan plan with limit
+            plan = con.execute('explain (costs off) select * from range_partitioned where i < 1500 limit 5')
+            expected = [('Limit',),
+                        ('  ->  Gather',),
+                        ('        Workers Planned: 2',),
+                        ('        ->  Append',),
+                        ('              ->  Parallel Seq Scan on range_partitioned_1',),
+                        ('              ->  Parallel Seq Scan on range_partitioned_2',),
+                        ('                    Filter: (i < 1500)',)]
+            self.assertEqual(plan, expected)
+
+            # Check the tuples returned by the query above
+            res_tuples = con.execute('select * from range_partitioned where i < 1500 limit 5')
+            expected = [(1,), (2,), (3,), (4,), (5,)]
+            self.assertEqual(res_tuples, expected)
+
+        # Remove all test objects
+        node.psql('postgres', 'drop table range_partitioned cascade')
+        node.psql('postgres', 'drop table hash_partitioned cascade')
+        node.psql('postgres', 'drop extension pg_pathman cascade')
+
+        # Stop the instance and clean up
+        node.stop()
+        node.cleanup()
+
 
 if __name__ == "__main__":
     unittest.main()
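
For reference, a minimal sketch of running only the new test case with the standard unittest runner. The test class name (PartitioningTests) and the module path are assumptions, since neither is visible in this diff; the repository root is assumed to be on PYTHONPATH.

    # Hypothetical runner for the new test; the class name is an assumption.
    import unittest

    from tests.partitioning_test import PartitioningTests  # assumed class name

    suite = unittest.TestSuite()
    suite.addTest(PartitioningTests('test_parallel_nodes'))
    unittest.TextTestRunner(verbosity=2).run(suite)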
