|
13 | 13 | import threading
|
14 | 14 |
|
15 | 15 |
|
# Helper used to compare JSON documents ignoring ordering
def ordered(obj):
    """Recursively normalize a JSON-like value for order-insensitive comparison.

    Dicts become sorted lists of (key, normalized-value) pairs and lists are
    sorted after normalizing each element, so two structures that differ only
    in key/element ordering normalize to equal values. Scalars pass through
    unchanged.
    """
    if isinstance(obj, dict):
        return sorted([(key, ordered(value)) for key, value in obj.items()])
    if isinstance(obj, list):
        return sorted([ordered(item) for item in obj])
    return obj

16 | 25 | def if_fdw_enabled(func):
|
17 | 26 | """To run tests with FDW support set environment variable TEST_FDW=1"""
|
18 | 27 | def wrapper(*args, **kwargs):
|
@@ -365,7 +374,7 @@ def test_foreign_table(self):
|
365 | 374 | master.psql('postgres', 'create extension postgres_fdw')
|
366 | 375 |
|
367 | 376 | # RANGE partitioning test with FDW:
|
368 |
| - # - create range partitioned table in master |
| 377 | + # - create range partitioned table in master |
369 | 378 | # - create foreign server
|
370 | 379 | # - create foreign table and insert some data into it
|
371 | 380 | # - attach foreign table to partitioned one
|
@@ -424,7 +433,7 @@ def test_foreign_table(self):
|
424 | 433 | master.safe_psql('postgres', 'select drop_partitions(\'abc\')')
|
425 | 434 |
|
426 | 435 | # HASH partitioning with FDW:
|
427 |
| - # - create hash partitioned table in master |
| 436 | + # - create hash partitioned table in master |
428 | 437 | # - create foreign table
|
429 | 438 | # - replace local partition with foreign one
|
430 | 439 | # - insert data
|
@@ -493,15 +502,6 @@ def test_parallel_nodes(self):
|
493 | 502 | $$ language plpgsql;
|
494 | 503 | """)
|
495 | 504 |
|
496 |
| - # Helper function for json equality |
497 |
| - def ordered(obj): |
498 |
| - if isinstance(obj, dict): |
499 |
| - return sorted((k, ordered(v)) for k, v in obj.items()) |
500 |
| - if isinstance(obj, list): |
501 |
| - return sorted(ordered(x) for x in obj) |
502 |
| - else: |
503 |
| - return obj |
504 |
| - |
505 | 505 | # Test parallel select
|
506 | 506 | with node.connect() as con:
|
507 | 507 | con.execute('set max_parallel_workers_per_gather = 2')
|
@@ -708,6 +708,195 @@ def con2_thread():
|
708 | 708 | node.stop()
|
709 | 709 | node.cleanup()
|
710 | 710 |
|
| 711 | + def test_pg_dump(self): |
| 712 | + """ |
| 713 | + Test using dump and restore of partitioned table through pg_dump and pg_restore tools. |
| 714 | +
|
| 715 | + Test strategy: |
| 716 | + - test range and hash partitioned tables; |
| 717 | + - for each partitioned table check on restorable side the following quantities: |
| 718 | + * constraints related to partitioning; |
| 719 | + * init callback function and enable parent flag; |
| 720 | + * number of rows in parent and child tables; |
| 721 | + * plan validity of simple SELECT query under partitioned table; |
| 722 | + - check dumping using the following parameters of pg_dump: |
| 723 | + * format = plain | custom; |
| 724 | + * using of inserts and copy. |
| 725 | + - all test cases are carried out on tables half-full with data located in parent part, |
| 726 | + the rest of data - in child tables. |
| 727 | + """ |
| 728 | + |
| 729 | + import subprocess |
| 730 | + |
| 731 | + # Init and start postgres instance with preload pg_pathman module |
| 732 | + node = get_new_node('test') |
| 733 | + node.init() |
| 734 | + node.append_conf( |
| 735 | + 'postgresql.conf', |
| 736 | + """ |
| 737 | + shared_preload_libraries=\'pg_pathman\' |
| 738 | + pg_pathman.override_copy=false |
| 739 | + """) |
| 740 | + node.start() |
| 741 | + |
| 742 | + # Init two databases: initial and copy |
| 743 | + node.psql('postgres', 'create database initial') |
| 744 | + node.psql('postgres', 'create database copy') |
| 745 | + node.psql('initial', 'create extension pg_pathman') |
| 746 | + |
| 747 | + # Create and fillin partitioned table in initial database |
| 748 | + with node.connect('initial') as con: |
| 749 | + |
| 750 | + # create and initailly fillin tables |
| 751 | + con.execute('create table range_partitioned (i integer not null)') |
| 752 | + con.execute('insert into range_partitioned select i from generate_series(1, 500) i') |
| 753 | + con.execute('create table hash_partitioned (i integer not null)') |
| 754 | + con.execute('insert into hash_partitioned select i from generate_series(1, 500) i') |
| 755 | + |
| 756 | + # partition table keeping data in base table |
| 757 | + # enable_parent parameter automatically becames true |
| 758 | + con.execute('select create_range_partitions(\'range_partitioned\', \'i\', 1, 200, partition_data := false)') |
| 759 | + con.execute('select create_hash_partitions(\'hash_partitioned\', \'i\', 5, false)') |
| 760 | + |
| 761 | + # fillin child tables with remain data |
| 762 | + con.execute('insert into range_partitioned select i from generate_series(501, 1000) i') |
| 763 | + con.execute('insert into hash_partitioned select i from generate_series(501, 1000) i') |
| 764 | + |
| 765 | + # set init callback |
| 766 | + con.execute(""" |
| 767 | + create or replace function init_partition_stub_callback(args jsonb) |
| 768 | + returns void as $$ |
| 769 | + begin |
| 770 | + end |
| 771 | + $$ language plpgsql; |
| 772 | + """) |
| 773 | + con.execute('select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback\')') |
| 774 | + con.execute('select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback\')') |
| 775 | + |
| 776 | + # turn off enable_parent option |
| 777 | + con.execute('select set_enable_parent(\'range_partitioned\', false)') |
| 778 | + con.execute('select set_enable_parent(\'hash_partitioned\', false)') |
| 779 | + |
| 780 | + con.commit() |
| 781 | + |
| 782 | + # compare strategies |
| 783 | + def cmp_full(con1, con2): |
| 784 | + """Compare selection partitions in plan and contents in partitioned tables""" |
| 785 | + |
| 786 | + plan_query = 'explain (costs off, format json) select * from %s' |
| 787 | + content_query = 'select * from %s order by i' |
| 788 | + table_refs = [ |
| 789 | + 'range_partitioned', |
| 790 | + 'only range_partitioned', |
| 791 | + 'hash_partitioned', |
| 792 | + 'only hash_partitioned' |
| 793 | + ] |
| 794 | + for table_ref in table_refs: |
| 795 | + plan_initial = con1.execute(plan_query % table_ref)[0][0][0]['Plan'] |
| 796 | + plan_copy = con2.execute(plan_query % table_ref)[0][0][0]['Plan'] |
| 797 | + self.assertEqual(ordered(plan_initial), ordered(plan_copy)) |
| 798 | + |
| 799 | + content_initial = [x[0] for x in con1.execute(content_query % table_ref)] |
| 800 | + content_copy = [x[0] for x in con2.execute(content_query % table_ref)] |
| 801 | + self.assertEqual(content_initial, content_copy) |
| 802 | + |
| 803 | + def turnoff_pathman(node): |
| 804 | + node.psql('initial', 'alter system set pg_pathman.enable to off') |
| 805 | + node.reload() |
| 806 | + |
| 807 | + def turnon_pathman(node): |
| 808 | + node.psql('initial', 'alter system set pg_pathman.enable to on') |
| 809 | + node.psql('copy', 'alter system set pg_pathman.enable to on') |
| 810 | + node.psql('initial', 'alter system set pg_pathman.override_copy to off') |
| 811 | + node.psql('copy', 'alter system set pg_pathman.override_copy to off') |
| 812 | + node.reload() |
| 813 | + |
| 814 | + # Test dump/restore from init database to copy functionality |
| 815 | + test_params = [ |
| 816 | + (None, |
| 817 | + None, |
| 818 | + [node.get_bin_path("pg_dump"), |
| 819 | + "-p {}".format(node.port), |
| 820 | + "initial"], |
| 821 | + [node.get_bin_path("psql"), |
| 822 | + "-p {}".format(node.port), |
| 823 | + "copy"], |
| 824 | + cmp_full), # dump as plain text and restore via COPY |
| 825 | + (turnoff_pathman, |
| 826 | + turnon_pathman, |
| 827 | + [node.get_bin_path("pg_dump"), |
| 828 | + "-p {}".format(node.port), |
| 829 | + "--inserts", |
| 830 | + "initial"], |
| 831 | + [node.get_bin_path("psql"), |
| 832 | + "-p {}".format(node.port), |
| 833 | + "copy"], |
| 834 | + cmp_full), # dump as plain text and restore via INSERTs |
| 835 | + (None, |
| 836 | + None, |
| 837 | + [node.get_bin_path("pg_dump"), |
| 838 | + "-p {}".format(node.port), |
| 839 | + "--format=custom", |
| 840 | + "initial"], |
| 841 | + [node.get_bin_path("pg_restore"), |
| 842 | + "-p {}".format(node.port), |
| 843 | + "--dbname=copy"], |
| 844 | + cmp_full), # dump in archive format |
| 845 | + ] |
| 846 | + for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: |
| 847 | + |
| 848 | + if (preproc != None): |
| 849 | + preproc(node) |
| 850 | + |
| 851 | + # transfer and restore data |
| 852 | + p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) |
| 853 | + p2 = subprocess.Popen(pg_restore_params, stdin=p1.stdout, stdout=subprocess.PIPE) |
| 854 | + p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits. |
| 855 | + p2.communicate() |
| 856 | + |
| 857 | + if (postproc != None): |
| 858 | + postproc(node) |
| 859 | + |
| 860 | + # check validity of data |
| 861 | + with node.connect('initial') as con1, node.connect('copy') as con2: |
| 862 | + |
| 863 | + # compare plans and contents of initial and copy |
| 864 | + cmp_dbs(con1, con2) |
| 865 | + |
| 866 | + # compare enable_parent flag and callback function |
| 867 | + config_params_query = """ |
| 868 | + select partrel, enable_parent, init_callback from pathman_config_params |
| 869 | + """ |
| 870 | + config_params_initial, config_params_copy = {}, {} |
| 871 | + for row in con1.execute(config_params_query): |
| 872 | + config_params_initial[row[0]] = row[1:] |
| 873 | + for row in con2.execute(config_params_query): |
| 874 | + config_params_copy[row[0]] = row[1:] |
| 875 | + self.assertEqual(config_params_initial, config_params_copy) |
| 876 | + |
| 877 | + # compare constraints on each partition |
| 878 | + constraints_query = """ |
| 879 | + select r.relname, c.conname, c.consrc from |
| 880 | + pg_constraint c join pg_class r on c.conrelid=r.oid |
| 881 | + where relname similar to '(range|hash)_partitioned_\d+' |
| 882 | + """ |
| 883 | + constraints_initial, constraints_copy = {}, {} |
| 884 | + for row in con1.execute(constraints_query): |
| 885 | + constraints_initial[row[0]] = row[1:] |
| 886 | + for row in con2.execute(constraints_query): |
| 887 | + constraints_copy[row[0]] = row[1:] |
| 888 | + self.assertEqual(constraints_initial, constraints_copy) |
| 889 | + |
| 890 | + # clear copy database |
| 891 | + node.psql('copy', 'drop schema public cascade') |
| 892 | + node.psql('copy', 'create schema public') |
| 893 | + node.psql('copy', 'drop extension pg_pathman cascade') |
| 894 | + |
| 895 | + # Stop instance and finish work |
| 896 | + node.stop() |
| 897 | + node.cleanup() |
| 898 | + |
711 | 899 |
|
712 | 900 | if __name__ == "__main__":
|
713 | 901 | unittest.main()
|
| 902 | + |
0 commit comments