From 7cbdd13ec2ed9ce7a8c5658c78656d062c5c33c6 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 6 Dec 2018 17:43:11 +0300 Subject: [PATCH 01/12] cont --- sr_plan.c | 137 +++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 95 insertions(+), 42 deletions(-) diff --git a/sr_plan.c b/sr_plan.c index 3424adc..8a4054d 100644 --- a/sr_plan.c +++ b/sr_plan.c @@ -45,6 +45,11 @@ typedef struct SrPlanCachedInfo { const char *query_text; } SrPlanCachedInfo; +typedef struct show_plan_funcctx { + uint32 query_hash; + int index; +} show_plan_funcctx; + static SrPlanCachedInfo cachedInfo = { true, /* enabled */ false, /* write_mode */ @@ -375,8 +380,12 @@ collect_indexid(void *context, Plan *plan) static PlannedStmt * lookup_plan_by_query_hash(Snapshot snapshot, Relation sr_index_rel, - Relation sr_plans_heap, ScanKey key, void *context) + Relation sr_plans_heap, ScanKey key, + void *context, + int index, + char **queryString) { + int counter = 0; PlannedStmt *pl_stmt = NULL; HeapTuple htup; IndexScanDesc query_index_scan; @@ -392,12 +401,18 @@ lookup_plan_by_query_hash(Snapshot snapshot, Relation sr_index_rel, heap_deform_tuple(htup, sr_plans_heap->rd_att, search_values, search_nulls); - /* Check enabled field */ - if (DatumGetBool(search_values[Anum_sr_enable - 1])) + /* Check enabled field or index */ + counter++; + if ((index > 0 && index == counter) || + (index == 0 && DatumGetBool(search_values[Anum_sr_enable - 1]))) { - char *out = TextDatumGetCString(DatumGetTextP((search_values[3]))); + char *out = TextDatumGetCString(DatumGetTextP((search_values[Anum_sr_plan - 1]))); pl_stmt = stringToNode(out); + if (queryString) + *queryString = TextDatumGetCString( + DatumGetTextP((search_values[Anum_sr_query - 1]))); + if (context) execute_for_plantree(pl_stmt, restore_params, context); @@ -470,7 +485,7 @@ sr_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) qp_context.collect = false; snapshot = RegisterSnapshot(GetLatestSnapshot()); pl_stmt = lookup_plan_by_query_hash(snapshot, sr_index_rel, sr_plans_heap, - &key, &qp_context); + &key, &qp_context, 0, NULL); if (pl_stmt != NULL) { level--; @@ -500,7 +515,7 @@ sr_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) /* recheck plan in index */ snapshot = RegisterSnapshot(GetLatestSnapshot()); pl_stmt = lookup_plan_by_query_hash(snapshot, sr_index_rel, sr_plans_heap, - &key, &qp_context); + &key, &qp_context, 0, NULL); if (pl_stmt != NULL) { level--; @@ -870,54 +885,92 @@ do_nothing(PG_FUNCTION_ARGS) Datum show_plan(PG_FUNCTION_ARGS) { + FuncCallContext *funcctx; + show_plan_funcctx *ctx; + PlannedStmt *pl_stmt = NULL; LOCKMODE heap_lock = AccessShareLock; Relation sr_plans_heap, sr_index_rel; Snapshot snapshot; ScanKeyData key; - - ExplainFormat format = EXPLAIN_FORMAT_TEXT; - ExplainState state; - uint32 index, + char *queryString; + ExplainState *es = NewExplainState(); + uint32 index = 1, query_hash = PG_GETARG_INT32(0); - if (!PG_ARGISNULL(1)) - index = PG_GETARG_INT32(0); - else - index = 1; - - if (!PG_ARGISNULL(2)) + if (SRF_IS_FIRSTCALL()) { - char *ftext = PG_GETARG_CSTRING(2); - - if (strcmp(ftext, "text") == 0) - format = EXPLAIN_FORMAT_TEXT; - else if (strcmp(ftext, "xml") == 0) - format = EXPLAIN_FORMAT_XML; - else if (strcmp(ftext, "json") == 0) - format = EXPLAIN_FORMAT_JSON; - else if (strcmp(ftext, "yaml") == 0) - format = EXPLAIN_FORMAT_YAML; - else - elog(ERROR, "unknown format of EXPLAIN"); - } - state.format = format; + funcctx = SRF_FIRSTCALL_INIT(); + funcctx->user_fctx = 
MemoryContextAlloc(funcctx->multi_call_memory_ctx, + sizeof(show_plan_funcctx)); - /* Try to find already planned statement */ - sr_plans_heap = heap_open(cachedInfo.sr_plans_oid, heap_lock); - sr_index_rel = index_open(cachedInfo.sr_index_oid, heap_lock); + uint32 index = 1, + query_hash = PG_GETARG_INT32(0); - snapshot = RegisterSnapshot(GetLatestSnapshot()); - ScanKeyInit(&key, 1, BTEqualStrategyNumber, F_INT4EQ, query_hash); - pl_stmt = lookup_plan_by_query_hash(snapshot, sr_index_rel, sr_plans_heap, - &key, NULL); - if (pl_stmt == NULL) - elog(ERROR, "no saved plan on this query hash"); + if (!PG_ARGISNULL(1)) + index = PG_GETARG_INT32(1); - UnregisterSnapshot(snapshot); - index_close(sr_index_rel, heap_lock); - heap_close(sr_plans_heap, heap_lock); + es->analyze = false; + es->costs = false; + es->verbose = true; + es->buffers = false; + es->timing = false; + es->summary = false; + + /* emit opening boilerplate */ + ExplainBeginOutput(es); + + if (!PG_ARGISNULL(2)) + { + char *p = PG_GETARG_CSTRING(2); + + /* default */ + es->format = EXPLAIN_FORMAT_TEXT; + + if (strcmp(p, "text") == 0) + es->format = EXPLAIN_FORMAT_TEXT; + else if (strcmp(p, "xml") == 0) + es->format = EXPLAIN_FORMAT_XML; + else if (strcmp(p, "json") == 0) + es->format = EXPLAIN_FORMAT_JSON; + else if (strcmp(p, "yaml") == 0) + es->format = EXPLAIN_FORMAT_YAML; + else + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("unrecognized value for EXPLAIN option \"%s\"", p))); + } + + /* Try to find already planned statement */ + sr_plans_heap = heap_open(cachedInfo.sr_plans_oid, heap_lock); + sr_index_rel = index_open(cachedInfo.sr_index_oid, heap_lock); + + snapshot = RegisterSnapshot(GetLatestSnapshot()); + ScanKeyInit(&key, 1, BTEqualStrategyNumber, F_INT4EQ, query_hash); + pl_stmt = lookup_plan_by_query_hash(snapshot, sr_index_rel, sr_plans_heap, + &key, NULL, index, &queryString); + if (pl_stmt == NULL) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("no saved plan by query hash \"%d\"", query_hash))); + + ExplainOnePlan(pl_stmt, NULL, es, queryString, NULL, NULL, NULL); + + ExplainEndOutput(es); + Assert(es->indent == 0); + + UnregisterSnapshot(snapshot); + index_close(sr_index_rel, heap_lock); + heap_close(sr_plans_heap, heap_lock); + + if (es->format == EXPLAIN_FORMAT_TEXT) + do_text_output_multiline(tstate, es->str->data); + else + do_text_output_oneline(tstate, es->str->data); + } + + funcctx = SRF_PERCALL_SETUP(); } /* From 64f099c88eeb6e9ae7ad2d49a56c15c98a8eb95a Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 6 Dec 2018 20:26:19 +0300 Subject: [PATCH 02/12] Add show_plan function for 10+ --- Makefile | 2 +- init.sql | 7 +++ sql/sr_plan.sql | 1 + sr_plan.c | 133 ++++++++++++++++++++++++++++++++++++------------ 4 files changed, 110 insertions(+), 33 deletions(-) diff --git a/Makefile b/Makefile index 50d456f..fb8d53c 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ DATA_built = sr_plan--$(EXTVERSION).sql DATA = sr_plan--1.0--1.1.sql EXTRA_CLEAN = sr_plan--$(EXTVERSION).sql -REGRESS = sr_plan sr_plan_schema joins +REGRESS = sr_plan sr_plan_schema joins explain ifdef USE_PGXS ifndef PG_CONFIG diff --git a/init.sql b/init.sql index 149c73f..f6bd067 100644 --- a/init.sql +++ b/init.sql @@ -23,6 +23,13 @@ RETURNS anyelement AS 'MODULE_PATHNAME', 'do_nothing' LANGUAGE C STRICT VOLATILE; +CREATE FUNCTION show_plan(query_hash int4, + index int4 default null, + format cstring default null) +RETURNS SETOF RECORD +AS 'MODULE_PATHNAME', 'show_plan' +LANGUAGE C 
VOLATILE; + CREATE OR REPLACE FUNCTION sr_plan_invalid_table() RETURNS event_trigger LANGUAGE plpgsql AS $$ DECLARE diff --git a/sql/sr_plan.sql b/sql/sr_plan.sql index cf83d88..ba7ba37 100644 --- a/sql/sr_plan.sql +++ b/sql/sr_plan.sql @@ -15,6 +15,7 @@ SELECT * FROM test_table WHERE test_attr1 = 15; UPDATE sr_plans SET enable = true; + SELECT * FROM test_table WHERE test_attr1 = _p(10); SELECT * FROM test_table WHERE test_attr1 = _p(15); SELECT * FROM test_table WHERE test_attr1 = 10; diff --git a/sr_plan.c b/sr_plan.c index 8a4054d..5b788fd 100644 --- a/sr_plan.c +++ b/sr_plan.c @@ -46,8 +46,9 @@ typedef struct SrPlanCachedInfo { } SrPlanCachedInfo; typedef struct show_plan_funcctx { - uint32 query_hash; - int index; + ExplainFormat format; + char *output; + int lines_count; } show_plan_funcctx; static SrPlanCachedInfo cachedInfo = { @@ -882,34 +883,60 @@ do_nothing(PG_FUNCTION_ARGS) PG_RETURN_DATUM(PG_GETARG_DATUM(0)); } +/* + * Construct the result tupledesc for an EXPLAIN + */ +static TupleDesc +make_tupledesc(ExplainState *es) +{ + TupleDesc tupdesc; + Oid result_type; + + /* Check for XML format option */ + switch (es->format) + { + case EXPLAIN_FORMAT_XML: + result_type = XMLOID; + break; + case EXPLAIN_FORMAT_JSON: + result_type = JSONOID; + break; + default: + result_type = TEXTOID; + } + + /* Need a tuple descriptor representing a single TEXT or XML column */ + tupdesc = CreateTemplateTupleDesc(1, false); + TupleDescInitEntry(tupdesc, (AttrNumber) 1, "QUERY PLAN", result_type, -1, 0); + return tupdesc; +} + Datum show_plan(PG_FUNCTION_ARGS) { FuncCallContext *funcctx; show_plan_funcctx *ctx; - PlannedStmt *pl_stmt = NULL; - LOCKMODE heap_lock = AccessShareLock; - Relation sr_plans_heap, - sr_index_rel; - Snapshot snapshot; - ScanKeyData key; - char *queryString; - ExplainState *es = NewExplainState(); - uint32 index = 1, - query_hash = PG_GETARG_INT32(0); - if (SRF_IS_FIRSTCALL()) { - funcctx = SRF_FIRSTCALL_INIT(); - funcctx->user_fctx = MemoryContextAlloc(funcctx->multi_call_memory_ctx, - sizeof(show_plan_funcctx)); + MemoryContext oldcxt; + PlannedStmt *pl_stmt = NULL; + LOCKMODE heap_lock = AccessShareLock; + Relation sr_plans_heap, + sr_index_rel; + Snapshot snapshot; + ScanKeyData key; + char *queryString; + ExplainState *es = NewExplainState(); + uint32 index, + query_hash = PG_GETARG_INT32(0); - uint32 index = 1, - query_hash = PG_GETARG_INT32(0); + funcctx = SRF_FIRSTCALL_INIT(); if (!PG_ARGISNULL(1)) - index = PG_GETARG_INT32(1); + index = PG_GETARG_INT32(1); /* show by index or enabled (if 0) */ + else + index = 0; /* show enabled one */ es->analyze = false; es->costs = false; @@ -917,17 +944,12 @@ show_plan(PG_FUNCTION_ARGS) es->buffers = false; es->timing = false; es->summary = false; - - /* emit opening boilerplate */ - ExplainBeginOutput(es); + es->format = EXPLAIN_FORMAT_TEXT; if (!PG_ARGISNULL(2)) { char *p = PG_GETARG_CSTRING(2); - /* default */ - es->format = EXPLAIN_FORMAT_TEXT; - if (strcmp(p, "text") == 0) es->format = EXPLAIN_FORMAT_TEXT; else if (strcmp(p, "xml") == 0) @@ -939,7 +961,8 @@ show_plan(PG_FUNCTION_ARGS) else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("unrecognized value for EXPLAIN option \"%s\"", p))); + errmsg("unrecognized value for output format \"%s\"", p), + errhint("supported formats: 'text', 'xml', 'json', 'yaml'"))); } /* Try to find already planned statement */ @@ -953,10 +976,10 @@ show_plan(PG_FUNCTION_ARGS) if (pl_stmt == NULL) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("no saved 
plan by query hash \"%d\"", query_hash))); + errmsg("could not find saved plan"))); + ExplainBeginOutput(es); ExplainOnePlan(pl_stmt, NULL, es, queryString, NULL, NULL, NULL); - ExplainEndOutput(es); Assert(es->indent == 0); @@ -964,13 +987,59 @@ show_plan(PG_FUNCTION_ARGS) index_close(sr_index_rel, heap_lock); heap_close(sr_plans_heap, heap_lock); - if (es->format == EXPLAIN_FORMAT_TEXT) - do_text_output_multiline(tstate, es->str->data); - else - do_text_output_oneline(tstate, es->str->data); + oldcxt = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + + funcctx->tuple_desc = BlessTupleDesc(make_tupledesc(es)); + funcctx->user_fctx = palloc(sizeof(show_plan_funcctx)); + ctx = (show_plan_funcctx *) funcctx->user_fctx; + + ctx->format = es->format; + ctx->output = pstrdup(es->str->data); + MemoryContextSwitchTo(oldcxt); } funcctx = SRF_PERCALL_SETUP(); + ctx = (show_plan_funcctx *) funcctx->user_fctx; + + /* if there is a string and not an end of string */ + if (ctx->output && *ctx->output) + { + HeapTuple tuple; + Datum values[1]; + bool isnull[1] = {false}; + + if (ctx->format != EXPLAIN_FORMAT_TEXT) + { + values[0] = PointerGetDatum(cstring_to_text(ctx->output)); + ctx->output = NULL; + } + else + { + char *txt = ctx->output; + char *eol; + int len; + + eol = strchr(txt, '\n'); + if (eol) + { + len = eol - txt; + eol++; + } + else + { + len = strlen(txt); + eol = txt + len; + } + + values[0] = PointerGetDatum(cstring_to_text_with_len(txt, len)); + ctx->output = txt = eol; + } + + tuple = heap_form_tuple(funcctx->tuple_desc, values, isnull); + SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); + } + + SRF_RETURN_DONE(funcctx); } /* From 89ce2c957bcf579a4db07c3c67b48417a4e4d936 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Thu, 6 Dec 2018 20:27:49 +0300 Subject: [PATCH 03/12] Add tests --- expected/explain.out | 117 +++++++++++++++++++++++++++++++++++++++++++ sql/explain.sql | 47 +++++++++++++++++ 2 files changed, 164 insertions(+) create mode 100644 expected/explain.out create mode 100644 sql/explain.sql diff --git a/expected/explain.out b/expected/explain.out new file mode 100644 index 0000000..d1363ff --- /dev/null +++ b/expected/explain.out @@ -0,0 +1,117 @@ +CREATE EXTENSION sr_plan; +SET sr_plan.log_usage = NOTICE; +CREATE TABLE explain_test(test_attr1 int, test_attr2 int); +INSERT INTO explain_test SELECT i, i + 1 FROM generate_series(1, 20) i; +SET sr_plan.write_mode = true; +SELECT * FROM explain_test WHERE test_attr1 = 10; +NOTICE: sr_plan: saved plan for SELECT * FROM explain_test WHERE test_attr1 = 10; + test_attr1 | test_attr2 +------------+------------ + 10 | 11 +(1 row) + +SET sr_plan.write_mode = false; +UPDATE sr_plans SET enable = true; +-- check show_plan +WITH vars AS (SELECT query_hash FROM sr_plans WHERE + query = 'SELECT * FROM explain_test WHERE test_attr1 = 10;' LIMIT 1) +SELECT show_plan(vars.query_hash) FROM vars; + show_plan +---------------------------------------------- + ("Seq Scan on public.explain_test") + (" Output: test_attr1, test_attr2") + (" Filter: (explain_test.test_attr1 = 10)") +(3 rows) + +WITH vars AS (SELECT query_hash FROM sr_plans WHERE + query = 'SELECT * FROM explain_test WHERE test_attr1 = 10;' LIMIT 1) +SELECT show_plan(vars.query_hash, index := 1) FROM vars; + show_plan +---------------------------------------------- + ("Seq Scan on public.explain_test") + (" Output: test_attr1, test_attr2") + (" Filter: (explain_test.test_attr1 = 10)") +(3 rows) + +WITH vars AS (SELECT query_hash FROM sr_plans WHERE + query = 'SELECT * 
FROM explain_test WHERE test_attr1 = 10;' LIMIT 1) +SELECT show_plan(vars.query_hash, index := 2) FROM vars; +ERROR: could not find saved plan +WITH vars AS (SELECT query_hash FROM sr_plans WHERE + query = 'SELECT * FROM explain_test WHERE test_attr1 = 10;' LIMIT 1) +SELECT show_plan(vars.query_hash, format := 'json') FROM vars; + show_plan +------------------------------------------------------ + ("[ + + { + + ""Plan"": { + + ""Node Type"": ""Seq Scan"", + + ""Parallel Aware"": false, + + ""Relation Name"": ""explain_test"", + + ""Schema"": ""public"", + + ""Alias"": ""explain_test"", + + ""Output"": [""test_attr1"", ""test_attr2""], + + ""Filter"": ""(explain_test.test_attr1 = 10)""+ + } + + } + + ]") +(1 row) + +WITH vars AS (SELECT query_hash FROM sr_plans WHERE + query = 'SELECT * FROM explain_test WHERE test_attr1 = 10;' LIMIT 1) +SELECT show_plan(vars.query_hash, format := 'text') FROM vars; + show_plan +---------------------------------------------- + ("Seq Scan on public.explain_test") + (" Output: test_attr1, test_attr2") + (" Filter: (explain_test.test_attr1 = 10)") +(3 rows) + +WITH vars AS (SELECT query_hash FROM sr_plans WHERE + query = 'SELECT * FROM explain_test WHERE test_attr1 = 10;' LIMIT 1) +SELECT show_plan(vars.query_hash, format := 'xml') FROM vars; + show_plan +-------------------------------------------------------------- + ("+ + + + + + Seq Scan + + false + + explain_test + + public + + explain_test + + + + test_attr1 + + test_attr2 + + + + (explain_test.test_attr1 = 10) + + + + + + ") +(1 row) + +WITH vars AS (SELECT query_hash FROM sr_plans WHERE + query = 'SELECT * FROM explain_test WHERE test_attr1 = 10;' LIMIT 1) +SELECT show_plan(vars.query_hash, format := 'yaml') FROM vars; + show_plan +-------------------------------------------------- + ("- Plan: + + Node Type: ""Seq Scan"" + + Parallel Aware: false + + Relation Name: ""explain_test"" + + Schema: ""public"" + + Alias: ""explain_test"" + + Output: + + - ""test_attr1"" + + - ""test_attr2"" + + Filter: ""(explain_test.test_attr1 = 10)""") +(1 row) + +WITH vars AS (SELECT query_hash FROM sr_plans WHERE + query = 'SELECT * FROM explain_test WHERE test_attr1 = 10;' LIMIT 1) +SELECT show_plan(vars.query_hash, format := 'nonsense') FROM vars; +ERROR: unrecognized value for output format "nonsense" +HINT: supported formats: 'text', 'xml', 'json', 'yaml' +DROP TABLE explain_test CASCADE; +DROP EXTENSION sr_plan CASCADE; +NOTICE: sr_plan was disabled diff --git a/sql/explain.sql b/sql/explain.sql new file mode 100644 index 0000000..f8db4d7 --- /dev/null +++ b/sql/explain.sql @@ -0,0 +1,47 @@ +CREATE EXTENSION sr_plan; + +SET sr_plan.log_usage = NOTICE; +CREATE TABLE explain_test(test_attr1 int, test_attr2 int); +INSERT INTO explain_test SELECT i, i + 1 FROM generate_series(1, 20) i; + +SET sr_plan.write_mode = true; +SELECT * FROM explain_test WHERE test_attr1 = 10; + +SET sr_plan.write_mode = false; +UPDATE sr_plans SET enable = true; + +-- check show_plan +WITH vars AS (SELECT query_hash FROM sr_plans WHERE + query = 'SELECT * FROM explain_test WHERE test_attr1 = 10;' LIMIT 1) +SELECT show_plan(vars.query_hash) FROM vars; + +WITH vars AS (SELECT query_hash FROM sr_plans WHERE + query = 'SELECT * FROM explain_test WHERE test_attr1 = 10;' LIMIT 1) +SELECT show_plan(vars.query_hash, index := 1) FROM vars; + +WITH vars AS (SELECT query_hash FROM sr_plans WHERE + query = 'SELECT * FROM explain_test WHERE test_attr1 = 10;' LIMIT 1) +SELECT show_plan(vars.query_hash, index := 2) FROM vars; + +WITH vars AS (SELECT 
query_hash FROM sr_plans WHERE + query = 'SELECT * FROM explain_test WHERE test_attr1 = 10;' LIMIT 1) +SELECT show_plan(vars.query_hash, format := 'json') FROM vars; + +WITH vars AS (SELECT query_hash FROM sr_plans WHERE + query = 'SELECT * FROM explain_test WHERE test_attr1 = 10;' LIMIT 1) +SELECT show_plan(vars.query_hash, format := 'text') FROM vars; + +WITH vars AS (SELECT query_hash FROM sr_plans WHERE + query = 'SELECT * FROM explain_test WHERE test_attr1 = 10;' LIMIT 1) +SELECT show_plan(vars.query_hash, format := 'xml') FROM vars; + +WITH vars AS (SELECT query_hash FROM sr_plans WHERE + query = 'SELECT * FROM explain_test WHERE test_attr1 = 10;' LIMIT 1) +SELECT show_plan(vars.query_hash, format := 'yaml') FROM vars; + +WITH vars AS (SELECT query_hash FROM sr_plans WHERE + query = 'SELECT * FROM explain_test WHERE test_attr1 = 10;' LIMIT 1) +SELECT show_plan(vars.query_hash, format := 'nonsense') FROM vars; + +DROP TABLE explain_test CASCADE; +DROP EXTENSION sr_plan CASCADE; From 79f2a4b77c3a090c050069b91649c7deb06a044f Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 17 Dec 2018 13:37:14 +0300 Subject: [PATCH 04/12] Fix compability with 9.6 --- sr_plan.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sr_plan.c b/sr_plan.c index 5b788fd..9633b7d 100644 --- a/sr_plan.c +++ b/sr_plan.c @@ -979,7 +979,11 @@ show_plan(PG_FUNCTION_ARGS) errmsg("could not find saved plan"))); ExplainBeginOutput(es); +#if PG_VERSION_NUM >= 100000 ExplainOnePlan(pl_stmt, NULL, es, queryString, NULL, NULL, NULL); +#else + ExplainOnePlan(pl_stmt, NULL, es, queryString, NULL, NULL); +#endif ExplainEndOutput(es); Assert(es->indent == 0); From 41d96bf136ec072dac77dddf8d9765bba39190ff Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 17 Dec 2018 19:08:18 +0300 Subject: [PATCH 05/12] Add tests and update scripts --- Makefile | 4 +- conf.add | 1 + init.sql | 3 +- sr_plan--1.0--1.1.sql | 23 ++++++ sr_plan--1.1--1.2.sql | 55 ++++++++++++++ sr_plan--1.2.sql | 55 ++++++++++++++ sr_plan.c | 1 + sr_plan.control | 2 +- sr_plan.h | 3 +- tests/test_sr_plan.py | 162 ++++++++++++++++++++++++++++++++++++++++++ 10 files changed, 304 insertions(+), 5 deletions(-) create mode 100644 conf.add create mode 100644 sr_plan--1.1--1.2.sql create mode 100644 sr_plan--1.2.sql create mode 100755 tests/test_sr_plan.py diff --git a/Makefile b/Makefile index fb8d53c..cd1b95d 100644 --- a/Makefile +++ b/Makefile @@ -6,9 +6,9 @@ OBJS = sr_plan.o $(WIN32RES) PGFILEDESC = "sr_plan - save and read plan" EXTENSION = sr_plan -EXTVERSION = 1.1 +EXTVERSION = 1.2 DATA_built = sr_plan--$(EXTVERSION).sql -DATA = sr_plan--1.0--1.1.sql +DATA = sr_plan--1.0--1.1.sql sr_plan--1.1--1.2.sql EXTRA_CLEAN = sr_plan--$(EXTVERSION).sql REGRESS = sr_plan sr_plan_schema joins explain diff --git a/conf.add b/conf.add new file mode 100644 index 0000000..698b729 --- /dev/null +++ b/conf.add @@ -0,0 +1 @@ +shared_preload_libraries='sr_plan' diff --git a/init.sql b/init.sql index f6bd067..53a3163 100644 --- a/init.sql +++ b/init.sql @@ -5,10 +5,11 @@ CREATE TABLE sr_plans ( query_hash int NOT NULL, + query_id int8 NOT NULL, plan_hash int NOT NULL, + enable boolean NOT NULL, query varchar NOT NULL, plan text NOT NULL, - enable boolean NOT NULL, reloids oid[], index_reloids oid[] diff --git a/sr_plan--1.0--1.1.sql b/sr_plan--1.0--1.1.sql index bc589cc..269d549 100644 --- a/sr_plan--1.0--1.1.sql +++ b/sr_plan--1.0--1.1.sql @@ -16,3 +16,26 @@ CREATE INDEX sr_plans_query_index_oids ON sr_plans USING gin(index_reloids); DROP FUNCTION 
explain_jsonb_plan(jsonb) CASCADE; DROP FUNCTION sr_plan_invalid_table() CASCADE; + +CREATE OR REPLACE FUNCTION sr_plan_invalid_table() RETURNS event_trigger +LANGUAGE plpgsql AS $$ +DECLARE + obj record; + indobj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + WHERE object_type = 'table' OR object_type = 'index' + LOOP + IF obj.object_type = 'table' THEN + DELETE FROM @extschema@.sr_plans WHERE reloids @> ARRAY[obj.objid]; + ELSE + IF obj.object_type = 'index' THEN + DELETE FROM @extschema@.sr_plans WHERE index_reloids @> ARRAY[obj.objid]; + END IF; + END IF; + END LOOP; +END +$$; + +CREATE EVENT TRIGGER sr_plan_invalid_table ON sql_drop + EXECUTE PROCEDURE sr_plan_invalid_table(); diff --git a/sr_plan--1.1--1.2.sql b/sr_plan--1.1--1.2.sql new file mode 100644 index 0000000..bd4063b --- /dev/null +++ b/sr_plan--1.1--1.2.sql @@ -0,0 +1,55 @@ +SET sr_plan.enabled = false; + +DROP FUNCTION sr_plan_invalid_table() CASCADE; +DROP TABLE sr_plans CASCADE; +CREATE TABLE sr_plans ( + query_hash int NOT NULL, + query_id int8 NOT NULL, + plan_hash int NOT NULL, + enable boolean NOT NULL, + query varchar NOT NULL, + plan text NOT NULL, + + reloids oid[], + index_reloids oid[] +); +CREATE INDEX sr_plans_query_hash_idx ON sr_plans (query_hash); +CREATE INDEX sr_plans_query_oids ON sr_plans USING gin(reloids); +CREATE INDEX sr_plans_query_index_oids ON sr_plans USING gin(index_reloids); + +CREATE OR REPLACE FUNCTION _p(anyelement) +RETURNS anyelement +AS 'MODULE_PATHNAME', 'do_nothing' +LANGUAGE C STRICT VOLATILE; + +CREATE FUNCTION show_plan(query_hash int4, + index int4 default null, + format cstring default null) +RETURNS SETOF RECORD +AS 'MODULE_PATHNAME', 'show_plan' +LANGUAGE C VOLATILE; + +CREATE FUNCTION sr_plan_invalid_table() RETURNS event_trigger +LANGUAGE plpgsql AS $$ +DECLARE + obj record; + indobj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + WHERE object_type = 'table' OR object_type = 'index' + LOOP + IF obj.object_type = 'table' THEN + DELETE FROM @extschema@.sr_plans WHERE reloids @> ARRAY[obj.objid]; + ELSE + IF obj.object_type = 'index' THEN + DELETE FROM @extschema@.sr_plans WHERE index_reloids @> ARRAY[obj.objid]; + END IF; + END IF; + END LOOP; +END +$$; + +CREATE EVENT TRIGGER sr_plan_invalid_table ON sql_drop + EXECUTE PROCEDURE sr_plan_invalid_table(); + +SET sr_plan.enabled = true; diff --git a/sr_plan--1.2.sql b/sr_plan--1.2.sql new file mode 100644 index 0000000..53a3163 --- /dev/null +++ b/sr_plan--1.2.sql @@ -0,0 +1,55 @@ +/* contrib/sr_plan/init.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION sr_plan" to load this file. 
\quit + +CREATE TABLE sr_plans ( + query_hash int NOT NULL, + query_id int8 NOT NULL, + plan_hash int NOT NULL, + enable boolean NOT NULL, + query varchar NOT NULL, + plan text NOT NULL, + + reloids oid[], + index_reloids oid[] +); + +CREATE INDEX sr_plans_query_hash_idx ON sr_plans (query_hash); +CREATE INDEX sr_plans_query_oids ON sr_plans USING gin(reloids); +CREATE INDEX sr_plans_query_index_oids ON sr_plans USING gin(index_reloids); + +CREATE FUNCTION _p(anyelement) +RETURNS anyelement +AS 'MODULE_PATHNAME', 'do_nothing' +LANGUAGE C STRICT VOLATILE; + +CREATE FUNCTION show_plan(query_hash int4, + index int4 default null, + format cstring default null) +RETURNS SETOF RECORD +AS 'MODULE_PATHNAME', 'show_plan' +LANGUAGE C VOLATILE; + +CREATE OR REPLACE FUNCTION sr_plan_invalid_table() RETURNS event_trigger +LANGUAGE plpgsql AS $$ +DECLARE + obj record; + indobj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + WHERE object_type = 'table' OR object_type = 'index' + LOOP + IF obj.object_type = 'table' THEN + DELETE FROM @extschema@.sr_plans WHERE reloids @> ARRAY[obj.objid]; + ELSE + IF obj.object_type = 'index' THEN + DELETE FROM @extschema@.sr_plans WHERE index_reloids @> ARRAY[obj.objid]; + END IF; + END IF; + END LOOP; +END +$$; + +CREATE EVENT TRIGGER sr_plan_invalid_table ON sql_drop + EXECUTE PROCEDURE sr_plan_invalid_table(); diff --git a/sr_plan.c b/sr_plan.c index 9633b7d..0ef859f 100644 --- a/sr_plan.c +++ b/sr_plan.c @@ -581,6 +581,7 @@ sr_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) MemSet(nulls, 0, sizeof(nulls)); values[Anum_sr_query_hash - 1] = query_hash; + values[Anum_sr_query_id - 1] = Int64GetDatum(parse->queryId); values[Anum_sr_plan_hash - 1] = plan_hash; values[Anum_sr_query - 1] = CStringGetTextDatum(cachedInfo.query_text); values[Anum_sr_plan - 1] = CStringGetTextDatum(plan_text); diff --git a/sr_plan.control b/sr_plan.control index 9086e32..358eaed 100644 --- a/sr_plan.control +++ b/sr_plan.control @@ -1,4 +1,4 @@ # sr_plan extension comment = 'functions for save and read plan' -default_version = '1.1' +default_version = '1.2' module_pathname = '$libdir/sr_plan' diff --git a/sr_plan.h b/sr_plan.h index 26d2072..bf05da1 100644 --- a/sr_plan.h +++ b/sr_plan.h @@ -74,10 +74,11 @@ void common_walker(const void *obj, void (*callback) (void *)); enum { Anum_sr_query_hash = 1, + Anum_sr_query_id, Anum_sr_plan_hash, + Anum_sr_enable, Anum_sr_query, Anum_sr_plan, - Anum_sr_enable, Anum_sr_reloids, Anum_sr_index_reloids, Anum_sr_attcount diff --git a/tests/test_sr_plan.py b/tests/test_sr_plan.py new file mode 100755 index 0000000..eb3c52b --- /dev/null +++ b/tests/test_sr_plan.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python3 + +import sys +import os +import tempfile +import contextlib +import shutil +import unittest +import subprocess + +from testgres import get_new_node + +sql_init = ''' +CREATE TABLE test_table(test_attr1 int, test_attr2 int); +INSERT INTO test_table SELECT i, i + 1 FROM generate_series(1, 20) i; +''' + +queries = [ + "SELECT * FROM test_table WHERE test_attr1 = _p(10);", + "SELECT * FROM test_table WHERE test_attr1 = 10;", + "SELECT * FROM test_table WHERE test_attr1 = 15;" +] + +my_dir = os.path.dirname(os.path.abspath(__file__)) +repo_dir = os.path.abspath(os.path.join(my_dir, '../')) +temp_dir = tempfile.mkdtemp() + +upgrade_to = '1.2' +check_upgrade_from = ['1.1.0'] + +compilation = ''' +make USE_PGXS=1 clean +make USE_PGXS=1 install +''' + +dump_sql = ''' +SELECT * FROM pg_extension WHERE extname = 
'sr_plan'; + +SELECT pg_get_functiondef(objid) +FROM pg_catalog.pg_depend JOIN pg_proc ON pg_proc.oid = pg_depend.objid +WHERE refclassid = 'pg_catalog.pg_extension'::REGCLASS AND + refobjid = (SELECT oid + FROM pg_catalog.pg_extension + WHERE extname = 'sr_plan') AND + deptype = 'e' +ORDER BY objid::regprocedure::TEXT ASC; + +\\d+ sr_plans +\\dy sr_plan_invalid_table +''' + +@contextlib.contextmanager +def cwd(path): + curdir = os.getcwd() + os.chdir(path) + + try: + yield + finally: + os.chdir(curdir) + + +def shell(cmd): + subprocess.check_output(cmd, shell=True) + + +def copytree(src, dst): + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d) + else: + shutil.copy2(s, d) + + +class Tests(unittest.TestCase): + def start_node(self): + node = get_new_node() + node.init() + node.append_conf("shared_preload_libraries='sr_plan'\n") + node.start() + node.psql('create extension sr_plan') + node.psql(sql_init) + + return node + + def test_hash_consistency(self): + ''' Test query hash consistency ''' + + with self.start_node() as node: + node.psql("set sr_plan.write_mode=on") + node.psql("set sr_plan.log_usage=NOTICE") + for q in queries: + node.psql(q) + + node.psql("set sr_plan.write_mode=off") + queries1 = node.psql('select query_hash from sr_plans') + self.assertEqual(len(queries), 3) + node.psql("delete from sr_plans") + node.stop() + + node.start() + node.psql("set sr_plan.write_mode=on") + node.psql("set sr_plan.log_usage=NOTICE") + for q in queries: + node.psql(q) + + node.psql("set sr_plan.write_mode=off") + queries2 = node.psql('select query_hash from sr_plans') + node.stop() + + self.assertEqual(queries1, queries2) + + def test_update(self): + copytree(repo_dir, temp_dir) + dumps = [] + + with cwd(temp_dir): + for ver in check_upgrade_from: + shell("git clean -fdx") + shell("git reset --hard") + shell("git checkout -q %s" % ver) + shell(compilation) + + with self.start_node() as node: + node.stop() + + shell("git clean -fdx") + shell("git checkout -q 55f4704c7258527bd9ccd54cb35790a8e438caaa") + shell(compilation) + + node.start() + node.safe_psql("alter extension sr_plan update to '%s'" % upgrade_to) + + p = subprocess.Popen(["psql", "postgres", "-p", str(node.port)], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + dumps.append([ver, p.communicate(input=dump_sql.encode())[0].decode()]) + node.stop() + + # now make clean install + with self.start_node() as node: + p = subprocess.Popen(["psql", "postgres", "-p", str(node.port)], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + dumped_objects_new = p.communicate(input=dump_sql.encode())[0].decode() + + + self.assertEqual(len(dumps), len(check_upgrade_from)) + for ver, dump in dumps: + self.assertEqual(dump, dumped_objects_new) + + +if __name__ == "__main__": + if len(sys.argv) > 1: + suite = unittest.TestLoader().loadTestsFromName(sys.argv[1], + module=sys.modules[__name__]) + else: + suite = unittest.TestLoader().loadTestsFromTestCase(Tests) + + unittest.TextTestRunner(verbosity=2, failfast=True).run(suite) From 4352c4843fb06c877853b3a61228c0a1f87f66e0 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 17 Dec 2018 19:13:45 +0300 Subject: [PATCH 06/12] Use newer commit in tests --- tests/test_sr_plan.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_sr_plan.py b/tests/test_sr_plan.py index eb3c52b..681aee7 100755 --- a/tests/test_sr_plan.py +++ b/tests/test_sr_plan.py @@ -127,7 +127,7 @@ def 
test_update(self): node.stop() shell("git clean -fdx") - shell("git checkout -q 55f4704c7258527bd9ccd54cb35790a8e438caaa") + shell("git checkout -q 41d96bf13") shell(compilation) node.start() From 66bb072533abe332d33ce0d9033c5a6faeef2163 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 17 Dec 2018 19:23:27 +0300 Subject: [PATCH 07/12] Fix for update from 1.0 to 1.1 --- sr_plan--1.0--1.1.sql | 5 ++--- tests/test_sr_plan.py | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/sr_plan--1.0--1.1.sql b/sr_plan--1.0--1.1.sql index 269d549..144de1c 100644 --- a/sr_plan--1.0--1.1.sql +++ b/sr_plan--1.0--1.1.sql @@ -1,3 +1,5 @@ +DROP FUNCTION sr_plan_invalid_table() CASCADE; +DROP FUNCTION explain_jsonb_plan(jsonb) CASCADE; DROP TABLE sr_plans CASCADE; CREATE TABLE sr_plans ( query_hash int NOT NULL, @@ -14,9 +16,6 @@ CREATE INDEX sr_plans_query_hash_idx ON sr_plans (query_hash); CREATE INDEX sr_plans_query_oids ON sr_plans USING gin(reloids); CREATE INDEX sr_plans_query_index_oids ON sr_plans USING gin(index_reloids); -DROP FUNCTION explain_jsonb_plan(jsonb) CASCADE; -DROP FUNCTION sr_plan_invalid_table() CASCADE; - CREATE OR REPLACE FUNCTION sr_plan_invalid_table() RETURNS event_trigger LANGUAGE plpgsql AS $$ DECLARE diff --git a/tests/test_sr_plan.py b/tests/test_sr_plan.py index 681aee7..9513a07 100755 --- a/tests/test_sr_plan.py +++ b/tests/test_sr_plan.py @@ -26,7 +26,7 @@ temp_dir = tempfile.mkdtemp() upgrade_to = '1.2' -check_upgrade_from = ['1.1.0'] +check_upgrade_from = ['rel_1.0', '1.1.0'] compilation = ''' make USE_PGXS=1 clean @@ -127,7 +127,7 @@ def test_update(self): node.stop() shell("git clean -fdx") - shell("git checkout -q 41d96bf13") + shell("git checkout -q master") shell(compilation) node.start() From d976efce28bb0a92496d8b14c2842c72dfe8a977 Mon Sep 17 00:00:00 2001 From: Ildus Kurbangaliev Date: Mon, 17 Dec 2018 19:25:35 +0300 Subject: [PATCH 08/12] Remove autogenerated file --- .gitignore | 1 + sr_plan--1.2.sql | 55 ------------------------------------------------ 2 files changed, 1 insertion(+), 55 deletions(-) delete mode 100644 sr_plan--1.2.sql diff --git a/.gitignore b/.gitignore index bbdb1d3..153b445 100644 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,7 @@ deserialize.c serialize.c sr_plan--1.1.sql +sr_plan--1.2.sql pycparser /Mako* /site-packages diff --git a/sr_plan--1.2.sql b/sr_plan--1.2.sql deleted file mode 100644 index 53a3163..0000000 --- a/sr_plan--1.2.sql +++ /dev/null @@ -1,55 +0,0 @@ -/* contrib/sr_plan/init.sql */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION sr_plan" to load this file. 
\quit
-
-CREATE TABLE sr_plans (
- query_hash int NOT NULL,
- query_id int8 NOT NULL,
- plan_hash int NOT NULL,
- enable boolean NOT NULL,
- query varchar NOT NULL,
- plan text NOT NULL,
-
- reloids oid[],
- index_reloids oid[]
-);
-
-CREATE INDEX sr_plans_query_hash_idx ON sr_plans (query_hash);
-CREATE INDEX sr_plans_query_oids ON sr_plans USING gin(reloids);
-CREATE INDEX sr_plans_query_index_oids ON sr_plans USING gin(index_reloids);
-
-CREATE FUNCTION _p(anyelement)
-RETURNS anyelement
-AS 'MODULE_PATHNAME', 'do_nothing'
-LANGUAGE C STRICT VOLATILE;
-
-CREATE FUNCTION show_plan(query_hash int4,
- index int4 default null,
- format cstring default null)
-RETURNS SETOF RECORD
-AS 'MODULE_PATHNAME', 'show_plan'
-LANGUAGE C VOLATILE;
-
-CREATE OR REPLACE FUNCTION sr_plan_invalid_table() RETURNS event_trigger
-LANGUAGE plpgsql AS $$
-DECLARE
- obj record;
- indobj record;
-BEGIN
- FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects()
- WHERE object_type = 'table' OR object_type = 'index'
- LOOP
- IF obj.object_type = 'table' THEN
- DELETE FROM @extschema@.sr_plans WHERE reloids @> ARRAY[obj.objid];
- ELSE
- IF obj.object_type = 'index' THEN
- DELETE FROM @extschema@.sr_plans WHERE index_reloids @> ARRAY[obj.objid];
- END IF;
- END IF;
- END LOOP;
-END
-$$;
-
-CREATE EVENT TRIGGER sr_plan_invalid_table ON sql_drop
- EXECUTE PROCEDURE sr_plan_invalid_table();

From b1a4d0dfd5f1fe366774e250dfe68b7aefc8d5c7 Mon Sep 17 00:00:00 2001
From: Ildus Kurbangaliev
Date: Mon, 17 Dec 2018 19:42:20 +0300
Subject: [PATCH 09/12] Update README

---
 README.md | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+)

diff --git a/README.md b/README.md
index 7215b47..25c29a1 100644
--- a/README.md
+++ b/README.md
@@ -68,3 +68,62 @@ If we keep the plan for the query and enable it to be used also for the followin
 select query_hash from sr_plans where query_hash=1000+_p(11);
 select query_hash from sr_plans where query_hash=1000+_p(-5);
 ```
+
+## Explain
+
+It is possible to see saved plans by using `show_plan` function. It requires
+knowing query hash which could be fetched from `sr_plans` table.
+
+Examples:
+
+```SQL
+SELECT show_plan(1);
+ show_plan
+----------------------------------------------
+ ("Seq Scan on public.explain_test")
+ (" Output: test_attr1, test_attr2")
+ (" Filter: (explain_test.test_attr1 = 10)")
+(3 rows)
+```
+
+Get second plan:
+
+```SQL
+SELECT show_plan(1, index := 2);
+ show_plan
+----------------------------------------------
+ ("Seq Scan on public.explain_test")
+ (" Output: test_attr1, test_attr2")
+ (" Filter: (explain_test.test_attr1 = 10)")
+(3 rows)
+```
+
+Use another output format (supported formats are `json`, `text`, `xml`):
+
+```SQL
+SELECT show_plan(1, format := 'json');
+ show_plan
+------------------------------------------------------
+ ("[ +
+ { +
+ ""Plan"": { +
+ ""Node Type"": ""Seq Scan"", +
+ ""Parallel Aware"": false, +
+ ""Relation Name"": ""explain_test"", +
+ ""Schema"": ""public"", +
+ ""Alias"": ""explain_test"", +
+ ""Output"": [""test_attr1"", ""test_attr2""], +
+ ""Filter"": ""(explain_test.test_attr1 = 10)""+
+ } +
+ } +
+ ]")
+(1 row)
+```
+
+## `pg_stat_statements` integration
+
+`sr_plans` table contains `query_id` columns which could be used to make
+joins with `pg_stat_statements` tables and views.
+
+Note: in `shared_preload_libraries` list `pg_stat_statements` should be
+specified after `sr_plan`.
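
The `pg_stat_statements` note in the README hunk above names the join column but shows no query. A minimal sketch of such a join, assuming the stock `pg_stat_statements` view (its `queryid`, `calls` and `query` columns) and an `sr_plans.query_id` populated as described in the ordering note; none of this comes from the patches themselves:

```SQL
-- Sketch only: correlate saved plans with pg_stat_statements counters.
-- sr_plans.query_id is added by this patch series; queryid, calls and
-- query are columns of the standard pg_stat_statements view.
SELECT s.query_hash,
       s.enable,
       p.calls,
       p.query
FROM sr_plans s
JOIN pg_stat_statements p ON p.queryid = s.query_id;
```

Plans saved without a computed query ID (stored as 0) would simply find no match in the view.
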
From efac7c3fa097f2382872486a44e70ed3e24c2bc0 Mon Sep 17 00:00:00 2001
From: Ildus Kurbangaliev
Date: Tue, 18 Dec 2018 12:46:52 +0300
Subject: [PATCH 10/12] Update README

---
 README.md | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 25c29a1..70f963c 100644
--- a/README.md
+++ b/README.md
@@ -69,13 +69,15 @@ select query_hash from sr_plans where query_hash=1000+_p(11);
 select query_hash from sr_plans where query_hash=1000+_p(-5);
 ```
 
-## Explain
+## EXPLAIN for saved plans
 
 It is possible to see saved plans by using `show_plan` function. It requires
 knowing query hash which could be fetched from `sr_plans` table.
 
 Examples:
 
+Show enabled plan for query hash:
+
 ```SQL
 SELECT show_plan(1);
  show_plan
@@ -86,7 +88,7 @@ SELECT show_plan(1);
 (3 rows)
 ```
 
-Get second plan:
+Get second saved plan by using `index` parameter (ignores `enable` attribute):
 
 ```SQL
 SELECT show_plan(1, index := 2);
@@ -98,7 +100,7 @@ SELECT show_plan(1, index := 2);
 (3 rows)
 ```
 
-Use another output format (supported formats are `json`, `text`, `xml`):
+Use another output format (supported formats are `json`, `text`, `xml`, `yaml`):
 
 ```SQL
 SELECT show_plan(1, format := 'json');

From 769b8c3c67e9fd135902baa0e5dde18578ac8d19 Mon Sep 17 00:00:00 2001
From: "d.lepikhova"
Date: Mon, 4 Jul 2022 13:37:09 +0500
Subject: [PATCH 11/12] Recovery sr_plan commits for master branch

As part of the inclusion of scheduled mirroring, a significant part of the
commits in the sr_plan repository was lost. This commit restores all the
differences between the state of the code on GitLab and the actual code in
the Std and EE branches.

---
 Makefile | 2 +-
 expected/joins.out | 14 ++--
 init.sql | 2 +-
 sql/joins.sql | 9 ++-
 sr_plan.c | 162 +++++++++++++++++++++++++++++++++++++++++----------
 sr_plan.h | 3 +
 6 files changed, 167 insertions(+), 25 deletions(-)

diff --git a/Makefile b/Makefile
index cd1b95d..0665669 100644
--- a/Makefile
+++ b/Makefile
@@ -11,7 +11,7 @@ DATA_built = sr_plan--$(EXTVERSION).sql
 DATA = sr_plan--1.0--1.1.sql sr_plan--1.1--1.2.sql
 EXTRA_CLEAN = sr_plan--$(EXTVERSION).sql
 
-REGRESS = sr_plan sr_plan_schema joins explain
+REGRESS = security sr_plan sr_plan_schema joins explain
 
 ifdef USE_PGXS
 ifndef PG_CONFIG
diff --git a/expected/joins.out b/expected/joins.out
index bed1581..a741bd5 100644
--- a/expected/joins.out
+++ b/expected/joins.out
@@ -3,6 +3,7 @@ SET sr_plan.log_usage = NOTICE;
 CREATE TABLE test_table(test_attr1 int, test_attr2 int);
 INSERT INTO test_table SELECT i, i + 1 FROM generate_series(1, 20) i;
 SET sr_plan.write_mode = true;
+set enable_hashjoin = false;
 CREATE TABLE J1_TBL (i integer, j integer, t text);
 CREATE TABLE J2_TBL (i integer, k integer);
 INSERT INTO J1_TBL VALUES (1, 4, 'one');
@@ -595,19 +596,19 @@ NATURAL FULL JOIN
 ee | | 42 | 2 |
 (4 rows)
 
-create temp table nt1 (
+create table nt1 (
 id int primary key,
 a1 boolean,
 a2 boolean
 );
-create temp table nt2 (
+create table nt2 (
 id int primary key,
 nt1_id int,
 b1 boolean,
 b2 boolean,
 foreign key (nt1_id) references nt1(id)
 );
-create temp table nt3 (
+create table nt3 (
 id int primary key,
 nt2_id int,
 c1 boolean,
@@ -616,14 +617,13 @@ create temp table nt3 (
 insert into nt1 values (1,true,true);
 insert into nt1 values (2,true,false);
 insert into nt1 values (3,false,false);
+SET sr_plan.write_mode = false;
 insert into nt2 values (1,1,true,true);
-NOTICE: sr_plan: saved plan for SELECT 1 FROM ONLY "pg_temp_3"."nt1" x WHERE "id" OPERATOR(pg_catalog.=) $1 FOR KEY SHARE OF x
 insert into nt2 values (2,2,true,false);
-NOTICE: sr_plan: saved plan for insert into nt2 values (2,2,true,false); insert into nt2 values (3,3,false,false); -NOTICE: sr_plan: saved plan for insert into nt2 values (3,3,false,false); +SET sr_plan.write_mode = true; insert into nt3 values (1,1,true); -NOTICE: sr_plan: saved plan for SELECT 1 FROM ONLY "pg_temp_3"."nt2" x WHERE "id" OPERATOR(pg_catalog.=) $1 FOR KEY SHARE OF x +NOTICE: sr_plan: saved plan for SELECT 1 FROM ONLY "public"."nt2" x WHERE "id" OPERATOR(pg_catalog.=) $1 FOR KEY SHARE OF x insert into nt3 values (2,2,false); NOTICE: sr_plan: saved plan for insert into nt3 values (2,2,false); insert into nt3 values (3,3,true); diff --git a/init.sql b/init.sql index 53a3163..0d550d6 100644 --- a/init.sql +++ b/init.sql @@ -31,7 +31,7 @@ RETURNS SETOF RECORD AS 'MODULE_PATHNAME', 'show_plan' LANGUAGE C VOLATILE; -CREATE OR REPLACE FUNCTION sr_plan_invalid_table() RETURNS event_trigger +CREATE FUNCTION sr_plan_invalid_table() RETURNS event_trigger LANGUAGE plpgsql AS $$ DECLARE obj record; diff --git a/sql/joins.sql b/sql/joins.sql index 6056101..c9e9fef 100644 --- a/sql/joins.sql +++ b/sql/joins.sql @@ -3,6 +3,7 @@ SET sr_plan.log_usage = NOTICE; CREATE TABLE test_table(test_attr1 int, test_attr2 int); INSERT INTO test_table SELECT i, i + 1 FROM generate_series(1, 20) i; SET sr_plan.write_mode = true; +set enable_hashjoin = false; CREATE TABLE J1_TBL (i integer, j integer, t text); CREATE TABLE J2_TBL (i integer, k integer); @@ -180,19 +181,19 @@ NATURAL FULL JOIN (SELECT name, n as s3_n FROM t3) as s3 ) ss2; -create temp table nt1 ( +create table nt1 ( id int primary key, a1 boolean, a2 boolean ); -create temp table nt2 ( +create table nt2 ( id int primary key, nt1_id int, b1 boolean, b2 boolean, foreign key (nt1_id) references nt1(id) ); -create temp table nt3 ( +create table nt3 ( id int primary key, nt2_id int, c1 boolean, @@ -202,9 +203,11 @@ create temp table nt3 ( insert into nt1 values (1,true,true); insert into nt1 values (2,true,false); insert into nt1 values (3,false,false); +SET sr_plan.write_mode = false; insert into nt2 values (1,1,true,true); insert into nt2 values (2,2,true,false); insert into nt2 values (3,3,false,false); +SET sr_plan.write_mode = true; insert into nt3 values (1,1,true); insert into nt3 values (2,2,false); insert into nt3 values (3,3,true); diff --git a/sr_plan.c b/sr_plan.c index 0ef859f..df8d65f 100644 --- a/sr_plan.c +++ b/sr_plan.c @@ -24,6 +24,7 @@ PG_MODULE_MAGIC; PG_FUNCTION_INFO_V1(do_nothing); PG_FUNCTION_INFO_V1(show_plan); +PG_FUNCTION_INFO_V1(_p); void _PG_init(void); void _PG_fini(void); @@ -65,8 +66,13 @@ static SrPlanCachedInfo cachedInfo = { NULL }; +#if PG_VERSION_NUM >= 130000 +static PlannedStmt *sr_planner(Query *parse, const char *query_string, + int cursorOptions, ParamListInfo boundParams); +#else static PlannedStmt *sr_planner(Query *parse, int cursorOptions, ParamListInfo boundParams); +#endif static void sr_analyze(ParseState *pstate, Query *query); @@ -118,7 +124,7 @@ invalidate_oids(void) cachedInfo.index_reloids_index_oid = InvalidOid; } -static void +static bool init_sr_plan(void) { char *schema_name; @@ -129,7 +135,7 @@ init_sr_plan(void) cachedInfo.schema_oid = get_sr_plan_schema(); if (cachedInfo.schema_oid == InvalidOid) - return; + return false; cachedInfo.sr_index_oid = sr_get_relname_oid(cachedInfo.schema_oid, SR_PLANS_TABLE_QUERY_INDEX_NAME); @@ -142,8 +148,10 @@ init_sr_plan(void) if (cachedInfo.sr_plans_oid == InvalidOid || cachedInfo.sr_index_oid == InvalidOid) - elog(ERROR, "sr_plan extension installed 
incorrectly"); - + { + elog(WARNING, "sr_plan extension installed incorrectly. Do nothing. It's ok in pg_restore."); + return false; + } /* Initialize _p function Oid */ schema_name = get_namespace_name(cachedInfo.schema_oid); func_name_list = list_make2(makeString(schema_name), makeString("_p")); @@ -152,13 +160,16 @@ init_sr_plan(void) pfree(schema_name); if (cachedInfo.fake_func == InvalidOid) - elog(ERROR, "sr_plan extension installed incorrectly"); - + { + elog(WARNING, "sr_plan extension installed incorrectly"); + return false; + } if (relcache_callback_needed) { CacheRegisterRelcacheCallback(sr_plan_relcache_hook, PointerGetDatum(NULL)); relcache_callback_needed = false; } + return true; } /* @@ -262,6 +273,8 @@ sr_analyze(ParseState *pstate, Query *query) elog(NOTICE, "sr_plan was disabled"); } } + if (srplan_post_parse_analyze_hook_next) + srplan_post_parse_analyze_hook_next(pstate, query); } /* @@ -298,7 +311,11 @@ get_sr_plan_schema(void) ObjectIdGetDatum(ext_schema)); #endif +#if PG_VERSION_NUM >= 130000 + rel = table_open(ExtensionRelationId, heap_lock); +#else rel = heap_open(ExtensionRelationId, heap_lock); +#endif scandesc = systable_beginscan(rel, ExtensionOidIndexId, true, NULL, 1, entry); @@ -312,7 +329,11 @@ get_sr_plan_schema(void) systable_endscan(scandesc); +#if PG_VERSION_NUM >= 130000 + table_close(rel, heap_lock); +#else heap_close(rel, heap_lock); +#endif return result; } @@ -390,14 +411,27 @@ lookup_plan_by_query_hash(Snapshot snapshot, Relation sr_index_rel, PlannedStmt *pl_stmt = NULL; HeapTuple htup; IndexScanDesc query_index_scan; +#if PG_VERSION_NUM >= 120000 + TupleTableSlot *slot = table_slot_create(sr_plans_heap, NULL); +#endif query_index_scan = index_beginscan(sr_plans_heap, sr_index_rel, snapshot, 1, 0); index_rescan(query_index_scan, key, 1, NULL, 0); +#if PG_VERSION_NUM >= 120000 + while (index_getnext_slot(query_index_scan, ForwardScanDirection, slot)) +#else while ((htup = index_getnext(query_index_scan, ForwardScanDirection)) != NULL) +#endif { Datum search_values[Anum_sr_attcount]; bool search_nulls[Anum_sr_attcount]; +#if PG_VERSION_NUM >= 120000 + bool shouldFree; + + htup = ExecFetchSlotHeapTuple(slot, false, &shouldFree); + Assert(!shouldFree); +#endif heap_deform_tuple(htup, sr_plans_heap->rd_att, search_values, search_nulls); @@ -422,11 +456,19 @@ lookup_plan_by_query_hash(Snapshot snapshot, Relation sr_index_rel, } index_endscan(query_index_scan); +#if PG_VERSION_NUM >= 120000 + ExecDropSingleTupleTableSlot(slot); +#endif return pl_stmt; } +/* planner_hook */ static PlannedStmt * +#if PG_VERSION_NUM >= 130000 +sr_planner(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams) +#else sr_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) +#endif { Datum query_hash; Relation sr_plans_heap, @@ -438,19 +480,27 @@ sr_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) bool found; Datum plan_hash; IndexScanDesc query_index_scan; - PlannedStmt *pl_stmt = NULL; LOCKMODE heap_lock = AccessShareLock; struct QueryParamsContext qp_context = {true, NULL}; - +#if PG_VERSION_NUM >= 120000 + TupleTableSlot *slot; +#endif static int level = 0; level++; +#if PG_VERSION_NUM >= 130000 +#define call_standard_planner() \ + (srplan_planner_hook_next ? \ + srplan_planner_hook_next(parse, query_string, cursorOptions, boundParams) : \ + standard_planner(parse, query_string, cursorOptions, boundParams)) +#else #define call_standard_planner() \ (srplan_planner_hook_next ? 
\ srplan_planner_hook_next(parse, cursorOptions, boundParams) : \ standard_planner(parse, cursorOptions, boundParams)) +#endif /* Only save plans for SELECT commands */ if (parse->commandType != CMD_SELECT || !cachedInfo.enabled @@ -463,9 +513,17 @@ sr_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Set extension Oid if needed */ if (cachedInfo.schema_oid == InvalidOid) - init_sr_plan(); + { + if (!init_sr_plan()) + { + /* Just call standard_planner() if schema doesn't exist. */ + pl_stmt = call_standard_planner(); + level--; + return pl_stmt; + } + } - if (cachedInfo.schema_oid == InvalidOid) + if (cachedInfo.schema_oid == InvalidOid || cachedInfo.sr_plans_oid == InvalidOid) { /* Just call standard_planner() if schema doesn't exist. */ pl_stmt = call_standard_planner(); @@ -480,7 +538,11 @@ sr_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) /* Try to find already planned statement */ heap_lock = AccessShareLock; +#if PG_VERSION_NUM >= 130000 + sr_plans_heap = table_open(cachedInfo.sr_plans_oid, heap_lock); +#else sr_plans_heap = heap_open(cachedInfo.sr_plans_oid, heap_lock); +#endif sr_index_rel = index_open(cachedInfo.sr_index_oid, heap_lock); qp_context.collect = false; @@ -507,10 +569,18 @@ sr_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) /* close and get AccessExclusiveLock */ UnregisterSnapshot(snapshot); index_close(sr_index_rel, heap_lock); +#if PG_VERSION_NUM >= 130000 + table_close(sr_plans_heap, heap_lock); +#else heap_close(sr_plans_heap, heap_lock); +#endif heap_lock = AccessExclusiveLock; +#if PG_VERSION_NUM >= 130000 + sr_plans_heap = table_open(cachedInfo.sr_plans_oid, heap_lock); +#else sr_plans_heap = heap_open(cachedInfo.sr_plans_oid, heap_lock); +#endif sr_index_rel = index_open(cachedInfo.sr_index_oid, heap_lock); /* recheck plan in index */ @@ -536,19 +606,32 @@ sr_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) query_index_scan = index_beginscan(sr_plans_heap, sr_index_rel, snapshot, 1, 0); index_rescan(query_index_scan, &key, 1, NULL, 0); - +#if PG_VERSION_NUM >= 120000 + slot = table_slot_create(sr_plans_heap, NULL); +#endif found = false; for (;;) { HeapTuple htup; Datum search_values[Anum_sr_attcount]; bool search_nulls[Anum_sr_attcount]; +#if PG_VERSION_NUM >= 120000 + bool shouldFree; + + if (!index_getnext_slot(query_index_scan, ForwardScanDirection, slot)) + break; + htup = ExecFetchSlotHeapTuple(slot, false, &shouldFree); + Assert(!shouldFree); +#else ItemPointer tid = index_getnext_tid(query_index_scan, ForwardScanDirection); if (tid == NULL) break; htup = index_fetch_heap(query_index_scan); + if (htup == NULL) + break; +#endif heap_deform_tuple(htup, sr_plans_heap->rd_att, search_values, search_nulls); @@ -560,7 +643,9 @@ sr_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) } } index_endscan(query_index_scan); - +#if PG_VERSION_NUM >= 120000 + ExecDropSingleTupleTableSlot(slot); +#endif if (!found) { struct IndexIds index_ids = {NIL}; @@ -676,7 +761,11 @@ sr_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) UnregisterSnapshot(snapshot); index_close(sr_index_rel, heap_lock); +#if PG_VERSION_NUM >= 130000 + table_close(sr_plans_heap, heap_lock); +#else heap_close(sr_plans_heap, heap_lock); +#endif return pl_stmt; } @@ -713,7 +802,11 @@ sr_query_expr_walker(Node *node, void *context) { struct QueryParam *param = (struct QueryParam *) palloc(sizeof(struct QueryParam)); param->location = fexpr->location; +#if PG_VERSION_NUM >= 130000 + 
param->node = fexpr->args->elements[0].ptr_value; +#else param->node = fexpr->args->head->data.ptr_value; +#endif param->funccollid = fexpr->funccollid; /* HACK: location could lost after planning */ @@ -735,7 +828,11 @@ sr_query_expr_walker(Node *node, void *context) if (param->location == fexpr->funccollid) { fexpr->funccollid = param->funccollid; +#if PG_VERSION_NUM >= 130000 + fexpr->args->elements[0].ptr_value = param->node; +#else fexpr->args->head->data.ptr_value = param->node; +#endif if (cachedInfo.log_usage) elog(cachedInfo.log_usage, "sr_plan: restored parameter on %d", param->location); @@ -884,6 +981,12 @@ do_nothing(PG_FUNCTION_ARGS) PG_RETURN_DATUM(PG_GETARG_DATUM(0)); } +Datum +_p(PG_FUNCTION_ARGS) +{ + PG_RETURN_DATUM(PG_GETARG_DATUM(0)); +} + /* * Construct the result tupledesc for an EXPLAIN */ @@ -907,7 +1010,11 @@ make_tupledesc(ExplainState *es) } /* Need a tuple descriptor representing a single TEXT or XML column */ +#if PG_VERSION_NUM >= 120000 + tupdesc = CreateTemplateTupleDesc(1); +#else tupdesc = CreateTemplateTupleDesc(1, false); +#endif TupleDescInitEntry(tupdesc, (AttrNumber) 1, "QUERY PLAN", result_type, -1, 0); return tupdesc; } @@ -927,10 +1034,13 @@ show_plan(PG_FUNCTION_ARGS) sr_index_rel; Snapshot snapshot; ScanKeyData key; + ListCell *lc; char *queryString; ExplainState *es = NewExplainState(); uint32 index, query_hash = PG_GETARG_INT32(0); + Relation *rel_array; + int i; funcctx = SRF_FIRSTCALL_INIT(); @@ -967,7 +1077,11 @@ show_plan(PG_FUNCTION_ARGS) } /* Try to find already planned statement */ +#if PG_VERSION_NUM >= 130000 + sr_plans_heap = table_open(cachedInfo.sr_plans_oid, heap_lock); +#else sr_plans_heap = heap_open(cachedInfo.sr_plans_oid, heap_lock); +#endif sr_index_rel = index_open(cachedInfo.sr_index_oid, heap_lock); snapshot = RegisterSnapshot(GetLatestSnapshot()); @@ -979,8 +1093,19 @@ show_plan(PG_FUNCTION_ARGS) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("could not find saved plan"))); + rel_array = palloc(sizeof(Relation) * list_length(pl_stmt->relationOids)); + i = 0; + foreach(lc, pl_stmt->relationOids) +#if PG_VERSION_NUM >= 130000 + rel_array[i++] = table_open(lfirst_oid(lc), heap_lock); +#else + rel_array[i++] = heap_open(lfirst_oid(lc), heap_lock); +#endif + ExplainBeginOutput(es); -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 130000 + ExplainOnePlan(pl_stmt, NULL, es, queryString, NULL, NULL, NULL, NULL); +#elif PG_VERSION_NUM >= 100000 ExplainOnePlan(pl_stmt, NULL, es, queryString, NULL, NULL, NULL); #else ExplainOnePlan(pl_stmt, NULL, es, queryString, NULL, NULL); @@ -990,7 +1115,18 @@ show_plan(PG_FUNCTION_ARGS) UnregisterSnapshot(snapshot); index_close(sr_index_rel, heap_lock); +#if PG_VERSION_NUM >= 130000 + table_close(sr_plans_heap, heap_lock); +#else heap_close(sr_plans_heap, heap_lock); +#endif + + while (--i >= 0) +#if PG_VERSION_NUM >= 130000 + table_close(rel_array[i], heap_lock); +#else + heap_close(rel_array[i], heap_lock); +#endif oldcxt = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); diff --git a/sr_plan.h b/sr_plan.h index bf05da1..a4d1d5f 100644 --- a/sr_plan.h +++ b/sr_plan.h @@ -12,7 +12,10 @@ #include "utils/builtins.h" #include "utils/rel.h" #include "utils/relcache.h" + +#if PG_VERSION_NUM < 120000 #include "utils/tqual.h" +#endif #include "utils/guc.h" #include "utils/datum.h" #include "utils/inval.h" From 19a6a425fc91bdfc5482650db9747bead67efd40 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Mon, 1 Aug 2022 21:47:20 +0500 Subject: [PATCH 12/12] Disable all regress tests --- 
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 0665669..ecf0c26 100644
--- a/Makefile
+++ b/Makefile
@@ -11,7 +11,7 @@ DATA_built = sr_plan--$(EXTVERSION).sql
 DATA = sr_plan--1.0--1.1.sql sr_plan--1.1--1.2.sql
 EXTRA_CLEAN = sr_plan--$(EXTVERSION).sql
 
-REGRESS = security sr_plan sr_plan_schema joins explain
+#REGRESS = security sr_plan sr_plan_schema joins explain
 
 ifdef USE_PGXS
 ifndef PG_CONFIG