From e3f5760fc66c9514876a0a512795805ad3dca7fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Fri, 4 Apr 2025 11:22:29 +0200 Subject: [PATCH 001/114] fix: enable sqlx offline mode (#297) * fix: enable sqlx offline mode * fix: install cli in ci * fix: remove preprare from lmint * fix: remove preprare from lmint --- .github/workflows/pull_request.yml | 5 + ...d572b3d79c7b8b9b7ffa8915806c947095a96.json | 56 ++++++++++ ...dfce00358ecbe878952e8d4915c06cc5c9e0f.json | 80 ++++++++++++++ ...48e76ff7e255960a4ce5466674ff35a97b151.json | 32 ++++++ ...e496f5337cadbad7a3fb03ccd3e3c21b71389.json | 104 ++++++++++++++++++ ...126cacb3c9ed5f915b7e98052d58df98d480b.json | 38 +++++++ ...ae86f456892fa9ce48854a8b960cdf2d11a45.json | 86 +++++++++++++++ 7 files changed, 401 insertions(+) create mode 100644 .sqlx/query-1c29eca62591ae2597581be806dd572b3d79c7b8b9b7ffa8915806c947095a96.json create mode 100644 .sqlx/query-2a964a12383b977bbbbd6fe7298dfce00358ecbe878952e8d4915c06cc5c9e0f.json create mode 100644 .sqlx/query-36862f7f9d2d1c50ba253b28a7648e76ff7e255960a4ce5466674ff35a97b151.json create mode 100644 .sqlx/query-64d9718b07516f3d2720cb7aa79e496f5337cadbad7a3fb03ccd3e3c21b71389.json create mode 100644 .sqlx/query-d61f2f56ce777c99593df240b3a126cacb3c9ed5f915b7e98052d58df98d480b.json create mode 100644 .sqlx/query-fc0a0aa6d2a06bf3103d26a0233ae86f456892fa9ce48854a8b960cdf2d11a45.json diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 68d7371b..bd7d6c49 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -92,6 +92,10 @@ jobs: cache-base: main env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup sqlx-cli + run: cargo install sqlx-cli + - name: Setup Biome uses: biomejs/setup-biome@v2 with: @@ -101,6 +105,7 @@ jobs: cargo clippy cargo run -p rules_check biome lint + cargo sqlx prepare --check --workspace test: name: Test diff --git a/.sqlx/query-1c29eca62591ae2597581be806dd572b3d79c7b8b9b7ffa8915806c947095a96.json b/.sqlx/query-1c29eca62591ae2597581be806dd572b3d79c7b8b9b7ffa8915806c947095a96.json new file mode 100644 index 00000000..fcd4901e --- /dev/null +++ b/.sqlx/query-1c29eca62591ae2597581be806dd572b3d79c7b8b9b7ffa8915806c947095a96.json @@ -0,0 +1,56 @@ +{ + "db_name": "PostgreSQL", + "query": "select\n t.oid :: int8 as \"id!\",\n t.typname as name,\n n.nspname as \"schema!\",\n format_type (t.oid, null) as \"format!\",\n coalesce(t_enums.enums, '[]') as enums,\n coalesce(t_attributes.attributes, '[]') as attributes,\n obj_description (t.oid, 'pg_type') as comment\nfrom\n pg_type t\n left join pg_namespace n on n.oid = t.typnamespace\n left join (\n select\n enumtypid,\n jsonb_agg(\n enumlabel\n order by\n enumsortorder\n ) as enums\n from\n pg_enum\n group by\n enumtypid\n ) as t_enums on t_enums.enumtypid = t.oid\n left join (\n select\n oid,\n jsonb_agg(\n jsonb_build_object('name', a.attname, 'type_id', a.atttypid :: int8)\n order by\n a.attnum asc\n ) as attributes\n from\n pg_class c\n join pg_attribute a on a.attrelid = c.oid\n where\n c.relkind = 'c'\n and not a.attisdropped\n group by\n c.oid\n ) as t_attributes on t_attributes.oid = t.typrelid\nwhere\n (\n t.typrelid = 0\n or (\n select\n c.relkind = 'c'\n from\n pg_class c\n where\n c.oid = t.typrelid\n )\n );", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Name" + }, + { + "ordinal": 2, + "name": "schema!", + "type_info": "Name" + 
}, + { + "ordinal": 3, + "name": "format!", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "enums", + "type_info": "Jsonb" + }, + { + "ordinal": 5, + "name": "attributes", + "type_info": "Jsonb" + }, + { + "ordinal": 6, + "name": "comment", + "type_info": "Text" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null, + false, + true, + null, + null, + null, + null + ] + }, + "hash": "1c29eca62591ae2597581be806dd572b3d79c7b8b9b7ffa8915806c947095a96" +} diff --git a/.sqlx/query-2a964a12383b977bbbbd6fe7298dfce00358ecbe878952e8d4915c06cc5c9e0f.json b/.sqlx/query-2a964a12383b977bbbbd6fe7298dfce00358ecbe878952e8d4915c06cc5c9e0f.json new file mode 100644 index 00000000..96439422 --- /dev/null +++ b/.sqlx/query-2a964a12383b977bbbbd6fe7298dfce00358ecbe878952e8d4915c06cc5c9e0f.json @@ -0,0 +1,80 @@ +{ + "db_name": "PostgreSQL", + "query": "select\n c.oid :: int8 as \"id!\",\n nc.nspname as schema,\n c.relname as name,\n c.relrowsecurity as rls_enabled,\n c.relforcerowsecurity as rls_forced,\n case\n when c.relreplident = 'd' then 'DEFAULT'\n when c.relreplident = 'i' then 'INDEX'\n when c.relreplident = 'f' then 'FULL'\n else 'NOTHING'\n end as \"replica_identity!\",\n pg_total_relation_size(format('%I.%I', nc.nspname, c.relname)) :: int8 as \"bytes!\",\n pg_size_pretty(\n pg_total_relation_size(format('%I.%I', nc.nspname, c.relname))\n ) as \"size!\",\n pg_stat_get_live_tuples(c.oid) as \"live_rows_estimate!\",\n pg_stat_get_dead_tuples(c.oid) as \"dead_rows_estimate!\",\n obj_description(c.oid) as comment\nfrom\n pg_namespace nc\n join pg_class c on nc.oid = c.relnamespace\nwhere\n c.relkind in ('r', 'p')\n and not pg_is_other_temp_schema(nc.oid)\n and (\n pg_has_role(c.relowner, 'USAGE')\n or has_table_privilege(\n c.oid,\n 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER'\n )\n or has_any_column_privilege(c.oid, 'SELECT, INSERT, UPDATE, REFERENCES')\n )\ngroup by\n c.oid,\n c.relname,\n c.relrowsecurity,\n c.relforcerowsecurity,\n c.relreplident,\n nc.nspname;", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "schema", + "type_info": "Name" + }, + { + "ordinal": 2, + "name": "name", + "type_info": "Name" + }, + { + "ordinal": 3, + "name": "rls_enabled", + "type_info": "Bool" + }, + { + "ordinal": 4, + "name": "rls_forced", + "type_info": "Bool" + }, + { + "ordinal": 5, + "name": "replica_identity!", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "bytes!", + "type_info": "Int8" + }, + { + "ordinal": 7, + "name": "size!", + "type_info": "Text" + }, + { + "ordinal": 8, + "name": "live_rows_estimate!", + "type_info": "Int8" + }, + { + "ordinal": 9, + "name": "dead_rows_estimate!", + "type_info": "Int8" + }, + { + "ordinal": 10, + "name": "comment", + "type_info": "Text" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null, + false, + false, + false, + false, + null, + null, + null, + null, + null, + null + ] + }, + "hash": "2a964a12383b977bbbbd6fe7298dfce00358ecbe878952e8d4915c06cc5c9e0f" +} diff --git a/.sqlx/query-36862f7f9d2d1c50ba253b28a7648e76ff7e255960a4ce5466674ff35a97b151.json b/.sqlx/query-36862f7f9d2d1c50ba253b28a7648e76ff7e255960a4ce5466674ff35a97b151.json new file mode 100644 index 00000000..6255c9b9 --- /dev/null +++ b/.sqlx/query-36862f7f9d2d1c50ba253b28a7648e76ff7e255960a4ce5466674ff35a97b151.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + "query": "select\n n.oid :: int8 as \"id!\",\n n.nspname as name,\n u.rolname as \"owner!\"\nfrom\n 
pg_namespace n,\n pg_roles u\nwhere\n n.nspowner = u.oid\n and (\n pg_has_role(n.nspowner, 'USAGE')\n or has_schema_privilege(n.oid, 'CREATE, USAGE')\n )\n and not pg_catalog.starts_with(n.nspname, 'pg_temp_')\n and not pg_catalog.starts_with(n.nspname, 'pg_toast_temp_');", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Name" + }, + { + "ordinal": 2, + "name": "owner!", + "type_info": "Name" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null, + false, + true + ] + }, + "hash": "36862f7f9d2d1c50ba253b28a7648e76ff7e255960a4ce5466674ff35a97b151" +} diff --git a/.sqlx/query-64d9718b07516f3d2720cb7aa79e496f5337cadbad7a3fb03ccd3e3c21b71389.json b/.sqlx/query-64d9718b07516f3d2720cb7aa79e496f5337cadbad7a3fb03ccd3e3c21b71389.json new file mode 100644 index 00000000..43d63459 --- /dev/null +++ b/.sqlx/query-64d9718b07516f3d2720cb7aa79e496f5337cadbad7a3fb03ccd3e3c21b71389.json @@ -0,0 +1,104 @@ +{ + "db_name": "PostgreSQL", + "query": "with functions as (\n select\n oid,\n proname,\n prosrc,\n prorettype,\n proretset,\n provolatile,\n prosecdef,\n prolang,\n pronamespace,\n proconfig,\n -- proargmodes is null when all arg modes are IN\n coalesce(\n p.proargmodes,\n array_fill(\n 'i' :: text,\n array [cardinality(coalesce(p.proallargtypes, p.proargtypes))]\n )\n ) as arg_modes,\n -- proargnames is null when all args are unnamed\n coalesce(\n p.proargnames,\n array_fill(\n '' :: text,\n array [cardinality(coalesce(p.proallargtypes, p.proargtypes))]\n )\n ) as arg_names,\n -- proallargtypes is null when all arg modes are IN\n coalesce(p.proallargtypes, p.proargtypes) as arg_types,\n array_cat(\n array_fill(false, array [pronargs - pronargdefaults]),\n array_fill(true, array [pronargdefaults])\n ) as arg_has_defaults\n from\n pg_proc as p\n where\n p.prokind = 'f'\n)\nselect\n f.oid :: int8 as \"id!\",\n n.nspname as \"schema!\",\n f.proname as \"name!\",\n l.lanname as \"language!\",\n case\n when l.lanname = 'internal' then null\n else f.prosrc\n end as body,\n case\n when l.lanname = 'internal' then null\n else pg_get_functiondef(f.oid)\n end as definition,\n coalesce(f_args.args, '[]') as args,\n nullif(pg_get_function_arguments(f.oid), '') as argument_types,\n nullif(pg_get_function_identity_arguments(f.oid), '') as identity_argument_types,\n f.prorettype :: int8 as \"return_type_id!\",\n pg_get_function_result(f.oid) as \"return_type!\",\n nullif(rt.typrelid :: int8, 0) as return_type_relation_id,\n f.proretset as is_set_returning_function,\n case\n when f.provolatile = 'i' then 'IMMUTABLE'\n when f.provolatile = 's' then 'STABLE'\n when f.provolatile = 'v' then 'VOLATILE'\n end as behavior,\n f.prosecdef as security_definer\nfrom\n functions f\n left join pg_namespace n on f.pronamespace = n.oid\n left join pg_language l on f.prolang = l.oid\n left join pg_type rt on rt.oid = f.prorettype\n left join (\n select\n oid,\n jsonb_object_agg(param, value) filter (\n where\n param is not null\n ) as config_params\n from\n (\n select\n oid,\n (string_to_array(unnest(proconfig), '=')) [1] as param,\n (string_to_array(unnest(proconfig), '=')) [2] as value\n from\n functions\n ) as t\n group by\n oid\n ) f_config on f_config.oid = f.oid\n left join (\n select\n oid,\n jsonb_agg(\n jsonb_build_object(\n 'mode',\n t2.mode,\n 'name',\n name,\n 'type_id',\n type_id,\n 'has_default',\n has_default\n )\n ) as args\n from\n (\n select\n oid,\n unnest(arg_modes) as mode,\n unnest(arg_names) as 
name,\n unnest(arg_types) :: int8 as type_id,\n unnest(arg_has_defaults) as has_default\n from\n functions\n ) as t1,\n lateral (\n select\n case\n when t1.mode = 'i' then 'in'\n when t1.mode = 'o' then 'out'\n when t1.mode = 'b' then 'inout'\n when t1.mode = 'v' then 'variadic'\n else 'table'\n end as mode\n ) as t2\n group by\n t1.oid\n ) f_args on f_args.oid = f.oid;", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "schema!", + "type_info": "Name" + }, + { + "ordinal": 2, + "name": "name!", + "type_info": "Name" + }, + { + "ordinal": 3, + "name": "language!", + "type_info": "Name" + }, + { + "ordinal": 4, + "name": "body", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "definition", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "args", + "type_info": "Jsonb" + }, + { + "ordinal": 7, + "name": "argument_types", + "type_info": "Text" + }, + { + "ordinal": 8, + "name": "identity_argument_types", + "type_info": "Text" + }, + { + "ordinal": 9, + "name": "return_type_id!", + "type_info": "Int8" + }, + { + "ordinal": 10, + "name": "return_type!", + "type_info": "Text" + }, + { + "ordinal": 11, + "name": "return_type_relation_id", + "type_info": "Int8" + }, + { + "ordinal": 12, + "name": "is_set_returning_function", + "type_info": "Bool" + }, + { + "ordinal": 13, + "name": "behavior", + "type_info": "Text" + }, + { + "ordinal": 14, + "name": "security_definer", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null, + true, + false, + true, + null, + null, + null, + null, + null, + null, + null, + null, + false, + null, + false + ] + }, + "hash": "64d9718b07516f3d2720cb7aa79e496f5337cadbad7a3fb03ccd3e3c21b71389" +} diff --git a/.sqlx/query-d61f2f56ce777c99593df240b3a126cacb3c9ed5f915b7e98052d58df98d480b.json b/.sqlx/query-d61f2f56ce777c99593df240b3a126cacb3c9ed5f915b7e98052d58df98d480b.json new file mode 100644 index 00000000..d1766e30 --- /dev/null +++ b/.sqlx/query-d61f2f56ce777c99593df240b3a126cacb3c9ed5f915b7e98052d58df98d480b.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "select\n version(),\n current_setting('server_version_num') :: int8 AS version_num,\n (\n select\n count(*) :: int8 AS active_connections\n FROM\n pg_stat_activity\n ) AS active_connections,\n current_setting('max_connections') :: int8 AS max_connections;", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "version", + "type_info": "Text" + }, + { + "ordinal": 1, + "name": "version_num", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "active_connections", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "max_connections", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null, + null, + null, + null + ] + }, + "hash": "d61f2f56ce777c99593df240b3a126cacb3c9ed5f915b7e98052d58df98d480b" +} diff --git a/.sqlx/query-fc0a0aa6d2a06bf3103d26a0233ae86f456892fa9ce48854a8b960cdf2d11a45.json b/.sqlx/query-fc0a0aa6d2a06bf3103d26a0233ae86f456892fa9ce48854a8b960cdf2d11a45.json new file mode 100644 index 00000000..01043a69 --- /dev/null +++ b/.sqlx/query-fc0a0aa6d2a06bf3103d26a0233ae86f456892fa9ce48854a8b960cdf2d11a45.json @@ -0,0 +1,86 @@ +{ + "db_name": "PostgreSQL", + "query": "with\n available_tables as (\n select\n c.relname as table_name,\n c.oid as table_oid,\n c.relkind as class_kind,\n n.nspname as schema_name\n from\n pg_catalog.pg_class c\n join pg_catalog.pg_namespace n on n.oid = c.relnamespace\n where\n -- r: 
normal tables\n -- v: views\n -- m: materialized views\n -- f: foreign tables\n -- p: partitioned tables\n c.relkind in ('r', 'v', 'm', 'f', 'p')\n ),\n available_indexes as (\n select\n unnest (ix.indkey) as attnum,\n ix.indisprimary as is_primary,\n ix.indisunique as is_unique,\n ix.indrelid as table_oid\n from\n pg_catalog.pg_class c\n join pg_catalog.pg_index ix on c.oid = ix.indexrelid\n where\n c.relkind = 'i'\n )\nselect\n atts.attname as name,\n ts.table_name,\n ts.table_oid :: int8 as \"table_oid!\",\n ts.class_kind :: char as \"class_kind!\",\n ts.schema_name,\n atts.atttypid :: int8 as \"type_id!\",\n not atts.attnotnull as \"is_nullable!\",\n nullif(\n information_schema._pg_char_max_length (atts.atttypid, atts.atttypmod),\n -1\n ) as varchar_length,\n pg_get_expr (def.adbin, def.adrelid) as default_expr,\n coalesce(ix.is_primary, false) as \"is_primary_key!\",\n coalesce(ix.is_unique, false) as \"is_unique!\",\n pg_catalog.col_description (ts.table_oid, atts.attnum) as comment\nfrom\n pg_catalog.pg_attribute atts\n join available_tables ts on atts.attrelid = ts.table_oid\n left join available_indexes ix on atts.attrelid = ix.table_oid\n and atts.attnum = ix.attnum\n left join pg_catalog.pg_attrdef def on atts.attrelid = def.adrelid\n and atts.attnum = def.adnum\nwhere\n -- system columns, such as `cmax` or `tableoid`, have negative `attnum`s\n atts.attnum >= 0\norder by\n schema_name desc,\n table_name,\n atts.attnum;", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "name", + "type_info": "Name" + }, + { + "ordinal": 1, + "name": "table_name", + "type_info": "Name" + }, + { + "ordinal": 2, + "name": "table_oid!", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "class_kind!", + "type_info": "Bpchar" + }, + { + "ordinal": 4, + "name": "schema_name", + "type_info": "Name" + }, + { + "ordinal": 5, + "name": "type_id!", + "type_info": "Int8" + }, + { + "ordinal": 6, + "name": "is_nullable!", + "type_info": "Bool" + }, + { + "ordinal": 7, + "name": "varchar_length", + "type_info": "Int4" + }, + { + "ordinal": 8, + "name": "default_expr", + "type_info": "Text" + }, + { + "ordinal": 9, + "name": "is_primary_key!", + "type_info": "Bool" + }, + { + "ordinal": 10, + "name": "is_unique!", + "type_info": "Bool" + }, + { + "ordinal": 11, + "name": "comment", + "type_info": "Text" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + null, + null, + false, + null, + null, + null, + null, + null, + null, + null + ] + }, + "hash": "fc0a0aa6d2a06bf3103d26a0233ae86f456892fa9ce48854a8b960cdf2d11a45" +} From 5d0c8e6770c4eb8481737c660d507cb0d775bff0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Fri, 4 Apr 2025 11:35:55 +0200 Subject: [PATCH 002/114] fix: support non timestamp but numeric migrations (#301) * fix: support non timestamp but numeric migrations * rename to sequence_number --- crates/pgt_workspace/src/workspace/server.rs | 2 +- .../src/workspace/server/migration.rs | 52 +++++++++++++------ 2 files changed, 37 insertions(+), 17 deletions(-) diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs index 032b92c6..8dcbfb1d 100644 --- a/crates/pgt_workspace/src/workspace/server.rs +++ b/crates/pgt_workspace/src/workspace/server.rs @@ -108,7 +108,7 @@ impl WorkspaceServer { let migrations_dir = migration_settings.path.as_ref()?; let migration = migration::get_migration(path, migrations_dir)?; - Some(&migration.timestamp <= ignore_before) + 
Some(&migration.sequence_number <= ignore_before) }) .unwrap_or(false) } diff --git a/crates/pgt_workspace/src/workspace/server/migration.rs b/crates/pgt_workspace/src/workspace/server/migration.rs index ef4bdd25..d8853727 100644 --- a/crates/pgt_workspace/src/workspace/server/migration.rs +++ b/crates/pgt_workspace/src/workspace/server/migration.rs @@ -2,7 +2,7 @@ use std::path::Path; #[derive(Debug)] pub(crate) struct Migration { - pub(crate) timestamp: u64, + pub(crate) sequence_number: u64, #[allow(unused)] pub(crate) name: String, } @@ -33,12 +33,7 @@ pub(crate) fn get_migration(path: &Path, migrations_dir: &Path) -> Option Option Option { + let mut parts = name.splitn(2, '_'); + // remove leading zeros to support numeric + let sequence_number: u64 = parts.next()?.trim_start_matches('0').parse().ok()?; + let full_name = parts.next()?; + let name = full_name + .strip_suffix(".sql") + .unwrap_or(full_name) + .to_string(); + Some(Migration { + sequence_number, + name, + }) } #[cfg(test)] @@ -79,8 +84,8 @@ mod tests { assert!(migration.is_some()); let migration = migration.unwrap(); - assert_eq!(migration.timestamp, 1234567890); - assert_eq!(migration.name, "create_users.sql"); + assert_eq!(migration.sequence_number, 1234567890); + assert_eq!(migration.name, "create_users"); } #[test] @@ -96,10 +101,25 @@ mod tests { assert!(migration.is_some()); let migration = migration.unwrap(); - assert_eq!(migration.timestamp, 1234567890); + assert_eq!(migration.sequence_number, 1234567890); assert_eq!(migration.name, "create_users"); } + #[test] + fn test_get_migration_prefix_number() { + let temp_dir = setup(); + let migrations_dir = temp_dir.path().to_path_buf(); + let path = migrations_dir.join("000201_a_migration.sql"); + fs::write(&path, "").unwrap(); + + let migration = get_migration(&path, &migrations_dir); + + assert!(migration.is_some()); + let migration = migration.unwrap(); + assert_eq!(migration.sequence_number, 201); + assert_eq!(migration.name, "a_migration"); + } + #[test] fn test_get_migration_not_timestamp_in_filename() { let migrations_dir = PathBuf::from("/tmp/migrations"); From 48d14a31866a8cdb9b32d7d0a7b7208a41bb45fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Fri, 4 Apr 2025 12:30:16 +0200 Subject: [PATCH 003/114] fix: nested () and with check (#307) --- crates/pgt_statement_splitter/src/lib.rs | 24 +++++++++++++++++++ .../src/parser/common.rs | 13 +++++++++- 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/crates/pgt_statement_splitter/src/lib.rs b/crates/pgt_statement_splitter/src/lib.rs index 9e92d3af..68f5daaf 100644 --- a/crates/pgt_statement_splitter/src/lib.rs +++ b/crates/pgt_statement_splitter/src/lib.rs @@ -142,6 +142,30 @@ mod tests { .expect_statements(vec!["insert into tbl (id) select 1", "select 3"]); } + #[test] + fn with_check() { + Tester::from("create policy employee_insert on journey_execution for insert to authenticated with check ((select private.organisation_id()) = organisation_id);") + .expect_statements(vec!["create policy employee_insert on journey_execution for insert to authenticated with check ((select private.organisation_id()) = organisation_id);"]); + } + + #[test] + fn nested_parenthesis() { + Tester::from( + "create table if not exists journey_node_execution ( + id uuid default gen_random_uuid() not null primary key, + + constraint uq_node_exec unique (journey_execution_id, journey_node_id) +);", + ) + .expect_statements(vec![ + "create table if not exists journey_node_execution ( + id uuid default 
gen_random_uuid() not null primary key, + + constraint uq_node_exec unique (journey_execution_id, journey_node_id) +);", + ]); + } + #[test] fn with_cte() { Tester::from("with test as (select 1 as id) select * from test;") diff --git a/crates/pgt_statement_splitter/src/parser/common.rs b/crates/pgt_statement_splitter/src/parser/common.rs index ec5f93a6..af3dc6cc 100644 --- a/crates/pgt_statement_splitter/src/parser/common.rs +++ b/crates/pgt_statement_splitter/src/parser/common.rs @@ -65,11 +65,20 @@ pub(crate) fn statement(p: &mut Parser) { pub(crate) fn parenthesis(p: &mut Parser) { p.expect(SyntaxKind::Ascii40); + let mut depth = 1; + loop { match p.peek().kind { + SyntaxKind::Ascii40 => { + p.advance(); + depth += 1; + } SyntaxKind::Ascii41 | SyntaxKind::Eof => { p.advance(); - break; + depth -= 1; + if depth == 0 { + break; + } } _ => { p.advance(); @@ -174,6 +183,8 @@ pub(crate) fn unknown(p: &mut Parser, exclude: &[SyntaxKind]) { if [ // WITH ORDINALITY should not start a new statement SyntaxKind::Ordinality, + // WITH CHECK should not start a new statement + SyntaxKind::Check, ] .iter() .all(|x| Some(x) != next.as_ref()) From b978a5a93016593be5f63736c607e04f8fc7699d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Fri, 4 Apr 2025 13:13:45 +0200 Subject: [PATCH 004/114] fix: log more (#309) --- crates/pgt_lsp/src/handlers/code_actions.rs | 2 ++ crates/pgt_lsp/src/handlers/completions.rs | 6 +----- crates/pgt_lsp/src/handlers/text_document.rs | 15 +++------------ 3 files changed, 6 insertions(+), 17 deletions(-) diff --git a/crates/pgt_lsp/src/handlers/code_actions.rs b/crates/pgt_lsp/src/handlers/code_actions.rs index dd8b1f1e..0d124cfc 100644 --- a/crates/pgt_lsp/src/handlers/code_actions.rs +++ b/crates/pgt_lsp/src/handlers/code_actions.rs @@ -9,6 +9,7 @@ use pgt_workspace::features::code_actions::{ CodeActionKind, CodeActionsParams, CommandActionCategory, ExecuteStatementParams, }; +#[tracing::instrument(level = "debug", skip(session), err)] pub fn get_actions( session: &Session, params: lsp_types::CodeActionParams, @@ -71,6 +72,7 @@ pub fn command_id(command: &CommandActionCategory) -> String { } } +#[tracing::instrument(level = "debug", skip(session), err)] pub async fn execute_command( session: &Session, params: ExecuteCommandParams, diff --git a/crates/pgt_lsp/src/handlers/completions.rs b/crates/pgt_lsp/src/handlers/completions.rs index f18ae598..e9a18a6e 100644 --- a/crates/pgt_lsp/src/handlers/completions.rs +++ b/crates/pgt_lsp/src/handlers/completions.rs @@ -3,11 +3,7 @@ use anyhow::Result; use pgt_workspace::{WorkspaceError, features::completions::GetCompletionsParams}; use tower_lsp::lsp_types::{self, CompletionItem, CompletionItemLabelDetails}; -#[tracing::instrument(level = "debug", skip_all, fields( - url = params.text_document_position.text_document.uri.as_str(), - character = params.text_document_position.position.character, - line = params.text_document_position.position.line -), err)] +#[tracing::instrument(level = "debug", skip(session), err)] pub fn get_completions( session: &Session, params: lsp_types::CompletionParams, diff --git a/crates/pgt_lsp/src/handlers/text_document.rs b/crates/pgt_lsp/src/handlers/text_document.rs index d36b8a32..63250ef5 100644 --- a/crates/pgt_lsp/src/handlers/text_document.rs +++ b/crates/pgt_lsp/src/handlers/text_document.rs @@ -10,10 +10,7 @@ use tower_lsp::lsp_types; use tracing::error; /// Handler for `textDocument/didOpen` LSP notification -#[tracing::instrument(level = "info", skip_all, fields( - 
url = params.text_document.uri.as_str(), - version = params.text_document.version -), err)] +#[tracing::instrument(level = "debug", skip(session), err)] pub(crate) async fn did_open( session: &Session, params: lsp_types::DidOpenTextDocumentParams, @@ -41,11 +38,7 @@ pub(crate) async fn did_open( } // Handler for `textDocument/didChange` LSP notification -#[tracing::instrument(level = "debug", skip_all, fields( - uri = params.text_document.uri.as_str(), - version = params.text_document.version, - num_content_changes = params.content_changes.len() -), err)] +#[tracing::instrument(level = "debug", skip(session), err)] pub(crate) async fn did_change( session: &Session, params: lsp_types::DidChangeTextDocumentParams, @@ -97,9 +90,7 @@ pub(crate) async fn did_change( } /// Handler for `textDocument/didClose` LSP notification -#[tracing::instrument(level = "info", skip_all, fields( - url = params.text_document.uri.as_str(), -), err)] +#[tracing::instrument(level = "debug", skip(session), err)] pub(crate) async fn did_close( session: &Session, params: lsp_types::DidCloseTextDocumentParams, From 6f697bf694f4b324a25567edbb87275ea1cb2d86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Fri, 4 Apr 2025 19:26:42 +0200 Subject: [PATCH 005/114] fix: make sure range is correct for eof deletions (#311) --- crates/pgt_lsp/tests/server.rs | 131 ++++++++++++++++++ crates/pgt_workspace/src/settings.rs | 16 +-- .../src/workspace/server/change.rs | 5 +- 3 files changed, 143 insertions(+), 9 deletions(-) diff --git a/crates/pgt_lsp/tests/server.rs b/crates/pgt_lsp/tests/server.rs index 4a5655e6..8e40c097 100644 --- a/crates/pgt_lsp/tests/server.rs +++ b/crates/pgt_lsp/tests/server.rs @@ -981,3 +981,134 @@ async fn test_issue_281() -> Result<()> { Ok(()) } + +#[tokio::test] +async fn test_issue_303() -> Result<()> { + let factory = ServerFactory::default(); + let mut fs = MemoryFileSystem::default(); + let test_db = get_new_test_db().await; + + let setup = r#" + create table public.users ( + id serial primary key, + name varchar(255) not null + ); + "#; + + test_db + .execute(setup) + .await + .expect("Failed to setup test database"); + + let mut conf = PartialConfiguration::init(); + conf.merge_with(PartialConfiguration { + db: Some(PartialDatabaseConfiguration { + database: Some( + test_db + .connect_options() + .get_database() + .unwrap() + .to_string(), + ), + ..Default::default() + }), + ..Default::default() + }); + fs.insert( + url!("postgrestools.jsonc").to_file_path().unwrap(), + serde_json::to_string_pretty(&conf).unwrap(), + ); + + let (service, client) = factory + .create_with_fs(None, DynRef::Owned(Box::new(fs))) + .into_inner(); + + let (stream, sink) = client.split(); + let mut server = Server::new(service); + + let (sender, _) = channel(CHANNEL_BUFFER_SIZE); + let reader = tokio::spawn(client_handler(stream, sink, sender)); + + server.initialize().await?; + server.initialized().await?; + + server.load_configuration().await?; + + server.open_document("").await?; + + let chars = [ + "c", "r", "e", "a", "t", "e", " ", "t", "a", "b", "l", "e", " ", "\"\"", "h", "e", "l", + "l", "o", + ]; + let mut version = 1; + + for (i, c) in chars.iter().enumerate() { + version += 1; + server + .change_document( + version, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 0, + character: i as u32, + }, + end: Position { + line: 0, + character: i as u32, + }, + }), + range_length: Some(0), + text: c.to_string(), + }], + ) + .await?; + } + + version += 
1; + server + .change_document( + version, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 0, + character: 20, + }, + end: Position { + line: 0, + character: 20, + }, + }), + range_length: Some(0), + text: " ".to_string(), + }], + ) + .await?; + + version += 1; + server + .change_document( + version, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 0, + character: 20, + }, + end: Position { + line: 0, + character: 21, + }, + }), + range_length: Some(0), + text: "".to_string(), + }], + ) + .await?; + + server.shutdown().await?; + reader.abort(); + + Ok(()) +} diff --git a/crates/pgt_workspace/src/settings.rs b/crates/pgt_workspace/src/settings.rs index 2e7d8f53..d4ea462a 100644 --- a/crates/pgt_workspace/src/settings.rs +++ b/crates/pgt_workspace/src/settings.rs @@ -449,9 +449,9 @@ mod tests { #[test] fn should_identify_allowed_statement_executions() { let partial_config = PartialDatabaseConfiguration { - allow_statement_executions_against: Some(StringSet::from_iter( - vec![String::from("localhost/*")].into_iter(), - )), + allow_statement_executions_against: Some(StringSet::from_iter(vec![String::from( + "localhost/*", + )])), host: Some("localhost".into()), database: Some("test-db".into()), ..Default::default() @@ -459,15 +459,15 @@ mod tests { let config = DatabaseSettings::from(partial_config); - assert_eq!(config.allow_statement_executions, true) + assert!(config.allow_statement_executions) } #[test] fn should_identify_not_allowed_statement_executions() { let partial_config = PartialDatabaseConfiguration { - allow_statement_executions_against: Some(StringSet::from_iter( - vec![String::from("localhost/*")].into_iter(), - )), + allow_statement_executions_against: Some(StringSet::from_iter(vec![String::from( + "localhost/*", + )])), host: Some("production".into()), database: Some("test-db".into()), ..Default::default() @@ -475,6 +475,6 @@ mod tests { let config = DatabaseSettings::from(partial_config); - assert_eq!(config.allow_statement_executions, false) + assert!(!config.allow_statement_executions) } } diff --git a/crates/pgt_workspace/src/workspace/server/change.rs b/crates/pgt_workspace/src/workspace/server/change.rs index 226a0ffd..e31e4178 100644 --- a/crates/pgt_workspace/src/workspace/server/change.rs +++ b/crates/pgt_workspace/src/workspace/server/change.rs @@ -195,7 +195,10 @@ impl Document { affected_indices, prev_index, next_index, - full_affected_range: TextRange::new(start_incl, end_incl.min(content_size)), + full_affected_range: TextRange::new( + start_incl, + end_incl.min(content_size).max(start_incl), + ), } } From 4a1038825e1fae5da293c750df7e6739fc73a114 Mon Sep 17 00:00:00 2001 From: Jeffrey Guenther Date: Fri, 4 Apr 2025 23:44:29 -0700 Subject: [PATCH 006/114] doc: add zed installation instructions (#315) --- README.md | 1 + docs/index.md | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b16a13ca..162bb9c0 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,7 @@ Install: [instructions](https://pgtools.dev/#installation) - [CLI releases](https://github.com/supabase-community/postgres-language-server/releases) - [VSCode](https://marketplace.visualstudio.com/items?itemName=Supabase.postgrestools) - [Neovim](https://github.com/neovim/nvim-lspconfig/blob/master/doc/configs.md#postgres_lsp) +- [Zed](https://github.com/LoamStudios/zed-postgres-language-server) ## Overview LSP Demo | CLI Demo diff --git a/docs/index.md b/docs/index.md index 
0a89ca9c..9bb80102 100644 --- a/docs/index.md +++ b/docs/index.md @@ -58,12 +58,16 @@ npm add --save-dev --save-exact @postgrestools/postgrestools ### VSCode -The language server is available on the [VSCode Marketplace](https://marketplace.visualstudio.com/items?itemName=Supabase.postgrestools). Its published from [this repo](https://github.com/supabase-community/postgrestools-vscode). +The language server is available on the [VSCode Marketplace](https://marketplace.visualstudio.com/items?itemName=Supabase.postgrestools). It's published from [this repo](https://github.com/supabase-community/postgrestools-vscode). ### Neovim You will have to install `nvim-lspconfig`, and follow the [instructions](https://github.com/neovim/nvim-lspconfig/blob/master/doc/configs.md#postgres_lsp). +### Zed + +The language server is available as an Extension. It's published from [this repo](https://github.com/LoamStudios/zed-postgres-language-server). + ### GitHub Actions To use the CLI in GitHub Actions, you can install it via our [GitHub Action](https://github.com/supabase-community/postgrestools-cli-action). From 7278f9074ef4ec9842a3683f418427447d68a24f Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Sun, 6 Apr 2025 18:24:48 +0200 Subject: [PATCH 007/114] fix: properly trim statement (#313) --- .../src/workspace/server/change.rs | 169 ++++++++++++++++-- 1 file changed, 153 insertions(+), 16 deletions(-) diff --git a/crates/pgt_workspace/src/workspace/server/change.rs b/crates/pgt_workspace/src/workspace/server/change.rs index e31e4178..38769e67 100644 --- a/crates/pgt_workspace/src/workspace/server/change.rs +++ b/crates/pgt_workspace/src/workspace/server/change.rs @@ -151,6 +151,8 @@ impl Document { let mut start = change_range.start(); let mut end = change_range.end().min(content_size); + let is_trim = change_range.start() >= content_size; + let mut affected_indices = Vec::new(); let mut prev_index = None; let mut next_index = None; @@ -168,23 +170,20 @@ impl Document { } } - let start_incl = prev_index + let first_affected_stmt_start = prev_index .map(|i| self.positions[i].1.start()) .unwrap_or(start); - let end_incl = next_index + + let mut last_affected_stmt_end = next_index .map(|i| self.positions[i].1.end()) .unwrap_or_else(|| end); - let end_incl = if is_addition { - end_incl.add(diff_size) - } else { - end_incl.sub(diff_size) - }; - - let end = if is_addition { - end.add(diff_size) - } else { - end.sub(diff_size) + if is_addition { + end = end.add(diff_size); + last_affected_stmt_end = last_affected_stmt_end.add(diff_size); + } else if !is_trim { + end = end.sub(diff_size); + last_affected_stmt_end = last_affected_stmt_end.sub(diff_size) }; Affected { @@ -196,8 +195,10 @@ impl Document { prev_index, next_index, full_affected_range: TextRange::new( - start_incl, - end_incl.min(content_size).max(start_incl), + first_affected_stmt_start, + last_affected_stmt_end + .min(content_size) + .max(first_affected_stmt_start), ), } } @@ -232,6 +233,7 @@ impl Document { let mut changed: Vec = Vec::with_capacity(self.positions.len()); let change_range = change.range.unwrap(); + let previous_content = self.content.clone(); let new_content = change.apply_to_text(&self.content); // we first need to determine the affected range and all affected statements, as well as @@ -272,7 +274,7 @@ impl Document { let new_range = new_ranges[0].add(affected_range.start()); let (old_id, old_range) = self.positions[affected_idx]; - // move all statements after the afffected range 
+ // move all statements after the affected range self.move_ranges(old_range.end(), change.diff_size(), change.is_addition()); let new_id = self.id_generator.next(); @@ -283,7 +285,7 @@ impl Document { id: old_id, path: self.path.clone(), }, - old_stmt_text: self.content[old_range].to_string(), + old_stmt_text: previous_content[old_range].to_string(), new_stmt: Statement { id: new_id, @@ -1325,4 +1327,139 @@ mod tests { assert_document_integrity(&d); } + + #[test] + fn remove_trailing_whitespace() { + let path = PgTPath::new("test.sql"); + + let mut doc = Document::new(path.clone(), "select * from ".to_string(), 0); + + let change = ChangeFileParams { + path: path.clone(), + version: 1, + changes: vec![ChangeParams { + text: "".to_string(), + range: Some(TextRange::new(13.into(), 14.into())), + }], + }; + + let changed = doc.apply_file_change(&change); + + assert_eq!(doc.content, "select * from"); + + assert_eq!(changed.len(), 1); + + match &changed[0] { + StatementChange::Modified(stmt) => { + let ModifiedStatement { + change_range, + change_text, + new_stmt_text, + old_stmt_text, + .. + } = stmt; + + assert_eq!(change_range, &TextRange::new(13.into(), 14.into())); + assert_eq!(change_text, ""); + assert_eq!(new_stmt_text, "select * from"); + + // the whitespace was not considered + // to be a part of the statement + assert_eq!(old_stmt_text, "select * from"); + } + + _ => assert!(false, "Did not yield a modified statement."), + } + + assert_document_integrity(&doc); + } + + #[test] + fn remove_trailing_whitespace_and_last_char() { + let path = PgTPath::new("test.sql"); + + let mut doc = Document::new(path.clone(), "select * from ".to_string(), 0); + + let change = ChangeFileParams { + path: path.clone(), + version: 1, + changes: vec![ChangeParams { + text: "".to_string(), + range: Some(TextRange::new(12.into(), 14.into())), + }], + }; + + let changed = doc.apply_file_change(&change); + + assert_eq!(doc.content, "select * fro"); + + assert_eq!(changed.len(), 1); + + match &changed[0] { + StatementChange::Modified(stmt) => { + let ModifiedStatement { + change_range, + change_text, + new_stmt_text, + old_stmt_text, + .. + } = stmt; + + assert_eq!(change_range, &TextRange::new(12.into(), 14.into())); + assert_eq!(change_text, ""); + assert_eq!(new_stmt_text, "select * fro"); + + // the whitespace was not considered + // to be a part of the statement + assert_eq!(old_stmt_text, "select * from"); + } + + _ => assert!(false, "Did not yield a modified statement."), + } + + assert_document_integrity(&doc); + } + + #[test] + fn remove_inbetween_whitespace() { + let path = PgTPath::new("test.sql"); + + let mut doc = Document::new(path.clone(), "select * from users".to_string(), 0); + + let change = ChangeFileParams { + path: path.clone(), + version: 1, + changes: vec![ChangeParams { + text: "".to_string(), + range: Some(TextRange::new(9.into(), 11.into())), + }], + }; + + let changed = doc.apply_file_change(&change); + + assert_eq!(doc.content, "select * from users"); + + assert_eq!(changed.len(), 1); + + match &changed[0] { + StatementChange::Modified(stmt) => { + let ModifiedStatement { + change_range, + change_text, + new_stmt_text, + old_stmt_text, + .. 
+ } = stmt; + + assert_eq!(change_range, &TextRange::new(9.into(), 11.into())); + assert_eq!(change_text, ""); + assert_eq!(old_stmt_text, "select * from users"); + assert_eq!(new_stmt_text, "select * from users"); + } + + _ => assert!(false, "Did not yield a modified statement."), + } + + assert_document_integrity(&doc); + } } From 451579dff47a3129d43d410488fe0e824b866020 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Sun, 6 Apr 2025 19:12:43 +0200 Subject: [PATCH 008/114] fix: downgrade tunners (#318) --- .github/workflows/publish.reusable.yml | 1 - .github/workflows/pull_request.yml | 16 +++++++++------- .github/workflows/release.yml | 16 ++++++++-------- 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/.github/workflows/publish.reusable.yml b/.github/workflows/publish.reusable.yml index a06ff8af..31e625d7 100644 --- a/.github/workflows/publish.reusable.yml +++ b/.github/workflows/publish.reusable.yml @@ -16,7 +16,6 @@ jobs: runs-on: ubuntu-latest permissions: contents: write - # ? what's this?! required for executing the node script? id-token: write steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index bd7d6c49..e3db0782 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -102,10 +102,10 @@ jobs: version: latest - name: Run Lints run: | + cargo sqlx prepare --check --workspace cargo clippy cargo run -p rules_check biome lint - cargo sqlx prepare --check --workspace test: name: Test @@ -113,9 +113,9 @@ jobs: strategy: matrix: include: - # reactive once we upgrade to the latest version of pg_query that is windows-compatible - - os: windows-latest - - os: ubuntu-latest + # use the same images we use for compiling + - os: windows-2022 + - os: ubuntu-22.04 steps: - name: Checkout PR branch uses: actions/checkout@v4 @@ -138,8 +138,10 @@ jobs: run: cargo test --workspace test-js-bindings: - name: Test JS Bindings - runs-on: ubuntu-latest + name: + Test JS Bindings + # use the same image we use for compiling + runs-on: ubuntu-22.04 services: postgres: image: postgres:latest @@ -173,7 +175,7 @@ jobs: codegen: name: Check Codegen - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 services: postgres: image: postgres:latest diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 08b56ae4..f5d46604 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -37,12 +37,12 @@ jobs: strategy: matrix: config: - - { os: ubuntu-latest, target: x86_64-unknown-linux-gnu } - - { os: ubuntu-latest, target: aarch64-unknown-linux-gnu } - - { os: macos-latest, target: x86_64-apple-darwin } - - { os: macos-latest, target: aarch64-apple-darwin } - - { os: windows-latest, target: x86_64-pc-windows-msvc } - - { os: windows-latest, target: aarch64-pc-windows-msvc } + - { os: ubuntu-22.04, target: x86_64-unknown-linux-gnu } + - { os: ubuntu-22.04, target: aarch64-unknown-linux-gnu } + - { os: macos-14, target: x86_64-apple-darwin } + - { os: macos-14, target: aarch64-apple-darwin } + - { os: windows-2022, target: x86_64-pc-windows-msvc } + - { os: windows-2022, target: aarch64-pc-windows-msvc } runs-on: ${{ matrix.config.os }} @@ -87,12 +87,12 @@ jobs: # windows is a special snowflake too, it saves binaries as .exe - name: 👦 Name the Binary - if: matrix.config.os == 'windows-latest' + if: matrix.config.os == 'windows-2022' run: | mkdir dist cp target/${{ matrix.config.target 
}}/release/postgrestools.exe ./dist/postgrestools_${{ matrix.config.target }} - name: 👦 Name the Binary - if: matrix.config.os != 'windows-latest' + if: matrix.config.os != 'windows-2022' run: | mkdir dist cp target/${{ matrix.config.target }}/release/postgrestools ./dist/postgrestools_${{ matrix.config.target }} From 48671891a049feca21db06733b015d8726356e15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Fri, 11 Apr 2025 23:00:56 +0200 Subject: [PATCH 009/114] refactor: parser (#322) --- crates/pgt_completions/src/complete.rs | 2 +- crates/pgt_completions/src/context.rs | 27 +- crates/pgt_completions/src/test_helper.rs | 2 +- crates/pgt_lsp/src/handlers/code_actions.rs | 11 +- crates/pgt_typecheck/src/diagnostics.rs | 20 +- crates/pgt_typecheck/src/lib.rs | 2 +- crates/pgt_typecheck/tests/diagnostics.rs | 4 +- crates/pgt_workspace/src/workspace.rs | 2 +- crates/pgt_workspace/src/workspace/server.rs | 349 +++++++--------- .../src/workspace/server/change.rs | 168 +++----- .../src/workspace/server/document.rs | 122 ++---- .../src/workspace/server/parsed_document.rs | 374 ++++++++++++++++++ .../src/workspace/server/pg_query.rs | 50 +-- .../src/workspace/server/sql_function.rs | 111 ++++++ .../workspace/server/statement_identifier.rs | 90 +++++ .../src/workspace/server/tree_sitter.rs | 50 ++- 16 files changed, 892 insertions(+), 492 deletions(-) create mode 100644 crates/pgt_workspace/src/workspace/server/parsed_document.rs create mode 100644 crates/pgt_workspace/src/workspace/server/sql_function.rs create mode 100644 crates/pgt_workspace/src/workspace/server/statement_identifier.rs diff --git a/crates/pgt_completions/src/complete.rs b/crates/pgt_completions/src/complete.rs index fb00aeaf..ed51c653 100644 --- a/crates/pgt_completions/src/complete.rs +++ b/crates/pgt_completions/src/complete.rs @@ -14,7 +14,7 @@ pub struct CompletionParams<'a> { pub position: TextSize, pub schema: &'a pgt_schema_cache::SchemaCache, pub text: String, - pub tree: Option<&'a tree_sitter::Tree>, + pub tree: &'a tree_sitter::Tree, } pub fn complete(params: CompletionParams) -> Vec { diff --git a/crates/pgt_completions/src/context.rs b/crates/pgt_completions/src/context.rs index 8b12742d..775b8870 100644 --- a/crates/pgt_completions/src/context.rs +++ b/crates/pgt_completions/src/context.rs @@ -50,7 +50,7 @@ impl TryFrom for ClauseType { pub(crate) struct CompletionContext<'a> { pub ts_node: Option>, - pub tree: Option<&'a tree_sitter::Tree>, + pub tree: &'a tree_sitter::Tree, pub text: &'a str, pub schema_cache: &'a SchemaCache, pub position: usize, @@ -85,10 +85,7 @@ impl<'a> CompletionContext<'a> { } fn gather_info_from_ts_queries(&mut self) { - let tree = match self.tree.as_ref() { - None => return, - Some(t) => t, - }; + let tree = self.tree; let stmt_range = self.wrapping_statement_range.as_ref(); let sql = self.text; @@ -126,11 +123,7 @@ impl<'a> CompletionContext<'a> { } fn gather_tree_context(&mut self) { - if self.tree.is_none() { - return; - } - - let mut cursor = self.tree.as_ref().unwrap().root_node().walk(); + let mut cursor = self.tree.root_node().walk(); /* * The head node of any treesitter tree is always the "PROGRAM" node. 
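For context on the comment above: tree-sitter parses every document into a tree whose root is that "program" node, with one child per statement, so any cursor-based walk has to descend through it first. Below is a minimal sketch of driving a cursor down to the node under a byte offset, assuming the same `tree_sitter` and `tree_sitter_sql` crates the tests in this series use (the SQL text, offset, and function name are illustrative, not part of the patch):

    use tree_sitter::Parser;

    fn node_kind_at(sql: &str, position: usize) -> Option<String> {
        let mut parser = Parser::new();
        parser
            .set_language(tree_sitter_sql::language())
            .expect("Error loading sql language");

        let tree = parser.parse(sql, None)?;
        // The head node is always the "program" node; descend from it
        // toward the byte offset of interest.
        let mut cursor = tree.root_node().walk();
        while cursor.goto_first_child_for_byte(position).is_some() {}
        Some(cursor.node().kind().to_string())
    }

This is roughly what `gather_tree_context` does before classifying the cursor position, minus the completion-specific bookkeeping.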
@@ -262,7 +255,7 @@ mod tests { let params = crate::CompletionParams { position: (position as u32).into(), text, - tree: Some(&tree), + tree: &tree, schema: &pgt_schema_cache::SchemaCache::default(), }; @@ -294,7 +287,7 @@ mod tests { let params = crate::CompletionParams { position: (position as u32).into(), text, - tree: Some(&tree), + tree: &tree, schema: &pgt_schema_cache::SchemaCache::default(), }; @@ -328,7 +321,7 @@ mod tests { let params = crate::CompletionParams { position: (position as u32).into(), text, - tree: Some(&tree), + tree: &tree, schema: &pgt_schema_cache::SchemaCache::default(), }; @@ -353,7 +346,7 @@ mod tests { let params = crate::CompletionParams { position: (position as u32).into(), text, - tree: Some(&tree), + tree: &tree, schema: &pgt_schema_cache::SchemaCache::default(), }; @@ -381,7 +374,7 @@ mod tests { let params = crate::CompletionParams { position: (position as u32).into(), text, - tree: Some(&tree), + tree: &tree, schema: &pgt_schema_cache::SchemaCache::default(), }; @@ -407,7 +400,7 @@ mod tests { let params = crate::CompletionParams { position: (position as u32).into(), text, - tree: Some(&tree), + tree: &tree, schema: &pgt_schema_cache::SchemaCache::default(), }; @@ -432,7 +425,7 @@ mod tests { let params = crate::CompletionParams { position: (position as u32).into(), text, - tree: Some(&tree), + tree: &tree, schema: &pgt_schema_cache::SchemaCache::default(), }; diff --git a/crates/pgt_completions/src/test_helper.rs b/crates/pgt_completions/src/test_helper.rs index a54aacbd..58e9baf7 100644 --- a/crates/pgt_completions/src/test_helper.rs +++ b/crates/pgt_completions/src/test_helper.rs @@ -70,7 +70,7 @@ pub(crate) fn get_test_params<'a>( CompletionParams { position: (position as u32).into(), schema: schema_cache, - tree: Some(tree), + tree, text, } } diff --git a/crates/pgt_lsp/src/handlers/code_actions.rs b/crates/pgt_lsp/src/handlers/code_actions.rs index 0d124cfc..a10bee03 100644 --- a/crates/pgt_lsp/src/handlers/code_actions.rs +++ b/crates/pgt_lsp/src/handlers/code_actions.rs @@ -43,7 +43,7 @@ pub fn get_actions( title: title.clone(), command: command_id, arguments: Some(vec![ - serde_json::Value::Number(stmt_id.into()), + serde_json::to_value(&stmt_id).unwrap(), serde_json::to_value(&url).unwrap(), ]), } @@ -81,17 +81,16 @@ pub async fn execute_command( match command.as_str() { "pgt.executeStatement" => { - let id: usize = serde_json::from_value(params.arguments[0].clone())?; + let statement_id = serde_json::from_value::( + params.arguments[0].clone(), + )?; let doc_url: lsp_types::Url = serde_json::from_value(params.arguments[1].clone())?; let path = session.file_path(&doc_url)?; let result = session .workspace - .execute_statement(ExecuteStatementParams { - statement_id: id, - path, - })?; + .execute_statement(ExecuteStatementParams { statement_id, path })?; /* * Updating all diagnostics: the changes caused by the statement execution diff --git a/crates/pgt_typecheck/src/diagnostics.rs b/crates/pgt_typecheck/src/diagnostics.rs index b443dcc9..8fd92da2 100644 --- a/crates/pgt_typecheck/src/diagnostics.rs +++ b/crates/pgt_typecheck/src/diagnostics.rs @@ -96,7 +96,7 @@ impl Advices for TypecheckAdvices { pub(crate) fn create_type_error( pg_err: &PgDatabaseError, - ts: Option<&tree_sitter::Tree>, + ts: &tree_sitter::Tree, ) -> TypecheckDiagnostic { let position = pg_err.position().and_then(|pos| match pos { sqlx::postgres::PgErrorPosition::Original(pos) => Some(pos - 1), @@ -104,16 +104,14 @@ pub(crate) fn create_type_error( }); let range = 
position.and_then(|pos| { - ts.and_then(|tree| { - tree.root_node() - .named_descendant_for_byte_range(pos, pos) - .map(|node| { - TextRange::new( - node.start_byte().try_into().unwrap(), - node.end_byte().try_into().unwrap(), - ) - }) - }) + ts.root_node() + .named_descendant_for_byte_range(pos, pos) + .map(|node| { + TextRange::new( + node.start_byte().try_into().unwrap(), + node.end_byte().try_into().unwrap(), + ) + }) }); let severity = match pg_err.severity() { diff --git a/crates/pgt_typecheck/src/lib.rs b/crates/pgt_typecheck/src/lib.rs index 4554689c..9311bb8e 100644 --- a/crates/pgt_typecheck/src/lib.rs +++ b/crates/pgt_typecheck/src/lib.rs @@ -13,7 +13,7 @@ pub struct TypecheckParams<'a> { pub conn: &'a PgPool, pub sql: &'a str, pub ast: &'a pgt_query_ext::NodeEnum, - pub tree: Option<&'a tree_sitter::Tree>, + pub tree: &'a tree_sitter::Tree, } #[derive(Debug, Clone)] diff --git a/crates/pgt_typecheck/tests/diagnostics.rs b/crates/pgt_typecheck/tests/diagnostics.rs index d0e53b15..46daa8a1 100644 --- a/crates/pgt_typecheck/tests/diagnostics.rs +++ b/crates/pgt_typecheck/tests/diagnostics.rs @@ -21,14 +21,14 @@ async fn test(name: &str, query: &str, setup: &str) { .expect("Error loading sql language"); let root = pgt_query_ext::parse(query).unwrap(); - let tree = parser.parse(query, None); + let tree = parser.parse(query, None).unwrap(); let conn = &test_db; let result = check_sql(TypecheckParams { conn, sql: query, ast: &root, - tree: tree.as_ref(), + tree: &tree, }) .await; diff --git a/crates/pgt_workspace/src/workspace.rs b/crates/pgt_workspace/src/workspace.rs index 4a503d5d..681ab95f 100644 --- a/crates/pgt_workspace/src/workspace.rs +++ b/crates/pgt_workspace/src/workspace.rs @@ -21,7 +21,7 @@ use crate::{ mod client; mod server; -pub(crate) use server::StatementId; +pub use server::StatementId; #[derive(Debug, serde::Serialize, serde::Deserialize)] #[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs index 8dcbfb1d..27f5e8be 100644 --- a/crates/pgt_workspace/src/workspace/server.rs +++ b/crates/pgt_workspace/src/workspace/server.rs @@ -2,22 +2,24 @@ use std::{fs, panic::RefUnwindSafe, path::Path, sync::RwLock}; use analyser::AnalyserVisitorBuilder; use async_helper::run_async; -use change::StatementChange; use dashmap::DashMap; use db_connection::DbConnection; -pub(crate) use document::StatementId; -use document::{Document, Statement}; +use document::Document; use futures::{StreamExt, stream}; -use pg_query::PgQueryStore; +use parsed_document::{ + AsyncDiagnosticsMapper, CursorPositionFilter, DefaultMapper, ExecuteStatementMapper, + GetCompletionsMapper, ParsedDocument, SyncDiagnosticsMapper, +}; use pgt_analyse::{AnalyserOptions, AnalysisFilter}; use pgt_analyser::{Analyser, AnalyserConfig, AnalyserContext}; -use pgt_diagnostics::{Diagnostic, DiagnosticExt, Severity, serde::Diagnostic as SDiagnostic}; +use pgt_diagnostics::{ + Diagnostic, DiagnosticExt, Error, Severity, serde::Diagnostic as SDiagnostic, +}; use pgt_fs::{ConfigName, PgTPath}; use pgt_typecheck::TypecheckParams; use schema_cache_manager::SchemaCacheManager; use sqlx::Executor; use tracing::info; -use tree_sitter::TreeSitterStore; use crate::{ WorkspaceError, @@ -38,14 +40,19 @@ use super::{ Workspace, }; +pub use statement_identifier::StatementId; + mod analyser; mod async_helper; mod change; mod db_connection; mod document; mod migration; +mod parsed_document; mod pg_query; mod 
schema_cache_manager; +mod sql_function; +mod statement_identifier; mod tree_sitter; pub(super) struct WorkspaceServer { @@ -55,11 +62,7 @@ pub(super) struct WorkspaceServer { /// Stores the schema cache for this workspace schema_cache: SchemaCacheManager, - /// Stores the document (text content + version number) associated with a URL - documents: DashMap, - - tree_sitter: TreeSitterStore, - pg_query: PgQueryStore, + parsed_documents: DashMap, connection: RwLock, } @@ -81,9 +84,7 @@ impl WorkspaceServer { pub(crate) fn new() -> Self { Self { settings: RwLock::default(), - documents: DashMap::default(), - tree_sitter: TreeSitterStore::new(), - pg_query: PgQueryStore::new(), + parsed_documents: DashMap::default(), schema_cache: SchemaCacheManager::default(), connection: RwLock::default(), } @@ -181,30 +182,21 @@ impl Workspace for WorkspaceServer { /// Add a new file to the workspace #[tracing::instrument(level = "info", skip_all, fields(path = params.path.as_path().as_os_str().to_str()), err)] fn open_file(&self, params: OpenFileParams) -> Result<(), WorkspaceError> { - let doc = Document::new(params.path.clone(), params.content, params.version); - - doc.iter_statements_with_text().for_each(|(stmt, content)| { - self.tree_sitter.add_statement(&stmt, content); - self.pg_query.add_statement(&stmt, content); - }); - - self.documents.insert(params.path, doc); + self.parsed_documents + .entry(params.path.clone()) + .or_insert_with(|| { + ParsedDocument::new(params.path.clone(), params.content, params.version) + }); Ok(()) } /// Remove a file from the workspace fn close_file(&self, params: super::CloseFileParams) -> Result<(), WorkspaceError> { - let (_, doc) = self - .documents + self.parsed_documents .remove(¶ms.path) .ok_or_else(WorkspaceError::not_found)?; - for stmt in doc.iter_statements() { - self.tree_sitter.remove_statement(&stmt); - self.pg_query.remove_statement(&stmt); - } - Ok(()) } @@ -214,53 +206,16 @@ impl Workspace for WorkspaceServer { version = params.version ), err)] fn change_file(&self, params: super::ChangeFileParams) -> Result<(), WorkspaceError> { - let mut doc = self - .documents - .entry(params.path.clone()) - .or_insert(Document::new( - params.path.clone(), - "".to_string(), - params.version, - )); - - for c in &doc.apply_file_change(¶ms) { - match c { - StatementChange::Added(added) => { - tracing::debug!( - "Adding statement: id:{:?}, path:{:?}, text:{:?}", - added.stmt.id, - added.stmt.path.as_os_str().to_str(), - added.text - ); - self.tree_sitter.add_statement(&added.stmt, &added.text); - self.pg_query.add_statement(&added.stmt, &added.text); - } - StatementChange::Deleted(s) => { - tracing::debug!( - "Deleting statement: id:{:?}, path:{:?}", - s.id, - s.path.as_os_str() - ); - self.tree_sitter.remove_statement(s); - self.pg_query.remove_statement(s); - } - StatementChange::Modified(s) => { - tracing::debug!( - "Modifying statement with id {:?} (new id {:?}) in {:?}. 
Range {:?}, Changed from '{:?}' to '{:?}', changed text: {:?}", - s.old_stmt.id, - s.new_stmt.id, - s.old_stmt.path.as_os_str().to_str(), - s.change_range, - s.old_stmt_text, - s.new_stmt_text, - s.change_text - ); + let mut parser = + self.parsed_documents + .entry(params.path.clone()) + .or_insert(ParsedDocument::new( + params.path.clone(), + "".to_string(), + params.version, + )); - self.tree_sitter.modify_statement(s); - self.pg_query.modify_statement(s); - } - } - } + parser.apply_change(params); Ok(()) } @@ -271,10 +226,10 @@ impl Workspace for WorkspaceServer { fn get_file_content(&self, params: GetFileContentParams) -> Result { let document = self - .documents + .parsed_documents .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; - Ok(document.content.clone()) + Ok(document.get_document_content().to_string()) } fn is_path_ignored(&self, params: IsPathIgnoredParams) -> Result { @@ -285,17 +240,11 @@ impl Workspace for WorkspaceServer { &self, params: code_actions::CodeActionsParams, ) -> Result { - let doc = self - .documents + let parser = self + .parsed_documents .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; - let eligible_statements = doc - .iter_statements_with_text_and_range() - .filter(|(_, range, _)| range.contains(params.cursor_position)); - - let mut actions: Vec = vec![]; - let settings = self .settings .read() @@ -307,20 +256,26 @@ impl Workspace for WorkspaceServer { Some("Statement execution not allowed against database.".into()) }; - for (stmt, _, txt) in eligible_statements { - let title = format!( - "Execute Statement: {}...", - txt.chars().take(50).collect::() - ); - - actions.push(CodeAction { - title, - kind: CodeActionKind::Command(CommandAction { - category: CommandActionCategory::ExecuteStatement(stmt.id), - }), - disabled_reason: disabled_reason.clone(), - }); - } + let actions = parser + .iter_with_filter( + DefaultMapper, + CursorPositionFilter::new(params.cursor_position), + ) + .map(|(stmt, _, txt)| { + let title = format!( + "Execute Statement: {}...", + txt.chars().take(50).collect::() + ); + + CodeAction { + title, + kind: CodeActionKind::Command(CommandAction { + category: CommandActionCategory::ExecuteStatement(stmt), + }), + disabled_reason: disabled_reason.clone(), + } + }) + .collect(); Ok(CodeActionsResult { actions }) } @@ -329,31 +284,25 @@ impl Workspace for WorkspaceServer { &self, params: ExecuteStatementParams, ) -> Result { - let doc = self - .documents + let parser = self + .parsed_documents .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; - if self - .pg_query - .get_ast(&Statement { - path: params.path, - id: params.statement_id, - }) - .is_none() - { + let stmt = parser.find(params.statement_id, ExecuteStatementMapper); + + if stmt.is_none() { return Ok(ExecuteStatementResult { - message: "Statement is invalid.".into(), + message: "Statement was not found in document.".into(), }); }; - let sql: String = match doc.get_txt(params.statement_id) { - Some(txt) => txt, - None => { - return Ok(ExecuteStatementResult { - message: "Statement was not found in document.".into(), - }); - } + let (_id, _range, content, ast) = stmt.unwrap(); + + if ast.is_none() { + return Ok(ExecuteStatementResult { + message: "Statement is invalid.".into(), + }); }; let conn = self.connection.read().unwrap(); @@ -366,7 +315,7 @@ impl Workspace for WorkspaceServer { } }; - let result = run_async(async move { pool.execute(sqlx::query(&sql)).await })??; + let result = run_async(async move { pool.execute(sqlx::query(&content)).await })??; 
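        // `run_async` comes from this crate's `async_helper` module (see the
        // imports above). The double `?` unwraps a nested `Result`: the call
        // to `run_async` itself can fail, and so can the query it runs, so
        // both must succeed before the success message below is built.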
Ok(ExecuteStatementResult { message: format!( @@ -380,13 +329,6 @@ impl Workspace for WorkspaceServer { &self, params: PullDiagnosticsParams, ) -> Result { - // get all statements form the requested document and pull diagnostics out of every - // source - let doc = self - .documents - .get(¶ms.path) - .ok_or(WorkspaceError::not_found())?; - let settings = self.settings(); // create analyser for this run @@ -410,7 +352,14 @@ impl Workspace for WorkspaceServer { filter, }); - let mut diagnostics: Vec = doc.diagnostics().to_vec(); + let parser = self + .parsed_documents + .get(¶ms.path) + .ok_or(WorkspaceError::not_found())?; + + let mut diagnostics: Vec = parser.document_diagnostics().to_vec(); + + // TODO: run this in parallel with rayon based on rayon.count() if let Some(pool) = self .connection @@ -418,29 +367,20 @@ impl Workspace for WorkspaceServer { .expect("DbConnection RwLock panicked") .get_pool() { - let typecheck_params: Vec<_> = doc - .iter_statements_with_text_and_range() - .map(|(stmt, range, text)| { - let ast = self.pg_query.get_ast(&stmt); - let tree = self.tree_sitter.get_parse_tree(&stmt); - (text.to_string(), ast, tree, *range) - }) - .collect(); - - // run diagnostics for each statement in parallel if its mostly i/o work let path_clone = params.path.clone(); + let input = parser.iter(AsyncDiagnosticsMapper).collect::>(); let async_results = run_async(async move { - stream::iter(typecheck_params) - .map(|(text, ast, tree, range)| { + stream::iter(input) + .map(|(_id, range, content, ast, cst)| { let pool = pool.clone(); let path = path_clone.clone(); async move { if let Some(ast) = ast { pgt_typecheck::check_sql(TypecheckParams { conn: &pool, - sql: &text, + sql: &content, ast: &ast, - tree: tree.as_deref(), + tree: &cst, }) .await .map(|d| { @@ -464,45 +404,49 @@ impl Workspace for WorkspaceServer { } } - diagnostics.extend(doc.iter_statements_with_range().flat_map(|(stmt, r)| { - let mut stmt_diagnostics = self.pg_query.get_diagnostics(&stmt); + diagnostics.extend(parser.iter(SyncDiagnosticsMapper).flat_map( + |(_id, range, ast, diag)| { + let mut errors: Vec = vec![]; - let ast = self.pg_query.get_ast(&stmt); + if let Some(diag) = diag { + errors.push(diag.into()); + } - if let Some(ast) = ast { - stmt_diagnostics.extend( - analyser - .run(AnalyserContext { root: &ast }) - .into_iter() - .map(SDiagnostic::new) - .collect::>(), - ); - } + if let Some(ast) = ast { + errors.extend( + analyser + .run(AnalyserContext { root: &ast }) + .into_iter() + .map(Error::from) + .collect::>(), + ); + } - stmt_diagnostics - .into_iter() - .map(|d| { - let severity = d - .category() - .filter(|category| category.name().starts_with("lint/")) - .map_or_else( - || d.severity(), - |category| { - settings - .as_ref() - .get_severity_from_rule_code(category) - .unwrap_or(Severity::Warning) - }, - ); - - SDiagnostic::new( - d.with_file_path(params.path.as_path().display().to_string()) - .with_file_span(r) - .with_severity(severity), - ) - }) - .collect::>() - })); + errors + .into_iter() + .map(|d| { + let severity = d + .category() + .filter(|category| category.name().starts_with("lint/")) + .map_or_else( + || d.severity(), + |category| { + settings + .as_ref() + .get_severity_from_rule_code(category) + .unwrap_or(Severity::Warning) + }, + ); + + SDiagnostic::new( + d.with_file_path(params.path.as_path().display().to_string()) + .with_file_span(range) + .with_severity(severity), + ) + }) + .collect::>() + }, + )); let errors = diagnostics .iter() @@ -525,46 +469,35 @@ impl Workspace for 
WorkspaceServer { &self, params: GetCompletionsParams, ) -> Result { - let pool = match self.connection.read().unwrap().get_pool() { - Some(pool) => pool, - None => return Ok(CompletionsResult::default()), - }; - - let doc = self - .documents + let parser = self + .parsed_documents .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; - let (statement, stmt_range, text) = match doc - .iter_statements_with_text_and_range() - .find(|(_, r, _)| r.contains(params.position)) - { - Some(s) => s, + let pool = match self.connection.read().unwrap().get_pool() { + Some(pool) => pool, None => return Ok(CompletionsResult::default()), }; - // `offset` is the position in the document, - // but we need the position within the *statement*. - let position = params.position - stmt_range.start(); - - let tree = self.tree_sitter.get_parse_tree(&statement); - - tracing::debug!( - "Found the statement. We're looking for position {:?}. Statement Range {:?} to {:?}. Statement: {:?}", - position, - stmt_range.start(), - stmt_range.end(), - text - ); - let schema_cache = self.schema_cache.load(pool)?; - let items = pgt_completions::complete(pgt_completions::CompletionParams { - position, - schema: schema_cache.as_ref(), - tree: tree.as_deref(), - text: text.to_string(), - }); + let items = parser + .iter_with_filter( + GetCompletionsMapper, + CursorPositionFilter::new(params.position), + ) + .flat_map(|(_id, range, content, cst)| { + // `offset` is the position in the document, + // but we need the position within the *statement*. + let position = params.position - range.start(); + pgt_completions::complete(pgt_completions::CompletionParams { + position, + schema: schema_cache.as_ref(), + tree: &cst, + text: content, + }) + }) + .collect(); Ok(CompletionsResult { items }) } diff --git a/crates/pgt_workspace/src/workspace/server/change.rs b/crates/pgt_workspace/src/workspace/server/change.rs index 38769e67..afe0eb64 100644 --- a/crates/pgt_workspace/src/workspace/server/change.rs +++ b/crates/pgt_workspace/src/workspace/server/change.rs @@ -3,27 +3,27 @@ use std::ops::{Add, Sub}; use crate::workspace::{ChangeFileParams, ChangeParams}; -use super::{Document, Statement, document}; +use super::{Document, document, statement_identifier::StatementId}; #[derive(Debug, PartialEq, Eq)] pub enum StatementChange { Added(AddedStatement), - Deleted(Statement), + Deleted(StatementId), Modified(ModifiedStatement), } #[derive(Debug, PartialEq, Eq)] pub struct AddedStatement { - pub stmt: Statement, + pub stmt: StatementId, pub text: String, } #[derive(Debug, PartialEq, Eq)] pub struct ModifiedStatement { - pub old_stmt: Statement, + pub old_stmt: StatementId, pub old_stmt_text: String, - pub new_stmt: Statement, + pub new_stmt: StatementId, pub new_stmt_text: String, pub change_range: TextRange, @@ -32,7 +32,7 @@ pub struct ModifiedStatement { impl StatementChange { #[allow(dead_code)] - pub fn statement(&self) -> &Statement { + pub fn statement(&self) -> &StatementId { match self { StatementChange::Added(stmt) => &stmt.stmt, StatementChange::Deleted(stmt) => stmt, @@ -78,12 +78,7 @@ impl Document { fn drain_positions(&mut self) -> Vec { self.positions .drain(..) 
- .map(|(id, _)| { - StatementChange::Deleted(Statement { - id, - path: self.path.clone(), - }) - }) + .map(|(id, _)| StatementChange::Deleted(id)) .collect() } @@ -109,28 +104,22 @@ impl Document { changes.extend(ranges.into_iter().map(|range| { let id = self.id_generator.next(); let text = self.content[range].to_string(); - self.positions.push((id, range)); + self.positions.push((id.clone(), range)); - StatementChange::Added(AddedStatement { - stmt: Statement { - path: self.path.clone(), - id, - }, - text, - }) + StatementChange::Added(AddedStatement { stmt: id, text }) })); changes } - fn insert_statement(&mut self, range: TextRange) -> usize { + fn insert_statement(&mut self, range: TextRange) -> StatementId { let pos = self .positions .binary_search_by(|(_, r)| r.start().cmp(&range.start())) .unwrap_err(); let new_id = self.id_generator.next(); - self.positions.insert(pos, (new_id, range)); + self.positions.insert(pos, (new_id.clone(), range)); new_id } @@ -272,25 +261,19 @@ impl Document { if new_ranges.len() == 1 { let affected_idx = affected_indices[0]; let new_range = new_ranges[0].add(affected_range.start()); - let (old_id, old_range) = self.positions[affected_idx]; + let (old_id, old_range) = self.positions[affected_idx].clone(); // move all statements after the affected range self.move_ranges(old_range.end(), change.diff_size(), change.is_addition()); let new_id = self.id_generator.next(); - self.positions[affected_idx] = (new_id, new_range); + self.positions[affected_idx] = (new_id.clone(), new_range); changed.push(StatementChange::Modified(ModifiedStatement { - old_stmt: Statement { - id: old_id, - path: self.path.clone(), - }, + old_stmt: old_id.clone(), old_stmt_text: previous_content[old_range].to_string(), - new_stmt: Statement { - id: new_id, - path: self.path.clone(), - }, + new_stmt: new_id, new_stmt_text: changed_content[new_ranges[0]].to_string(), // change must be relative to the statement change_text: change.text.clone(), @@ -324,24 +307,19 @@ impl Document { // delete and add new ones if let Some(next_index) = next_index { - changed.push(StatementChange::Deleted(Statement { - id: self.positions[next_index].0, - path: self.path.clone(), - })); + changed.push(StatementChange::Deleted( + self.positions[next_index].0.clone(), + )); self.positions.remove(next_index); } for idx in affected_indices.iter().rev() { - changed.push(StatementChange::Deleted(Statement { - id: self.positions[*idx].0, - path: self.path.clone(), - })); + changed.push(StatementChange::Deleted(self.positions[*idx].0.clone())); self.positions.remove(*idx); } if let Some(prev_index) = prev_index { - changed.push(StatementChange::Deleted(Statement { - id: self.positions[prev_index].0, - path: self.path.clone(), - })); + changed.push(StatementChange::Deleted( + self.positions[prev_index].0.clone(), + )); self.positions.remove(prev_index); } @@ -349,10 +327,7 @@ impl Document { let actual_range = range.add(full_affected_range.start()); let new_id = self.insert_statement(actual_range); changed.push(StatementChange::Added(AddedStatement { - stmt: Statement { - id: new_id, - path: self.path.clone(), - }, + stmt: new_id, text: new_content[actual_range].to_string(), })); }); @@ -429,11 +404,12 @@ fn get_affected(content: &str, range: TextRange) -> &str { #[cfg(test)] mod tests { + use super::*; use pgt_diagnostics::Diagnostic; use pgt_text_size::TextRange; - use crate::workspace::{ChangeFileParams, ChangeParams}; + use crate::workspace::{ChangeFileParams, ChangeParams, server::statement_identifier::root_id}; 
use pgt_fs::PgTPath; @@ -466,7 +442,7 @@ mod tests { fn open_doc_with_scan_error() { let input = "select id from users;\n\n\n\nselect 1443ddwwd33djwdkjw13331333333333;"; - let d = Document::new(PgTPath::new("test.sql"), input.to_string(), 0); + let d = Document::new(input.to_string(), 0); assert_eq!(d.positions.len(), 0); assert!(d.has_fatal_error()); @@ -477,7 +453,7 @@ mod tests { let path = PgTPath::new("test.sql"); let input = "select id from users;\n\n\n\nselect 1;"; - let mut d = Document::new(PgTPath::new("test.sql"), input.to_string(), 0); + let mut d = Document::new(input.to_string(), 0); assert_eq!(d.positions.len(), 2); assert!(!d.has_fatal_error()); @@ -515,7 +491,7 @@ mod tests { let path = PgTPath::new("test.sql"); let input = "select id from users;\n\n\n\nselect 1;"; - let mut d = Document::new(PgTPath::new("test.sql"), input.to_string(), 0); + let mut d = Document::new(input.to_string(), 0); assert_eq!(d.positions.len(), 2); assert!(!d.has_fatal_error()); @@ -553,7 +529,7 @@ mod tests { let path = PgTPath::new("test.sql"); let input = "select 1d;"; - let mut d = Document::new(PgTPath::new("test.sql"), input.to_string(), 0); + let mut d = Document::new(input.to_string(), 0); assert_eq!(d.positions.len(), 0); assert!(d.has_fatal_error()); @@ -587,7 +563,7 @@ mod tests { let path = PgTPath::new("test.sql"); let input = "select 1d;"; - let mut d = Document::new(PgTPath::new("test.sql"), input.to_string(), 0); + let mut d = Document::new(input.to_string(), 0); assert_eq!(d.positions.len(), 0); assert!(d.has_fatal_error()); @@ -620,7 +596,7 @@ mod tests { let path = PgTPath::new("test.sql"); let input = "select id from users;\n\n\n\nselect * from contacts;"; - let mut d = Document::new(PgTPath::new("test.sql"), input.to_string(), 0); + let mut d = Document::new(input.to_string(), 0); assert_eq!(d.positions.len(), 2); @@ -658,7 +634,7 @@ mod tests { fn within_statements_2() { let path = PgTPath::new("test.sql"); let input = "alter table deal alter column value drop not null;\n"; - let mut d = Document::new(path.clone(), input.to_string(), 0); + let mut d = Document::new(input.to_string(), 0); assert_eq!(d.positions.len(), 1); @@ -735,7 +711,7 @@ mod tests { fn julians_sample() { let path = PgTPath::new("test.sql"); let input = "select\n *\nfrom\n test;\n\nselect\n\nalter table test\n\ndrop column id;"; - let mut d = Document::new(path.clone(), input.to_string(), 0); + let mut d = Document::new(input.to_string(), 0); assert_eq!(d.positions.len(), 4); @@ -817,7 +793,7 @@ mod tests { let path = PgTPath::new("test.sql"); let input = "select id from users;\nselect * from contacts;"; - let mut d = Document::new(PgTPath::new("test.sql"), input.to_string(), 0); + let mut d = Document::new(input.to_string(), 0); assert_eq!(d.positions.len(), 2); @@ -833,14 +809,13 @@ mod tests { let changed = d.apply_file_change(&change); assert_eq!(changed.len(), 4); - assert!(matches!( - changed[0], - StatementChange::Deleted(Statement { id: 1, .. }) - )); + assert!(matches!(changed[0], StatementChange::Deleted(_))); + assert_eq!(changed[0].statement().raw(), 1); assert!(matches!( changed[1], - StatementChange::Deleted(Statement { id: 0, .. 
}) + StatementChange::Deleted(StatementId::Root(_)) )); + assert_eq!(changed[1].statement().raw(), 0); assert!( matches!(&changed[2], StatementChange::Added(AddedStatement { stmt: _, text }) if text == "select id,test from users;") ); @@ -856,7 +831,7 @@ mod tests { let path = PgTPath::new("test.sql"); let input = "select id"; - let mut d = Document::new(PgTPath::new("test.sql"), input.to_string(), 0); + let mut d = Document::new(input.to_string(), 0); assert_eq!(d.positions.len(), 1); @@ -881,7 +856,7 @@ mod tests { let path = PgTPath::new("test.sql"); let input = "select id from users;\nselect * from contacts;"; - let mut d = Document::new(PgTPath::new("test.sql"), input.to_string(), 0); + let mut d = Document::new(input.to_string(), 0); assert_eq!(d.positions.len(), 2); @@ -898,37 +873,27 @@ mod tests { assert_eq!(changed.len(), 4); - assert_eq!( + assert!(matches!( changed[0], - StatementChange::Deleted(Statement { - path: path.clone(), - id: 1 - }) - ); - assert_eq!( + StatementChange::Deleted(StatementId::Root(_)) + )); + assert_eq!(changed[0].statement().raw(), 1); + assert!(matches!( changed[1], - StatementChange::Deleted(Statement { - path: path.clone(), - id: 0 - }) - ); + StatementChange::Deleted(StatementId::Root(_)) + )); + assert_eq!(changed[1].statement().raw(), 0); assert_eq!( changed[2], StatementChange::Added(AddedStatement { - stmt: Statement { - path: path.clone(), - id: 2 - }, + stmt: StatementId::Root(root_id(2)), text: "select id,test from users".to_string() }) ); assert_eq!( changed[3], StatementChange::Added(AddedStatement { - stmt: Statement { - path: path.clone(), - id: 3 - }, + stmt: StatementId::Root(root_id(3)), text: "select 1;".to_string() }) ); @@ -943,7 +908,7 @@ mod tests { let path = PgTPath::new("test.sql"); let input = "\n"; - let mut d = Document::new(path.clone(), input.to_string(), 1); + let mut d = Document::new(input.to_string(), 1); assert_eq!(d.positions.len(), 0); @@ -983,7 +948,7 @@ mod tests { let path = PgTPath::new("test.sql"); let input = "select id from\nselect * from contacts;"; - let mut d = Document::new(path.clone(), input.to_string(), 1); + let mut d = Document::new(input.to_string(), 1); assert_eq!(d.positions.len(), 2); @@ -1014,7 +979,7 @@ mod tests { fn apply_changes_replacement() { let path = PgTPath::new("test.sql"); - let mut doc = Document::new(path.clone(), "".to_string(), 0); + let mut doc = Document::new("".to_string(), 0); let change = ChangeFileParams { path: path.clone(), @@ -1136,7 +1101,6 @@ mod tests { let path = PgTPath::new("test.sql"); let mut doc = Document::new( - path.clone(), "-- Add new schema named \"private\"\nCREATE SCHEMA \"private\";".to_string(), 0, ); @@ -1156,12 +1120,8 @@ mod tests { doc.content, "- Add new schema named \"private\"\nCREATE SCHEMA \"private\";" ); - assert_eq!(changed.len(), 3); - assert!(matches!( - changed[0], - StatementChange::Deleted(Statement { id: 0, .. }) - )); + assert!(matches!(&changed[0], StatementChange::Deleted(_))); assert!(matches!( changed[1], StatementChange::Added(AddedStatement { .. }) @@ -1190,11 +1150,11 @@ mod tests { assert_eq!(changed_2.len(), 3); assert!(matches!( changed_2[0], - StatementChange::Deleted(Statement { .. }) + StatementChange::Deleted(StatementId::Root(_)) )); assert!(matches!( changed_2[1], - StatementChange::Deleted(Statement { .. 
}) + StatementChange::Deleted(StatementId::Root(_)) )); assert!(matches!( changed_2[2], @@ -1209,12 +1169,12 @@ mod tests { let input = "select id from users;\nselect * from contacts;"; let path = PgTPath::new("test.sql"); - let mut doc = Document::new(path.clone(), input.to_string(), 0); + let mut doc = Document::new(input.to_string(), 0); assert_eq!(doc.positions.len(), 2); - let stmt_1_range = doc.positions[0]; - let stmt_2_range = doc.positions[1]; + let stmt_1_range = doc.positions[0].clone(); + let stmt_2_range = doc.positions[1].clone(); let update_text = ",test"; @@ -1261,7 +1221,7 @@ mod tests { let path = PgTPath::new("test.sql"); let input = "select id from contacts;\n\nselect * from contacts;"; - let mut d = Document::new(path.clone(), input.to_string(), 1); + let mut d = Document::new(input.to_string(), 1); assert_eq!(d.positions.len(), 2); @@ -1310,7 +1270,7 @@ mod tests { assert!(matches!( changes[0], - StatementChange::Deleted(Statement { .. }) + StatementChange::Deleted(StatementId::Root(_)) )); assert!(matches!( @@ -1332,7 +1292,7 @@ mod tests { fn remove_trailing_whitespace() { let path = PgTPath::new("test.sql"); - let mut doc = Document::new(path.clone(), "select * from ".to_string(), 0); + let mut doc = Document::new("select * from ".to_string(), 0); let change = ChangeFileParams { path: path.clone(), @@ -1378,7 +1338,7 @@ mod tests { fn remove_trailing_whitespace_and_last_char() { let path = PgTPath::new("test.sql"); - let mut doc = Document::new(path.clone(), "select * from ".to_string(), 0); + let mut doc = Document::new("select * from ".to_string(), 0); let change = ChangeFileParams { path: path.clone(), @@ -1424,7 +1384,7 @@ mod tests { fn remove_inbetween_whitespace() { let path = PgTPath::new("test.sql"); - let mut doc = Document::new(path.clone(), "select * from users".to_string(), 0); + let mut doc = Document::new("select * from users".to_string(), 0); let change = ChangeFileParams { path: path.clone(), diff --git a/crates/pgt_workspace/src/workspace/server/document.rs b/crates/pgt_workspace/src/workspace/server/document.rs index 9ef8c234..f2c500cc 100644 --- a/crates/pgt_workspace/src/workspace/server/document.rs +++ b/crates/pgt_workspace/src/workspace/server/document.rs @@ -1,22 +1,11 @@ use pgt_diagnostics::{Diagnostic, DiagnosticExt, Severity, serde::Diagnostic as SDiagnostic}; -use pgt_fs::PgTPath; use pgt_text_size::{TextRange, TextSize}; -/// Global unique identifier for a statement -#[derive(Debug, Hash, Eq, PartialEq, Clone)] -pub(crate) struct Statement { - /// Path of the document - pub(crate) path: PgTPath, - /// Unique id within the document - pub(crate) id: StatementId, -} - -pub(crate) type StatementId = usize; +use super::statement_identifier::{StatementId, StatementIdGenerator}; type StatementPos = (StatementId, TextRange); pub(crate) struct Document { - pub(crate) path: PgTPath, pub(crate) content: String, pub(crate) version: i32, @@ -24,17 +13,16 @@ pub(crate) struct Document { /// List of statements sorted by range.start() pub(super) positions: Vec, - pub(super) id_generator: IdGenerator, + pub(super) id_generator: StatementIdGenerator, } impl Document { - pub(crate) fn new(path: PgTPath, content: String, version: i32) -> Self { - let mut id_generator = IdGenerator::new(); + pub(crate) fn new(content: String, version: i32) -> Self { + let mut id_generator = StatementIdGenerator::new(); let (ranges, diagnostics) = split_with_diagnostics(&content, None); Self { - path, positions: ranges .into_iter() .map(|range| (id_generator.next(), 
range)) @@ -42,15 +30,10 @@ impl Document { content, version, diagnostics, - id_generator, } } - pub fn diagnostics(&self) -> &[SDiagnostic] { - &self.diagnostics - } - /// Returns true if there is at least one fatal error in the diagnostics /// /// A fatal error is a scan error that prevents the document from being used @@ -60,74 +43,8 @@ impl Document { .any(|d| d.severity() == Severity::Fatal) } - pub fn iter_statements(&self) -> impl Iterator + '_ { - self.positions.iter().map(move |(id, _)| Statement { - id: *id, - path: self.path.clone(), - }) - } - - pub fn iter_statements_with_text(&self) -> impl Iterator + '_ { - self.positions.iter().map(move |(id, range)| { - let statement = Statement { - id: *id, - path: self.path.clone(), - }; - let text = &self.content[range.start().into()..range.end().into()]; - (statement, text) - }) - } - - pub fn iter_statements_with_range(&self) -> impl Iterator + '_ { - self.positions.iter().map(move |(id, range)| { - let statement = Statement { - id: *id, - path: self.path.clone(), - }; - (statement, range) - }) - } - - pub fn iter_statements_with_text_and_range( - &self, - ) -> impl Iterator + '_ { - self.positions.iter().map(move |(id, range)| { - let statement = Statement { - id: *id, - path: self.path.clone(), - }; - ( - statement, - range, - &self.content[range.start().into()..range.end().into()], - ) - }) - } - - pub fn get_txt(&self, stmt_id: StatementId) -> Option { - self.positions - .iter() - .find(|pos| pos.0 == stmt_id) - .map(|(_, range)| { - let stmt = &self.content[range.start().into()..range.end().into()]; - stmt.to_owned() - }) - } -} - -pub(crate) struct IdGenerator { - pub(super) next_id: usize, -} - -impl IdGenerator { - fn new() -> Self { - Self { next_id: 0 } - } - - pub(super) fn next(&mut self) -> usize { - let id = self.next_id; - self.next_id += 1; - id + pub fn iter<'a>(&'a self) -> StatementIterator<'a> { + StatementIterator::new(self) } } @@ -165,3 +82,30 @@ pub(crate) fn split_with_diagnostics( ), } } + +pub struct StatementIterator<'a> { + document: &'a Document, + positions: std::slice::Iter<'a, StatementPos>, +} + +impl<'a> StatementIterator<'a> { + pub fn new(document: &'a Document) -> Self { + Self { + document, + positions: document.positions.iter(), + } + } +} + +impl<'a> Iterator for StatementIterator<'a> { + type Item = (StatementId, TextRange, &'a str); + + fn next(&mut self) -> Option { + self.positions.next().map(|(id, range)| { + let range = *range; + let doc = self.document; + let id = id.clone(); + (id, range, &doc.content[range]) + }) + } +} diff --git a/crates/pgt_workspace/src/workspace/server/parsed_document.rs b/crates/pgt_workspace/src/workspace/server/parsed_document.rs new file mode 100644 index 00000000..a110fb1f --- /dev/null +++ b/crates/pgt_workspace/src/workspace/server/parsed_document.rs @@ -0,0 +1,374 @@ +use std::sync::Arc; + +use pgt_diagnostics::serde::Diagnostic as SDiagnostic; +use pgt_fs::PgTPath; +use pgt_query_ext::diagnostics::SyntaxDiagnostic; +use pgt_text_size::{TextRange, TextSize}; + +use crate::workspace::ChangeFileParams; + +use super::{ + change::StatementChange, + document::{Document, StatementIterator}, + pg_query::PgQueryStore, + sql_function::SQLFunctionBodyStore, + statement_identifier::StatementId, + tree_sitter::TreeSitterStore, +}; + +pub struct ParsedDocument { + #[allow(dead_code)] + path: PgTPath, + + doc: Document, + ast_db: PgQueryStore, + cst_db: TreeSitterStore, + sql_fn_db: SQLFunctionBodyStore, +} + +impl ParsedDocument { + pub fn new(path: PgTPath, content: 
String, version: i32) -> ParsedDocument { + let doc = Document::new(content, version); + + let cst_db = TreeSitterStore::new(); + let ast_db = PgQueryStore::new(); + let sql_fn_db = SQLFunctionBodyStore::new(); + + doc.iter().for_each(|(stmt, _, content)| { + cst_db.add_statement(&stmt, content); + }); + + ParsedDocument { + path, + doc, + ast_db, + cst_db, + sql_fn_db, + } + } + + /// Applies a change to the document and updates the CST and AST databases accordingly. + /// + /// Note that only tree-sitter cares about statement modifications vs remove + add. + /// Hence, we just clear the AST for the old statements and lazily load them when requested. + /// + /// * `params`: ChangeFileParams - The parameters for the change to be applied. + pub fn apply_change(&mut self, params: ChangeFileParams) { + for c in &self.doc.apply_file_change(¶ms) { + match c { + StatementChange::Added(added) => { + tracing::debug!( + "Adding statement: id:{:?}, text:{:?}", + added.stmt, + added.text + ); + self.cst_db.add_statement(&added.stmt, &added.text); + } + StatementChange::Deleted(s) => { + tracing::debug!("Deleting statement: id {:?}", s,); + self.cst_db.remove_statement(s); + self.ast_db.clear_statement(s); + self.sql_fn_db.clear_statement(s); + } + StatementChange::Modified(s) => { + tracing::debug!( + "Modifying statement with id {:?} (new id {:?}). Range {:?}, Changed from '{:?}' to '{:?}', changed text: {:?}", + s.old_stmt, + s.new_stmt, + s.change_range, + s.old_stmt_text, + s.new_stmt_text, + s.change_text + ); + + self.cst_db.modify_statement(s); + self.ast_db.clear_statement(&s.old_stmt); + self.sql_fn_db.clear_statement(&s.old_stmt); + } + } + } + } + + pub fn get_document_content(&self) -> &str { + &self.doc.content + } + + pub fn document_diagnostics(&self) -> &Vec { + &self.doc.diagnostics + } + + pub fn find<'a, M>(&'a self, id: StatementId, mapper: M) -> Option + where + M: StatementMapper<'a>, + { + self.iter_with_filter(mapper, IdFilter::new(id)).next() + } + + pub fn iter<'a, M>(&'a self, mapper: M) -> ParseIterator<'a, M, NoFilter> + where + M: StatementMapper<'a>, + { + self.iter_with_filter(mapper, NoFilter) + } + + pub fn iter_with_filter<'a, M, F>(&'a self, mapper: M, filter: F) -> ParseIterator<'a, M, F> + where + M: StatementMapper<'a>, + F: StatementFilter<'a>, + { + ParseIterator::new(self, mapper, filter) + } + + #[allow(dead_code)] + pub fn count(&self) -> usize { + self.iter(DefaultMapper).count() + } +} + +pub trait StatementMapper<'a> { + type Output; + + fn map( + &self, + parser: &'a ParsedDocument, + id: StatementId, + range: TextRange, + content: &str, + ) -> Self::Output; +} + +pub trait StatementFilter<'a> { + fn predicate(&self, id: &StatementId, range: &TextRange) -> bool; +} + +pub struct ParseIterator<'a, M, F> { + parser: &'a ParsedDocument, + statements: StatementIterator<'a>, + mapper: M, + filter: F, + pending_sub_statements: Vec<(StatementId, TextRange, String)>, +} + +impl<'a, M, F> ParseIterator<'a, M, F> { + pub fn new(parser: &'a ParsedDocument, mapper: M, filter: F) -> Self { + Self { + parser, + statements: parser.doc.iter(), + mapper, + filter, + pending_sub_statements: Vec::new(), + } + } +} + +impl<'a, M, F> Iterator for ParseIterator<'a, M, F> +where + M: StatementMapper<'a>, + F: StatementFilter<'a>, +{ + type Item = M::Output; + + fn next(&mut self) -> Option { + // First check if we have any pending sub-statements to process + if let Some((id, range, content)) = self.pending_sub_statements.pop() { + if self.filter.predicate(&id, &range) { + 
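+                // A pending sub-statement that passes the filter is mapped
+                // exactly like a top-level statement.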
return Some(self.mapper.map(self.parser, id, range, &content)); + } + // If the sub-statement doesn't pass the filter, continue to the next item + return self.next(); + } + + // Process the next top-level statement + let next_statement = self.statements.next(); + + if let Some((root_id, range, content)) = next_statement { + // If we should include sub-statements and this statement has an AST + let content_owned = content.to_string(); + if let Ok(ast) = self + .parser + .ast_db + .get_or_cache_ast(&root_id, &content_owned) + .as_ref() + { + // Check if this is a SQL function definition with a body + if let Some(sub_statement) = + self.parser + .sql_fn_db + .get_function_body(&root_id, ast, &content_owned) + { + // Add sub-statements to our pending queue + self.pending_sub_statements.push(( + root_id.create_child(), + // adjust range to document + sub_statement.range + range.start(), + sub_statement.body.clone(), + )); + } + } + + // Return the current statement if it passes the filter + if self.filter.predicate(&root_id, &range) { + return Some(self.mapper.map(self.parser, root_id, range, content)); + } + + // If the current statement doesn't pass the filter, try the next one + return self.next(); + } + + None + } +} + +pub struct DefaultMapper; +impl<'a> StatementMapper<'a> for DefaultMapper { + type Output = (StatementId, TextRange, String); + + fn map( + &self, + _parser: &'a ParsedDocument, + id: StatementId, + range: TextRange, + content: &str, + ) -> Self::Output { + (id, range, content.to_string()) + } +} + +pub struct ExecuteStatementMapper; +impl<'a> StatementMapper<'a> for ExecuteStatementMapper { + type Output = ( + StatementId, + TextRange, + String, + Option, + ); + + fn map( + &self, + parser: &'a ParsedDocument, + id: StatementId, + range: TextRange, + content: &str, + ) -> Self::Output { + let ast_result = parser.ast_db.get_or_cache_ast(&id, content); + let ast_option = match &*ast_result { + Ok(node) => Some(node.clone()), + Err(_) => None, + }; + + (id, range, content.to_string(), ast_option) + } +} + +pub struct AsyncDiagnosticsMapper; +impl<'a> StatementMapper<'a> for AsyncDiagnosticsMapper { + type Output = ( + StatementId, + TextRange, + String, + Option, + Arc, + ); + + fn map( + &self, + parser: &'a ParsedDocument, + id: StatementId, + range: TextRange, + content: &str, + ) -> Self::Output { + let content_owned = content.to_string(); + let ast_result = parser.ast_db.get_or_cache_ast(&id, &content_owned); + + let ast_option = match &*ast_result { + Ok(node) => Some(node.clone()), + Err(_) => None, + }; + + let cst_result = parser.cst_db.get_or_cache_tree(&id, &content_owned); + + (id, range, content_owned, ast_option, cst_result) + } +} + +pub struct SyncDiagnosticsMapper; +impl<'a> StatementMapper<'a> for SyncDiagnosticsMapper { + type Output = ( + StatementId, + TextRange, + Option, + Option, + ); + + fn map( + &self, + parser: &'a ParsedDocument, + id: StatementId, + range: TextRange, + content: &str, + ) -> Self::Output { + let ast_result = parser.ast_db.get_or_cache_ast(&id, content); + + let (ast_option, diagnostics) = match &*ast_result { + Ok(node) => (Some(node.clone()), None), + Err(diag) => (None, Some(diag.clone())), + }; + + (id, range, ast_option, diagnostics) + } +} + +pub struct GetCompletionsMapper; +impl<'a> StatementMapper<'a> for GetCompletionsMapper { + type Output = (StatementId, TextRange, String, Arc); + + fn map( + &self, + parser: &'a ParsedDocument, + id: StatementId, + range: TextRange, + content: &str, + ) -> Self::Output { + let 
cst_result = parser.cst_db.get_or_cache_tree(&id, content); + (id, range, content.to_string(), cst_result) + } +} + +pub struct NoFilter; +impl<'a> StatementFilter<'a> for NoFilter { + fn predicate(&self, _id: &StatementId, _range: &TextRange) -> bool { + true + } +} + +pub struct CursorPositionFilter { + pos: TextSize, +} + +impl CursorPositionFilter { + pub fn new(pos: TextSize) -> Self { + Self { pos } + } +} + +impl<'a> StatementFilter<'a> for CursorPositionFilter { + fn predicate(&self, _id: &StatementId, range: &TextRange) -> bool { + range.contains(self.pos) + } +} + +pub struct IdFilter { + id: StatementId, +} + +impl IdFilter { + pub fn new(id: StatementId) -> Self { + Self { id } + } +} + +impl<'a> StatementFilter<'a> for IdFilter { + fn predicate(&self, id: &StatementId, _range: &TextRange) -> bool { + *id == self.id + } +} diff --git a/crates/pgt_workspace/src/workspace/server/pg_query.rs b/crates/pgt_workspace/src/workspace/server/pg_query.rs index 3ed452fc..e5c0cac8 100644 --- a/crates/pgt_workspace/src/workspace/server/pg_query.rs +++ b/crates/pgt_workspace/src/workspace/server/pg_query.rs @@ -1,52 +1,38 @@ use std::sync::Arc; use dashmap::DashMap; -use pgt_diagnostics::serde::Diagnostic as SDiagnostic; use pgt_query_ext::diagnostics::*; -use super::{change::ModifiedStatement, document::Statement}; +use super::statement_identifier::StatementId; pub struct PgQueryStore { - ast_db: DashMap>, - diagnostics: DashMap, + db: DashMap>>, } impl PgQueryStore { pub fn new() -> PgQueryStore { - PgQueryStore { - ast_db: DashMap::new(), - diagnostics: DashMap::new(), - } - } - - pub fn get_ast(&self, statement: &Statement) -> Option> { - self.ast_db.get(statement).map(|x| x.clone()) + PgQueryStore { db: DashMap::new() } } - pub fn add_statement(&self, statement: &Statement, content: &str) { - let r = pgt_query_ext::parse(content); - if let Ok(ast) = r { - self.ast_db.insert(statement.clone(), Arc::new(ast)); - } else { - tracing::info!("invalid statement, adding diagnostics."); - self.diagnostics - .insert(statement.clone(), SyntaxDiagnostic::from(r.unwrap_err())); + pub fn get_or_cache_ast( + &self, + statement: &StatementId, + content: &str, + ) -> Arc> { + if let Some(existing) = self.db.get(statement).map(|x| x.clone()) { + return existing; } - } - pub fn remove_statement(&self, statement: &Statement) { - self.ast_db.remove(statement); - self.diagnostics.remove(statement); + let r = Arc::new(pgt_query_ext::parse(content).map_err(SyntaxDiagnostic::from)); + self.db.insert(statement.clone(), r.clone()); + r } - pub fn modify_statement(&self, change: &ModifiedStatement) { - self.remove_statement(&change.old_stmt); - self.add_statement(&change.new_stmt, &change.new_stmt_text); - } + pub fn clear_statement(&self, id: &StatementId) { + self.db.remove(id); - pub fn get_diagnostics(&self, stmt: &Statement) -> Vec { - self.diagnostics - .get(stmt) - .map_or_else(Vec::new, |err| vec![SDiagnostic::new(err.value().clone())]) + if let Some(child_id) = id.get_child_id() { + self.db.remove(&child_id); + } } } diff --git a/crates/pgt_workspace/src/workspace/server/sql_function.rs b/crates/pgt_workspace/src/workspace/server/sql_function.rs new file mode 100644 index 00000000..3273466d --- /dev/null +++ b/crates/pgt_workspace/src/workspace/server/sql_function.rs @@ -0,0 +1,111 @@ +use std::sync::Arc; + +use dashmap::DashMap; +use pgt_text_size::TextRange; + +use super::statement_identifier::StatementId; + +pub struct SQLFunctionBody { + pub range: TextRange, + pub body: String, +} + +pub struct 
SQLFunctionBodyStore { + db: DashMap>>, +} + +impl SQLFunctionBodyStore { + pub fn new() -> SQLFunctionBodyStore { + SQLFunctionBodyStore { db: DashMap::new() } + } + + pub fn get_function_body( + &self, + statement: &StatementId, + ast: &pgt_query_ext::NodeEnum, + content: &str, + ) -> Option> { + // First check if we already have this statement cached + if let Some(existing) = self.db.get(statement).map(|x| x.clone()) { + return existing; + } + + // If not cached, try to extract it from the AST + let fn_body = get_sql_fn(ast, content).map(Arc::new); + + // Cache the result and return it + self.db.insert(statement.clone(), fn_body.clone()); + fn_body + } + + pub fn clear_statement(&self, id: &StatementId) { + self.db.remove(id); + + if let Some(child_id) = id.get_child_id() { + self.db.remove(&child_id); + } + } +} + +/// Extracts SQL function body and its text range from a CreateFunctionStmt node. +/// Returns None if the function is not an SQL function or if the body can't be found. +fn get_sql_fn(ast: &pgt_query_ext::NodeEnum, content: &str) -> Option { + let create_fn = match ast { + pgt_query_ext::NodeEnum::CreateFunctionStmt(cf) => cf, + _ => return None, + }; + + // Extract language from function options + let language = find_option_value(create_fn, "language")?; + + // Only process SQL functions + if language != "sql" { + return None; + } + + // Extract SQL body from function options + let sql_body = find_option_value(create_fn, "as")?; + + // Find the range of the SQL body in the content + let start = content.find(&sql_body)?; + let end = start + sql_body.len(); + + let range = TextRange::new(start.try_into().unwrap(), end.try_into().unwrap()); + + Some(SQLFunctionBody { + range, + body: sql_body.clone(), + }) +} + +/// Helper function to find a specific option value from function options +fn find_option_value( + create_fn: &pgt_query_ext::protobuf::CreateFunctionStmt, + option_name: &str, +) -> Option { + create_fn + .options + .iter() + .filter_map(|opt_wrapper| opt_wrapper.node.as_ref()) + .find_map(|opt| { + if let pgt_query_ext::NodeEnum::DefElem(def_elem) = opt { + if def_elem.defname == option_name { + def_elem + .arg + .iter() + .filter_map(|arg_wrapper| arg_wrapper.node.as_ref()) + .find_map(|arg| { + if let pgt_query_ext::NodeEnum::String(s) = arg { + Some(s.sval.clone()) + } else { + None + } + }) + } else { + None + } + } else { + None + } + }) +} diff --git a/crates/pgt_workspace/src/workspace/server/statement_identifier.rs b/crates/pgt_workspace/src/workspace/server/statement_identifier.rs new file mode 100644 index 00000000..0739fb2f --- /dev/null +++ b/crates/pgt_workspace/src/workspace/server/statement_identifier.rs @@ -0,0 +1,90 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] +pub struct RootId { + inner: usize, +} + +#[cfg(test)] +pub fn root_id(inner: usize) -> RootId { + RootId { inner } +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] +/// `StatementId` can represent IDs for nested statements. 
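+/// A `Root` id identifies a top-level statement, while a `Child` id identifies a statement nested inside it.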
+///
+/// For example, an SQL function really consists of two statements: the function creation
+/// and the body:
+///
+/// ```sql
+/// create or replace function get_product_name(product_id INT) -- the root statement
+/// returns varchar as $$
+/// select * from … -- the child statement
+/// $$ LANGUAGE sql;
+/// ```
+///
+/// For now, we only support SQL functions – no complex, nested statements.
+///
+/// An SQL function only ever has ONE child, which is why the inner `RootId` of a `Root`
+/// is the same as that of its `Child`.
+pub enum StatementId {
+    Root(RootId),
+    // The inner id is the same as the root's id, since we can only have a single sql function body per Root
+    Child(RootId),
+}
+
+impl Default for StatementId {
+    fn default() -> Self {
+        StatementId::Root(RootId { inner: 0 })
+    }
+}
+
+impl StatementId {
+    pub fn raw(&self) -> usize {
+        match self {
+            StatementId::Root(s) => s.inner,
+            StatementId::Child(s) => s.inner,
+        }
+    }
+}
+
+/// Helper struct to generate unique statement ids
+pub struct StatementIdGenerator {
+    next_id: usize,
+}
+
+impl StatementIdGenerator {
+    pub fn new() -> Self {
+        Self { next_id: 0 }
+    }
+
+    pub fn next(&mut self) -> StatementId {
+        let id = self.next_id;
+        self.next_id += 1;
+        StatementId::Root(RootId { inner: id })
+    }
+}
+
+impl StatementId {
+    /// Use this to get the matching `StatementId::Child` for
+    /// a `StatementId::Root`.
+    /// If the `StatementId` was already a `Child`, this will return `None`.
+    /// It is not guaranteed that the `Root` actually has a `Child` statement in the workspace.
+    pub fn get_child_id(&self) -> Option<StatementId> {
+        match self {
+            StatementId::Root(id) => Some(StatementId::Child(RootId { inner: id.inner })),
+            StatementId::Child(_) => None,
+        }
+    }
+
+    /// Use this if you need to create a matching `StatementId::Child` for `Root`.
+    /// You cannot create a `Child` of a `Child`.
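+    /// Panics if called on a `Child`.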
+ pub fn create_child(&self) -> StatementId { + match self { + StatementId::Root(id) => StatementId::Child(RootId { inner: id.inner }), + StatementId::Child(_) => panic!("Cannot create child from a child statement id"), + } + } +} diff --git a/crates/pgt_workspace/src/workspace/server/tree_sitter.rs b/crates/pgt_workspace/src/workspace/server/tree_sitter.rs index 09cff74c..a8932535 100644 --- a/crates/pgt_workspace/src/workspace/server/tree_sitter.rs +++ b/crates/pgt_workspace/src/workspace/server/tree_sitter.rs @@ -1,14 +1,13 @@ -use std::sync::{Arc, RwLock}; +use std::sync::{Arc, Mutex}; use dashmap::DashMap; use tree_sitter::InputEdit; -use super::{change::ModifiedStatement, document::Statement}; +use super::{change::ModifiedStatement, statement_identifier::StatementId}; pub struct TreeSitterStore { - db: DashMap>, - - parser: RwLock, + db: DashMap>, + parser: Mutex, } impl TreeSitterStore { @@ -20,24 +19,38 @@ impl TreeSitterStore { TreeSitterStore { db: DashMap::new(), - parser: RwLock::new(parser), + parser: Mutex::new(parser), } } - pub fn get_parse_tree(&self, statement: &Statement) -> Option> { - self.db.get(statement).map(|x| x.clone()) + pub fn get_or_cache_tree( + &self, + statement: &StatementId, + content: &str, + ) -> Arc { + if let Some(existing) = self.db.get(statement).map(|x| x.clone()) { + return existing; + } + + let mut parser = self.parser.lock().expect("Failed to lock parser"); + let tree = Arc::new(parser.parse(content, None).unwrap()); + self.db.insert(statement.clone(), tree.clone()); + + tree } - pub fn add_statement(&self, statement: &Statement, content: &str) { - let mut guard = self.parser.write().expect("Error reading parser"); - // todo handle error - let tree = guard.parse(content, None).unwrap(); - drop(guard); + pub fn add_statement(&self, statement: &StatementId, content: &str) { + let mut parser = self.parser.lock().expect("Failed to lock parser"); + let tree = parser.parse(content, None).unwrap(); self.db.insert(statement.clone(), Arc::new(tree)); } - pub fn remove_statement(&self, statement: &Statement) { - self.db.remove(statement); + pub fn remove_statement(&self, id: &StatementId) { + self.db.remove(id); + + if let Some(child_id) = id.get_child_id() { + self.db.remove(&child_id); + } } pub fn modify_statement(&self, change: &ModifiedStatement) { @@ -61,18 +74,17 @@ impl TreeSitterStore { tree.edit(&edit); - let mut guard = self.parser.write().expect("Error reading parser"); + let mut parser = self.parser.lock().expect("Failed to lock parser"); // todo handle error self.db.insert( change.new_stmt.clone(), - Arc::new(guard.parse(&change.new_stmt_text, Some(&tree)).unwrap()), + Arc::new(parser.parse(&change.new_stmt_text, Some(&tree)).unwrap()), ); - drop(guard); } } // Converts character positions and replacement text into a tree-sitter InputEdit -fn edit_from_change( +pub(crate) fn edit_from_change( text: &str, start_char: usize, end_char: usize, From a1fdb5b960943fe20cf14e12aefcd07fcb52ab19 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Sat, 12 Apr 2025 17:42:40 +0200 Subject: [PATCH 010/114] fix: parse unions (#329) --- crates/pgt_statement_splitter/src/parser.rs | 2 ++ crates/pgt_statement_splitter/src/parser/common.rs | 6 ++++++ .../pgt_statement_splitter/tests/data/simple_union__4.sql | 7 +++++++ 3 files changed, 15 insertions(+) create mode 100644 crates/pgt_statement_splitter/tests/data/simple_union__4.sql diff --git a/crates/pgt_statement_splitter/src/parser.rs 
b/crates/pgt_statement_splitter/src/parser.rs index 4cdf1fc6..05de8cb0 100644 --- a/crates/pgt_statement_splitter/src/parser.rs +++ b/crates/pgt_statement_splitter/src/parser.rs @@ -147,6 +147,7 @@ impl Parser { } /// Look ahead to the next relevant token + /// Returns `None` if we are already at the last relevant token fn look_ahead(&self) -> Option<&Token> { // we need to look ahead to the next relevant token let mut look_ahead_pos = self.next_pos + 1; @@ -161,6 +162,7 @@ impl Parser { } } + /// Returns `None` if there are no previous relevant tokens fn look_back(&self) -> Option<&Token> { // we need to look back to the last relevant token let mut look_back_pos = self.next_pos - 1; diff --git a/crates/pgt_statement_splitter/src/parser/common.rs b/crates/pgt_statement_splitter/src/parser/common.rs index af3dc6cc..d145018d 100644 --- a/crates/pgt_statement_splitter/src/parser/common.rs +++ b/crates/pgt_statement_splitter/src/parser/common.rs @@ -145,6 +145,12 @@ pub(crate) fn unknown(p: &mut Parser, exclude: &[SyntaxKind]) { SyntaxKind::Also, // for create rule SyntaxKind::Instead, + // for UNION + SyntaxKind::Union, + // for UNION ALL + SyntaxKind::All, + // for UNION ... EXCEPT + SyntaxKind::Except, ] .iter() .all(|x| Some(x) != prev.as_ref()) diff --git a/crates/pgt_statement_splitter/tests/data/simple_union__4.sql b/crates/pgt_statement_splitter/tests/data/simple_union__4.sql new file mode 100644 index 00000000..100b59ea --- /dev/null +++ b/crates/pgt_statement_splitter/tests/data/simple_union__4.sql @@ -0,0 +1,7 @@ +select 1 union all select 2; + +select 1 union select 2; + +select 1 union select 2 except select 3; + +select 1 union all select 2 except select 3; \ No newline at end of file From 1cfa5b8389009f77354dd9240570c086b298a5c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Sat, 12 Apr 2025 22:10:34 +0200 Subject: [PATCH 011/114] fix: c-style comments (#332) --- crates/pgt_lexer/src/lib.rs | 5 +++-- crates/pgt_statement_splitter/src/lib.rs | 5 +++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/pgt_lexer/src/lib.rs b/crates/pgt_lexer/src/lib.rs index ce47725d..32bbdd42 100644 --- a/crates/pgt_lexer/src/lib.rs +++ b/crates/pgt_lexer/src/lib.rs @@ -22,8 +22,8 @@ pub enum TokenType { impl From<&ScanToken> for TokenType { fn from(token: &ScanToken) -> TokenType { match token.token { - // SqlComment - 275 => TokenType::Whitespace, + // SqlComment | CComment + 275 | 276 => TokenType::Whitespace, _ => match token.keyword_kind() { KeywordKind::NoKeyword => TokenType::NoKeyword, KeywordKind::UnreservedKeyword => TokenType::UnreservedKeyword, @@ -59,6 +59,7 @@ pub static WHITESPACE_TOKENS: &[SyntaxKind] = &[ SyntaxKind::Tab, SyntaxKind::Newline, SyntaxKind::SqlComment, + SyntaxKind::CComment, ]; static PATTERN_LEXER: LazyLock = LazyLock::new(|| { diff --git a/crates/pgt_statement_splitter/src/lib.rs b/crates/pgt_statement_splitter/src/lib.rs index 68f5daaf..4af5d5b4 100644 --- a/crates/pgt_statement_splitter/src/lib.rs +++ b/crates/pgt_statement_splitter/src/lib.rs @@ -142,6 +142,11 @@ mod tests { .expect_statements(vec!["insert into tbl (id) select 1", "select 3"]); } + #[test] + fn c_style_comments() { + Tester::from("/* this is a test */\nselect 1").expect_statements(vec!["select 1"]); + } + #[test] fn with_check() { Tester::from("create policy employee_insert on journey_execution for insert to authenticated with check ((select private.organisation_id()) = organisation_id);") From 70f0c93e38bdd0ce3eedfb236dfbe8f6cf2bb44f Mon Sep 17 
00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Sat, 12 Apr 2025 22:11:08 +0200 Subject: [PATCH 012/114] refactor: simplify parser ? (#330) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * simplify * simplify 2 * simplify 3 * ok * ffs * comment * ok............ * tidying up * comment… * ok * comment * more * ok * ok * end test --- crates/pgt_statement_splitter/src/lib.rs | 6 +- crates/pgt_statement_splitter/src/parser.rs | 235 +++++++++--------- .../src/parser/common.rs | 10 +- .../pgt_statement_splitter/src/parser/dml.rs | 4 +- 4 files changed, 134 insertions(+), 121 deletions(-) diff --git a/crates/pgt_statement_splitter/src/lib.rs b/crates/pgt_statement_splitter/src/lib.rs index 4af5d5b4..06440da1 100644 --- a/crates/pgt_statement_splitter/src/lib.rs +++ b/crates/pgt_statement_splitter/src/lib.rs @@ -4,10 +4,10 @@ pub mod diagnostics; mod parser; -use parser::{Parse, Parser, source}; +use parser::{Parser, ParserResult, source}; use pgt_lexer::diagnostics::ScanError; -pub fn split(sql: &str) -> Result> { +pub fn split(sql: &str) -> Result> { let tokens = pgt_lexer::lex(sql)?; let mut parser = Parser::new(tokens); @@ -28,7 +28,7 @@ mod tests { struct Tester { input: String, - parse: Parse, + parse: ParserResult, } impl From<&str> for Tester { diff --git a/crates/pgt_statement_splitter/src/parser.rs b/crates/pgt_statement_splitter/src/parser.rs index 05de8cb0..c94fe245 100644 --- a/crates/pgt_statement_splitter/src/parser.rs +++ b/crates/pgt_statement_splitter/src/parser.rs @@ -13,24 +13,24 @@ use crate::diagnostics::SplitDiagnostic; /// Main parser that exposes the `cstree` api, and collects errors and statements /// It is modelled after a Pratt Parser. For a gentle introduction to Pratt Parsing, see https://matklad.github.io/2020/04/13/simple-but-powerful-pratt-parsing.html pub struct Parser { - /// The ranges of the statements - ranges: Vec<(usize, usize)>, + /// The statement ranges are defined by the indices of the start/end tokens + stmt_ranges: Vec<(usize, usize)>, + /// The syntax errors accumulated during parsing errors: Vec, - /// The start of the current statement, if any + current_stmt_start: Option, - /// The tokens to parse - pub tokens: Vec, + + tokens: Vec, eof_token: Token, - next_pos: usize, + current_pos: usize, } -/// Result of Building #[derive(Debug)] -pub struct Parse { - /// The ranges of the errors +pub struct ParserResult { + /// The ranges of the parsed statements pub ranges: Vec, /// The syntax errors accumulated during parsing pub errors: Vec, @@ -41,40 +41,34 @@ impl Parser { let eof_token = Token::eof(usize::from( tokens .last() - .map(|t| t.span.start()) + .map(|t| t.span.end()) .unwrap_or(TextSize::from(0)), )); - // next_pos should be the initialised with the first valid token already - let mut next_pos = 0; - loop { - let token = tokens.get(next_pos).unwrap_or(&eof_token); - - if is_irrelevant_token(token) { - next_pos += 1; - } else { - break; - } + // Place `current_pos` on the first relevant token + let mut current_pos = 0; + while is_irrelevant_token(tokens.get(current_pos).unwrap_or(&eof_token)) { + current_pos += 1; } Self { - ranges: Vec::new(), + stmt_ranges: Vec::new(), eof_token, errors: Vec::new(), current_stmt_start: None, tokens, - next_pos, + current_pos, } } - pub fn finish(self) -> Parse { - Parse { + pub fn finish(self) -> ParserResult { + ParserResult { ranges: self - .ranges + .stmt_ranges .iter() - .map(|(start, end)| { - let from = 
self.tokens.get(*start); - let to = self.tokens.get(*end).unwrap_or(&self.eof_token); + .map(|(start_token_pos, end_token_pos)| { + let from = self.tokens.get(*start_token_pos); + let to = self.tokens.get(*end_token_pos).unwrap_or(&self.eof_token); TextRange::new(from.unwrap().span.start(), to.span.end()) }) @@ -83,124 +77,87 @@ impl Parser { } } - /// Start statement pub fn start_stmt(&mut self) { assert!( self.current_stmt_start.is_none(), "cannot start statement within statement at {:?}", self.tokens.get(self.current_stmt_start.unwrap()) ); - self.current_stmt_start = Some(self.next_pos); + self.current_stmt_start = Some(self.current_pos); } - /// Close statement pub fn close_stmt(&mut self) { - assert!(self.next_pos > 0); - - // go back the positions until we find the first relevant token - let mut end_token_pos = self.next_pos - 1; - loop { - let token = self.tokens.get(end_token_pos); + assert!( + self.current_stmt_start.is_some(), + "Must start statement before closing it." + ); - if end_token_pos == 0 || token.is_none() { - break; - } + let start_token_pos = self.current_stmt_start.unwrap(); - if !is_irrelevant_token(token.unwrap()) { - break; - } + assert!( + self.current_pos > start_token_pos, + "Must close the statement on a token that's later than the start token." + ); - end_token_pos -= 1; - } + let (end_token_pos, _) = self.find_last_relevant().unwrap(); - self.ranges.push(( - self.current_stmt_start.expect("Expected active statement"), - end_token_pos, - )); + self.stmt_ranges.push((start_token_pos, end_token_pos)); self.current_stmt_start = None; } - fn advance(&mut self) -> &Token { - let mut first_relevant_token = None; - loop { - let token = self.tokens.get(self.next_pos).unwrap_or(&self.eof_token); - - // we need to continue with next_pos until the next relevant token after we already - // found the first one - if !is_irrelevant_token(token) { - if let Some(t) = first_relevant_token { - return t; - } - first_relevant_token = Some(token); - } - - self.next_pos += 1; - } - } - - fn peek(&self) -> &Token { - match self.tokens.get(self.next_pos) { + fn current(&self) -> &Token { + match self.tokens.get(self.current_pos) { Some(token) => token, None => &self.eof_token, } } - /// Look ahead to the next relevant token - /// Returns `None` if we are already at the last relevant token - fn look_ahead(&self) -> Option<&Token> { - // we need to look ahead to the next relevant token - let mut look_ahead_pos = self.next_pos + 1; - loop { - let token = self.tokens.get(look_ahead_pos)?; - - if !is_irrelevant_token(token) { - return Some(token); - } + fn advance(&mut self) -> &Token { + // can't reuse any `find_next_relevant` logic because of Mr. 
Borrow Checker + let (pos, token) = self + .tokens + .iter() + .enumerate() + .skip(self.current_pos + 1) + .find(|(_, t)| is_relevant(t)) + .unwrap_or((self.tokens.len(), &self.eof_token)); + + self.current_pos = pos; + token + } - look_ahead_pos += 1; - } + fn look_ahead(&self) -> Option<&Token> { + self.tokens + .iter() + .skip(self.current_pos + 1) + .find(|t| is_relevant(t)) } /// Returns `None` if there are no previous relevant tokens fn look_back(&self) -> Option<&Token> { - // we need to look back to the last relevant token - let mut look_back_pos = self.next_pos - 1; - loop { - let token = self.tokens.get(look_back_pos); - - if look_back_pos == 0 || token.is_none() { - return None; - } - - if !is_irrelevant_token(token.unwrap()) { - return token; - } - - look_back_pos -= 1; - } + self.find_last_relevant().map(|it| it.1) } - /// checks if the current token is of `kind` and advances if true - /// returns true if the current token is of `kind` - pub fn eat(&mut self, kind: SyntaxKind) -> bool { - if self.peek().kind == kind { + /// Will advance if the `kind` matches the current token. + /// Otherwise, will add a diagnostic to the internal `errors`. + pub fn expect(&mut self, kind: SyntaxKind) { + if self.current().kind == kind { self.advance(); - true } else { - false + self.errors.push(SplitDiagnostic::new( + format!("Expected {:#?}", kind), + self.current().span, + )); } } - pub fn expect(&mut self, kind: SyntaxKind) { - if self.eat(kind) { - return; - } - - self.errors.push(SplitDiagnostic::new( - format!("Expected {:#?}", kind), - self.peek().span, - )); + fn find_last_relevant(&self) -> Option<(usize, &Token)> { + self.tokens + .iter() + .enumerate() + .take(self.current_pos) + .rfind(|(_, t)| is_relevant(t)) } } @@ -219,3 +176,57 @@ fn is_irrelevant_token(t: &Token) -> bool { WHITESPACE_TOKENS.contains(&t.kind) && (t.kind != SyntaxKind::Newline || t.text.chars().count() == 1) } + +fn is_relevant(t: &Token) -> bool { + !is_irrelevant_token(t) +} + +#[cfg(test)] +mod tests { + use pgt_lexer::SyntaxKind; + + use crate::parser::Parser; + + #[test] + fn advance_works_as_expected() { + let sql = r#" + create table users ( + id serial primary key, + name text, + email text + ); + "#; + let tokens = pgt_lexer::lex(sql).unwrap(); + let total_num_tokens = tokens.len(); + + let mut parser = Parser::new(tokens); + + let expected = vec![ + (SyntaxKind::Create, 2), + (SyntaxKind::Table, 4), + (SyntaxKind::Ident, 6), + (SyntaxKind::Ascii40, 8), + (SyntaxKind::Ident, 11), + (SyntaxKind::Ident, 13), + (SyntaxKind::Primary, 15), + (SyntaxKind::Key, 17), + (SyntaxKind::Ascii44, 18), + (SyntaxKind::NameP, 21), + (SyntaxKind::TextP, 23), + (SyntaxKind::Ascii44, 24), + (SyntaxKind::Ident, 27), + (SyntaxKind::TextP, 29), + (SyntaxKind::Ascii41, 32), + (SyntaxKind::Ascii59, 33), + ]; + + for (kind, pos) in expected { + assert_eq!(parser.current().kind, kind); + assert_eq!(parser.current_pos, pos); + parser.advance(); + } + + assert_eq!(parser.current().kind, SyntaxKind::Eof); + assert_eq!(parser.current_pos, total_num_tokens); + } +} diff --git a/crates/pgt_statement_splitter/src/parser/common.rs b/crates/pgt_statement_splitter/src/parser/common.rs index d145018d..1a355f08 100644 --- a/crates/pgt_statement_splitter/src/parser/common.rs +++ b/crates/pgt_statement_splitter/src/parser/common.rs @@ -9,7 +9,7 @@ use super::{ pub fn source(p: &mut Parser) { loop { - match p.peek() { + match p.current() { Token { kind: SyntaxKind::Eof, .. 
@@ -33,7 +33,7 @@ pub fn source(p: &mut Parser) { pub(crate) fn statement(p: &mut Parser) { p.start_stmt(); - match p.peek().kind { + match p.current().kind { SyntaxKind::With => { cte(p); } @@ -68,7 +68,7 @@ pub(crate) fn parenthesis(p: &mut Parser) { let mut depth = 1; loop { - match p.peek().kind { + match p.current().kind { SyntaxKind::Ascii40 => { p.advance(); depth += 1; @@ -91,7 +91,7 @@ pub(crate) fn case(p: &mut Parser) { p.expect(SyntaxKind::Case); loop { - match p.peek().kind { + match p.current().kind { SyntaxKind::EndP => { p.advance(); break; @@ -105,7 +105,7 @@ pub(crate) fn case(p: &mut Parser) { pub(crate) fn unknown(p: &mut Parser, exclude: &[SyntaxKind]) { loop { - match p.peek() { + match p.current() { Token { kind: SyntaxKind::Ascii59, .. diff --git a/crates/pgt_statement_splitter/src/parser/dml.rs b/crates/pgt_statement_splitter/src/parser/dml.rs index a45f6c40..015c50b6 100644 --- a/crates/pgt_statement_splitter/src/parser/dml.rs +++ b/crates/pgt_statement_splitter/src/parser/dml.rs @@ -13,7 +13,9 @@ pub(crate) fn cte(p: &mut Parser) { p.expect(SyntaxKind::As); parenthesis(p); - if !p.eat(SyntaxKind::Ascii44) { + if p.current().kind == SyntaxKind::Ascii44 { + p.advance(); + } else { break; } } From a358cee8527630eeb8c1a49a860234e710e052ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Sat, 12 Apr 2025 22:32:58 +0200 Subject: [PATCH 013/114] feat: annotations (#331) --- Cargo.lock | 1 + crates/pgt_workspace/Cargo.toml | 1 + crates/pgt_workspace/src/workspace/server.rs | 1 + .../src/workspace/server/annotation.rs | 87 +++++++++++++++++++ .../src/workspace/server/change.rs | 6 +- .../src/workspace/server/parsed_document.rs | 6 ++ .../workspace/server/statement_identifier.rs | 13 ++- 7 files changed, 110 insertions(+), 5 deletions(-) create mode 100644 crates/pgt_workspace/src/workspace/server/annotation.rs diff --git a/Cargo.lock b/Cargo.lock index 79ec52f0..4a1f6ea5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2765,6 +2765,7 @@ dependencies = [ "pgt_console", "pgt_diagnostics", "pgt_fs", + "pgt_lexer", "pgt_query_ext", "pgt_schema_cache", "pgt_statement_splitter", diff --git a/crates/pgt_workspace/Cargo.toml b/crates/pgt_workspace/Cargo.toml index 7df42b19..5f598b2d 100644 --- a/crates/pgt_workspace/Cargo.toml +++ b/crates/pgt_workspace/Cargo.toml @@ -25,6 +25,7 @@ pgt_configuration = { workspace = true } pgt_console = { workspace = true } pgt_diagnostics = { workspace = true } pgt_fs = { workspace = true, features = ["serde"] } +pgt_lexer = { workspace = true } pgt_query_ext = { workspace = true } pgt_schema_cache = { workspace = true } pgt_statement_splitter = { workspace = true } diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs index 27f5e8be..5e33bc27 100644 --- a/crates/pgt_workspace/src/workspace/server.rs +++ b/crates/pgt_workspace/src/workspace/server.rs @@ -43,6 +43,7 @@ use super::{ pub use statement_identifier::StatementId; mod analyser; +mod annotation; mod async_helper; mod change; mod db_connection; diff --git a/crates/pgt_workspace/src/workspace/server/annotation.rs b/crates/pgt_workspace/src/workspace/server/annotation.rs new file mode 100644 index 00000000..321dd3ac --- /dev/null +++ b/crates/pgt_workspace/src/workspace/server/annotation.rs @@ -0,0 +1,87 @@ +use std::sync::Arc; + +use dashmap::DashMap; +use pgt_lexer::{SyntaxKind, WHITESPACE_TOKENS}; + +use super::statement_identifier::StatementId; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct 
StatementAnnotations {
+    ends_with_semicolon: bool,
+}
+
+pub struct AnnotationStore {
+    db: DashMap<StatementId, Option<Arc<StatementAnnotations>>>,
+}
+
+impl AnnotationStore {
+    pub fn new() -> AnnotationStore {
+        AnnotationStore { db: DashMap::new() }
+    }
+
+    #[allow(unused)]
+    pub fn get_annotations(
+        &self,
+        statement: &StatementId,
+        content: &str,
+    ) -> Option<Arc<StatementAnnotations>> {
+        if let Some(existing) = self.db.get(statement).map(|x| x.clone()) {
+            return existing;
+        }
+
+        // we swallow the error here because the lexing within the document would have already
+        // thrown and we won't even get here if that happened.
+        let annotations = pgt_lexer::lex(content).ok().map(|tokens| {
+            let ends_with_semicolon = tokens
+                .iter()
+                .rev()
+                .find(|token| !WHITESPACE_TOKENS.contains(&token.kind))
+                .is_some_and(|token| token.kind == SyntaxKind::Ascii59);
+
+            Arc::new(StatementAnnotations {
+                ends_with_semicolon,
+            })
+        });
+
+        self.db.insert(statement.clone(), None);
+        annotations
+    }
+
+    pub fn clear_statement(&self, id: &StatementId) {
+        self.db.remove(id);
+
+        if let Some(child_id) = id.get_child_id() {
+            self.db.remove(&child_id);
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::workspace::StatementId;
+
+    use super::AnnotationStore;
+
+    #[test]
+    fn annotates_correctly() {
+        let store = AnnotationStore::new();
+
+        let test_cases = [
+            ("SELECT * FROM foo", false),
+            ("SELECT * FROM foo;", true),
+            ("SELECT * FROM foo ;", true),
+            ("SELECT * FROM foo ; ", true),
+            ("SELECT * FROM foo ;\n", true),
+            ("SELECT * FROM foo\n", false),
+        ];
+
+        for (idx, (content, expected)) in test_cases.iter().enumerate() {
+            let statement_id = StatementId::Root(idx.into());
+
+            let annotations = store.get_annotations(&statement_id, content);
+
+            assert!(annotations.is_some());
+            assert_eq!(annotations.unwrap().ends_with_semicolon, *expected);
+        }
+    }
+}
diff --git a/crates/pgt_workspace/src/workspace/server/change.rs b/crates/pgt_workspace/src/workspace/server/change.rs
index afe0eb64..69c68189 100644
--- a/crates/pgt_workspace/src/workspace/server/change.rs
+++ b/crates/pgt_workspace/src/workspace/server/change.rs
@@ -409,7 +409,7 @@ mod tests {
     use pgt_diagnostics::Diagnostic;
     use pgt_text_size::TextRange;
 
-    use crate::workspace::{ChangeFileParams, ChangeParams, server::statement_identifier::root_id};
+    use crate::workspace::{ChangeFileParams, ChangeParams};
 
     use pgt_fs::PgTPath;
 
@@ -886,14 +886,14 @@ mod tests {
         assert_eq!(
             changed[2],
             StatementChange::Added(AddedStatement {
-                stmt: StatementId::Root(root_id(2)),
+                stmt: StatementId::Root(2.into()),
                 text: "select id,test from users".to_string()
             })
        );
         assert_eq!(
             changed[3],
             StatementChange::Added(AddedStatement {
-                stmt: StatementId::Root(root_id(3)),
+                stmt: StatementId::Root(3.into()),
                 text: "select 1;".to_string()
             })
         );
diff --git a/crates/pgt_workspace/src/workspace/server/parsed_document.rs b/crates/pgt_workspace/src/workspace/server/parsed_document.rs
index a110fb1f..2b64d24a 100644
--- a/crates/pgt_workspace/src/workspace/server/parsed_document.rs
+++ b/crates/pgt_workspace/src/workspace/server/parsed_document.rs
@@ -8,6 +8,7 @@ use pgt_text_size::{TextRange, TextSize};
 use crate::workspace::ChangeFileParams;
 
 use super::{
+    annotation::AnnotationStore,
     change::StatementChange,
     document::{Document, StatementIterator},
     pg_query::PgQueryStore,
@@ -24,6 +25,7 @@ pub struct ParsedDocument {
     ast_db: PgQueryStore,
     cst_db: TreeSitterStore,
     sql_fn_db: SQLFunctionBodyStore,
+    annotation_db: AnnotationStore,
 }
 
 impl ParsedDocument {
@@ -33,6 +35,7 @@ impl ParsedDocument {
         let cst_db = TreeSitterStore::new();
         let ast_db = PgQueryStore::new();
         let sql_fn_db = SQLFunctionBodyStore::new();
+        let annotation_db = AnnotationStore::new();
 
         doc.iter().for_each(|(stmt, _, content)| {
             cst_db.add_statement(&stmt, content);
@@ -44,6 +47,7 @@ impl ParsedDocument {
             ast_db,
             cst_db,
             sql_fn_db,
+            annotation_db,
         }
     }
 
@@ -69,6 +73,7 @@ impl ParsedDocument {
                     self.cst_db.remove_statement(s);
                     self.ast_db.clear_statement(s);
                     self.sql_fn_db.clear_statement(s);
+                    self.annotation_db.clear_statement(s);
                 }
                 StatementChange::Modified(s) => {
                     tracing::debug!(
@@ -84,6 +89,7 @@ impl ParsedDocument {
                     self.cst_db.modify_statement(s);
                     self.ast_db.clear_statement(&s.old_stmt);
                     self.sql_fn_db.clear_statement(&s.old_stmt);
+                    self.annotation_db.clear_statement(&s.old_stmt);
                 }
             }
         }
diff --git a/crates/pgt_workspace/src/workspace/server/statement_identifier.rs b/crates/pgt_workspace/src/workspace/server/statement_identifier.rs
index 0739fb2f..8c02814d 100644
--- a/crates/pgt_workspace/src/workspace/server/statement_identifier.rs
+++ b/crates/pgt_workspace/src/workspace/server/statement_identifier.rs
@@ -7,8 +7,17 @@ pub struct RootId {
 }
 
 #[cfg(test)]
-pub fn root_id(inner: usize) -> RootId {
-    RootId { inner }
+impl From<RootId> for usize {
+    fn from(val: RootId) -> Self {
+        val.inner
+    }
+}
+
+#[cfg(test)]
+impl From<usize> for RootId {
+    fn from(inner: usize) -> Self {
+        RootId { inner }
+    }
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
From d439c36af0f5c11212f3512dd8bfd035951b31c8 Mon Sep 17 00:00:00 2001
From: Julian Domke <68325451+juleswritescode@users.noreply.github.com>
Date: Sun, 13 Apr 2025 07:19:41 +0200
Subject: [PATCH 014/114] feat: request autocompletion without typing a letter
 (#310)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* so far…

* hmmm

* awesome!

* refactor, terminate by semicolons * got the tests… * hell yeah * give it a spin * yeah * leave the logs to the beavers * add benchmarks * cant fail --- Cargo.lock | 169 ++++++++++- Cargo.toml | 7 - crates/pgt_completions/Cargo.toml | 6 + .../pgt_completions/benches/sanitization.rs | 249 +++++++++++++++ crates/pgt_completions/src/complete.rs | 9 +- crates/pgt_completions/src/context.rs | 121 +++++--- crates/pgt_completions/src/lib.rs | 2 + .../pgt_completions/src/providers/columns.rs | 82 ++++- .../pgt_completions/src/providers/tables.rs | 6 +- crates/pgt_completions/src/relevance.rs | 29 +- crates/pgt_completions/src/sanitization.rs | 283 ++++++++++++++++++ crates/pgt_completions/src/test_helper.rs | 41 ++- crates/pgt_text_size/src/range.rs | 18 ++ .../pgt_workspace/src/features/completions.rs | 170 ++++++++++- crates/pgt_workspace/src/workspace.rs | 1 + crates/pgt_workspace/src/workspace/server.rs | 37 ++- .../src/workspace/server/parsed_document.rs | 44 ++- 17 files changed, 1169 insertions(+), 105 deletions(-) create mode 100644 crates/pgt_completions/benches/sanitization.rs create mode 100644 crates/pgt_completions/src/sanitization.rs diff --git a/Cargo.lock b/Cargo.lock index 4a1f6ea5..72ba810f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -45,6 +45,12 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + [[package]] name = "anstream" version = "0.6.18" @@ -733,6 +739,12 @@ version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + [[package]] name = "cc" version = "1.2.3" @@ -766,6 +778,33 @@ dependencies = [ "num-traits", ] +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + [[package]] name = "clang-sys" version = "1.8.1" @@ -898,6 +937,42 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "is-terminal", + "itertools 0.10.5", + "num-traits", + "once_cell", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools 0.10.5", +] + [[package]] name = "crossbeam" version = "0.8.4" @@ -954,6 +1029,12 @@ version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +[[package]] +name = "crunchy" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" + [[package]] name = "crypto-common" version = "0.1.6" @@ -1513,6 +1594,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "half" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" +dependencies = [ + "cfg-if", + "crunchy", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -1572,6 +1663,12 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" +[[package]] +name = "hermit-abi" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e" + [[package]] name = "hex" version = "0.4.3" @@ -1821,6 +1918,17 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "is-terminal" +version = "0.4.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" +dependencies = [ + "hermit-abi 0.5.0", + "libc", + "windows-sys 0.59.0", +] + [[package]] name = "is_ci" version = "1.2.0" @@ -1842,6 +1950,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.14" @@ -2229,6 +2346,12 @@ version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +[[package]] +name = "oorandom" +version = "11.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" + [[package]] name = "option-ext" version = "0.2.0" @@ -2361,7 +2484,7 @@ dependencies = [ "cc", "fs_extra", "glob", - "itertools", + "itertools 0.10.5", "prost", "prost-build", "serde", @@ -2443,6 +2566,7 @@ name = "pgt_completions" version = "0.0.0" dependencies = [ "async-std", + "criterion", "pgt_schema_cache", "pgt_test_utils", "pgt_text_size", @@ -2452,6 +2576,7 @@ dependencies = [ "serde_json", "sqlx", "tokio", + "tracing", "tree-sitter", "tree_sitter_sql", ] @@ -2854,6 +2979,34 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" +[[package]] +name = "plotters" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" 
+version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" + +[[package]] +name = "plotters-svg" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +dependencies = [ + "plotters-backend", +] + [[package]] name = "polling" version = "2.8.0" @@ -2999,7 +3152,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf" dependencies = [ "heck", - "itertools", + "itertools 0.14.0", "log", "multimap", "once_cell", @@ -3019,7 +3172,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", - "itertools", + "itertools 0.14.0", "proc-macro2", "quote", "syn 2.0.90", @@ -4069,6 +4222,16 @@ dependencies = [ "zerovec", ] +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "tinyvec" version = "1.8.0" diff --git a/Cargo.toml b/Cargo.toml index e4472250..ef29bcaa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,7 +46,6 @@ syn = "1.0.109" termcolor = "1.4.1" test-log = "0.2.17" tokio = { version = "1.40.0", features = ["full"] } -tower-lsp = "0.20.0" tracing = { version = "0.1.40", default-features = false, features = ["std"] } tracing-bunyan-formatter = { version = "0.3.10 " } tracing-subscriber = "0.3.18" @@ -57,7 +56,6 @@ unicode-width = "0.1.12" # postgres specific crates pgt_analyse = { path = "./crates/pgt_analyse", version = "0.0.0" } pgt_analyser = { path = "./crates/pgt_analyser", version = "0.0.0" } -pgt_base_db = { path = "./crates/pgt_base_db", version = "0.0.0" } pgt_cli = { path = "./crates/pgt_cli", version = "0.0.0" } pgt_completions = { path = "./crates/pgt_completions", version = "0.0.0" } pgt_configuration = { path = "./crates/pgt_configuration", version = "0.0.0" } @@ -69,9 +67,7 @@ pgt_flags = { path = "./crates/pgt_flags", version = "0.0.0" } pgt_fs = { path = "./crates/pgt_fs", version = "0.0.0" } pgt_lexer = { path = "./crates/pgt_lexer", version = "0.0.0" } pgt_lexer_codegen = { path = "./crates/pgt_lexer_codegen", version = "0.0.0" } -pgt_lint = { path = "./crates/pgt_lint", version = "0.0.0" } pgt_lsp = { path = "./crates/pgt_lsp", version = "0.0.0" } -pgt_lsp_converters = { path = "./crates/pgt_lsp_converters", version = "0.0.0" } pgt_markup = { path = "./crates/pgt_markup", version = "0.0.0" } pgt_query_ext = { path = "./crates/pgt_query_ext", version = "0.0.0" } pgt_query_ext_codegen = { path = "./crates/pgt_query_ext_codegen", version = "0.0.0" } @@ -81,14 +77,11 @@ pgt_statement_splitter = { path = "./crates/pgt_statement_splitter", version pgt_text_edit = { path = "./crates/pgt_text_edit", version = "0.0.0" } pgt_text_size = { path = "./crates/pgt_text_size", version = "0.0.0" } pgt_treesitter_queries = { path = "./crates/pgt_treesitter_queries", version = "0.0.0" } -pgt_type_resolver = { path = "./crates/pgt_type_resolver", version = "0.0.0" } pgt_typecheck = { path = "./crates/pgt_typecheck", version = "0.0.0" } pgt_workspace = { path = "./crates/pgt_workspace", version = "0.0.0" } pgt_test_macros = { path = "./crates/pgt_test_macros" } 
pgt_test_utils = { path = "./crates/pgt_test_utils" } -docs_codegen = { path = "./docs/codegen", version = "0.0.0" } - [profile.dev.package] insta.opt-level = 3 diff --git a/crates/pgt_completions/Cargo.toml b/crates/pgt_completions/Cargo.toml index dba88f41..a69ee75a 100644 --- a/crates/pgt_completions/Cargo.toml +++ b/crates/pgt_completions/Cargo.toml @@ -22,6 +22,7 @@ pgt_treesitter_queries.workspace = true schemars = { workspace = true, optional = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } +tracing = { workspace = true } tree-sitter.workspace = true tree_sitter_sql.workspace = true @@ -30,6 +31,7 @@ sqlx.workspace = true tokio = { version = "1.41.1", features = ["full"] } [dev-dependencies] +criterion = "0.5.1" pgt_test_utils.workspace = true [lib] @@ -37,3 +39,7 @@ doctest = false [features] schema = ["dep:schemars"] + +[[bench]] +harness = false +name = "sanitization" diff --git a/crates/pgt_completions/benches/sanitization.rs b/crates/pgt_completions/benches/sanitization.rs new file mode 100644 index 00000000..c21538de --- /dev/null +++ b/crates/pgt_completions/benches/sanitization.rs @@ -0,0 +1,249 @@ +use criterion::{Criterion, black_box, criterion_group, criterion_main}; +use pgt_completions::{CompletionParams, benchmark_sanitization}; +use pgt_schema_cache::SchemaCache; +use pgt_text_size::TextSize; + +static CURSOR_POS: &str = "€"; + +fn sql_and_pos(sql: &str) -> (String, usize) { + let pos = sql.find(CURSOR_POS).unwrap(); + (sql.replace(CURSOR_POS, ""), pos) +} + +fn get_tree(sql: &str) -> tree_sitter::Tree { + let mut parser = tree_sitter::Parser::new(); + parser.set_language(tree_sitter_sql::language()).unwrap(); + parser.parse(sql.to_string(), None).unwrap() +} + +fn to_params<'a>( + text: String, + tree: &'a tree_sitter::Tree, + pos: usize, + cache: &'a SchemaCache, +) -> CompletionParams<'a> { + let pos: u32 = pos.try_into().unwrap(); + CompletionParams { + position: TextSize::new(pos), + schema: &cache, + text, + tree: Some(tree), + } +} + +pub fn criterion_benchmark(c: &mut Criterion) { + c.bench_function("small sql, adjusted", |b| { + let content = format!("select {} from users;", CURSOR_POS); + + let cache = SchemaCache::default(); + let (sql, pos) = sql_and_pos(content.as_str()); + let tree = get_tree(sql.as_str()); + + b.iter(|| benchmark_sanitization(black_box(to_params(sql.clone(), &tree, pos, &cache)))); + }); + + c.bench_function("mid sql, adjusted", |b| { + let content = format!( + r#"select + n.oid :: int8 as "id!", + n.nspname as name, + u.rolname as "owner!" 
+from + pg_namespace n, + {} +where + n.nspowner = u.oid + and ( + pg_has_role(n.nspowner, 'USAGE') + or has_schema_privilege(n.oid, 'CREATE, USAGE') + ) + and not pg_catalog.starts_with(n.nspname, 'pg_temp_') + and not pg_catalog.starts_with(n.nspname, 'pg_toast_temp_');"#, + CURSOR_POS + ); + + let cache = SchemaCache::default(); + let (sql, pos) = sql_and_pos(content.as_str()); + let tree = get_tree(sql.as_str()); + + b.iter(|| benchmark_sanitization(black_box(to_params(sql.clone(), &tree, pos, &cache)))); + }); + + c.bench_function("large sql, adjusted", |b| { + let content = format!( + r#"with + available_tables as ( + select + c.relname as table_name, + c.oid as table_oid, + c.relkind as class_kind, + n.nspname as schema_name + from + pg_catalog.pg_class c + join pg_catalog.pg_namespace n on n.oid = c.relnamespace + where + -- r: normal tables + -- v: views + -- m: materialized views + -- f: foreign tables + -- p: partitioned tables + c.relkind in ('r', 'v', 'm', 'f', 'p') + ), + available_indexes as ( + select + unnest (ix.indkey) as attnum, + ix.indisprimary as is_primary, + ix.indisunique as is_unique, + ix.indrelid as table_oid + from + {} + where + c.relkind = 'i' + ) +select + atts.attname as name, + ts.table_name, + ts.table_oid :: int8 as "table_oid!", + ts.class_kind :: char as "class_kind!", + ts.schema_name, + atts.atttypid :: int8 as "type_id!", + not atts.attnotnull as "is_nullable!", + nullif( + information_schema._pg_char_max_length (atts.atttypid, atts.atttypmod), + -1 + ) as varchar_length, + pg_get_expr (def.adbin, def.adrelid) as default_expr, + coalesce(ix.is_primary, false) as "is_primary_key!", + coalesce(ix.is_unique, false) as "is_unique!", + pg_catalog.col_description (ts.table_oid, atts.attnum) as comment +from + pg_catalog.pg_attribute atts + join available_tables ts on atts.attrelid = ts.table_oid + left join available_indexes ix on atts.attrelid = ix.table_oid + and atts.attnum = ix.attnum + left join pg_catalog.pg_attrdef def on atts.attrelid = def.adrelid + and atts.attnum = def.adnum +where + -- system columns, such as `cmax` or `tableoid`, have negative `attnum`s + atts.attnum >= 0; +"#, + CURSOR_POS + ); + + let cache = SchemaCache::default(); + let (sql, pos) = sql_and_pos(content.as_str()); + let tree = get_tree(sql.as_str()); + + b.iter(|| benchmark_sanitization(black_box(to_params(sql.clone(), &tree, pos, &cache)))); + }); + + c.bench_function("small sql, unadjusted", |b| { + let content = format!("select e{} from users;", CURSOR_POS); + + let cache = SchemaCache::default(); + let (sql, pos) = sql_and_pos(content.as_str()); + let tree = get_tree(sql.as_str()); + + b.iter(|| benchmark_sanitization(black_box(to_params(sql.clone(), &tree, pos, &cache)))); + }); + + c.bench_function("mid sql, unadjusted", |b| { + let content = format!( + r#"select + n.oid :: int8 as "id!", + n.nspname as name, + u.rolname as "owner!" 
+from + pg_namespace n, + pg_r{} +where + n.nspowner = u.oid + and ( + pg_has_role(n.nspowner, 'USAGE') + or has_schema_privilege(n.oid, 'CREATE, USAGE') + ) + and not pg_catalog.starts_with(n.nspname, 'pg_temp_') + and not pg_catalog.starts_with(n.nspname, 'pg_toast_temp_');"#, + CURSOR_POS + ); + + let cache = SchemaCache::default(); + let (sql, pos) = sql_and_pos(content.as_str()); + let tree = get_tree(sql.as_str()); + + b.iter(|| benchmark_sanitization(black_box(to_params(sql.clone(), &tree, pos, &cache)))); + }); + + c.bench_function("large sql, unadjusted", |b| { + let content = format!( + r#"with + available_tables as ( + select + c.relname as table_name, + c.oid as table_oid, + c.relkind as class_kind, + n.nspname as schema_name + from + pg_catalog.pg_class c + join pg_catalog.pg_namespace n on n.oid = c.relnamespace + where + -- r: normal tables + -- v: views + -- m: materialized views + -- f: foreign tables + -- p: partitioned tables + c.relkind in ('r', 'v', 'm', 'f', 'p') + ), + available_indexes as ( + select + unnest (ix.indkey) as attnum, + ix.indisprimary as is_primary, + ix.indisunique as is_unique, + ix.indrelid as table_oid + from + pg_catalog.pg_class c + join pg_catalog.pg_index ix on c.oid = ix.indexrelid + where + c.relkind = 'i' + ) +select + atts.attname as name, + ts.table_name, + ts.table_oid :: int8 as "table_oid!", + ts.class_kind :: char as "class_kind!", + ts.schema_name, + atts.atttypid :: int8 as "type_id!", + not atts.attnotnull as "is_nullable!", + nullif( + information_schema._pg_char_max_length (atts.atttypid, atts.atttypmod), + -1 + ) as varchar_length, + pg_get_expr (def.adbin, def.adrelid) as default_expr, + coalesce(ix.is_primary, false) as "is_primary_key!", + coalesce(ix.is_unique, false) as "is_unique!", + pg_catalog.col_description (ts.table_oid, atts.attnum) as comment +from + pg_catalog.pg_attribute atts + join available_tables ts on atts.attrelid = ts.table_oid + left join available_indexes ix on atts.attrelid = ix.table_oid + and atts.attnum = ix.attnum + left join pg_catalog.pg_attrdef def on atts.attrelid = def.adrelid + and atts.attnum = def.adnum +where + -- system columns, such as `cmax` or `tableoid`, have negative `attnum`s + atts.attnum >= 0 +order by + sch{} "#, + CURSOR_POS + ); + + let cache = SchemaCache::default(); + let (sql, pos) = sql_and_pos(content.as_str()); + let tree = get_tree(sql.as_str()); + + b.iter(|| benchmark_sanitization(black_box(to_params(sql.clone(), &tree, pos, &cache)))); + }); +} + +criterion_group!(benches, criterion_benchmark); +criterion_main!(benches); diff --git a/crates/pgt_completions/src/complete.rs b/crates/pgt_completions/src/complete.rs index ed51c653..ec1232a5 100644 --- a/crates/pgt_completions/src/complete.rs +++ b/crates/pgt_completions/src/complete.rs @@ -5,6 +5,7 @@ use crate::{ context::CompletionContext, item::CompletionItem, providers::{complete_columns, complete_functions, complete_tables}, + sanitization::SanitizedCompletionParams, }; pub const LIMIT: usize = 50; @@ -17,8 +18,14 @@ pub struct CompletionParams<'a> { pub tree: &'a tree_sitter::Tree, } +#[tracing::instrument(level = "debug", skip_all, fields( + text = params.text, + position = params.position.to_string() +))] pub fn complete(params: CompletionParams) -> Vec { - let ctx = CompletionContext::new(¶ms); + let sanitized_params = SanitizedCompletionParams::from(params); + + let ctx = CompletionContext::new(&sanitized_params); let mut builder = CompletionBuilder::new(); diff --git a/crates/pgt_completions/src/context.rs 
b/crates/pgt_completions/src/context.rs
index 775b8870..a4578df8 100644
--- a/crates/pgt_completions/src/context.rs
+++ b/crates/pgt_completions/src/context.rs
@@ -6,7 +6,7 @@ use pgt_treesitter_queries::{
     queries::{self, QueryResult},
 };
 
-use crate::CompletionParams;
+use crate::sanitization::SanitizedCompletionParams;
 
 #[derive(Debug, PartialEq, Eq)]
 pub enum ClauseType {
@@ -17,6 +17,12 @@ pub enum ClauseType {
     Delete,
 }
 
+#[derive(PartialEq, Eq, Debug)]
+pub(crate) enum NodeText<'a> {
+    Replaced,
+    Original(&'a str),
+}
+
 impl TryFrom<&str> for ClauseType {
     type Error = String;
 
@@ -49,7 +55,8 @@ impl TryFrom<String> for ClauseType {
 }
 
 pub(crate) struct CompletionContext<'a> {
-    pub ts_node: Option<tree_sitter::Node<'a>>,
+    pub node_under_cursor: Option<tree_sitter::Node<'a>>,
+
     pub tree: &'a tree_sitter::Tree,
     pub text: &'a str,
     pub schema_cache: &'a SchemaCache,
@@ -64,13 +71,13 @@ pub(crate) struct CompletionContext<'a> {
 }
 
 impl<'a> CompletionContext<'a> {
-    pub fn new(params: &'a CompletionParams) -> Self {
+    pub fn new(params: &'a SanitizedCompletionParams) -> Self {
         let mut ctx = Self {
-            tree: params.tree,
+            tree: params.tree.as_ref(),
            text: &params.text,
             schema_cache: params.schema,
             position: usize::from(params.position),
-            ts_node: None,
+            node_under_cursor: None,
             schema_name: None,
             wrapping_clause_type: None,
             wrapping_statement_range: None,
@@ -85,12 +92,10 @@ impl<'a> CompletionContext<'a> {
     }
 
     fn gather_info_from_ts_queries(&mut self) {
-        let tree = self.tree;
-
         let stmt_range = self.wrapping_statement_range.as_ref();
         let sql = self.text;
 
-        let mut executor = TreeSitterQueriesExecutor::new(tree.root_node(), sql);
+        let mut executor = TreeSitterQueriesExecutor::new(self.tree.root_node(), sql);
 
         executor.add_query_results::<queries::RelationMatch>();
 
@@ -117,9 +122,15 @@ impl<'a> CompletionContext<'a> {
         }
     }
 
-    pub fn get_ts_node_content(&self, ts_node: tree_sitter::Node<'a>) -> Option<&'a str> {
+    pub fn get_ts_node_content(&self, ts_node: tree_sitter::Node<'a>) -> Option<NodeText<'a>> {
         let source = self.text;
-        ts_node.utf8_text(source.as_bytes()).ok()
+        ts_node.utf8_text(source.as_bytes()).ok().map(|txt| {
+            if SanitizedCompletionParams::is_sanitized_token(txt) {
+                NodeText::Replaced
+            } else {
+                NodeText::Original(txt)
+            }
+        })
     }
 
     fn gather_tree_context(&mut self) {
@@ -148,20 +159,20 @@ impl<'a> CompletionContext<'a> {
     fn gather_context_from_node(
         &mut self,
         mut cursor: tree_sitter::TreeCursor<'a>,
-        previous_node: tree_sitter::Node<'a>,
+        parent_node: tree_sitter::Node<'a>,
     ) {
         let current_node = cursor.node();
 
         // prevent infinite recursion – this can happen if we only have a PROGRAM node
-        if current_node.kind() == previous_node.kind() {
-            self.ts_node = Some(current_node);
+        if current_node.kind() == parent_node.kind() {
+            self.node_under_cursor = Some(current_node);
             return;
         }
 
-        match previous_node.kind() {
+        match parent_node.kind() {
             "statement" | "subquery" => {
                 self.wrapping_clause_type = current_node.kind().try_into().ok();
-                self.wrapping_statement_range = Some(previous_node.range());
+                self.wrapping_statement_range = Some(parent_node.range());
             }
             "invocation" => self.is_invocation = true,
 
@@ -170,11 +181,16 @@ impl<'a> CompletionContext<'a> {
 
         match current_node.kind() {
             "object_reference" => {
-                let txt = self.get_ts_node_content(current_node);
-                if let Some(txt) = txt {
-                    let parts: Vec<&str> = txt.split('.').collect();
-                    if parts.len() == 2 {
-                        self.schema_name = Some(parts[0].to_string());
+                let content = self.get_ts_node_content(current_node);
+                if let Some(node_txt) = content {
+                    match node_txt {
+                        NodeText::Original(txt) => {
+                            let parts: Vec<&str> = txt.split('.').collect();
+                            if parts.len() == 2 {
+                                self.schema_name = Some(parts[0].to_string());
+                            }
+                        }
+                        NodeText::Replaced => {}
                     }
                 }
             }
@@ -193,7 +209,14 @@ impl<'a> CompletionContext<'a> {
 
         // We have arrived at the leaf node
         if current_node.child_count() == 0 {
-            self.ts_node = Some(current_node);
+            if matches!(
+                self.get_ts_node_content(current_node).unwrap(),
+                NodeText::Replaced
+            ) {
+                self.node_under_cursor = None;
+            } else {
+                self.node_under_cursor = Some(current_node);
+            }
             return;
         }
 
@@ -205,7 +228,8 @@ impl<'a> CompletionContext<'a> {
 #[cfg(test)]
 mod tests {
     use crate::{
-        context::{ClauseType, CompletionContext},
+        context::{ClauseType, CompletionContext, NodeText},
+        sanitization::SanitizedCompletionParams,
         test_helper::{CURSOR_POS, get_text_and_position},
     };
 
@@ -252,10 +276,10 @@ mod tests {
 
         let tree = get_tree(text.as_str());
 
-        let params = crate::CompletionParams {
+        let params = SanitizedCompletionParams {
            position: (position as u32).into(),
             text,
-            tree: &tree,
+            tree: std::borrow::Cow::Owned(tree),
             schema: &pgt_schema_cache::SchemaCache::default(),
         };
 
@@ -284,10 +308,10 @@ mod tests {
         let (position, text) = get_text_and_position(query.as_str().into());
 
         let tree = get_tree(text.as_str());
 
-        let params = crate::CompletionParams {
+        let params = SanitizedCompletionParams {
             position: (position as u32).into(),
             text,
-            tree: &tree,
+            tree: std::borrow::Cow::Owned(tree),
             schema: &pgt_schema_cache::SchemaCache::default(),
         };
 
@@ -318,10 +342,10 @@ mod tests {
         let (position, text) = get_text_and_position(query.as_str().into());
 
         let tree = get_tree(text.as_str());
 
-        let params = crate::CompletionParams {
+        let params = SanitizedCompletionParams {
             position: (position as u32).into(),
             text,
-            tree: &tree,
+            tree: std::borrow::Cow::Owned(tree),
             schema: &pgt_schema_cache::SchemaCache::default(),
         };
 
@@ -343,18 +367,21 @@ mod tests {
 
         let tree = get_tree(text.as_str());
 
-        let params = crate::CompletionParams {
+        let params = SanitizedCompletionParams {
             position: (position as u32).into(),
             text,
-            tree: &tree,
+            tree: std::borrow::Cow::Owned(tree),
             schema: &pgt_schema_cache::SchemaCache::default(),
         };
 
         let ctx = CompletionContext::new(&params);
 
-        let node = ctx.ts_node.unwrap();
+        let node = ctx.node_under_cursor.unwrap();
 
-        assert_eq!(ctx.get_ts_node_content(node), Some("select"));
+        assert_eq!(
+            ctx.get_ts_node_content(node),
+            Some(NodeText::Original("select"))
+        );
 
         assert_eq!(
             ctx.wrapping_clause_type,
@@ -371,18 +398,21 @@ mod tests {
 
         let tree = get_tree(text.as_str());
 
-        let params = crate::CompletionParams {
+        let params = SanitizedCompletionParams {
             position: (position as u32).into(),
             text,
-            tree: &tree,
+            tree: std::borrow::Cow::Owned(tree),
             schema: &pgt_schema_cache::SchemaCache::default(),
         };
 
         let ctx = CompletionContext::new(&params);
 
-        let node = ctx.ts_node.unwrap();
+        let node = ctx.node_under_cursor.unwrap();
 
-        assert_eq!(ctx.get_ts_node_content(node), Some("from"));
+        assert_eq!(
+            ctx.get_ts_node_content(node),
+            Some(NodeText::Original("from"))
+        );
         assert_eq!(
             ctx.wrapping_clause_type,
             Some(crate::context::ClauseType::From)
@@ -397,18 +427,18 @@ mod tests {
 
         let tree = get_tree(text.as_str());
 
-        let params = crate::CompletionParams {
+        let params = SanitizedCompletionParams {
             position: (position as u32).into(),
             text,
-            tree: &tree,
+            tree: std::borrow::Cow::Owned(tree),
             schema: &pgt_schema_cache::SchemaCache::default(),
         };
 
         let ctx = CompletionContext::new(&params);
 
-        let node = ctx.ts_node.unwrap();
+        let node = ctx.node_under_cursor.unwrap();
 
-        assert_eq!(ctx.get_ts_node_content(node), Some(""));
+        assert_eq!(ctx.get_ts_node_content(node), Some(NodeText::Original("")));
 
         assert_eq!(ctx.wrapping_clause_type, None);
     }
 
@@ -422,18 +452,21 @@ mod tests {
 
         let tree = get_tree(text.as_str());
 
-        let params = crate::CompletionParams {
+        let params = SanitizedCompletionParams {
             position: (position as u32).into(),
             text,
-            tree: &tree,
+            tree: std::borrow::Cow::Owned(tree),
             schema: &pgt_schema_cache::SchemaCache::default(),
         };
 
        let ctx = CompletionContext::new(&params);
 
-        let node = ctx.ts_node.unwrap();
+        let node = ctx.node_under_cursor.unwrap();
 
-        assert_eq!(ctx.get_ts_node_content(node), Some("fro"));
+        assert_eq!(
+            ctx.get_ts_node_content(node),
+            Some(NodeText::Original("fro"))
+        );
         assert_eq!(ctx.wrapping_clause_type, Some(ClauseType::Select));
     }
 }
diff --git a/crates/pgt_completions/src/lib.rs b/crates/pgt_completions/src/lib.rs
index 62470ff4..f8ca1a55 100644
--- a/crates/pgt_completions/src/lib.rs
+++ b/crates/pgt_completions/src/lib.rs
@@ -4,9 +4,11 @@ mod context;
 mod item;
 mod providers;
 mod relevance;
+mod sanitization;
 
 #[cfg(test)]
 mod test_helper;
 
 pub use complete::*;
 pub use item::*;
+pub use sanitization::*;
diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs
index 3f1c5bb9..2898b63f 100644
--- a/crates/pgt_completions/src/providers/columns.rs
+++ b/crates/pgt_completions/src/providers/columns.rs
@@ -143,12 +143,86 @@ mod tests {
         let params = get_test_params(&tree, &cache, case.get_input_query());
         let mut items = complete(params);
 
-        let _ = items.split_off(3);
+        let _ = items.split_off(6);
 
-        items.sort_by(|a, b| a.label.cmp(&b.label));
+        #[derive(Eq, PartialEq, Debug)]
+        struct LabelAndDesc {
+            label: String,
+            desc: String,
+        }
+
+        let labels: Vec<LabelAndDesc> = items
+            .into_iter()
+            .map(|c| LabelAndDesc {
+                label: c.label,
+                desc: c.description,
+            })
+            .collect();
+
+        let expected = vec![
+            ("name", "Table: public.users"),
+            ("narrator", "Table: public.audio_books"),
+            ("narrator_id", "Table: private.audio_books"),
+            ("name", "Schema: pg_catalog"),
+            ("nameconcatoid", "Schema: pg_catalog"),
+            ("nameeq", "Schema: pg_catalog"),
+        ]
+        .into_iter()
+        .map(|(label, schema)| LabelAndDesc {
+            label: label.into(),
+            desc: schema.into(),
+        })
+        .collect::<Vec<_>>();
+
+        assert_eq!(labels, expected);
+    }
+
+    #[tokio::test]
+    async fn suggests_relevant_columns_without_letters() {
+        let setup = r#"
+            create table users (
+                id serial primary key,
+                name text,
+                address text,
+                email text
+            );
+        "#;
+
+        let test_case = TestCase {
+            message: "suggests user created tables first",
+            query: format!(r#"select {} from users"#, CURSOR_POS),
+            label: "",
+            description: "",
+        };
+
+        let (tree, cache) = get_test_deps(setup, test_case.get_input_query()).await;
+        let params = get_test_params(&tree, &cache, test_case.get_input_query());
+        let results = complete(params);
 
-        let labels: Vec<String> = items.into_iter().map(|c| c.label).collect();
+        let (first_four, _rest) = results.split_at(4);
+
+        let has_column_in_first_four = |col: &'static str| {
+            first_four
+                .iter()
+                .find(|compl_item| compl_item.label.as_str() == col)
+                .is_some()
+        };
 
-        assert_eq!(labels, vec!["name", "narrator", "narrator_id"]);
+        assert!(
+            has_column_in_first_four("id"),
+            "`id` not present in first four completion items."
+        );
+        assert!(
+            has_column_in_first_four("name"),
+            "`name` not present in first four completion items."
+        );
+        assert!(
+            has_column_in_first_four("address"),
+            "`address` not present in first four completion items."
+ ); + assert!( + has_column_in_first_four("email"), + "`email` not present in first four completion items." + ); } } diff --git a/crates/pgt_completions/src/providers/tables.rs b/crates/pgt_completions/src/providers/tables.rs index 6a1e00c9..2074a4f1 100644 --- a/crates/pgt_completions/src/providers/tables.rs +++ b/crates/pgt_completions/src/providers/tables.rs @@ -73,9 +73,9 @@ mod tests { "#; let test_cases = vec![ - (format!("select * from us{}", CURSOR_POS), "users"), - (format!("select * from em{}", CURSOR_POS), "emails"), - (format!("select * from {}", CURSOR_POS), "addresses"), + (format!("select * from u{}", CURSOR_POS), "users"), + (format!("select * from e{}", CURSOR_POS), "emails"), + (format!("select * from a{}", CURSOR_POS), "addresses"), ]; for (query, expected_label) in test_cases { diff --git a/crates/pgt_completions/src/relevance.rs b/crates/pgt_completions/src/relevance.rs index ffe6cb22..9650a94d 100644 --- a/crates/pgt_completions/src/relevance.rs +++ b/crates/pgt_completions/src/relevance.rs @@ -1,4 +1,4 @@ -use crate::context::{ClauseType, CompletionContext}; +use crate::context::{ClauseType, CompletionContext, NodeText}; #[derive(Debug)] pub(crate) enum CompletionRelevanceData<'a> { @@ -33,7 +33,6 @@ impl CompletionRelevance<'_> { self.check_is_user_defined(); self.check_matches_schema(ctx); self.check_matches_query_input(ctx); - self.check_if_catalog(ctx); self.check_is_invocation(ctx); self.check_matching_clause_type(ctx); self.check_relations_in_stmt(ctx); @@ -42,10 +41,16 @@ impl CompletionRelevance<'_> { } fn check_matches_query_input(&mut self, ctx: &CompletionContext) { - let node = ctx.ts_node.unwrap(); + let node = match ctx.node_under_cursor { + Some(node) => node, + None => return, + }; let content = match ctx.get_ts_node_content(node) { - Some(c) => c, + Some(c) => match c { + NodeText::Original(s) => s, + NodeText::Replaced => return, + }, None => return, }; @@ -61,7 +66,7 @@ impl CompletionRelevance<'_> { .try_into() .expect("The length of the input exceeds i32 capacity"); - self.score += len * 5; + self.score += len * 10; }; } @@ -135,14 +140,6 @@ impl CompletionRelevance<'_> { } } - fn check_if_catalog(&mut self, ctx: &CompletionContext) { - if ctx.schema_name.as_ref().is_some_and(|n| n == "pg_catalog") { - return; - } - - self.score -= 5; // unlikely that the user wants schema data - } - fn check_relations_in_stmt(&mut self, ctx: &CompletionContext) { match self.data { CompletionRelevanceData::Table(_) | CompletionRelevanceData::Function(_) => return, @@ -182,5 +179,11 @@ impl CompletionRelevance<'_> { if system_schemas.contains(&schema.as_str()) { self.score -= 10; } + + // "public" is the default postgres schema where users + // create objects. Prefer it by a slight bit. 
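+        // As a rough sketch of the weights in this file: a matching typed
+        // prefix of two characters adds 2 * 10, a system schema such as
+        // `pg_catalog` loses 10, and `public` gains the 2 below.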
+ if schema.as_str() == "public" { + self.score += 2; + } } } diff --git a/crates/pgt_completions/src/sanitization.rs b/crates/pgt_completions/src/sanitization.rs new file mode 100644 index 00000000..5ad8ba0e --- /dev/null +++ b/crates/pgt_completions/src/sanitization.rs @@ -0,0 +1,283 @@ +use std::borrow::Cow; + +use pgt_text_size::TextSize; + +use crate::CompletionParams; + +pub(crate) struct SanitizedCompletionParams<'a> { + pub position: TextSize, + pub text: String, + pub schema: &'a pgt_schema_cache::SchemaCache, + pub tree: Cow<'a, tree_sitter::Tree>, +} + +pub fn benchmark_sanitization(params: CompletionParams) -> String { + let params: SanitizedCompletionParams = params.try_into().unwrap(); + params.text +} + +impl<'larger, 'smaller> From> for SanitizedCompletionParams<'smaller> +where + 'larger: 'smaller, +{ + fn from(params: CompletionParams<'larger>) -> Self { + if cursor_inbetween_nodes(params.tree, params.position) + || cursor_prepared_to_write_token_after_last_node(params.tree, params.position) + || cursor_before_semicolon(params.tree, params.position) + { + SanitizedCompletionParams::with_adjusted_sql(params) + } else { + SanitizedCompletionParams::unadjusted(params) + } + } +} + +static SANITIZED_TOKEN: &str = "REPLACED_TOKEN"; + +impl<'larger, 'smaller> SanitizedCompletionParams<'smaller> +where + 'larger: 'smaller, +{ + fn with_adjusted_sql(params: CompletionParams<'larger>) -> Self { + let cursor_pos: usize = params.position.into(); + let mut sql = String::new(); + + for (idx, c) in params.text.chars().enumerate() { + if idx == cursor_pos { + sql.push_str(SANITIZED_TOKEN); + sql.push(' '); + } + sql.push(c); + } + + let mut parser = tree_sitter::Parser::new(); + parser + .set_language(tree_sitter_sql::language()) + .expect("Error loading sql language"); + let tree = parser.parse(sql.clone(), None).unwrap(); + + Self { + position: params.position, + text: sql, + schema: params.schema, + tree: Cow::Owned(tree), + } + } + fn unadjusted(params: CompletionParams<'larger>) -> Self { + Self { + position: params.position, + text: params.text.clone(), + schema: params.schema, + tree: Cow::Borrowed(params.tree), + } + } + + pub fn is_sanitized_token(txt: &str) -> bool { + txt == SANITIZED_TOKEN + } +} + +/// Checks if the cursor is positioned inbetween two SQL nodes. +/// +/// ```sql +/// select| from users; -- cursor "touches" select node. returns false. +/// select |from users; -- cursor "touches" from node. returns false. +/// select | from users; -- cursor is between select and from nodes. returns true. +/// ``` +fn cursor_inbetween_nodes(tree: &tree_sitter::Tree, position: TextSize) -> bool { + let mut cursor = tree.walk(); + let mut leaf_node = tree.root_node(); + + let byte = position.into(); + + // if the cursor escapes the root node, it can't be between nodes. + if byte < leaf_node.start_byte() || byte >= leaf_node.end_byte() { + return false; + } + + /* + * Get closer and closer to the leaf node, until + * a) there is no more child *for the node* or + * b) there is no more child *under the cursor*. + */ + loop { + let child_idx = cursor.goto_first_child_for_byte(position.into()); + if child_idx.is_none() { + break; + } + leaf_node = cursor.node(); + } + + let cursor_on_leafnode = byte >= leaf_node.start_byte() && leaf_node.end_byte() >= byte; + + /* + * The cursor is inbetween nodes if it is not within the range + * of a leaf node. 
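+     *
+     * For example, with `select  from users;` and the cursor in the gap, the
+     * walk above stops on a leaf that only starts after the cursor, so the
+     * cursor rests on no leaf and counts as inbetween.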
+ */ + !cursor_on_leafnode +} + +/// Checks if the cursor is positioned after the last node, +/// ready to write the next token: +/// +/// ```sql +/// select * from | -- ready to write! +/// select * from| -- user still needs to type a space +/// select * from | -- too far off. +/// ``` +fn cursor_prepared_to_write_token_after_last_node( + tree: &tree_sitter::Tree, + position: TextSize, +) -> bool { + let cursor_pos: usize = position.into(); + cursor_pos == tree.root_node().end_byte() + 1 +} + +fn cursor_before_semicolon(tree: &tree_sitter::Tree, position: TextSize) -> bool { + let mut cursor = tree.walk(); + let mut leaf_node = tree.root_node(); + + let byte: usize = position.into(); + + // if the cursor escapes the root node, it can't be between nodes. + if byte < leaf_node.start_byte() || byte >= leaf_node.end_byte() { + return false; + } + + loop { + let child_idx = cursor.goto_first_child_for_byte(position.into()); + if child_idx.is_none() { + break; + } + leaf_node = cursor.node(); + } + + // The semicolon node is on the same level as the statement: + // + // program [0..26] + // statement [0..19] + // ; [25..26] + // + // However, if we search for position 21, we'll still land on the semi node. + // We must manually verify that the cursor is between the statement and the semi nodes. + + // if the last node is not a semi, the statement is not completed. + if leaf_node.kind() != ";" { + return false; + } + + // not okay to be on the semi. + if byte == leaf_node.start_byte() { + return false; + } + + leaf_node + .prev_named_sibling() + .map(|n| n.end_byte() < byte) + .unwrap_or(false) +} + +#[cfg(test)] +mod tests { + use pgt_text_size::TextSize; + + use crate::sanitization::{ + cursor_before_semicolon, cursor_inbetween_nodes, + cursor_prepared_to_write_token_after_last_node, + }; + + #[test] + fn test_cursor_inbetween_nodes() { + // note: two spaces between select and from. 
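+        // with a single space, every position in the gap would "touch" either
+        // `select` or `from` and could never count as inbetween.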
+        let input = "select  from users;";
+
+        let mut parser = tree_sitter::Parser::new();
+        parser
+            .set_language(tree_sitter_sql::language())
+            .expect("Error loading sql language");
+
+        let mut tree = parser.parse(input.to_string(), None).unwrap();
+
+        // select | from users; <-- just right, one space after select token, one space before from
+        assert!(cursor_inbetween_nodes(&mut tree, TextSize::new(7)));
+
+        // select| from users; <-- still on select token
+        assert!(!cursor_inbetween_nodes(&mut tree, TextSize::new(6)));
+
+        // select |from users; <-- already on from token
+        assert!(!cursor_inbetween_nodes(&mut tree, TextSize::new(8)));
+
+        // select  from users;|
+        assert!(!cursor_inbetween_nodes(&mut tree, TextSize::new(19)));
+    }
+
+    #[test]
+    fn test_cursor_after_nodes() {
+        let input = "select * from";
+
+        let mut parser = tree_sitter::Parser::new();
+        parser
+            .set_language(tree_sitter_sql::language())
+            .expect("Error loading sql language");
+
+        let mut tree = parser.parse(input.to_string(), None).unwrap();
+
+        // select * from| <-- still on previous token
+        assert!(!cursor_prepared_to_write_token_after_last_node(
+            &mut tree,
+            TextSize::new(13)
+        ));
+
+        // select * from  | <-- too far off, two spaces afterward
+        assert!(!cursor_prepared_to_write_token_after_last_node(
+            &mut tree,
+            TextSize::new(15)
+        ));
+
+        // select * |from <-- it's within
+        assert!(!cursor_prepared_to_write_token_after_last_node(
+            &mut tree,
+            TextSize::new(9)
+        ));
+
+        // select * from | <-- just right
+        assert!(cursor_prepared_to_write_token_after_last_node(
+            &mut tree,
+            TextSize::new(14)
+        ));
+    }
+
+    #[test]
+    fn test_cursor_before_semicolon() {
+        // Idx "13" is the exclusive end of `select * from` (first space after from)
+        // Idx "18" is right where the semi is
+        let input = "select * from     ;";
+
+        let mut parser = tree_sitter::Parser::new();
+        parser
+            .set_language(tree_sitter_sql::language())
+            .expect("Error loading sql language");
+
+        let mut tree = parser.parse(input.to_string(), None).unwrap();
+
+        // select * from     ;| <-- it's after the statement
+        assert!(!cursor_before_semicolon(&mut tree, TextSize::new(19)));
+
+        // select * from|     ; <-- still touches the from
+        assert!(!cursor_before_semicolon(&mut tree, TextSize::new(13)));
+
+        // not okay to be ON the semi.
+ // select * from |; + assert!(!cursor_before_semicolon(&mut tree, TextSize::new(18))); + + // anything is fine here + // select * from | ; + // select * from | ; + // select * from | ; + // select * from |; + assert!(cursor_before_semicolon(&mut tree, TextSize::new(14))); + assert!(cursor_before_semicolon(&mut tree, TextSize::new(15))); + assert!(cursor_before_semicolon(&mut tree, TextSize::new(16))); + assert!(cursor_before_semicolon(&mut tree, TextSize::new(17))); + } +} diff --git a/crates/pgt_completions/src/test_helper.rs b/crates/pgt_completions/src/test_helper.rs index 58e9baf7..4339688e 100644 --- a/crates/pgt_completions/src/test_helper.rs +++ b/crates/pgt_completions/src/test_helper.rs @@ -15,7 +15,6 @@ impl From<&str> for InputQuery { fn from(value: &str) -> Self { let position = value .find(CURSOR_POS) - .map(|p| p.saturating_sub(1)) .expect("Insert Cursor Position into your Query."); InputQuery { @@ -74,3 +73,43 @@ pub(crate) fn get_test_params<'a>( text, } } + +#[cfg(test)] +mod tests { + use crate::test_helper::CURSOR_POS; + + use super::InputQuery; + + #[test] + fn input_query_should_extract_correct_position() { + struct TestCase { + query: String, + expected_pos: usize, + expected_sql_len: usize, + } + + let cases = vec![ + TestCase { + query: format!("select * from{}", CURSOR_POS), + expected_pos: 13, + expected_sql_len: 13, + }, + TestCase { + query: format!("{}select * from", CURSOR_POS), + expected_pos: 0, + expected_sql_len: 13, + }, + TestCase { + query: format!("select {} from", CURSOR_POS), + expected_pos: 7, + expected_sql_len: 12, + }, + ]; + + for case in cases { + let query = InputQuery::from(case.query.as_str()); + assert_eq!(query.position, case.expected_pos); + assert_eq!(query.sql.len(), case.expected_sql_len); + } + } +} diff --git a/crates/pgt_text_size/src/range.rs b/crates/pgt_text_size/src/range.rs index 95b0db58..3cfc3c96 100644 --- a/crates/pgt_text_size/src/range.rs +++ b/crates/pgt_text_size/src/range.rs @@ -281,6 +281,24 @@ impl TextRange { }) } + /// Expand the range's end by the given offset. + /// + /// # Examples + /// + /// ```rust + /// # use pgt_text_size::*; + /// assert_eq!( + /// TextRange::new(2.into(), 4.into()).checked_expand_end(16.into()).unwrap(), + /// TextRange::new(2.into(), 20.into()), + /// ); + /// ``` + #[inline] + pub fn checked_expand_end(self, offset: TextSize) -> Option { + Some(TextRange { + start: self.start, + end: self.end.checked_add(offset)?, + }) + } /// Subtract an offset from this range. 
/// /// Note that this is not appropriate for changing where a `TextRange` is diff --git a/crates/pgt_workspace/src/features/completions.rs b/crates/pgt_workspace/src/features/completions.rs index 8fb13313..4a5c5e29 100644 --- a/crates/pgt_workspace/src/features/completions.rs +++ b/crates/pgt_workspace/src/features/completions.rs @@ -1,6 +1,10 @@ +use std::sync::Arc; + use pgt_completions::CompletionItem; use pgt_fs::PgTPath; -use pgt_text_size::TextSize; +use pgt_text_size::{TextRange, TextSize}; + +use crate::workspace::{GetCompletionsFilter, GetCompletionsMapper, ParsedDocument, StatementId}; #[derive(Debug, serde::Serialize, serde::Deserialize)] #[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] @@ -24,3 +28,167 @@ impl IntoIterator for CompletionsResult { self.items.into_iter() } } + +pub(crate) fn get_statement_for_completions<'a>( + doc: &'a ParsedDocument, + position: TextSize, +) -> Option<(StatementId, TextRange, String, Arc)> { + let count = doc.count(); + // no arms no cookies + if count == 0 { + return None; + } + + let mut eligible_statements = doc.iter_with_filter( + GetCompletionsMapper, + GetCompletionsFilter { + cursor_position: position, + }, + ); + + if count == 1 { + eligible_statements.next() + } else { + let mut prev_stmt = None; + + for current_stmt in eligible_statements { + /* + * If we have multiple statements, we want to make sure that we do not overlap + * with the next one. + * + * select 1 |select 1; + */ + if prev_stmt.is_some_and(|_| current_stmt.1.contains(position)) { + return None; + } + prev_stmt = Some(current_stmt) + } + + prev_stmt + } +} + +#[cfg(test)] +mod tests { + use pgt_fs::PgTPath; + use pgt_text_size::TextSize; + + use crate::workspace::ParsedDocument; + + use super::get_statement_for_completions; + + static CURSOR_POSITION: &str = "€"; + + fn get_doc_and_pos(sql: &str) -> (ParsedDocument, TextSize) { + let pos = sql + .find(CURSOR_POSITION) + .expect("Please add cursor position to test sql"); + + let pos: u32 = pos.try_into().unwrap(); + + ( + ParsedDocument::new( + PgTPath::new("test.sql"), + sql.replace(CURSOR_POSITION, "").into(), + 5, + ), + TextSize::new(pos), + ) + } + + #[test] + fn finds_matching_statement() { + let sql = format!( + r#" + select * from users; + + update {}users set email = 'myemail@com'; + + select 1; + "#, + CURSOR_POSITION + ); + + let (doc, position) = get_doc_and_pos(sql.as_str()); + + let (_, _, text, _) = + get_statement_for_completions(&doc, position).expect("Expected Statement"); + + assert_eq!(text, "update users set email = 'myemail@com';") + } + + #[test] + fn does_not_break_when_no_statements_exist() { + let sql = format!("{}", CURSOR_POSITION); + + let (doc, position) = get_doc_and_pos(sql.as_str()); + + assert!(matches!( + get_statement_for_completions(&doc, position), + None + )); + } + + #[test] + fn does_not_return_overlapping_statements_if_too_close() { + let sql = format!("select * from {}select 1;", CURSOR_POSITION); + + let (doc, position) = get_doc_and_pos(sql.as_str()); + + // make sure these are parsed as two + assert_eq!(doc.count(), 2); + + assert!(matches!( + get_statement_for_completions(&doc, position), + None + )); + } + + #[test] + fn is_fine_with_spaces() { + let sql = format!("select * from {} ;", CURSOR_POSITION); + + let (doc, position) = get_doc_and_pos(sql.as_str()); + + let (_, _, text, _) = + get_statement_for_completions(&doc, position).expect("Expected Statement"); + + assert_eq!(text, "select * from ;") + } + + #[test] + fn considers_offset() { + let sql = 
format!("select * from {}", CURSOR_POSITION); + + let (doc, position) = get_doc_and_pos(sql.as_str()); + + let (_, _, text, _) = + get_statement_for_completions(&doc, position).expect("Expected Statement"); + + assert_eq!(text, "select * from") + } + + #[test] + fn does_not_consider_too_far_offset() { + let sql = format!("select * from {}", CURSOR_POSITION); + + let (doc, position) = get_doc_and_pos(sql.as_str()); + + assert!(matches!( + get_statement_for_completions(&doc, position), + None + )); + } + + #[test] + fn does_not_consider_offset_if_statement_terminated_by_semi() { + let sql = format!("select * from users;{}", CURSOR_POSITION); + + let (doc, position) = get_doc_and_pos(sql.as_str()); + + assert!(matches!( + get_statement_for_completions(&doc, position), + None + )); + } +} diff --git a/crates/pgt_workspace/src/workspace.rs b/crates/pgt_workspace/src/workspace.rs index 681ab95f..54f7200b 100644 --- a/crates/pgt_workspace/src/workspace.rs +++ b/crates/pgt_workspace/src/workspace.rs @@ -22,6 +22,7 @@ mod client; mod server; pub use server::StatementId; +pub(crate) use server::parsed_document::*; #[derive(Debug, serde::Serialize, serde::Deserialize)] #[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs index 5e33bc27..2ad119f5 100644 --- a/crates/pgt_workspace/src/workspace/server.rs +++ b/crates/pgt_workspace/src/workspace/server.rs @@ -8,7 +8,7 @@ use document::Document; use futures::{StreamExt, stream}; use parsed_document::{ AsyncDiagnosticsMapper, CursorPositionFilter, DefaultMapper, ExecuteStatementMapper, - GetCompletionsMapper, ParsedDocument, SyncDiagnosticsMapper, + ParsedDocument, SyncDiagnosticsMapper, }; use pgt_analyse::{AnalyserOptions, AnalysisFilter}; use pgt_analyser::{Analyser, AnalyserConfig, AnalyserContext}; @@ -29,7 +29,7 @@ use crate::{ self, CodeAction, CodeActionKind, CodeActionsResult, CommandAction, CommandActionCategory, ExecuteStatementParams, ExecuteStatementResult, }, - completions::{CompletionsResult, GetCompletionsParams}, + completions::{CompletionsResult, GetCompletionsParams, get_statement_for_completions}, diagnostics::{PullDiagnosticsParams, PullDiagnosticsResult}, }, settings::{Settings, SettingsHandle, SettingsHandleMut}, @@ -47,9 +47,9 @@ mod annotation; mod async_helper; mod change; mod db_connection; -mod document; +pub(crate) mod document; mod migration; -mod parsed_document; +pub(crate) mod parsed_document; mod pg_query; mod schema_cache_manager; mod sql_function; @@ -470,37 +470,36 @@ impl Workspace for WorkspaceServer { &self, params: GetCompletionsParams, ) -> Result { - let parser = self + let parsed_doc = self .parsed_documents .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; let pool = match self.connection.read().unwrap().get_pool() { Some(pool) => pool, - None => return Ok(CompletionsResult::default()), + None => { + tracing::debug!("No connection to database. Skipping completions."); + return Ok(CompletionsResult::default()); + } }; let schema_cache = self.schema_cache.load(pool)?; - let items = parser - .iter_with_filter( - GetCompletionsMapper, - CursorPositionFilter::new(params.position), - ) - .flat_map(|(_id, range, content, cst)| { - // `offset` is the position in the document, - // but we need the position within the *statement*. 
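+        // `params.position` is relative to the whole document, but the
+        // completion engine works on a single statement, so the position is
+        // re-based onto that statement below.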
+ match get_statement_for_completions(&parsed_doc, params.position) { + None => Ok(CompletionsResult::default()), + Some((_id, range, content, cst)) => { let position = params.position - range.start(); - pgt_completions::complete(pgt_completions::CompletionParams { + + let items = pgt_completions::complete(pgt_completions::CompletionParams { position, schema: schema_cache.as_ref(), tree: &cst, text: content, - }) - }) - .collect(); + }); - Ok(CompletionsResult { items }) + Ok(CompletionsResult { items }) + } + } } } diff --git a/crates/pgt_workspace/src/workspace/server/parsed_document.rs b/crates/pgt_workspace/src/workspace/server/parsed_document.rs index 2b64d24a..dafd5697 100644 --- a/crates/pgt_workspace/src/workspace/server/parsed_document.rs +++ b/crates/pgt_workspace/src/workspace/server/parsed_document.rs @@ -136,7 +136,7 @@ pub trait StatementMapper<'a> { fn map( &self, - parser: &'a ParsedDocument, + parsed: &'a ParsedDocument, id: StatementId, range: TextRange, content: &str, @@ -144,7 +144,7 @@ pub trait StatementMapper<'a> { } pub trait StatementFilter<'a> { - fn predicate(&self, id: &StatementId, range: &TextRange) -> bool; + fn predicate(&self, id: &StatementId, range: &TextRange, content: &str) -> bool; } pub struct ParseIterator<'a, M, F> { @@ -177,7 +177,7 @@ where fn next(&mut self) -> Option { // First check if we have any pending sub-statements to process if let Some((id, range, content)) = self.pending_sub_statements.pop() { - if self.filter.predicate(&id, &range) { + if self.filter.predicate(&id, &range, content.as_str()) { return Some(self.mapper.map(self.parser, id, range, &content)); } // If the sub-statement doesn't pass the filter, continue to the next item @@ -213,7 +213,7 @@ where } // Return the current statement if it passes the filter - if self.filter.predicate(&root_id, &range) { + if self.filter.predicate(&root_id, &range, content) { return Some(self.mapper.map(self.parser, root_id, range, content)); } @@ -335,14 +335,40 @@ impl<'a> StatementMapper<'a> for GetCompletionsMapper { range: TextRange, content: &str, ) -> Self::Output { - let cst_result = parser.cst_db.get_or_cache_tree(&id, content); - (id, range, content.to_string(), cst_result) + let tree = parser.cst_db.get_or_cache_tree(&id, content); + (id, range, content.into(), tree) + } +} + +/* + * We allow an offset of two for the statement: + * + * select * from | <-- we want to suggest items for the next token. + * + * However, if the current statement is terminated by a semicolon, we don't apply any + * offset. + * + * select * from users; | <-- no autocompletions here. 
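+ *
+ * Concretely, the filter below widens each statement's range by two bytes
+ * (via `checked_expand_end`) before testing whether it contains the cursor,
+ * unless the statement already ends in a semicolon.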
+ */ +pub struct GetCompletionsFilter { + pub cursor_position: TextSize, +} +impl<'a> StatementFilter<'a> for GetCompletionsFilter { + fn predicate(&self, _id: &StatementId, range: &TextRange, content: &str) -> bool { + let is_terminated_by_semi = content.chars().last().is_some_and(|c| c == ';'); + + let measuring_range = if is_terminated_by_semi { + *range + } else { + range.checked_expand_end(2.into()).unwrap_or(*range) + }; + measuring_range.contains(self.cursor_position) } } pub struct NoFilter; impl<'a> StatementFilter<'a> for NoFilter { - fn predicate(&self, _id: &StatementId, _range: &TextRange) -> bool { + fn predicate(&self, _id: &StatementId, _range: &TextRange, _content: &str) -> bool { true } } @@ -358,7 +384,7 @@ impl CursorPositionFilter { } impl<'a> StatementFilter<'a> for CursorPositionFilter { - fn predicate(&self, _id: &StatementId, range: &TextRange) -> bool { + fn predicate(&self, _id: &StatementId, range: &TextRange, _content: &str) -> bool { range.contains(self.pos) } } @@ -374,7 +400,7 @@ impl IdFilter { } impl<'a> StatementFilter<'a> for IdFilter { - fn predicate(&self, id: &StatementId, _range: &TextRange) -> bool { + fn predicate(&self, id: &StatementId, _range: &TextRange, _content: &str) -> bool { *id == self.id } } From 777c80b3a30c5c999fdf53de477a842e110b038e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Tue, 15 Apr 2025 08:51:20 +0200 Subject: [PATCH 015/114] fix: comments only (#337) --- .../src/workspace/server/change.rs | 110 ++++++++++++++++++ 1 file changed, 110 insertions(+) diff --git a/crates/pgt_workspace/src/workspace/server/change.rs b/crates/pgt_workspace/src/workspace/server/change.rs index 69c68189..7dcd1a55 100644 --- a/crates/pgt_workspace/src/workspace/server/change.rs +++ b/crates/pgt_workspace/src/workspace/server/change.rs @@ -137,6 +137,18 @@ impl Document { diff_size: TextSize, is_addition: bool, ) -> Affected { + // special case: no previous statements -> always full range + if self.positions.is_empty() { + let full_range = TextRange::new(0.into(), content_size); + return Affected { + affected_range: full_range, + affected_indices: Vec::new(), + prev_index: None, + next_index: None, + full_affected_range: full_range, + }; + } + let mut start = change_range.start(); let mut end = change_range.end().min(content_size); @@ -1422,4 +1434,102 @@ mod tests { assert_document_integrity(&doc); } + + #[test] + fn test_comments_only() { + let path = PgTPath::new("test.sql"); + let initial_content = "-- atlas:import async_trigger/setup.sql\n-- atlas:import public/setup.sql\n-- atlas:import private/setup.sql\n-- atlas:import api/setup.sql\n-- atlas:import async_trigger/index.sql\n-- atlas:import public/enums/index.sql\n-- atlas:import public/types/index.sql\n-- atlas:import private/enums/index.sql\n-- atlas:import private/functions/index.sql\n-- atlas:import public/tables/index.sql\n-- atlas:import public/index.sql\n-- atlas:import private/index.sql\n-- atlas:import api/index.sql\n\n\n\n"; + + // Create a new document + let mut doc = Document::new(initial_content.to_string(), 0); + + // First change: Delete some text at line 2, character 24-29 + let change1 = ChangeFileParams { + path: path.clone(), + version: 3, + changes: vec![ChangeParams { + text: "".to_string(), + range: Some(TextRange::new( + // Calculate the correct position based on the content + // Line 2, character 24 + 98.into(), + // Line 2, character 29 + 103.into(), + )), + }], + }; + + let _changes1 = doc.apply_file_change(&change1); + + // Second change: 
Add 't' at line 2, character 24 + let change2 = ChangeFileParams { + path: path.clone(), + version: 4, + changes: vec![ChangeParams { + text: "t".to_string(), + range: Some(TextRange::new(98.into(), 98.into())), + }], + }; + + let _changes2 = doc.apply_file_change(&change2); + + assert_eq!( + doc.positions.len(), + 0, + "Document should have no statement after adding 't'" + ); + + // Third change: Add 'e' at line 2, character 25 + let change3 = ChangeFileParams { + path: path.clone(), + version: 5, + changes: vec![ChangeParams { + text: "e".to_string(), + range: Some(TextRange::new(99.into(), 99.into())), + }], + }; + + let _changes3 = doc.apply_file_change(&change3); + assert_eq!( + doc.positions.len(), + 0, + "Document should still have no statement" + ); + + // Fourth change: Add 's' at line 2, character 26 + let change4 = ChangeFileParams { + path: path.clone(), + version: 6, + changes: vec![ChangeParams { + text: "s".to_string(), + range: Some(TextRange::new(100.into(), 100.into())), + }], + }; + + let _changes4 = doc.apply_file_change(&change4); + assert_eq!( + doc.positions.len(), + 0, + "Document should still have no statement" + ); + + // Fifth change: Add 't' at line 2, character 27 + let change5 = ChangeFileParams { + path: path.clone(), + version: 7, + changes: vec![ChangeParams { + text: "t".to_string(), + range: Some(TextRange::new(101.into(), 101.into())), + }], + }; + + let _changes5 = doc.apply_file_change(&change5); + assert_eq!( + doc.positions.len(), + 0, + "Document should still have no statement" + ); + + assert_document_integrity(&doc); + } } From 8519fc6794cebc2b202d06bdf6e48e429816e68f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Tue, 15 Apr 2025 08:51:35 +0200 Subject: [PATCH 016/114] fix: release changelog (#338) --- .github/workflows/release.yml | 2 +- cliff.toml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f5d46604..7940a60e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -126,7 +126,7 @@ jobs: id: create_changelog with: config: cliff.toml - args: --bump + args: --bump --latest env: GITHUB_REPO: ${{ github.repository }} diff --git a/cliff.toml b/cliff.toml index 8656d82b..3e040a17 100644 --- a/cliff.toml +++ b/cliff.toml @@ -8,14 +8,14 @@ [changelog] # template for the changelog header header = """ -# Changelog\n -All notable changes to this project will be documented in this file.\n +# Postgres Language Server\n +A collection of language tools and a Language Server Protocol (LSP) implementation for Postgres, focusing on developer experience and reliable SQL tooling.\n """ # template for the changelog body # https://keats.github.io/tera/docs/#introduction body = """ {% if version %}\ - ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} + ## {{ version | trim_start_matches(pat="v") }} {% else %}\ ## [unreleased] {% endif %}\ From 9bc27b7039c1bce209f2c8ec27a74d85ad2e0d34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Tue, 15 Apr 2025 09:12:49 +0200 Subject: [PATCH 017/114] fix: dont point to 404 in config schema (#339) --- crates/pgt_workspace/src/configuration.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/crates/pgt_workspace/src/configuration.rs b/crates/pgt_workspace/src/configuration.rs index 536d5b9d..88c04eec 100644 --- a/crates/pgt_workspace/src/configuration.rs +++ 
b/crates/pgt_workspace/src/configuration.rs
@@ -170,12 +170,13 @@ pub fn create_config(
         })?;
 
     // we now check if postgrestools is installed inside `node_modules` and if so, we use the schema from there
-    if VERSION == "0.0.0" {
-        let schema_path = Path::new("./node_modules/@postgrestools/postgrestools/schema.json");
-        let options = OpenOptions::default().read(true);
-        if fs.open_with_options(schema_path, options).is_ok() {
-            configuration.schema = schema_path.to_str().map(String::from);
-        }
+    let node_schema_path = Path::new("./node_modules/@postgrestools/postgrestools/schema.json");
+    let options = OpenOptions::default().read(true);
+    if fs.open_with_options(node_schema_path, options).is_ok() {
+        configuration.schema = node_schema_path.to_str().map(String::from);
+    } else if VERSION == "0.0.0" {
+        // VERSION is 0.0.0 if it has not been explicitly set (e.g. local dev, as fallback)
+        configuration.schema = Some("https://pgtools.dev/schemas/latest/schema.json".to_string());
     } else {
         configuration.schema = Some(format!("https://pgtools.dev/schemas/{VERSION}/schema.json"));
     }
 
From 37137bca3578d506b4d821760538c92b976716e4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?=
Date: Tue, 15 Apr 2025 09:37:36 +0200
Subject: [PATCH 018/114] fix: plpgsql cmds (#336)

---
 crates/pgt_statement_splitter/src/lib.rs      | 18 ++++++
 crates/pgt_statement_splitter/src/parser.rs   |  3 +
 .../src/parser/common.rs                      | 56 ++++++++++++++++++-
 3 files changed, 76 insertions(+), 1 deletion(-)

diff --git a/crates/pgt_statement_splitter/src/lib.rs b/crates/pgt_statement_splitter/src/lib.rs
index 06440da1..3fa67213 100644
--- a/crates/pgt_statement_splitter/src/lib.rs
+++ b/crates/pgt_statement_splitter/src/lib.rs
@@ -136,6 +136,24 @@ mod tests {
         )]);
     }
 
+    #[test]
+    fn command_between_not_starting() {
+        Tester::from("select 1\n \\com test\nselect 2")
+            .expect_statements(vec!["select 1", "select 2"]);
+    }
+
+    #[test]
+    fn command_between() {
+        Tester::from("select 1\n\\com test\nselect 2")
+            .expect_statements(vec!["select 1", "select 2"]);
+    }
+
+    #[test]
+    fn command_standalone() {
+        Tester::from("select 1\n\n\\com test\n\nselect 2")
+            .expect_statements(vec!["select 1", "select 2"]);
+    }
+
     #[test]
     fn insert_with_select() {
         Tester::from("\ninsert into tbl (id) select 1\n\nselect 3")
diff --git a/crates/pgt_statement_splitter/src/parser.rs b/crates/pgt_statement_splitter/src/parser.rs
index c94fe245..183abd97 100644
--- a/crates/pgt_statement_splitter/src/parser.rs
+++ b/crates/pgt_statement_splitter/src/parser.rs
@@ -113,6 +113,9 @@ impl Parser {
         }
     }
 
+    /// Advances the parser to the next relevant token and returns it.
+    ///
+    /// NOTE: This will skip irrelevant tokens.
     fn advance(&mut self) -> &Token {
         // can't reuse any `find_next_relevant` logic because of Mr. Borrow Checker
         let (pos, token) = self
diff --git a/crates/pgt_statement_splitter/src/parser/common.rs b/crates/pgt_statement_splitter/src/parser/common.rs
index 1a355f08..a353791b 100644
--- a/crates/pgt_statement_splitter/src/parser/common.rs
+++ b/crates/pgt_statement_splitter/src/parser/common.rs
@@ -1,4 +1,4 @@
-use pgt_lexer::{SyntaxKind, Token, TokenType};
+use pgt_lexer::{SyntaxKind, Token, TokenType, WHITESPACE_TOKENS};
 
 use super::{
     Parser,
@@ -24,6 +24,12 @@ pub fn source(p: &mut Parser) {
         } => {
             p.advance();
         }
+        Token {
+            kind: SyntaxKind::Ascii92,
+            ..
+        } => {
+            plpgsql_command(p);
+        }
         _ => {
             statement(p);
         }
@@ -87,6 +93,24 @@ pub(crate) fn parenthesis(p: &mut Parser) {
     }
 }
 
+pub(crate) fn plpgsql_command(p: &mut Parser) {
+    p.expect(SyntaxKind::Ascii92);
+
+    loop {
+        match p.current().kind {
+            SyntaxKind::Newline => {
+                p.advance();
+                break;
+            }
+            _ => {
+                // advance the parser to the next token without ignoring irrelevant tokens
+                // we would skip a newline with `advance()`
+                p.current_pos += 1;
+            }
+        }
+    }
+}
+
 pub(crate) fn case(p: &mut Parser) {
     p.expect(SyntaxKind::Case);
 
@@ -125,6 +149,36 @@ pub(crate) fn unknown(p: &mut Parser, exclude: &[SyntaxKind]) {
         } => {
             case(p);
         }
+        Token {
+            kind: SyntaxKind::Ascii92,
+            ..
+        } => {
+            // psql commands e.g.
+            //
+            // ```
+            // \if test
+            // ```
+            //
+            // we wait for "\" and check if the previous token is a newline
+
+            // newline is a whitespace, but we do not want to ignore it here
+            let irrelevant = WHITESPACE_TOKENS
+                .iter()
+                .filter(|t| **t != SyntaxKind::Newline)
+                .collect::<Vec<_>>();
+
+            // go back from the current position without ignoring irrelevant tokens
+            if p.tokens
+                .iter()
+                .take(p.current_pos)
+                .rev()
+                .find(|t| !irrelevant.contains(&&t.kind))
+                .is_some_and(|t| t.kind == SyntaxKind::Newline)
+            {
+                break;
+            }
+            p.advance();
+        }
         Token {
             kind: SyntaxKind::Ascii40,
             ..
 
From c6001fe21e50db40ee5fe9ee145c5fe4aea0310b Mon Sep 17 00:00:00 2001
From: Julian Domke <68325451+juleswritescode@users.noreply.github.com>
Date: Tue, 15 Apr 2025 09:37:57 +0200
Subject: [PATCH 019/114] chore: add double newline comment (#341)

---
 crates/pgt_statement_splitter/src/parser.rs | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/crates/pgt_statement_splitter/src/parser.rs b/crates/pgt_statement_splitter/src/parser.rs
index 183abd97..241d0c70 100644
--- a/crates/pgt_statement_splitter/src/parser.rs
+++ b/crates/pgt_statement_splitter/src/parser.rs
@@ -165,18 +165,20 @@ impl Parser {
 }
 
 #[cfg(windows)]
-/// Returns true if the token is relevant for the paring process
+/// Returns true if the token is relevant for the parsing process
 ///
 /// On windows, a newline is represented by `\r\n` which is two characters.
fn is_irrelevant_token(t: &Token) -> bool {
     WHITESPACE_TOKENS.contains(&t.kind)
+        // double new lines are relevant, single ones are not
         && (t.kind != SyntaxKind::Newline || t.text == "\r\n" || t.text.chars().count() == 1)
 }
 
 #[cfg(not(windows))]
-/// Returns true if the token is relevant for the paring process
+/// Returns true if the token is relevant for the parsing process
 fn is_irrelevant_token(t: &Token) -> bool {
     WHITESPACE_TOKENS.contains(&t.kind)
+        // double new lines are relevant, single ones are not
         && (t.kind != SyntaxKind::Newline || t.text.chars().count() == 1)
 }
 
From 8ae75b3998a6c022b24dfdca9ed272b921ab1250 Mon Sep 17 00:00:00 2001
From: Julian Domke <68325451+juleswritescode@users.noreply.github.com>
Date: Tue, 15 Apr 2025 11:10:16 +0200
Subject: [PATCH 020/114] feat: autocomplete schemas (#340)

---
 .../pgt_completions/benches/sanitization.rs   |  2 +-
 crates/pgt_completions/src/complete.rs        |  3 +-
 crates/pgt_completions/src/item.rs            |  1 +
 crates/pgt_completions/src/providers/mod.rs   |  2 +
 .../pgt_completions/src/providers/schemas.rs  | 70 +++++++++++++++++++
 crates/pgt_completions/src/relevance.rs       | 13 ++--
 crates/pgt_completions/src/sanitization.rs    | 26 +++++--
 crates/pgt_completions/src/test_helper.rs     |  2 +-
 crates/pgt_lsp/src/handlers/completions.rs    |  1 +
 crates/pgt_schema_cache/src/lib.rs            |  1 +
 crates/pgt_schema_cache/src/schemas.rs        |  6 +-
 11 files changed, 111 insertions(+), 16 deletions(-)
 create mode 100644 crates/pgt_completions/src/providers/schemas.rs

diff --git a/crates/pgt_completions/benches/sanitization.rs b/crates/pgt_completions/benches/sanitization.rs
index c21538de..1e5333ff 100644
--- a/crates/pgt_completions/benches/sanitization.rs
+++ b/crates/pgt_completions/benches/sanitization.rs
@@ -27,7 +27,7 @@ fn to_params<'a>(
         position: TextSize::new(pos),
         schema: &cache,
         text,
-        tree: Some(tree),
+        tree: tree,
     }
 }
 
diff --git a/crates/pgt_completions/src/complete.rs b/crates/pgt_completions/src/complete.rs
index ec1232a5..89d25738 100644
--- a/crates/pgt_completions/src/complete.rs
+++ b/crates/pgt_completions/src/complete.rs
@@ -4,7 +4,7 @@ use crate::{
     builder::CompletionBuilder,
     context::CompletionContext,
     item::CompletionItem,
-    providers::{complete_columns, complete_functions, complete_tables},
+    providers::{complete_columns, complete_functions, complete_schemas, complete_tables},
     sanitization::SanitizedCompletionParams,
 };
 
@@ -32,6 +32,7 @@ pub fn complete(params: CompletionParams) -> Vec<CompletionItem> {
     complete_tables(&ctx, &mut builder);
     complete_functions(&ctx, &mut builder);
     complete_columns(&ctx, &mut builder);
+    complete_schemas(&ctx, &mut builder);
 
     builder.finish()
 }
diff --git a/crates/pgt_completions/src/item.rs b/crates/pgt_completions/src/item.rs
index 8f0e3b95..1f306d78 100644
--- a/crates/pgt_completions/src/item.rs
+++ b/crates/pgt_completions/src/item.rs
@@ -7,6 +7,7 @@ pub enum CompletionItemKind {
     Table,
     Function,
     Column,
+    Schema,
 }
 
 #[derive(Debug, Serialize, Deserialize)]
diff --git a/crates/pgt_completions/src/providers/mod.rs b/crates/pgt_completions/src/providers/mod.rs
index 93055129..d760fea0 100644
--- a/crates/pgt_completions/src/providers/mod.rs
+++ b/crates/pgt_completions/src/providers/mod.rs
@@ -1,7 +1,9 @@
 mod columns;
 mod functions;
+mod schemas;
 mod tables;
 
 pub use columns::*;
 pub use functions::*;
+pub use schemas::*;
 pub use tables::*;
diff --git a/crates/pgt_completions/src/providers/schemas.rs b/crates/pgt_completions/src/providers/schemas.rs
new file mode 100644
index 00000000..2f41e8c3
--- /dev/null
+++ 
b/crates/pgt_completions/src/providers/schemas.rs
@@ -0,0 +1,70 @@
+use crate::{
+    CompletionItem, builder::CompletionBuilder, context::CompletionContext,
+    relevance::CompletionRelevanceData,
+};
+
+pub fn complete_schemas(ctx: &CompletionContext, builder: &mut CompletionBuilder) {
+    let available_schemas = &ctx.schema_cache.schemas;
+
+    for schema in available_schemas {
+        let relevance = CompletionRelevanceData::Schema(&schema);
+
+        let item = CompletionItem {
+            label: schema.name.clone(),
+            description: "Schema".into(),
+            preselected: false,
+            kind: crate::CompletionItemKind::Schema,
+            score: relevance.get_score(ctx),
+        };
+
+        builder.add_item(item);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+
+    use crate::{
+        CompletionItemKind, complete,
+        test_helper::{CURSOR_POS, get_test_deps, get_test_params},
+    };
+
+    #[tokio::test]
+    async fn autocompletes_schemas() {
+        let setup = r#"
+            create schema private;
+            create schema auth;
+            create schema internal;
+
+            -- add a table to compete against schemas
+            create table users (
+                id serial primary key,
+                name text,
+                password text
+            );
+        "#;
+
+        let query = format!("select * from {}", CURSOR_POS);
+
+        let (tree, cache) = get_test_deps(setup, query.as_str().into()).await;
+        let params = get_test_params(&tree, &cache, query.as_str().into());
+        let items = complete(params);
+
+        assert!(!items.is_empty());
+
+        assert_eq!(
+            items
+                .into_iter()
+                .take(5)
+                .map(|i| (i.label, i.kind))
+                .collect::<Vec<(String, CompletionItemKind)>>(),
+            vec![
+                ("public".to_string(), CompletionItemKind::Schema),
+                ("auth".to_string(), CompletionItemKind::Schema),
+                ("internal".to_string(), CompletionItemKind::Schema),
+                ("private".to_string(), CompletionItemKind::Schema),
+                ("users".to_string(), CompletionItemKind::Table),
+            ]
+        );
+    }
+}
diff --git a/crates/pgt_completions/src/relevance.rs b/crates/pgt_completions/src/relevance.rs
index 9650a94d..2abb9f2c 100644
--- a/crates/pgt_completions/src/relevance.rs
+++ b/crates/pgt_completions/src/relevance.rs
@@ -5,6 +5,7 @@ pub(crate) enum CompletionRelevanceData<'a> {
     Table(&'a pgt_schema_cache::Table),
     Function(&'a pgt_schema_cache::Function),
     Column(&'a pgt_schema_cache::Column),
+    Schema(&'a pgt_schema_cache::Schema),
 }
 
 impl CompletionRelevanceData<'_> {
@@ -58,6 +59,7 @@ impl CompletionRelevance<'_> {
             CompletionRelevanceData::Function(f) => f.name.as_str(),
             CompletionRelevanceData::Table(t) => t.name.as_str(),
             CompletionRelevanceData::Column(c) => c.name.as_str(),
+            CompletionRelevanceData::Schema(s) => s.name.as_str(),
         };
 
         if name.starts_with(content) {
@@ -97,6 +99,10 @@ impl CompletionRelevance<'_> {
                 ClauseType::Where => 10,
                 _ => -15,
             },
+            CompletionRelevanceData::Schema(_) => match clause_type {
+                ClauseType::From => 10,
+                _ => -50,
+            },
         }
     }
 
@@ -129,6 +135,7 @@ impl CompletionRelevance<'_> {
             CompletionRelevanceData::Function(f) => f.schema.as_str(),
             CompletionRelevanceData::Table(t) => t.schema.as_str(),
             CompletionRelevanceData::Column(c) => c.schema_name.as_str(),
+            CompletionRelevanceData::Schema(s) => s.name.as_str(),
         }
     }
 
@@ -168,11 +175,7 @@ impl CompletionRelevance<'_> {
     }
 
     fn check_is_user_defined(&mut self) {
-        let schema = match self.data {
-            CompletionRelevanceData::Column(c) => &c.schema_name,
-            CompletionRelevanceData::Function(f) => &f.schema,
-            CompletionRelevanceData::Table(t) => &t.schema,
-        };
+        let schema = self.get_schema_name().to_string();
 
         let system_schemas = ["pg_catalog", "information_schema", "pg_toast"];
 
diff --git a/crates/pgt_completions/src/sanitization.rs b/crates/pgt_completions/src/sanitization.rs
index 5ad8ba0e..dc093847
100644 --- a/crates/pgt_completions/src/sanitization.rs +++ b/crates/pgt_completions/src/sanitization.rs @@ -42,12 +42,28 @@ where let cursor_pos: usize = params.position.into(); let mut sql = String::new(); - for (idx, c) in params.text.chars().enumerate() { - if idx == cursor_pos { - sql.push_str(SANITIZED_TOKEN); - sql.push(' '); + let mut sql_iter = params.text.chars(); + + for idx in 0..cursor_pos + 1 { + match sql_iter.next() { + Some(c) => { + if idx == cursor_pos { + sql.push_str(SANITIZED_TOKEN); + sql.push(' '); + } + sql.push(c); + } + None => { + // the cursor is outside the statement, + // we want to push spaces until we arrive at the cursor position. + // we'll then add the SANITIZED_TOKEN + if idx == cursor_pos { + sql.push_str(SANITIZED_TOKEN); + } else { + sql.push(' '); + } + } } - sql.push(c); } let mut parser = tree_sitter::Parser::new(); diff --git a/crates/pgt_completions/src/test_helper.rs b/crates/pgt_completions/src/test_helper.rs index 4339688e..4edf486f 100644 --- a/crates/pgt_completions/src/test_helper.rs +++ b/crates/pgt_completions/src/test_helper.rs @@ -18,7 +18,7 @@ impl From<&str> for InputQuery { .expect("Insert Cursor Position into your Query."); InputQuery { - sql: value.replace(CURSOR_POS, ""), + sql: value.replace(CURSOR_POS, "").trim().to_string(), position, } } diff --git a/crates/pgt_lsp/src/handlers/completions.rs b/crates/pgt_lsp/src/handlers/completions.rs index e9a18a6e..f9b68e7d 100644 --- a/crates/pgt_lsp/src/handlers/completions.rs +++ b/crates/pgt_lsp/src/handlers/completions.rs @@ -50,5 +50,6 @@ fn to_lsp_types_completion_item_kind( pgt_completions::CompletionItemKind::Function => lsp_types::CompletionItemKind::FUNCTION, pgt_completions::CompletionItemKind::Table => lsp_types::CompletionItemKind::CLASS, pgt_completions::CompletionItemKind::Column => lsp_types::CompletionItemKind::FIELD, + pgt_completions::CompletionItemKind::Schema => lsp_types::CompletionItemKind::CLASS, } } diff --git a/crates/pgt_schema_cache/src/lib.rs b/crates/pgt_schema_cache/src/lib.rs index c6dad0b7..28c5b641 100644 --- a/crates/pgt_schema_cache/src/lib.rs +++ b/crates/pgt_schema_cache/src/lib.rs @@ -13,4 +13,5 @@ mod versions; pub use columns::*; pub use functions::{Behavior, Function, FunctionArg, FunctionArgs}; pub use schema_cache::SchemaCache; +pub use schemas::Schema; pub use tables::{ReplicaIdentity, Table}; diff --git a/crates/pgt_schema_cache/src/schemas.rs b/crates/pgt_schema_cache/src/schemas.rs index 51eb8ea3..41747194 100644 --- a/crates/pgt_schema_cache/src/schemas.rs +++ b/crates/pgt_schema_cache/src/schemas.rs @@ -4,9 +4,9 @@ use crate::schema_cache::SchemaCacheItem; #[derive(Debug, Clone, Default)] pub struct Schema { - id: i64, - name: String, - owner: String, + pub id: i64, + pub name: String, + pub owner: String, } impl SchemaCacheItem for Schema { From 4792d062af06161305f42a7d938e534d2652f2b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Sat, 19 Apr 2025 09:14:09 +0200 Subject: [PATCH 021/114] fix: typing comments (#347) --- .../src/workspace/server/change.rs | 66 +++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/crates/pgt_workspace/src/workspace/server/change.rs b/crates/pgt_workspace/src/workspace/server/change.rs index 7dcd1a55..039c42db 100644 --- a/crates/pgt_workspace/src/workspace/server/change.rs +++ b/crates/pgt_workspace/src/workspace/server/change.rs @@ -460,6 +460,72 @@ mod tests { assert!(d.has_fatal_error()); } + #[test] + fn typing_comments() { + let path = PgTPath::new("test.sql"); 
+        let input = "select id from users;\n";
+
+        let mut d = Document::new(input.to_string(), 0);
+
+        let change1 = ChangeFileParams {
+            path: path.clone(),
+            version: 1,
+            changes: vec![ChangeParams {
+                text: "-".to_string(),
+                range: Some(TextRange::new(22.into(), 23.into())),
+            }],
+        };
+
+        let _changed1 = d.apply_file_change(&change1);
+
+        assert_eq!(d.content, "select id from users;\n-");
+        assert_eq!(d.positions.len(), 2);
+
+        let change2 = ChangeFileParams {
+            path: path.clone(),
+            version: 2,
+            changes: vec![ChangeParams {
+                text: "-".to_string(),
+                range: Some(TextRange::new(23.into(), 24.into())),
+            }],
+        };
+
+        let _changed2 = d.apply_file_change(&change2);
+
+        assert_eq!(d.content, "select id from users;\n--");
+        assert_eq!(d.positions.len(), 1);
+
+        let change3 = ChangeFileParams {
+            path: path.clone(),
+            version: 3,
+            changes: vec![ChangeParams {
+                text: " ".to_string(),
+                range: Some(TextRange::new(24.into(), 25.into())),
+            }],
+        };
+
+        let _changed3 = d.apply_file_change(&change3);
+
+        assert_eq!(d.content, "select id from users;\n-- ");
+        assert_eq!(d.positions.len(), 1);
+
+        let change4 = ChangeFileParams {
+            path: path.clone(),
+            version: 3,
+            changes: vec![ChangeParams {
+                text: "t".to_string(),
+                range: Some(TextRange::new(25.into(), 26.into())),
+            }],
+        };
+
+        let _changed4 = d.apply_file_change(&change4);
+
+        assert_eq!(d.content, "select id from users;\n-- t");
+        assert_eq!(d.positions.len(), 1);
+
+        assert_document_integrity(&d);
+    }
+
     #[test]
     fn change_into_scan_error_within_statement() {
         let path = PgTPath::new("test.sql");
From 1adfd23900923e9c5ebf2a345e1fdc9373f94d79 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?=
Date: Sat, 19 Apr 2025 09:14:21 +0200
Subject: [PATCH 022/114] fix: report db connection errors (#348)

* fix: report db connection errors

* Update lib.rs
---
 crates/pgt_typecheck/src/lib.rs              | 20 +++++++++-----------
 crates/pgt_typecheck/tests/diagnostics.rs    |  2 +-
 crates/pgt_workspace/src/workspace/server.rs | 19 +++++++++++--------
 3 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/crates/pgt_typecheck/src/lib.rs b/crates/pgt_typecheck/src/lib.rs
index 9311bb8e..f741c0e6 100644
--- a/crates/pgt_typecheck/src/lib.rs
+++ b/crates/pgt_typecheck/src/lib.rs
@@ -3,10 +3,9 @@ mod diagnostics;
 pub use diagnostics::TypecheckDiagnostic;
 use diagnostics::create_type_error;
 use pgt_text_size::TextRange;
-use sqlx::Executor;
-use sqlx::PgPool;
 use sqlx::postgres::PgDatabaseError;
 pub use sqlx::postgres::PgSeverity;
+use sqlx::{Executor, PgPool};
 
 #[derive(Debug)]
 pub struct TypecheckParams<'a> {
@@ -29,7 +28,9 @@ pub struct TypeError {
     pub constraint: Option<String>,
 }
 
-pub async fn check_sql(params: TypecheckParams<'_>) -> Option<TypecheckDiagnostic> {
+pub async fn check_sql(
+    params: TypecheckParams<'_>,
+) -> Result<Option<TypecheckDiagnostic>, sqlx::Error> {
     // Check if the AST is not a supported statement type
     if !matches!(
         params.ast,
@@ -39,13 +40,10 @@ pub async fn check_sql(params: TypecheckParams<'_>) -> Option<TypecheckDiagnost
     ) {
-        return None;
+        return Ok(None);
     }
 
-    let mut conn = match params.conn.acquire().await {
-        Ok(c) => c,
-        Err(_) => return None,
-    };
+    let mut conn = params.conn.acquire().await?;
 
     // Postgres caches prepared statements within the current DB session (connection).
     // This can cause issues if the underlying table schema changes while statements
@@ -56,11 +54,11 @@ pub async fn check_sql(params: TypecheckParams<'_>) -> Option<TypecheckDiagnost
     match res {
-        Ok(_) => None,
+        Ok(_) => Ok(None),
         Err(sqlx::Error::Database(err)) => {
             let pg_err = err.downcast_ref::<PgDatabaseError>();
-            Some(create_type_error(pg_err, params.tree))
+            Ok(Some(create_type_error(pg_err, params.tree)))
         }
-        Err(_) => None,
+        Err(err) => Err(err),
     }
 }
diff --git a/crates/pgt_typecheck/tests/diagnostics.rs b/crates/pgt_typecheck/tests/diagnostics.rs
index 46daa8a1..4c780d74 100644
--- a/crates/pgt_typecheck/tests/diagnostics.rs
+++ b/crates/pgt_typecheck/tests/diagnostics.rs
@@ -37,7 +37,7 @@ async fn test(name: &str, query: &str, setup: &str) {
 
     Formatter::new(&mut writer)
         .write_markup(markup! {
-            {PrintDiagnostic::simple(&result.unwrap())}
+            {PrintDiagnostic::simple(&result.unwrap().unwrap())}
         })
         .unwrap();
 
diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs
index 2ad119f5..3bf540cc 100644
--- a/crates/pgt_workspace/src/workspace/server.rs
+++ b/crates/pgt_workspace/src/workspace/server.rs
@@ -360,8 +360,6 @@ impl Workspace for WorkspaceServer {
 
         let mut diagnostics: Vec<SDiagnostic> = parser.document_diagnostics().to_vec();
 
-        // TODO: run this in parallel with rayon based on rayon.count()
-
         if let Some(pool) = self
             .connection
             .read()
@@ -385,13 +383,15 @@ impl Workspace for WorkspaceServer {
                             })
                             .await
                             .map(|d| {
-                                let r = d.location().span.map(|span| span + range.start());
+                                d.map(|d| {
+                                    let r = d.location().span.map(|span| span + range.start());
 
-                                d.with_file_path(path.as_path().display().to_string())
-                                    .with_file_span(r.unwrap_or(range))
+                                    d.with_file_path(path.as_path().display().to_string())
+                                        .with_file_span(r.unwrap_or(range))
+                                })
                             })
                     } else {
-                        None
+                        Ok(None)
                     }
                 }
             })
@@ -400,8 +400,11 @@ impl Workspace for WorkspaceServer {
             .await
         })?;
 
-        for result in async_results.into_iter().flatten() {
-            diagnostics.push(SDiagnostic::new(result));
+        for result in async_results.into_iter() {
+            let result = result?;
+            if let Some(diag) = result {
+                diagnostics.push(SDiagnostic::new(diag));
+            }
         }
     }
 
From 13b661e0c7e652a59e9d4ba5424be8124bb00d80 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?=
Date: Sat, 19 Apr 2025 09:14:31 +0200
Subject: [PATCH 023/114] chore: ensure auto fixes are applied (#349)

* fix: lint

* chore: check optional changes are applied in ci

* debug

* debug

* fix: check for uncommitted changes only

* fix: make gha linter happy

* Update pull_request.yml

* ...
--- .github/workflows/pull_request.yml | 13 +++++++-- .../pgt_completions/benches/sanitization.rs | 6 ++-- .../pgt_completions/src/providers/columns.rs | 3 +- .../pgt_completions/src/providers/schemas.rs | 2 +- crates/pgt_completions/src/sanitization.rs | 8 +++--- .../pgt_workspace/src/features/completions.rs | 28 ++++++------------- 6 files changed, 28 insertions(+), 32 deletions(-) diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index e3db0782..e8393757 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -100,12 +100,21 @@ jobs: uses: biomejs/setup-biome@v2 with: version: latest + - name: Run Lints run: | cargo sqlx prepare --check --workspace - cargo clippy + cargo clippy --fix cargo run -p rules_check - biome lint + biome lint --write + + - name: Check for changes + run: | + if [[ $(git status --porcelain) ]]; then + git status + git diff + exit 1 + fi test: name: Test diff --git a/crates/pgt_completions/benches/sanitization.rs b/crates/pgt_completions/benches/sanitization.rs index 1e5333ff..50c2a0e3 100644 --- a/crates/pgt_completions/benches/sanitization.rs +++ b/crates/pgt_completions/benches/sanitization.rs @@ -13,7 +13,7 @@ fn sql_and_pos(sql: &str) -> (String, usize) { fn get_tree(sql: &str) -> tree_sitter::Tree { let mut parser = tree_sitter::Parser::new(); parser.set_language(tree_sitter_sql::language()).unwrap(); - parser.parse(sql.to_string(), None).unwrap() + parser.parse(sql, None).unwrap() } fn to_params<'a>( @@ -25,9 +25,9 @@ fn to_params<'a>( let pos: u32 = pos.try_into().unwrap(); CompletionParams { position: TextSize::new(pos), - schema: &cache, + schema: cache, text, - tree: tree, + tree, } } diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs index 2898b63f..b792ba2c 100644 --- a/crates/pgt_completions/src/providers/columns.rs +++ b/crates/pgt_completions/src/providers/columns.rs @@ -204,8 +204,7 @@ mod tests { let has_column_in_first_four = |col: &'static str| { first_four .iter() - .find(|compl_item| compl_item.label.as_str() == col) - .is_some() + .any(|compl_item| compl_item.label.as_str() == col) }; assert!( diff --git a/crates/pgt_completions/src/providers/schemas.rs b/crates/pgt_completions/src/providers/schemas.rs index 2f41e8c3..6e86ab56 100644 --- a/crates/pgt_completions/src/providers/schemas.rs +++ b/crates/pgt_completions/src/providers/schemas.rs @@ -7,7 +7,7 @@ pub fn complete_schemas(ctx: &CompletionContext, builder: &mut CompletionBuilder let available_schemas = &ctx.schema_cache.schemas; for schema in available_schemas { - let relevance = CompletionRelevanceData::Schema(&schema); + let relevance = CompletionRelevanceData::Schema(schema); let item = CompletionItem { label: schema.name.clone(), diff --git a/crates/pgt_completions/src/sanitization.rs b/crates/pgt_completions/src/sanitization.rs index dc093847..710d488d 100644 --- a/crates/pgt_completions/src/sanitization.rs +++ b/crates/pgt_completions/src/sanitization.rs @@ -12,7 +12,7 @@ pub(crate) struct SanitizedCompletionParams<'a> { } pub fn benchmark_sanitization(params: CompletionParams) -> String { - let params: SanitizedCompletionParams = params.try_into().unwrap(); + let params: SanitizedCompletionParams = params.into(); params.text } @@ -212,7 +212,7 @@ mod tests { .set_language(tree_sitter_sql::language()) .expect("Error loading sql language"); - let mut tree = parser.parse(input.to_string(), None).unwrap(); + let mut tree = parser.parse(input, 
None).unwrap();
 
         // select * from| <-- still on previous token
         assert!(!cursor_prepared_to_write_token_after_last_node(
@@ -274,7 +274,7 @@ mod tests {
             .set_language(tree_sitter_sql::language())
             .expect("Error loading sql language");
 
-        let mut tree = parser.parse(input.to_string(), None).unwrap();
+        let mut tree = parser.parse(input, None).unwrap();
 
         // select * from ;| <-- it's after the statement
         assert!(!cursor_before_semicolon(&mut tree, TextSize::new(19)));
diff --git a/crates/pgt_workspace/src/features/completions.rs b/crates/pgt_workspace/src/features/completions.rs
index 4a5c5e29..85342183 100644
--- a/crates/pgt_workspace/src/features/completions.rs
+++ b/crates/pgt_workspace/src/features/completions.rs
@@ -29,8 +29,8 @@ impl IntoIterator for CompletionsResult {
     }
 }
 
-pub(crate) fn get_statement_for_completions<'a>(
-    doc: &'a ParsedDocument,
+pub(crate) fn get_statement_for_completions(
+    doc: &ParsedDocument,
     position: TextSize,
 ) -> Option<(StatementId, TextRange, String, Arc<tree_sitter::Tree>)> {
     let count = doc.count();
@@ -89,7 +89,7 @@ mod tests {
         (
             ParsedDocument::new(
                 PgTPath::new("test.sql"),
-                sql.replace(CURSOR_POSITION, "").into(),
+                sql.replace(CURSOR_POSITION, ""),
                 5,
             ),
             TextSize::new(pos),
@@ -119,14 +119,11 @@ mod tests {
 
     #[test]
     fn does_not_break_when_no_statements_exist() {
-        let sql = format!("{}", CURSOR_POSITION);
+        let sql = CURSOR_POSITION.to_string();
 
         let (doc, position) = get_doc_and_pos(sql.as_str());
 
-        assert!(matches!(
-            get_statement_for_completions(&doc, position),
-            None
-        ));
+        assert!(get_statement_for_completions(&doc, position).is_none());
     }
 
     #[test]
@@ -138,10 +135,7 @@ mod tests {
         // make sure these are parsed as two
         assert_eq!(doc.count(), 2);
 
-        assert!(matches!(
-            get_statement_for_completions(&doc, position),
-            None
-        ));
+        assert!(get_statement_for_completions(&doc, position).is_none());
     }
 
     #[test]
@@ -174,10 +168,7 @@ mod tests {
 
         let (doc, position) = get_doc_and_pos(sql.as_str());
 
-        assert!(matches!(
-            get_statement_for_completions(&doc, position),
-            None
-        ));
+        assert!(get_statement_for_completions(&doc, position).is_none());
     }
 
     #[test]
@@ -186,9 +177,6 @@ mod tests {
 
         let (doc, position) = get_doc_and_pos(sql.as_str());
 
-        assert!(matches!(
-            get_statement_for_completions(&doc, position),
-            None
-        ));
+        assert!(get_statement_for_completions(&doc, position).is_none());
     }
 }
From 6b3447ca2e05eb4ff2ab4dc5d557afc880fd7e80 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?=
Date: Sat, 19 Apr 2025 09:14:40 +0200
Subject: [PATCH 024/114] fix: split grant stmt properly (#350)

---
 crates/pgt_statement_splitter/src/lib.rs           | 8 ++++++++
 crates/pgt_statement_splitter/src/parser/common.rs | 4 ++++
 2 files changed, 12 insertions(+)

diff --git a/crates/pgt_statement_splitter/src/lib.rs b/crates/pgt_statement_splitter/src/lib.rs
index 3fa67213..e5e995b7 100644
--- a/crates/pgt_statement_splitter/src/lib.rs
+++ b/crates/pgt_statement_splitter/src/lib.rs
@@ -105,6 +105,14 @@ mod tests {
             .expect_statements(vec!["select 1 from contact", "select 1"]);
     }
 
+    #[test]
+    fn grant() {
+        Tester::from("GRANT SELECT ON TABLE \"public\".\"my_table\" TO \"my_role\";")
+            .expect_statements(vec![
+                "GRANT SELECT ON TABLE \"public\".\"my_table\" TO \"my_role\";",
+            ]);
+    }
+
     #[test]
     fn double_newlines() {
         Tester::from("select 1 from contact\n\nselect 1\n\nselect 3").expect_statements(vec![
diff --git a/crates/pgt_statement_splitter/src/parser/common.rs b/crates/pgt_statement_splitter/src/parser/common.rs
index a353791b..ab3f8173 100644
--- a/crates/pgt_statement_splitter/src/parser/common.rs
+++ b/crates/pgt_statement_splitter/src/parser/common.rs
@@ -205,6 +205,8 @@ pub(crate) fn unknown(p: &mut Parser, exclude: &[SyntaxKind]) {
                     SyntaxKind::All,
                     // for UNION ... EXCEPT
                     SyntaxKind::Except,
+                    // for grant
+                    SyntaxKind::Grant,
                 ]
                 .iter()
                 .all(|x| Some(x) != prev.as_ref())
@@ -230,6 +232,8 @@ pub(crate) fn unknown(p: &mut Parser, exclude: &[SyntaxKind]) {
                     SyntaxKind::Also,
                     // for create rule
                     SyntaxKind::Instead,
+                    // for grant
+                    SyntaxKind::Grant,
                 ]
                 .iter()
                 .all(|x| Some(x) != prev.as_ref())
From d33bc4fa89f669f9b629aa24d59cae9d30ed0cdd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philipp=20Steinr=C3%B6tter?=
Date: Sat, 19 Apr 2025 09:14:58 +0200
Subject: [PATCH 025/114] fix: extract sql fn body properly (#346)

---
 .../src/workspace/server/parsed_document.rs   | 24 +++++++++++++++++++
 .../src/workspace/server/sql_function.rs      | 11 +++++++++
 2 files changed, 35 insertions(+)

diff --git a/crates/pgt_workspace/src/workspace/server/parsed_document.rs b/crates/pgt_workspace/src/workspace/server/parsed_document.rs
index dafd5697..e5c0eeb1 100644
--- a/crates/pgt_workspace/src/workspace/server/parsed_document.rs
+++ b/crates/pgt_workspace/src/workspace/server/parsed_document.rs
@@ -404,3 +404,27 @@ impl<'a> StatementFilter<'a> for IdFilter {
         *id == self.id
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use pgt_fs::PgTPath;
+
+    #[test]
+    fn sql_function_body() {
+        let input = "CREATE FUNCTION add(integer, integer) RETURNS integer
+    AS 'select $1 + $2;'
+    LANGUAGE SQL
+    IMMUTABLE
+    RETURNS NULL ON NULL INPUT;";
+
+        let path = PgTPath::new("test.sql");
+
+        let d = ParsedDocument::new(path, input.to_string(), 0);
+
+        let stmts = d.iter(DefaultMapper).collect::<Vec<_>>();
+
+        assert_eq!(stmts.len(), 2);
+    }
+}
diff --git a/crates/pgt_workspace/src/workspace/server/sql_function.rs b/crates/pgt_workspace/src/workspace/server/sql_function.rs
index 3273466d..777210d5 100644
--- a/crates/pgt_workspace/src/workspace/server/sql_function.rs
+++ b/crates/pgt_workspace/src/workspace/server/sql_function.rs
@@ -5,6 +5,7 @@ use pgt_text_size::TextRange;
 
 use super::statement_identifier::StatementId;
 
+#[derive(Debug, Clone)]
 pub struct SQLFunctionBody {
     pub range: TextRange,
     pub body: String,
@@ -97,6 +98,16 @@ fn find_option_value(
         .find_map(|arg| {
             if let pgt_query_ext::NodeEnum::String(s) = arg {
                 Some(s.sval.clone())
+            } else if let pgt_query_ext::NodeEnum::List(l) = arg {
+                l.items.iter().find_map(|item_wrapper| {
+                    if let Some(pgt_query_ext::NodeEnum::String(s)) =
+                        item_wrapper.node.as_ref()
+                    {
+                        Some(s.sval.clone())
+                    } else {
+                        None
+                    }
+                })
             } else {
                 None
             }
From 745bd334757f7953e7f91b4e2f3908f5daa3a8f5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philipp=20Steinr=C3%B6tter?=
Date: Sat, 19 Apr 2025 11:59:51 +0200
Subject: [PATCH 026/114] chore: test fn body content (#351)

---
 crates/pgt_workspace/src/workspace/server/parsed_document.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/crates/pgt_workspace/src/workspace/server/parsed_document.rs b/crates/pgt_workspace/src/workspace/server/parsed_document.rs
index e5c0eeb1..01f18d3c 100644
--- a/crates/pgt_workspace/src/workspace/server/parsed_document.rs
+++ 
b/crates/pgt_workspace/src/workspace/server/parsed_document.rs @@ -426,5 +426,6 @@ mod tests { let stmts = d.iter(DefaultMapper).collect::>(); assert_eq!(stmts.len(), 2); + assert_eq!(stmts[1].2, "select $1 + $2;"); } } From 1e35f2a9e3394fc6c53a0d7dc272b81fc9451512 Mon Sep 17 00:00:00 2001 From: psteinroe Date: Sat, 19 Apr 2025 12:45:51 +0200 Subject: [PATCH 027/114] fix: release --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7940a60e..ff973f9f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -126,7 +126,7 @@ jobs: id: create_changelog with: config: cliff.toml - args: --bump --latest + args: --bump --unreleased env: GITHUB_REPO: ${{ github.repository }} From a5ba9cb9d774402b5e9c6ca6f42f6bd44b334689 Mon Sep 17 00:00:00 2001 From: psteinroe Date: Sat, 19 Apr 2025 12:54:11 +0200 Subject: [PATCH 028/114] fix: ensure tag matches --- .github/workflows/release.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index ff973f9f..aa0eb1b1 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -113,7 +113,7 @@ jobs: create_changelog_and_release: runs-on: ubuntu-latest - needs: build_and_test # make sure that tests & build work correctly + needs: [extract_version, build_and_test] # make sure that tests & build work correctly steps: - name: Checkout Repo uses: actions/checkout@v4 @@ -130,6 +130,10 @@ jobs: env: GITHUB_REPO: ${{ github.repository }} + - name: Ensure tag matches + if: ${{ steps.create_changelog.outputs.version }} != ${{ needs.extract_version.outputs.version }} + run: exit 1 + - name: 👇 Download Artifacts uses: actions/download-artifact@v4 id: download From ebb0ca5bc95b1de18a0a935f63bfecaf75105b34 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Mon, 21 Apr 2025 18:41:29 +0200 Subject: [PATCH 029/114] fix: if clause in workflow file (#360) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index aa0eb1b1..934edba9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -131,7 +131,7 @@ jobs: GITHUB_REPO: ${{ github.repository }} - name: Ensure tag matches - if: ${{ steps.create_changelog.outputs.version }} != ${{ needs.extract_version.outputs.version }} + if: steps.create_changelog.outputs.version != needs.extract_version.outputs.version run: exit 1 - name: 👇 Download Artifacts From 1dd4791af8f27c2e1e8398b7ba2c29476ff9ca7d Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Mon, 21 Apr 2025 19:43:17 +0200 Subject: [PATCH 030/114] fix(completions): remove , as trigger char (#358) --- crates/pgt_lsp/src/capabilities.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/pgt_lsp/src/capabilities.rs b/crates/pgt_lsp/src/capabilities.rs index a801dd5e..b3e35b69 100644 --- a/crates/pgt_lsp/src/capabilities.rs +++ b/crates/pgt_lsp/src/capabilities.rs @@ -37,7 +37,7 @@ pub(crate) fn server_capabilities(capabilities: &ClientCapabilities) -> ServerCa // The request is used to get more information about a simple CompletionItem. 
                resolve_provider: None,
 
-                trigger_characters: Some(vec![".".to_owned(), ",".to_owned(), " ".to_owned()]),
+                trigger_characters: Some(vec![".".to_owned(), " ".to_owned()]),
 
                 // No character will lead to automatically inserting the selected completion-item
                 all_commit_characters: None,
 
From f837c1a18f0c6adf9f1a37a0379b77d2106f3e34 Mon Sep 17 00:00:00 2001
From: Julian Domke <68325451+juleswritescode@users.noreply.github.com>
Date: Mon, 21 Apr 2025 19:43:32 +0200
Subject: [PATCH 031/114] fix(completions): cursor on dot, other issues (#359)

* fix(completions): cursor on dot, other issues

* ?

* test belongs here

* refs
---
 .../pgt_completions/src/providers/columns.rs  | 65 +++++++++++++++++++
 crates/pgt_completions/src/sanitization.rs    | 29 +++++++--
 2 files changed, 90 insertions(+), 4 deletions(-)

diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs
index b792ba2c..d1c3e110 100644
--- a/crates/pgt_completions/src/providers/columns.rs
+++ b/crates/pgt_completions/src/providers/columns.rs
@@ -224,4 +224,69 @@ mod tests {
             "`email` not present in first four completion items."
         );
     }
+
+    #[tokio::test]
+    async fn prefers_columns_of_mentioned_tables() {
+        let setup = r#"
+            create schema private;
+
+            create table private.users (
+                id1 serial primary key,
+                name1 text,
+                address1 text,
+                email1 text
+            );
+
+            create table public.users (
+                id2 serial primary key,
+                name2 text,
+                address2 text,
+                email2 text
+            );
+        "#;
+
+        {
+            let test_case = TestCase {
+                message: "",
+                query: format!(r#"select {} from users"#, CURSOR_POS),
+                label: "suggests from table",
+                description: "",
+            };
+
+            let (tree, cache) = get_test_deps(setup, test_case.get_input_query()).await;
+            let params = get_test_params(&tree, &cache, test_case.get_input_query());
+            let results = complete(params);
+
+            assert_eq!(
+                results
+                    .into_iter()
+                    .take(4)
+                    .map(|item| item.label)
+                    .collect::<Vec<String>>(),
+                vec!["address2", "email2", "id2", "name2"]
+            );
+        }
+
+        {
+            let test_case = TestCase {
+                message: "",
+                query: format!(r#"select {} from private.users"#, CURSOR_POS),
+                label: "suggests from table",
+                description: "",
+            };
+
+            let (tree, cache) = get_test_deps(setup, test_case.get_input_query()).await;
+            let params = get_test_params(&tree, &cache, test_case.get_input_query());
+            let results = complete(params);
+
+            assert_eq!(
+                results
+                    .into_iter()
+                    .take(4)
+                    .map(|item| item.label)
+                    .collect::<Vec<String>>(),
+                vec!["address1", "email1", "id1", "name1"]
+            );
+        }
+    }
 }
diff --git a/crates/pgt_completions/src/sanitization.rs b/crates/pgt_completions/src/sanitization.rs
index 710d488d..59eb609f 100644
--- a/crates/pgt_completions/src/sanitization.rs
+++ b/crates/pgt_completions/src/sanitization.rs
@@ -1,4 +1,4 @@
-use std::borrow::Cow;
+use std::{borrow::Cow, cmp::max};
 
 use pgt_text_size::TextSize;
 
@@ -24,6 +24,7 @@ where
     if cursor_inbetween_nodes(params.tree, params.position)
         || cursor_prepared_to_write_token_after_last_node(params.tree, params.position)
         || cursor_before_semicolon(params.tree, params.position)
+        || cursor_on_a_dot(&params.text, params.position)
     {
         SanitizedCompletionParams::with_adjusted_sql(params)
     } else {
@@ -44,12 +45,13 @@ where
 
     let mut sql_iter = params.text.chars();
 
-    for idx in 0..cursor_pos + 1 {
+    let max = max(cursor_pos + 1, params.text.len());
+
+    for idx in 0..max {
         match sql_iter.next() {
             Some(c) => {
                 if idx == cursor_pos {
                     sql.push_str(SANITIZED_TOKEN);
-                    sql.push(' ');
                 }
                 sql.push(c);
             }
@@ -149,7 +151,11 @@ fn cursor_prepared_to_write_token_after_last_node(
     cursor_pos ==
tree.root_node().end_byte() + 1
 }
 
+fn cursor_on_a_dot(sql: &str, position: TextSize) -> bool {
+    let position: usize = position.into();
+    sql.chars().nth(position - 1).is_some_and(|c| c == '.')
+}
+
 fn cursor_before_semicolon(tree: &tree_sitter::Tree, position: TextSize) -> bool {
     let mut cursor = tree.walk();
     let mut leaf_node = tree.root_node();
@@ -198,7 +205,7 @@ mod tests {
     use pgt_text_size::TextSize;
 
     use crate::sanitization::{
-        cursor_before_semicolon, cursor_inbetween_nodes,
+        cursor_before_semicolon, cursor_inbetween_nodes, cursor_on_a_dot,
         cursor_prepared_to_write_token_after_last_node,
     };
 
@@ -263,6 +270,20 @@ mod tests {
         ));
     }
 
+    #[test]
+    fn on_a_dot() {
+        let input = "select * from private.";
+
+        // select * from private.| <-- on a dot
+        assert!(cursor_on_a_dot(input, TextSize::new(22)));
+
+        // select * from private|. <-- before the dot
+        assert!(!cursor_on_a_dot(input, TextSize::new(21)));
+
+        // select * from private. | <-- too far off the dot
+        assert!(!cursor_on_a_dot(input, TextSize::new(23)));
+    }
+
     #[test]
     fn test_cursor_before_semicolon() {
         // Idx "13" is the exlusive end of `select * from` (first space after from)
From b2f8f0c3e2800041a1707d3350150134aec5c684 Mon Sep 17 00:00:00 2001
From: Julian Domke <68325451+juleswritescode@users.noreply.github.com>
Date: Mon, 21 Apr 2025 19:46:12 +0200
Subject: [PATCH 032/114] feat(completions): filter invalid completion items (#361)

---
 crates/pgt_completions/src/builder.rs         |  87 +++++---
 crates/pgt_completions/src/complete.rs        |   2 +-
 .../pgt_completions/src/providers/columns.rs  |  49 ++++-
 .../src/providers/functions.rs                |  16 +-
 .../pgt_completions/src/providers/schemas.rs  |  13 +-
 .../pgt_completions/src/providers/tables.rs   |  16 +-
 crates/pgt_completions/src/relevance.rs       | 188 +-----------------
 .../src/relevance/filtering.rs                | 105 ++++++++++
 .../pgt_completions/src/relevance/scoring.rs  | 182 +++++++++++++++++
 crates/pgt_completions/src/test_helper.rs     |  26 +++
 10 files changed, 445 insertions(+), 239 deletions(-)
 create mode 100644 crates/pgt_completions/src/relevance/filtering.rs
 create mode 100644 crates/pgt_completions/src/relevance/scoring.rs

diff --git a/crates/pgt_completions/src/builder.rs b/crates/pgt_completions/src/builder.rs
index 39439afb..45c36d3b 100644
--- a/crates/pgt_completions/src/builder.rs
+++ b/crates/pgt_completions/src/builder.rs
@@ -1,47 +1,80 @@
-use crate::item::CompletionItem;
+use crate::{
+    CompletionItemKind,
+    context::CompletionContext,
+    item::CompletionItem,
+    relevance::{filtering::CompletionFilter, scoring::CompletionScore},
+};
 
-pub(crate) struct CompletionBuilder {
-    items: Vec<CompletionItem>,
+pub(crate) struct PossibleCompletionItem<'a> {
+    pub label: String,
+    pub description: String,
+    pub kind: CompletionItemKind,
+    pub score: CompletionScore<'a>,
+    pub filter: CompletionFilter<'a>,
+}
 
+pub(crate) struct CompletionBuilder<'a> {
+    items: Vec<PossibleCompletionItem<'a>>,
+    ctx: &'a CompletionContext<'a>,
+}
 
-impl CompletionBuilder {
-    pub fn new() -> Self {
-        CompletionBuilder { items: vec![] }
+impl<'a> CompletionBuilder<'a> {
+    pub fn new(ctx: &'a CompletionContext) -> Self {
+        CompletionBuilder { items: vec![], ctx }
     }
 
-    pub fn add_item(&mut self, item: CompletionItem) {
+    pub fn add_item(&mut self, item: PossibleCompletionItem<'a>) {
         self.items.push(item);
     }
 
-    pub fn finish(mut self) -> Vec<CompletionItem> {
-        self.items
-            .sort_by(|a, b| b.score.cmp(&a.score).then_with(|| a.label.cmp(&b.label)));
+    pub fn finish(self) -> Vec<CompletionItem> {
+        let mut items: Vec<PossibleCompletionItem> = self
+            .items
+            .into_iter()
+            .filter(|i| i.filter.is_relevant(self.ctx).is_some())
.collect();
+
+        for item in items.iter_mut() {
+            item.score.calc_score(self.ctx);
+        }
 
-        self.items.dedup_by(|a, b| a.label == b.label);
-        self.items.truncate(crate::LIMIT);
+        items.sort_by(|a, b| {
+            b.score
+                .get_score()
+                .cmp(&a.score.get_score())
+                .then_with(|| a.label.cmp(&b.label))
+        });
 
-        let should_preselect_first_item = self.should_preselect_first_item();
+        items.dedup_by(|a, b| a.label == b.label);
+        items.truncate(crate::LIMIT);
 
-        self.items
+        let should_preselect_first_item = should_preselect_first_item(&items);
+
+        items
             .into_iter()
             .enumerate()
-            .map(|(idx, mut item)| {
-                if idx == 0 {
-                    item.preselected = should_preselect_first_item;
+            .map(|(idx, item)| {
+                let preselected = idx == 0 && should_preselect_first_item;
+
+                CompletionItem {
+                    description: item.description,
+                    kind: item.kind,
+                    label: item.label,
+                    preselected,
+                    score: item.score.get_score(),
                 }
-                item
             })
             .collect()
     }
+}
 
-    fn should_preselect_first_item(&mut self) -> bool {
-        let mut items_iter = self.items.iter();
-        let first = items_iter.next();
-        let second = items_iter.next();
+fn should_preselect_first_item(items: &Vec<PossibleCompletionItem>) -> bool {
+    let mut items_iter = items.iter();
+    let first = items_iter.next();
+    let second = items_iter.next();
 
-        first.is_some_and(|f| match second {
-            Some(s) => (f.score - s.score) > 10,
-            None => true,
-        })
-    }
+    first.is_some_and(|f| match second {
+        Some(s) => (f.score.get_score() - s.score.get_score()) > 10,
+        None => true,
+    })
 }
diff --git a/crates/pgt_completions/src/complete.rs b/crates/pgt_completions/src/complete.rs
index 89d25738..442ee546 100644
--- a/crates/pgt_completions/src/complete.rs
+++ b/crates/pgt_completions/src/complete.rs
@@ -27,7 +27,7 @@ pub fn complete(params: CompletionParams) -> Vec<CompletionItem> {
 
     let ctx = CompletionContext::new(&sanitized_params);
 
-    let mut builder = CompletionBuilder::new();
+    let mut builder = CompletionBuilder::new(&ctx);
 
     complete_tables(&ctx, &mut builder);
diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs
index d1c3e110..e8a51e48 100644
--- a/crates/pgt_completions/src/providers/columns.rs
+++ b/crates/pgt_completions/src/providers/columns.rs
@@ -1,17 +1,21 @@
 use crate::{
-    CompletionItem, CompletionItemKind, builder::CompletionBuilder, context::CompletionContext,
-    relevance::CompletionRelevanceData,
+    CompletionItemKind,
+    builder::{CompletionBuilder, PossibleCompletionItem},
+    context::CompletionContext,
+    relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore},
 };
 
-pub fn complete_columns(ctx: &CompletionContext, builder: &mut CompletionBuilder) {
+pub fn complete_columns<'a>(ctx: &CompletionContext<'a>, builder: &mut CompletionBuilder<'a>) {
     let available_columns = &ctx.schema_cache.columns;
 
     for col in available_columns {
-        let item = CompletionItem {
+        let relevance = CompletionRelevanceData::Column(col);
+
+        let item = PossibleCompletionItem {
             label: col.name.clone(),
-            score: CompletionRelevanceData::Column(col).get_score(ctx),
+            score: CompletionScore::from(relevance.clone()),
+            filter: CompletionFilter::from(relevance),
             description: format!("Table: {}.{}", col.schema_name, col.table_name),
-            preselected: false,
             kind: CompletionItemKind::Column,
         };
 
@@ -22,7 +26,7 @@ pub fn complete_columns(ctx: &CompletionContext, builder: &mut CompletionBuilder
 #[cfg(test)]
 mod tests {
     use crate::{
-        CompletionItem, complete,
+        CompletionItem, CompletionItemKind, complete,
         test_helper::{CURSOR_POS, InputQuery,
get_test_deps, get_test_params}, }; @@ -225,6 +229,37 @@ mod tests { ); } + #[tokio::test] + async fn ignores_cols_in_from_clause() { + let setup = r#" + create schema private; + + create table private.users ( + id serial primary key, + name text, + address text, + email text + ); + "#; + + let test_case = TestCase { + message: "suggests user created tables first", + query: format!(r#"select * from private.{}"#, CURSOR_POS), + label: "", + description: "", + }; + + let (tree, cache) = get_test_deps(setup, test_case.get_input_query()).await; + let params = get_test_params(&tree, &cache, test_case.get_input_query()); + let results = complete(params); + + assert!( + !results + .into_iter() + .any(|item| item.kind == CompletionItemKind::Column) + ); + } + #[tokio::test] async fn prefers_columns_of_mentioned_tables() { let setup = r#" diff --git a/crates/pgt_completions/src/providers/functions.rs b/crates/pgt_completions/src/providers/functions.rs index b4a9c35a..b44a5ef5 100644 --- a/crates/pgt_completions/src/providers/functions.rs +++ b/crates/pgt_completions/src/providers/functions.rs @@ -1,17 +1,21 @@ use crate::{ - CompletionItem, CompletionItemKind, builder::CompletionBuilder, context::CompletionContext, - relevance::CompletionRelevanceData, + CompletionItemKind, + builder::{CompletionBuilder, PossibleCompletionItem}, + context::CompletionContext, + relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore}, }; -pub fn complete_functions(ctx: &CompletionContext, builder: &mut CompletionBuilder) { +pub fn complete_functions<'a>(ctx: &'a CompletionContext, builder: &mut CompletionBuilder<'a>) { let available_functions = &ctx.schema_cache.functions; for func in available_functions { - let item = CompletionItem { + let relevance = CompletionRelevanceData::Function(func); + + let item = PossibleCompletionItem { label: func.name.clone(), - score: CompletionRelevanceData::Function(func).get_score(ctx), + score: CompletionScore::from(relevance.clone()), + filter: CompletionFilter::from(relevance), description: format!("Schema: {}", func.schema), - preselected: false, kind: CompletionItemKind::Function, }; diff --git a/crates/pgt_completions/src/providers/schemas.rs b/crates/pgt_completions/src/providers/schemas.rs index 6e86ab56..3d8f622e 100644 --- a/crates/pgt_completions/src/providers/schemas.rs +++ b/crates/pgt_completions/src/providers/schemas.rs @@ -1,20 +1,21 @@ use crate::{ - CompletionItem, builder::CompletionBuilder, context::CompletionContext, - relevance::CompletionRelevanceData, + builder::{CompletionBuilder, PossibleCompletionItem}, + context::CompletionContext, + relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore}, }; -pub fn complete_schemas(ctx: &CompletionContext, builder: &mut CompletionBuilder) { +pub fn complete_schemas<'a>(ctx: &'a CompletionContext, builder: &mut CompletionBuilder<'a>) { let available_schemas = &ctx.schema_cache.schemas; for schema in available_schemas { let relevance = CompletionRelevanceData::Schema(schema); - let item = CompletionItem { + let item = PossibleCompletionItem { label: schema.name.clone(), description: "Schema".into(), - preselected: false, kind: crate::CompletionItemKind::Schema, - score: relevance.get_score(ctx), + score: CompletionScore::from(relevance.clone()), + filter: CompletionFilter::from(relevance), }; builder.add_item(item); diff --git a/crates/pgt_completions/src/providers/tables.rs b/crates/pgt_completions/src/providers/tables.rs index 2074a4f1..fcc8fa00 
100644
--- a/crates/pgt_completions/src/providers/tables.rs
+++ b/crates/pgt_completions/src/providers/tables.rs
@@ -1,19 +1,21 @@
 use crate::{
-    builder::CompletionBuilder,
+    builder::{CompletionBuilder, PossibleCompletionItem},
     context::CompletionContext,
-    item::{CompletionItem, CompletionItemKind},
-    relevance::CompletionRelevanceData,
+    item::CompletionItemKind,
+    relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore},
 };
 
-pub fn complete_tables(ctx: &CompletionContext, builder: &mut CompletionBuilder) {
+pub fn complete_tables<'a>(ctx: &'a CompletionContext, builder: &mut CompletionBuilder<'a>) {
     let available_tables = &ctx.schema_cache.tables;
 
     for table in available_tables {
-        let item = CompletionItem {
+        let relevance = CompletionRelevanceData::Table(table);
+
+        let item = PossibleCompletionItem {
             label: table.name.clone(),
-            score: CompletionRelevanceData::Table(table).get_score(ctx),
+            score: CompletionScore::from(relevance.clone()),
+            filter: CompletionFilter::from(relevance),
             description: format!("Schema: {}", table.schema),
-            preselected: false,
             kind: CompletionItemKind::Table,
         };
 
diff --git a/crates/pgt_completions/src/relevance.rs b/crates/pgt_completions/src/relevance.rs
index 2abb9f2c..911a6433 100644
--- a/crates/pgt_completions/src/relevance.rs
+++ b/crates/pgt_completions/src/relevance.rs
@@ -1,192 +1,10 @@
-use crate::context::{ClauseType, CompletionContext, NodeText};
+pub(crate) mod filtering;
+pub(crate) mod scoring;
 
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub(crate) enum CompletionRelevanceData<'a> {
     Table(&'a pgt_schema_cache::Table),
     Function(&'a pgt_schema_cache::Function),
     Column(&'a pgt_schema_cache::Column),
     Schema(&'a pgt_schema_cache::Schema),
 }
-
-impl CompletionRelevanceData<'_> {
-    pub fn get_score(self, ctx: &CompletionContext) -> i32 {
-        CompletionRelevance::from(self).into_score(ctx)
-    }
-}
-
-impl<'a> From<CompletionRelevanceData<'a>> for CompletionRelevance<'a> {
-    fn from(value: CompletionRelevanceData<'a>) -> Self {
-        Self {
-            score: 0,
-            data: value,
-        }
-    }
-}
-
-#[derive(Debug)]
-pub(crate) struct CompletionRelevance<'a> {
-    score: i32,
-    data: CompletionRelevanceData<'a>,
-}
-
-impl CompletionRelevance<'_> {
-    pub fn into_score(mut self, ctx: &CompletionContext) -> i32 {
-        self.check_is_user_defined();
-        self.check_matches_schema(ctx);
-        self.check_matches_query_input(ctx);
-        self.check_is_invocation(ctx);
-        self.check_matching_clause_type(ctx);
-        self.check_relations_in_stmt(ctx);
-
-        self.score
-    }
-
-    fn check_matches_query_input(&mut self, ctx: &CompletionContext) {
-        let node = match ctx.node_under_cursor {
-            Some(node) => node,
-            None => return,
-        };
-
-        let content = match ctx.get_ts_node_content(node) {
-            Some(c) => match c {
-                NodeText::Original(s) => s,
-                NodeText::Replaced => return,
-            },
-            None => return,
-        };
-
-        let name = match self.data {
-            CompletionRelevanceData::Function(f) => f.name.as_str(),
-            CompletionRelevanceData::Table(t) => t.name.as_str(),
-            CompletionRelevanceData::Column(c) => c.name.as_str(),
-            CompletionRelevanceData::Schema(s) => s.name.as_str(),
-        };
-
-        if name.starts_with(content) {
-            let len: i32 = content
-                .len()
-                .try_into()
-                .expect("The length of the input exceeds i32 capacity");
-
-            self.score += len * 10;
-        };
-    }
-
-    fn check_matching_clause_type(&mut self, ctx: &CompletionContext) {
-        let clause_type = match ctx.wrapping_clause_type.as_ref() {
-            None => return,
-            Some(ct) => ct,
-        };
-
-        let has_mentioned_tables = !ctx.mentioned_relations.is_empty();
-
-        self.score += match self.data
{ - CompletionRelevanceData::Table(_) => match clause_type { - ClauseType::From => 5, - ClauseType::Update => 15, - ClauseType::Delete => 15, - _ => -50, - }, - CompletionRelevanceData::Function(_) => match clause_type { - ClauseType::Select if !has_mentioned_tables => 15, - ClauseType::Select if has_mentioned_tables => 0, - ClauseType::From => 0, - _ => -50, - }, - CompletionRelevanceData::Column(_) => match clause_type { - ClauseType::Select if has_mentioned_tables => 10, - ClauseType::Select if !has_mentioned_tables => 0, - ClauseType::Where => 10, - _ => -15, - }, - CompletionRelevanceData::Schema(_) => match clause_type { - ClauseType::From => 10, - _ => -50, - }, - } - } - - fn check_is_invocation(&mut self, ctx: &CompletionContext) { - self.score += match self.data { - CompletionRelevanceData::Function(_) if ctx.is_invocation => 30, - CompletionRelevanceData::Function(_) if !ctx.is_invocation => -10, - _ if ctx.is_invocation => -10, - _ => 0, - }; - } - - fn check_matches_schema(&mut self, ctx: &CompletionContext) { - let schema_name = match ctx.schema_name.as_ref() { - None => return, - Some(n) => n, - }; - - let data_schema = self.get_schema_name(); - - if schema_name == data_schema { - self.score += 25; - } else { - self.score -= 10; - } - } - - fn get_schema_name(&self) -> &str { - match self.data { - CompletionRelevanceData::Function(f) => f.schema.as_str(), - CompletionRelevanceData::Table(t) => t.schema.as_str(), - CompletionRelevanceData::Column(c) => c.schema_name.as_str(), - CompletionRelevanceData::Schema(s) => s.name.as_str(), - } - } - - fn get_table_name(&self) -> Option<&str> { - match self.data { - CompletionRelevanceData::Column(c) => Some(c.table_name.as_str()), - CompletionRelevanceData::Table(t) => Some(t.name.as_str()), - _ => None, - } - } - - fn check_relations_in_stmt(&mut self, ctx: &CompletionContext) { - match self.data { - CompletionRelevanceData::Table(_) | CompletionRelevanceData::Function(_) => return, - _ => {} - } - - let schema = self.get_schema_name().to_string(); - let table_name = match self.get_table_name() { - Some(t) => t, - None => return, - }; - - if ctx - .mentioned_relations - .get(&Some(schema.to_string())) - .is_some_and(|tables| tables.contains(table_name)) - { - self.score += 45; - } else if ctx - .mentioned_relations - .get(&None) - .is_some_and(|tables| tables.contains(table_name)) - { - self.score += 30; - } - } - - fn check_is_user_defined(&mut self) { - let schema = self.get_schema_name().to_string(); - - let system_schemas = ["pg_catalog", "information_schema", "pg_toast"]; - - if system_schemas.contains(&schema.as_str()) { - self.score -= 10; - } - - // "public" is the default postgres schema where users - // create objects. Prefer it by a slight bit. 
-        if schema.as_str() == "public" {
-            self.score += 2;
-        }
-    }
-}
diff --git a/crates/pgt_completions/src/relevance/filtering.rs b/crates/pgt_completions/src/relevance/filtering.rs
new file mode 100644
index 00000000..214fda56
--- /dev/null
+++ b/crates/pgt_completions/src/relevance/filtering.rs
@@ -0,0 +1,105 @@
+use crate::context::{ClauseType, CompletionContext};
+
+use super::CompletionRelevanceData;
+
+#[derive(Debug)]
+pub(crate) struct CompletionFilter<'a> {
+    data: CompletionRelevanceData<'a>,
+}
+
+impl<'a> From<CompletionRelevanceData<'a>> for CompletionFilter<'a> {
+    fn from(value: CompletionRelevanceData<'a>) -> Self {
+        Self { data: value }
+    }
+}
+
+impl CompletionFilter<'_> {
+    pub fn is_relevant(&self, ctx: &CompletionContext) -> Option<()> {
+        self.completable_context(ctx)?;
+        self.check_clause(ctx)?;
+        self.check_invocation(ctx)?;
+        self.check_mentioned_schema(ctx)?;
+
+        Some(())
+    }
+
+    fn completable_context(&self, ctx: &CompletionContext) -> Option<()> {
+        let current_node_kind = ctx.node_under_cursor.map(|n| n.kind()).unwrap_or("");
+
+        if current_node_kind.starts_with("keyword_")
+            || current_node_kind == "="
+            || current_node_kind == ","
+            || current_node_kind == "literal"
+            || current_node_kind == "ERROR"
+        {
+            return None;
+        }
+
+        Some(())
+    }
+
+    fn check_clause(&self, ctx: &CompletionContext) -> Option<()> {
+        let clause = ctx.wrapping_clause_type.as_ref();
+
+        match self.data {
+            CompletionRelevanceData::Table(_) => {
+                let in_select_clause = clause.is_some_and(|c| c == &ClauseType::Select);
+                let in_where_clause = clause.is_some_and(|c| c == &ClauseType::Where);
+
+                if in_select_clause || in_where_clause {
+                    return None;
+                };
+            }
+            CompletionRelevanceData::Column(_) => {
+                let in_from_clause = clause.is_some_and(|c| c == &ClauseType::From);
+
+                if in_from_clause {
+                    return None;
+                }
+            }
+            _ => {}
+        }
+
+        Some(())
+    }
+
+    fn check_invocation(&self, ctx: &CompletionContext) -> Option<()> {
+        if !ctx.is_invocation {
+            return Some(());
+        }
+
+        match self.data {
+            CompletionRelevanceData::Table(_) | CompletionRelevanceData::Column(_) => return None,
+            _ => {}
+        }
+
+        Some(())
+    }
+
+    fn check_mentioned_schema(&self, ctx: &CompletionContext) -> Option<()> {
+        if ctx.schema_name.is_none() {
+            return Some(());
+        }
+
+        let name = ctx.schema_name.as_ref().unwrap();
+
+        let does_not_match = match self.data {
+            CompletionRelevanceData::Table(table) => &table.schema != name,
+            CompletionRelevanceData::Function(f) => &f.schema != name,
+            CompletionRelevanceData::Column(_) => {
+                // columns belong to tables, not schemas
+                true
+            }
+            CompletionRelevanceData::Schema(_) => {
+                // we should never allow schema suggestions if there already was one.
+                true
+            }
+        };
+
+        if does_not_match {
+            return None;
+        }
+
+        Some(())
+    }
+}
diff --git a/crates/pgt_completions/src/relevance/scoring.rs b/crates/pgt_completions/src/relevance/scoring.rs
new file mode 100644
index 00000000..7c3f3a06
--- /dev/null
+++ b/crates/pgt_completions/src/relevance/scoring.rs
@@ -0,0 +1,182 @@
+use crate::context::{ClauseType, CompletionContext, NodeText};
+
+use super::CompletionRelevanceData;
+
+#[derive(Debug)]
+pub(crate) struct CompletionScore<'a> {
+    score: i32,
+    data: CompletionRelevanceData<'a>,
+}
+
+impl<'a> From<CompletionRelevanceData<'a>> for CompletionScore<'a> {
+    fn from(value: CompletionRelevanceData<'a>) -> Self {
+        Self {
+            score: 0,
+            data: value,
+        }
+    }
+}
+
+impl CompletionScore<'_> {
+    pub fn get_score(&self) -> i32 {
+        self.score
+    }
+
+    pub fn calc_score(&mut self, ctx: &CompletionContext) {
+        self.check_is_user_defined();
+        self.check_matches_schema(ctx);
+        self.check_matches_query_input(ctx);
+        self.check_is_invocation(ctx);
+        self.check_matching_clause_type(ctx);
+        self.check_relations_in_stmt(ctx);
+    }
+
+    fn check_matches_query_input(&mut self, ctx: &CompletionContext) {
+        let node = match ctx.node_under_cursor {
+            Some(node) => node,
+            None => return,
+        };
+
+        let content = match ctx.get_ts_node_content(node) {
+            Some(c) => match c {
+                NodeText::Original(s) => s,
+                NodeText::Replaced => return,
+            },
+            None => return,
+        };
+
+        let name = match self.data {
+            CompletionRelevanceData::Function(f) => f.name.as_str(),
+            CompletionRelevanceData::Table(t) => t.name.as_str(),
+            CompletionRelevanceData::Column(c) => c.name.as_str(),
+            CompletionRelevanceData::Schema(s) => s.name.as_str(),
+        };
+
+        if name.starts_with(content) {
+            let len: i32 = content
+                .len()
+                .try_into()
+                .expect("The length of the input exceeds i32 capacity");
+
+            self.score += len * 10;
+        };
+    }
+
+    fn check_matching_clause_type(&mut self, ctx: &CompletionContext) {
+        let clause_type = match ctx.wrapping_clause_type.as_ref() {
+            None => return,
+            Some(ct) => ct,
+        };
+
+        let has_mentioned_tables = !ctx.mentioned_relations.is_empty();
+
+        self.score += match self.data {
+            CompletionRelevanceData::Table(_) => match clause_type {
+                ClauseType::From => 5,
+                ClauseType::Update => 15,
+                ClauseType::Delete => 15,
+                _ => -50,
+            },
+            CompletionRelevanceData::Function(_) => match clause_type {
+                ClauseType::Select if !has_mentioned_tables => 15,
+                ClauseType::Select if has_mentioned_tables => 0,
+                ClauseType::From => 0,
+                _ => -50,
+            },
+            CompletionRelevanceData::Column(_) => match clause_type {
+                ClauseType::Select if has_mentioned_tables => 10,
+                ClauseType::Select if !has_mentioned_tables => 0,
+                ClauseType::Where => 10,
+                _ => -15,
+            },
+            CompletionRelevanceData::Schema(_) => match clause_type {
+                ClauseType::From => 10,
+                _ => -50,
+            },
+        }
+    }
+
+    fn check_is_invocation(&mut self, ctx: &CompletionContext) {
+        self.score += match self.data {
+            CompletionRelevanceData::Function(_) if ctx.is_invocation => 30,
+            CompletionRelevanceData::Function(_) if !ctx.is_invocation => -10,
+            _ if ctx.is_invocation => -10,
+            _ => 0,
+        };
+    }
+
+    fn check_matches_schema(&mut self, ctx: &CompletionContext) {
+        let schema_name = match ctx.schema_name.as_ref() {
+            None => return,
+            Some(n) => n,
+        };
+
+        let data_schema = self.get_schema_name();
+
+        if schema_name == data_schema {
+            self.score += 25;
+        } else {
+            self.score -= 10;
+        }
+    }
+
+    fn get_schema_name(&self) -> &str {
+        match self.data {
+            CompletionRelevanceData::Function(f) => f.schema.as_str(),
+            CompletionRelevanceData::Table(t) => t.schema.as_str(),
CompletionRelevanceData::Column(c) => c.schema_name.as_str(), + CompletionRelevanceData::Schema(s) => s.name.as_str(), + } + } + + fn get_table_name(&self) -> Option<&str> { + match self.data { + CompletionRelevanceData::Column(c) => Some(c.table_name.as_str()), + CompletionRelevanceData::Table(t) => Some(t.name.as_str()), + _ => None, + } + } + + fn check_relations_in_stmt(&mut self, ctx: &CompletionContext) { + match self.data { + CompletionRelevanceData::Table(_) | CompletionRelevanceData::Function(_) => return, + _ => {} + } + + let schema = self.get_schema_name().to_string(); + let table_name = match self.get_table_name() { + Some(t) => t, + None => return, + }; + + if ctx + .mentioned_relations + .get(&Some(schema.to_string())) + .is_some_and(|tables| tables.contains(table_name)) + { + self.score += 45; + } else if ctx + .mentioned_relations + .get(&None) + .is_some_and(|tables| tables.contains(table_name)) + { + self.score += 30; + } + } + + fn check_is_user_defined(&mut self) { + let schema = self.get_schema_name().to_string(); + + let system_schemas = ["pg_catalog", "information_schema", "pg_toast"]; + + if system_schemas.contains(&schema.as_str()) { + self.score -= 10; + } + + // "public" is the default postgres schema where users + // create objects. Prefer it by a slight bit. + if schema.as_str() == "public" { + self.score += 2; + } + } +} diff --git a/crates/pgt_completions/src/test_helper.rs b/crates/pgt_completions/src/test_helper.rs index 4edf486f..fc2cf403 100644 --- a/crates/pgt_completions/src/test_helper.rs +++ b/crates/pgt_completions/src/test_helper.rs @@ -6,6 +6,7 @@ use crate::CompletionParams; pub static CURSOR_POS: char = '€'; +#[derive(Clone)] pub struct InputQuery { sql: String, position: usize, @@ -55,6 +56,31 @@ pub(crate) async fn get_test_deps( (tree, schema_cache) } +/// Careful: This will connect against the passed database. +/// Use this only to debug issues. Do not commit to version control. +#[allow(dead_code)] +pub(crate) async fn test_against_connection_string( + conn_str: &str, + input: InputQuery, +) -> (tree_sitter::Tree, pgt_schema_cache::SchemaCache) { + let pool = sqlx::PgPool::connect(conn_str) + .await + .expect("Unable to connect to database."); + + let schema_cache = SchemaCache::load(&pool) + .await + .expect("Failed to load Schema Cache"); + + let mut parser = tree_sitter::Parser::new(); + parser + .set_language(tree_sitter_sql::language()) + .expect("Error loading sql language"); + + let tree = parser.parse(input.to_string(), None).unwrap(); + + (tree, schema_cache) +} + pub(crate) fn get_text_and_position(q: InputQuery) -> (usize, String) { (q.position, q.sql) } From 4618af1606b140bc04b195848eeb8789e1d33a7b Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Mon, 21 Apr 2025 19:47:32 +0200 Subject: [PATCH 033/114] fix(completions): sort items by relevance on client (#362) --- crates/pgt_completions/src/builder.rs | 12 +++++++++++- crates/pgt_completions/src/item.rs | 18 +++++++++++++++++- crates/pgt_lsp/src/handlers/completions.rs | 3 ++- 3 files changed, 30 insertions(+), 3 deletions(-) diff --git a/crates/pgt_completions/src/builder.rs b/crates/pgt_completions/src/builder.rs index 45c36d3b..40db6e1a 100644 --- a/crates/pgt_completions/src/builder.rs +++ b/crates/pgt_completions/src/builder.rs @@ -50,6 +50,14 @@ impl<'a> CompletionBuilder<'a> { let should_preselect_first_item = should_preselect_first_item(&items); + /* + * LSP Clients themselves sort the completion items. 
+ * They'll use the `sort_text` property if present (or fallback to the `label`). + * Since our items are already sorted, we're 'hijacking' the sort_text. + * We're simply adding the index of the item, padded by zeroes to the max length. + */ + let max_padding = items.len().to_string().len(); + items .into_iter() .enumerate() @@ -61,7 +69,9 @@ impl<'a> CompletionBuilder<'a> { kind: item.kind, label: item.label, preselected, - score: item.score.get_score(), + + // wonderous Rust syntax ftw + sort_text: format!("{:0>padding$}", idx, padding = max_padding), } }) .collect() diff --git a/crates/pgt_completions/src/item.rs b/crates/pgt_completions/src/item.rs index 1f306d78..2af853c0 100644 --- a/crates/pgt_completions/src/item.rs +++ b/crates/pgt_completions/src/item.rs @@ -1,3 +1,5 @@ +use std::fmt::Display; + use serde::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -10,12 +12,26 @@ pub enum CompletionItemKind { Schema, } +impl Display for CompletionItemKind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let txt = match self { + CompletionItemKind::Table => "Table", + CompletionItemKind::Function => "Function", + CompletionItemKind::Column => "Column", + CompletionItemKind::Schema => "Schema", + }; + + write!(f, "{txt}") + } +} + #[derive(Debug, Serialize, Deserialize)] #[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] pub struct CompletionItem { pub label: String, - pub(crate) score: i32, pub description: String, pub preselected: bool, pub kind: CompletionItemKind, + /// String used for sorting by LSP clients. + pub sort_text: String, } diff --git a/crates/pgt_lsp/src/handlers/completions.rs b/crates/pgt_lsp/src/handlers/completions.rs index f9b68e7d..9dd547f9 100644 --- a/crates/pgt_lsp/src/handlers/completions.rs +++ b/crates/pgt_lsp/src/handlers/completions.rs @@ -32,9 +32,10 @@ pub fn get_completions( label: i.label, label_details: Some(CompletionItemLabelDetails { description: Some(i.description), - detail: None, + detail: Some(format!(" {}", i.kind)), }), preselect: Some(i.preselected), + sort_text: Some(i.sort_text), kind: Some(to_lsp_types_completion_item_kind(i.kind)), ..CompletionItem::default() }) From a6972a6ba4c1bf1f091fef52f9a14bb2d61196df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Tue, 22 Apr 2025 09:16:57 +0200 Subject: [PATCH 034/114] fix: properly parse when there is no affected (#356) --- .../src/workspace/server/change.rs | 88 ++++++++++++++++--- 1 file changed, 76 insertions(+), 12 deletions(-) diff --git a/crates/pgt_workspace/src/workspace/server/change.rs b/crates/pgt_workspace/src/workspace/server/change.rs index 039c42db..db743b36 100644 --- a/crates/pgt_workspace/src/workspace/server/change.rs +++ b/crates/pgt_workspace/src/workspace/server/change.rs @@ -137,18 +137,6 @@ impl Document { diff_size: TextSize, is_addition: bool, ) -> Affected { - // special case: no previous statements -> always full range - if self.positions.is_empty() { - let full_range = TextRange::new(0.into(), content_size); - return Affected { - affected_range: full_range, - affected_indices: Vec::new(), - prev_index: None, - next_index: None, - full_affected_range: full_range, - }; - } - let mut start = change_range.start(); let mut end = change_range.end().min(content_size); @@ -171,6 +159,16 @@ impl Document { } } + if affected_indices.is_empty() && prev_index.is_none() { + // if there is no prev_index and no intersection -> use 0 + start = 0.into(); + } + + if 
affected_indices.is_empty() && next_index.is_none() {
+            // if there is no next_index and no intersection -> use content_size
+            end = content_size;
+        }
+
         let first_affected_stmt_start = prev_index
             .map(|i| self.positions[i].1.start())
             .unwrap_or(start);
@@ -460,6 +458,72 @@
         assert!(d.has_fatal_error());
     }
 
+    #[test]
+    fn comments_at_begin() {
+        let path = PgTPath::new("test.sql");
+        let input = "\nselect id from users;\n";
+
+        let mut d = Document::new(input.to_string(), 0);
+
+        let change1 = ChangeFileParams {
+            path: path.clone(),
+            version: 1,
+            changes: vec![ChangeParams {
+                text: "-".to_string(),
+                range: Some(TextRange::new(0.into(), 0.into())),
+            }],
+        };
+
+        let _changed1 = d.apply_file_change(&change1);
+
+        assert_eq!(d.content, "-\nselect id from users;\n");
+        assert_eq!(d.positions.len(), 2);
+
+        let change2 = ChangeFileParams {
+            path: path.clone(),
+            version: 2,
+            changes: vec![ChangeParams {
+                text: "-".to_string(),
+                range: Some(TextRange::new(1.into(), 1.into())),
+            }],
+        };
+
+        let _changed2 = d.apply_file_change(&change2);
+
+        assert_eq!(d.content, "--\nselect id from users;\n");
+        assert_eq!(d.positions.len(), 1);
+
+        let change3 = ChangeFileParams {
+            path: path.clone(),
+            version: 3,
+            changes: vec![ChangeParams {
+                text: " ".to_string(),
+                range: Some(TextRange::new(2.into(), 2.into())),
+            }],
+        };
+
+        let _changed3 = d.apply_file_change(&change3);
+
+        assert_eq!(d.content, "-- \nselect id from users;\n");
+        assert_eq!(d.positions.len(), 1);
+
+        let change4 = ChangeFileParams {
+            path: path.clone(),
+            version: 3,
+            changes: vec![ChangeParams {
+                text: "t".to_string(),
+                range: Some(TextRange::new(3.into(), 3.into())),
+            }],
+        };
+
+        let _changed4 = d.apply_file_change(&change4);
+
+        assert_eq!(d.content, "-- t\nselect id from users;\n");
+        assert_eq!(d.positions.len(), 1);
+
+        assert_document_integrity(&d);
+    }
+
     #[test]
     fn typing_comments() {
         let path = PgTPath::new("test.sql");
From dfd40e7a853960594c46d3b9ebab439b5ddfe623 Mon Sep 17 00:00:00 2001
From: Julian Domke <68325451+juleswritescode@users.noreply.github.com>
Date: Wed, 23 Apr 2025 12:51:14 +0200
Subject: [PATCH 035/114] feat(completions): insert schema name when selecting
 non-public tables

---
 crates/pgt_completions/src/builder.rs         |  4 ++-
 crates/pgt_completions/src/context.rs         |  9 +------
 crates/pgt_completions/src/item.rs            | 18 +++++++++++++
 .../pgt_completions/src/providers/columns.rs  |  1 +
 .../src/providers/functions.rs                |  3 +++
 .../pgt_completions/src/providers/helper.rs   | 27 +++++++++++++++++++
 crates/pgt_completions/src/providers/mod.rs   |  1 +
 .../pgt_completions/src/providers/schemas.rs  |  1 +
 .../pgt_completions/src/providers/tables.rs   |  3 +++
 crates/pgt_lsp/src/handlers/completions.rs    | 19 ++++++++++---
 10 files changed, 74 insertions(+), 12 deletions(-)
 create mode 100644 crates/pgt_completions/src/providers/helper.rs

diff --git a/crates/pgt_completions/src/builder.rs b/crates/pgt_completions/src/builder.rs
index 40db6e1a..127a3405 100644
--- a/crates/pgt_completions/src/builder.rs
+++ b/crates/pgt_completions/src/builder.rs
@@ -1,5 +1,5 @@
 use crate::{
-    CompletionItemKind,
+    CompletionItemKind, CompletionText,
     context::CompletionContext,
     item::CompletionItem,
     relevance::{filtering::CompletionFilter, scoring::CompletionScore},
@@ -11,6 +11,7 @@ pub(crate) struct PossibleCompletionItem<'a> {
     pub kind: CompletionItemKind,
     pub score: CompletionScore<'a>,
     pub filter: CompletionFilter<'a>,
+    pub completion_text: Option<CompletionText>,
 }
 
 pub(crate) struct CompletionBuilder<'a> {
@@ -72,6 +73,7 @@ impl<'a>
CompletionBuilder<'a> { // wonderous Rust syntax ftw sort_text: format!("{:0>padding$}", idx, padding = max_padding), + completion_text: item.completion_text, } }) .collect() diff --git a/crates/pgt_completions/src/context.rs b/crates/pgt_completions/src/context.rs index a4578df8..6005e07b 100644 --- a/crates/pgt_completions/src/context.rs +++ b/crates/pgt_completions/src/context.rs @@ -209,14 +209,7 @@ impl<'a> CompletionContext<'a> { // We have arrived at the leaf node if current_node.child_count() == 0 { - if matches!( - self.get_ts_node_content(current_node).unwrap(), - NodeText::Replaced - ) { - self.node_under_cursor = None; - } else { - self.node_under_cursor = Some(current_node); - } + self.node_under_cursor = Some(current_node); return; } diff --git a/crates/pgt_completions/src/item.rs b/crates/pgt_completions/src/item.rs index 2af853c0..f37d0efb 100644 --- a/crates/pgt_completions/src/item.rs +++ b/crates/pgt_completions/src/item.rs @@ -1,5 +1,6 @@ use std::fmt::Display; +use pgt_text_size::TextRange; use serde::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -25,6 +26,21 @@ impl Display for CompletionItemKind { } } +#[derive(Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] +/// The text that the editor should fill in. +/// If `None`, the `label` should be used. +/// Tables, for example, might have different completion_texts: +/// +/// label: "users", description: "Schema: auth", completion_text: "auth.users". +pub struct CompletionText { + pub text: String, + /// A `range` is required because some editors replace the current token, + /// others naively insert the text. + /// Having a range where start == end makes it an insertion. + pub range: TextRange, +} + #[derive(Debug, Serialize, Deserialize)] #[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] pub struct CompletionItem { @@ -34,4 +50,6 @@ pub struct CompletionItem { pub kind: CompletionItemKind, /// String used for sorting by LSP clients. 
     pub sort_text: String,
+
+    pub completion_text: Option<CompletionText>,
 }
diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs
index e8a51e48..6ac3c989 100644
--- a/crates/pgt_completions/src/providers/columns.rs
+++ b/crates/pgt_completions/src/providers/columns.rs
@@ -17,6 +17,7 @@ pub fn complete_columns<'a>(ctx: &CompletionContext<'a>, builder: &mut Completio
             filter: CompletionFilter::from(relevance),
             description: format!("Table: {}.{}", col.schema_name, col.table_name),
             kind: CompletionItemKind::Column,
+            completion_text: None,
         };
 
         builder.add_item(item);
diff --git a/crates/pgt_completions/src/providers/functions.rs b/crates/pgt_completions/src/providers/functions.rs
index b44a5ef5..4241da92 100644
--- a/crates/pgt_completions/src/providers/functions.rs
+++ b/crates/pgt_completions/src/providers/functions.rs
@@ -5,6 +5,8 @@ use crate::{
     relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore},
 };
 
+use super::helper::get_completion_text_with_schema;
+
 pub fn complete_functions<'a>(ctx: &'a CompletionContext, builder: &mut CompletionBuilder<'a>) {
     let available_functions = &ctx.schema_cache.functions;
 
@@ -17,6 +19,7 @@
             filter: CompletionFilter::from(relevance),
             description: format!("Schema: {}", func.schema),
             kind: CompletionItemKind::Function,
+            completion_text: get_completion_text_with_schema(ctx, &func.name, &func.schema),
         };
 
         builder.add_item(item);
diff --git a/crates/pgt_completions/src/providers/helper.rs b/crates/pgt_completions/src/providers/helper.rs
new file mode 100644
index 00000000..274ded20
--- /dev/null
+++ b/crates/pgt_completions/src/providers/helper.rs
@@ -0,0 +1,27 @@
+use pgt_text_size::{TextRange, TextSize};
+
+use crate::{CompletionText, context::CompletionContext};
+
+pub(crate) fn get_completion_text_with_schema(
+    ctx: &CompletionContext,
+    item_name: &str,
+    item_schema_name: &str,
+) -> Option<CompletionText> {
+    if item_schema_name == "public" {
+        None
+    } else if ctx.schema_name.is_some() {
+        None
+    } else {
+        let node = ctx.node_under_cursor.unwrap();
+
+        let range = TextRange::new(
+            TextSize::try_from(node.start_byte()).unwrap(),
+            TextSize::try_from(node.end_byte()).unwrap(),
+        );
+
+        Some(CompletionText {
+            text: format!("{}.{}", item_schema_name, item_name),
+            range,
+        })
+    }
+}
diff --git a/crates/pgt_completions/src/providers/mod.rs b/crates/pgt_completions/src/providers/mod.rs
index d760fea0..82e32cdf 100644
--- a/crates/pgt_completions/src/providers/mod.rs
+++ b/crates/pgt_completions/src/providers/mod.rs
@@ -1,5 +1,6 @@
 mod columns;
 mod functions;
+mod helper;
 mod schemas;
 mod tables;
 
diff --git a/crates/pgt_completions/src/providers/schemas.rs b/crates/pgt_completions/src/providers/schemas.rs
index 3d8f622e..eb493d0c 100644
--- a/crates/pgt_completions/src/providers/schemas.rs
+++ b/crates/pgt_completions/src/providers/schemas.rs
@@ -16,6 +16,7 @@
             kind: crate::CompletionItemKind::Schema,
             score: CompletionScore::from(relevance.clone()),
             filter: CompletionFilter::from(relevance),
+            completion_text: None,
         };
 
         builder.add_item(item);
diff --git a/crates/pgt_completions/src/providers/tables.rs b/crates/pgt_completions/src/providers/tables.rs
index fcc8fa00..1da77e15 100644
--- a/crates/pgt_completions/src/providers/tables.rs
+++ b/crates/pgt_completions/src/providers/tables.rs
@@ -5,6 +5,8 @@
     relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore},
 };
 
+use super::helper::get_completion_text_with_schema;
+
 pub fn complete_tables<'a>(ctx: &'a CompletionContext, builder: &mut CompletionBuilder<'a>) {
     let available_tables = &ctx.schema_cache.tables;
 
@@ -17,6 +19,7 @@
             filter: CompletionFilter::from(relevance),
             description: format!("Schema: {}", table.schema),
             kind: CompletionItemKind::Table,
+            completion_text: get_completion_text_with_schema(ctx, &table.name, &table.schema),
         };
 
         builder.add_item(item);
diff --git a/crates/pgt_lsp/src/handlers/completions.rs b/crates/pgt_lsp/src/handlers/completions.rs
index 9dd547f9..e1a7508c 100644
--- a/crates/pgt_lsp/src/handlers/completions.rs
+++ b/crates/pgt_lsp/src/handlers/completions.rs
@@ -1,16 +1,23 @@
-use crate::{adapters::get_cursor_position, session::Session};
+use crate::{
+    adapters::{self, get_cursor_position},
+    diagnostics::LspError,
+    session::Session,
+};
 use anyhow::Result;
 use pgt_workspace::{WorkspaceError, features::completions::GetCompletionsParams};
-use tower_lsp::lsp_types::{self, CompletionItem, CompletionItemLabelDetails};
+use tower_lsp::lsp_types::{self, CompletionItem, CompletionItemLabelDetails, TextEdit};
 
 #[tracing::instrument(level = "debug", skip(session), err)]
 pub fn get_completions(
     session: &Session,
     params: lsp_types::CompletionParams,
-) -> Result<lsp_types::CompletionResponse> {
+) -> Result<lsp_types::CompletionResponse, LspError> {
     let url = params.text_document_position.text_document.uri;
     let path = session.file_path(&url)?;
 
+    let doc = session.document(&url)?;
+    let encoding = adapters::negotiated_encoding(session.client_capabilities().unwrap());
+
     let completion_result = match session.workspace.get_completions(GetCompletionsParams {
         path,
         position: get_cursor_position(session, &url, params.text_document_position.position)?,
@@ -36,6 +43,12 @@
                 }),
                 preselect: Some(i.preselected),
                 sort_text: Some(i.sort_text),
+                text_edit: i.completion_text.map(|c| {
+                    lsp_types::CompletionTextEdit::Edit(TextEdit {
+                        new_text: c.text,
+                        range: adapters::to_lsp::range(&doc.line_index, c.range, encoding).unwrap(),
+                    })
+                }),
                 kind: Some(to_lsp_types_completion_item_kind(i.kind)),
                 ..CompletionItem::default()
             })
From 55deb6456903ebbd61c6b33c74e0d1e280ebf813 Mon Sep 17 00:00:00 2001
From: Julian Domke <68325451+juleswritescode@users.noreply.github.com>
Date: Fri, 25 Apr 2025 13:13:39 +0200
Subject: [PATCH 036/114] chore: align ci with local dev (#374)

---
 .github/workflows/pull_request.yml            | 45 ++++++++++++-------
 Cargo.toml                                    |  2 +-
 .../pgt_completions/src/providers/helper.rs   |  4 +-
 crates/pgt_completions/src/sanitization.rs    | 36 +++++++--------
 crates/pgt_completions/src/test_helper.rs     |  8 ++--
 .../src/workspace/server/change.rs            |  6 +--
 .../src/workspace/server/document.rs          |  2 +-
 .../src/workspace/server/parsed_document.rs   |  8 ++--
 justfile                                      | 31 +++++++++++--
 rust-toolchain.toml                           |  6 +--
 10 files changed, 91 insertions(+), 57 deletions(-)

diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml
index e8393757..4600ac92 100644
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -33,8 +33,10 @@ jobs:
     steps:
       - name: Checkout PR branch
         uses: actions/checkout@v4
+
       - name: Free Disk Space
         uses: ./.github/actions/free-disk-space
+
       - name: Install toolchain
         uses: moonrepo/setup-rust@v1
         with:
@@ -43,15 +45,23 @@
           cache-base: main
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Setup Biome
-        uses: biomejs/setup-biome@v2
-        with:
-          version: latest
+
+      - name: Setup Bun
+        uses: oven-sh/setup-bun@v2
+
+      - name: Install JS dependencies
+        run: bun install
+
+      - name: Setup Just
+        uses: extractions/setup-just@v3
+
+      - name: Echo Tool Versions
+        run: |
+          just format-ci-versions
+
       - name: Run format
         run: |
-          cargo fmt --all --check
-          taplo format --check
-          biome format
+          just format-ci
 
   actionlint:
     name: Lint GitHub Actions
@@ -96,17 +106,22 @@
       - name: Setup sqlx-cli
         run: cargo install sqlx-cli
 
-      - name: Setup Biome
-        uses: biomejs/setup-biome@v2
-        with:
-          version: latest
+      - name: Setup Bun
+        uses: oven-sh/setup-bun@v2
+
+      - name: Install JS dependencies
+        run: bun install
+
+      - name: Setup Just
+        uses: extractions/setup-just@v3
+
+      - name: Echo Tool Versions
+        run: |
+          just lint-ci-versions
 
       - name: Run Lints
         run: |
-          cargo sqlx prepare --check --workspace
-          cargo clippy --fix
-          cargo run -p rules_check
-          biome lint --write
+          just lint-ci
 
       - name: Check for changes
         run: |
diff --git a/Cargo.toml b/Cargo.toml
index ef29bcaa..aaaa9035 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -10,7 +10,7 @@ homepage = "https://supabase.com/"
 keywords = ["linter", "typechecker", "postgres", "language-server"]
 license = "MIT"
 repository = "https://github.com/supabase-community/postgres-language-server"
-rust-version = "1.85.0"
+rust-version = "1.86.0"
 
 [workspace.dependencies]
 # supporting crates unrelated to postgres
diff --git a/crates/pgt_completions/src/providers/helper.rs b/crates/pgt_completions/src/providers/helper.rs
index 274ded20..2e4ef8a9 100644
--- a/crates/pgt_completions/src/providers/helper.rs
+++ b/crates/pgt_completions/src/providers/helper.rs
@@ -7,9 +7,7 @@ pub(crate) fn get_completion_text_with_schema(
     item_name: &str,
     item_schema_name: &str,
 ) -> Option<CompletionText> {
-    if item_schema_name == "public" {
-        None
-    } else if ctx.schema_name.is_some() {
+    if item_schema_name == "public" || ctx.schema_name.is_some() {
         None
     } else {
         let node = ctx.node_under_cursor.unwrap();
diff --git a/crates/pgt_completions/src/sanitization.rs b/crates/pgt_completions/src/sanitization.rs
index 59eb609f..248a0ffa 100644
--- a/crates/pgt_completions/src/sanitization.rs
+++ b/crates/pgt_completions/src/sanitization.rs
@@ -219,19 +219,19 @@
             .set_language(tree_sitter_sql::language())
             .expect("Error loading sql language");
 
-        let mut tree = parser.parse(input, None).unwrap();
+        let tree = parser.parse(input, None).unwrap();
 
         // select | from users; <-- just right, one space after select token, one space before from
-        assert!(cursor_inbetween_nodes(&mut tree, TextSize::new(7)));
+        assert!(cursor_inbetween_nodes(&tree, TextSize::new(7)));
 
         // select| from users; <-- still on select token
-        assert!(!cursor_inbetween_nodes(&mut tree, TextSize::new(6)));
+        assert!(!cursor_inbetween_nodes(&tree, TextSize::new(6)));
 
         // select |from users; <-- already on from token
-        assert!(!cursor_inbetween_nodes(&mut tree, TextSize::new(8)));
+        assert!(!cursor_inbetween_nodes(&tree, TextSize::new(8)));
 
         // select from users;|
-        assert!(!cursor_inbetween_nodes(&mut tree, TextSize::new(19)));
+        assert!(!cursor_inbetween_nodes(&tree, TextSize::new(19)));
     }
 
     #[test]
@@ -243,29 +243,29 @@
             .set_language(tree_sitter_sql::language())
             .expect("Error loading sql language");
 
-        let mut tree = parser.parse(input, None).unwrap();
+        let tree = parser.parse(input, None).unwrap();
 
         // select * from| <-- still on previous token
         assert!(!cursor_prepared_to_write_token_after_last_node(
-            &mut tree,
+            &tree,
             TextSize::new(13)
         ));
 
         // select *
from | <-- too far off, two spaces afterward assert!(!cursor_prepared_to_write_token_after_last_node( - &mut tree, + &tree, TextSize::new(15) )); // select * |from <-- it's within assert!(!cursor_prepared_to_write_token_after_last_node( - &mut tree, + &tree, TextSize::new(9) )); // select * from | <-- just right assert!(cursor_prepared_to_write_token_after_last_node( - &mut tree, + &tree, TextSize::new(14) )); } @@ -295,26 +295,26 @@ mod tests { .set_language(tree_sitter_sql::language()) .expect("Error loading sql language"); - let mut tree = parser.parse(input, None).unwrap(); + let tree = parser.parse(input, None).unwrap(); // select * from ;| <-- it's after the statement - assert!(!cursor_before_semicolon(&mut tree, TextSize::new(19))); + assert!(!cursor_before_semicolon(&tree, TextSize::new(19))); // select * from| ; <-- still touches the from - assert!(!cursor_before_semicolon(&mut tree, TextSize::new(13))); + assert!(!cursor_before_semicolon(&tree, TextSize::new(13))); // not okay to be ON the semi. // select * from |; - assert!(!cursor_before_semicolon(&mut tree, TextSize::new(18))); + assert!(!cursor_before_semicolon(&tree, TextSize::new(18))); // anything is fine here // select * from | ; // select * from | ; // select * from | ; // select * from |; - assert!(cursor_before_semicolon(&mut tree, TextSize::new(14))); - assert!(cursor_before_semicolon(&mut tree, TextSize::new(15))); - assert!(cursor_before_semicolon(&mut tree, TextSize::new(16))); - assert!(cursor_before_semicolon(&mut tree, TextSize::new(17))); + assert!(cursor_before_semicolon(&tree, TextSize::new(14))); + assert!(cursor_before_semicolon(&tree, TextSize::new(15))); + assert!(cursor_before_semicolon(&tree, TextSize::new(16))); + assert!(cursor_before_semicolon(&tree, TextSize::new(17))); } } diff --git a/crates/pgt_completions/src/test_helper.rs b/crates/pgt_completions/src/test_helper.rs index fc2cf403..b1c5b399 100644 --- a/crates/pgt_completions/src/test_helper.rs +++ b/crates/pgt_completions/src/test_helper.rs @@ -1,3 +1,5 @@ +use std::fmt::Display; + use pgt_schema_cache::SchemaCache; use pgt_test_utils::test_database::get_new_test_db; use sqlx::Executor; @@ -25,9 +27,9 @@ impl From<&str> for InputQuery { } } -impl ToString for InputQuery { - fn to_string(&self) -> String { - self.sql.clone() +impl Display for InputQuery { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.sql) } } diff --git a/crates/pgt_workspace/src/workspace/server/change.rs b/crates/pgt_workspace/src/workspace/server/change.rs index db743b36..6e86abcf 100644 --- a/crates/pgt_workspace/src/workspace/server/change.rs +++ b/crates/pgt_workspace/src/workspace/server/change.rs @@ -1470,7 +1470,7 @@ mod tests { assert_eq!(old_stmt_text, "select * from"); } - _ => assert!(false, "Did not yield a modified statement."), + _ => unreachable!("Did not yield a modified statement."), } assert_document_integrity(&doc); @@ -1516,7 +1516,7 @@ mod tests { assert_eq!(old_stmt_text, "select * from"); } - _ => assert!(false, "Did not yield a modified statement."), + _ => unreachable!("Did not yield a modified statement."), } assert_document_integrity(&doc); @@ -1559,7 +1559,7 @@ mod tests { assert_eq!(new_stmt_text, "select * from users"); } - _ => assert!(false, "Did not yield a modified statement."), + _ => unreachable!("Did not yield a modified statement."), } assert_document_integrity(&doc); diff --git a/crates/pgt_workspace/src/workspace/server/document.rs b/crates/pgt_workspace/src/workspace/server/document.rs 
index f2c500cc..67ed991c 100644 --- a/crates/pgt_workspace/src/workspace/server/document.rs +++ b/crates/pgt_workspace/src/workspace/server/document.rs @@ -43,7 +43,7 @@ impl Document { .any(|d| d.severity() == Severity::Fatal) } - pub fn iter<'a>(&'a self) -> StatementIterator<'a> { + pub fn iter(&self) -> StatementIterator<'_> { StatementIterator::new(self) } } diff --git a/crates/pgt_workspace/src/workspace/server/parsed_document.rs b/crates/pgt_workspace/src/workspace/server/parsed_document.rs index 01f18d3c..92f33926 100644 --- a/crates/pgt_workspace/src/workspace/server/parsed_document.rs +++ b/crates/pgt_workspace/src/workspace/server/parsed_document.rs @@ -353,7 +353,7 @@ impl<'a> StatementMapper<'a> for GetCompletionsMapper { pub struct GetCompletionsFilter { pub cursor_position: TextSize, } -impl<'a> StatementFilter<'a> for GetCompletionsFilter { +impl StatementFilter<'_> for GetCompletionsFilter { fn predicate(&self, _id: &StatementId, range: &TextRange, content: &str) -> bool { let is_terminated_by_semi = content.chars().last().is_some_and(|c| c == ';'); @@ -367,7 +367,7 @@ impl<'a> StatementFilter<'a> for GetCompletionsFilter { } pub struct NoFilter; -impl<'a> StatementFilter<'a> for NoFilter { +impl StatementFilter<'_> for NoFilter { fn predicate(&self, _id: &StatementId, _range: &TextRange, _content: &str) -> bool { true } @@ -383,7 +383,7 @@ impl CursorPositionFilter { } } -impl<'a> StatementFilter<'a> for CursorPositionFilter { +impl StatementFilter<'_> for CursorPositionFilter { fn predicate(&self, _id: &StatementId, range: &TextRange, _content: &str) -> bool { range.contains(self.pos) } @@ -399,7 +399,7 @@ impl IdFilter { } } -impl<'a> StatementFilter<'a> for IdFilter { +impl StatementFilter<'_> for IdFilter { fn predicate(&self, id: &StatementId, _range: &TextRange, _content: &str) -> bool { *id == self.id } diff --git a/justfile b/justfile index 3e7163b3..2495ae84 100644 --- a/justfile +++ b/justfile @@ -10,16 +10,15 @@ alias rg := reset-git # Installs the tools needed to develop install-tools: cargo install cargo-binstall - cargo binstall cargo-insta taplo-cli + cargo binstall cargo-insta taplo-cli sqlx-cli cargo binstall --git "https://github.com/astral-sh/uv" uv bun install # Upgrades the tools needed to develop upgrade-tools: cargo install cargo-binstall --force - cargo binstall cargo-insta taplo-cli --force + cargo binstall cargo-insta taplo-cli sqlx-cli --force cargo binstall --git "https://github.com/astral-sh/uv" uv --force - bun install # Generates code generated files for the linter gen-lint: @@ -41,6 +40,16 @@ format: taplo format bun biome format --write +format-ci: + cargo fmt --all --check + taplo format --check + bun biome format + +format-ci-versions: + cargo --version + taplo --version + echo "Biome $(bun biome --version)" + [unix] _touch file: touch {{file}} @@ -72,6 +81,20 @@ lint-fix: cargo run -p rules_check bun biome lint --write +lint-ci-versions: + rustc --version + rustup --version + cargo --version + cargo sqlx --version + cargo clippy --version + echo "Biome $(bun biome --version)" + +lint-ci: + cargo sqlx prepare --check --workspace + cargo clippy --fix + cargo run -p rules_check + bun biome lint --write + serve-docs: uv sync uv run mkdocs serve @@ -113,4 +136,4 @@ merge-main: # We recommend to install `bunyan` (npm i -g bunyan) and pipe the output through there for color-coding: # just show-logs | bunyan show-logs: - tail -f $(ls $PGT_LOG_PATH/server.log.* | sort -t- -k2,2 -k3,3 -k4,4 | tail -n 1) + tail -f $(ls $PGT_LOG_PATH/server.log.* | 
sort -t- -k2,2 -k3,3 -k4,4 | tail -n 1) \ No newline at end of file diff --git a/rust-toolchain.toml b/rust-toolchain.toml index ef2c880a..4501f2a1 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,7 +1,3 @@ [toolchain] -# The default profile includes rustc, rust-std, cargo, rust-docs, rustfmt and clippy. -# https://rust-lang.github.io/rustup/concepts/profiles.html profile = "default" -# For some reason, rustfmt is not included in the default profile. Add it here. -components = ["rustfmt"] -channel = "nightly" +channel = "1.86.0" From e2fae695dfe8c0cc914a6b5d654bc67d1a408074 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Fri, 25 Apr 2025 16:37:15 +0200 Subject: [PATCH 037/114] fix: timestamp with time zone split (#373) Co-authored-by: Julian --- crates/pgt_statement_splitter/src/lib.rs | 7 +++++++ crates/pgt_statement_splitter/src/parser/common.rs | 2 ++ 2 files changed, 9 insertions(+) diff --git a/crates/pgt_statement_splitter/src/lib.rs b/crates/pgt_statement_splitter/src/lib.rs index e5e995b7..5f6ca92f 100644 --- a/crates/pgt_statement_splitter/src/lib.rs +++ b/crates/pgt_statement_splitter/src/lib.rs @@ -85,6 +85,13 @@ mod tests { } } + #[test] + fn ts_with_timezone() { + Tester::from("alter table foo add column bar timestamp with time zone;").expect_statements( + vec!["alter table foo add column bar timestamp with time zone;"], + ); + } + #[test] fn failing_lexer() { let input = "select 1443ddwwd33djwdkjw13331333333333"; diff --git a/crates/pgt_statement_splitter/src/parser/common.rs b/crates/pgt_statement_splitter/src/parser/common.rs index ab3f8173..56d8d86a 100644 --- a/crates/pgt_statement_splitter/src/parser/common.rs +++ b/crates/pgt_statement_splitter/src/parser/common.rs @@ -249,6 +249,8 @@ pub(crate) fn unknown(p: &mut Parser, exclude: &[SyntaxKind]) { SyntaxKind::Ordinality, // WITH CHECK should not start a new statement SyntaxKind::Check, + // TIMESTAMP WITH TIME ZONE should not start a new statement + SyntaxKind::Time, ] .iter() .all(|x| Some(x) != next.as_ref()) From 2820bb5e27cfcf818e2fddf65a59e2977ab5b011 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Fri, 25 Apr 2025 16:37:27 +0200 Subject: [PATCH 038/114] fix: do not attempt db connection if jsonc section is missing (#375) * fix: do not attempt db connection if jsonc section is missing * comment * comment * whoopsndoopsn * whoopsndoopsn2 * whoopsndoopsn 3 * ok * ok * just readied * Update postgrestools.jsonc * ok * finally --- crates/pgt_cli/src/cli_options.rs | 4 --- crates/pgt_cli/src/commands/mod.rs | 1 - crates/pgt_configuration/src/database.rs | 8 ++++++ crates/pgt_configuration/src/lib.rs | 3 ++- crates/pgt_lsp/src/session.rs | 1 - crates/pgt_lsp/tests/server.rs | 5 ++-- crates/pgt_workspace/src/settings.rs | 11 ++++++++ crates/pgt_workspace/src/workspace.rs | 1 - crates/pgt_workspace/src/workspace/server.rs | 10 +++---- .../src/workspace/server/db_connection.rs | 5 ++++ docs/schemas/0.0.0/schema.json | 2 +- docs/schemas/latest/schema.json | 2 +- justfile | 2 ++ .../backend-jsonrpc/src/workspace.ts | 27 ++++++++++++++----- 14 files changed, 58 insertions(+), 24 deletions(-) diff --git a/crates/pgt_cli/src/cli_options.rs b/crates/pgt_cli/src/cli_options.rs index d1bfaee9..5c41c7fc 100644 --- a/crates/pgt_cli/src/cli_options.rs +++ b/crates/pgt_cli/src/cli_options.rs @@ -18,10 +18,6 @@ pub struct CliOptions { #[bpaf(long("use-server"), switch, fallback(false))] pub use_server: bool, - /// Skip connecting to the 
database and only run checks that don't require a database connection. - #[bpaf(long("skip-db"), switch, fallback(false))] - pub skip_db: bool, - /// Print additional diagnostics, and some diagnostics show more information. Also, print out what files were processed and which ones were modified. #[bpaf(long("verbose"), switch, fallback(false))] pub verbose: bool, diff --git a/crates/pgt_cli/src/commands/mod.rs b/crates/pgt_cli/src/commands/mod.rs index b166a033..ebd16e3d 100644 --- a/crates/pgt_cli/src/commands/mod.rs +++ b/crates/pgt_cli/src/commands/mod.rs @@ -307,7 +307,6 @@ pub(crate) trait CommandRunner: Sized { configuration, vcs_base_path, gitignore_matches, - skip_db: cli_options.skip_db, })?; let execution = self.get_execution(cli_options, console, workspace)?; diff --git a/crates/pgt_configuration/src/database.rs b/crates/pgt_configuration/src/database.rs index 209f86dc..39efb8d1 100644 --- a/crates/pgt_configuration/src/database.rs +++ b/crates/pgt_configuration/src/database.rs @@ -10,6 +10,8 @@ use serde::{Deserialize, Serialize}; #[partial(serde(rename_all = "camelCase", default, deny_unknown_fields))] pub struct DatabaseConfiguration { /// The host of the database. + /// Required if you want database-related features. + /// All else falls back to sensible defaults. #[partial(bpaf(long("host")))] pub host: String, @@ -35,11 +37,17 @@ pub struct DatabaseConfiguration { /// The connection timeout in seconds. #[partial(bpaf(long("conn_timeout_secs"), fallback(Some(10)), debug_fallback))] pub conn_timeout_secs: u16, + + /// Actively disable all database-related features. + #[partial(bpaf(long("disable-db"), switch, fallback(Some(false))))] + #[partial(cfg_attr(feature = "schema", schemars(skip)))] + pub disable_connection: bool, } impl Default for DatabaseConfiguration { fn default() -> Self { Self { + disable_connection: false, host: "127.0.0.1".to_string(), port: 5432, username: "postgres".to_string(), diff --git a/crates/pgt_configuration/src/lib.rs b/crates/pgt_configuration/src/lib.rs index f262450d..fcf0b5c6 100644 --- a/crates/pgt_configuration/src/lib.rs +++ b/crates/pgt_configuration/src/lib.rs @@ -110,8 +110,9 @@ impl PartialConfiguration { username: Some("postgres".to_string()), password: Some("postgres".to_string()), database: Some("postgres".to_string()), - conn_timeout_secs: Some(10), allow_statement_executions_against: Default::default(), + conn_timeout_secs: Some(10), + disable_connection: Some(false), }), } } diff --git a/crates/pgt_lsp/src/session.rs b/crates/pgt_lsp/src/session.rs index 64adf16a..db17dfd1 100644 --- a/crates/pgt_lsp/src/session.rs +++ b/crates/pgt_lsp/src/session.rs @@ -449,7 +449,6 @@ impl Session { configuration: fs_configuration, vcs_base_path, gitignore_matches, - skip_db: false, }); if let Err(error) = result { diff --git a/crates/pgt_lsp/tests/server.rs b/crates/pgt_lsp/tests/server.rs index 8e40c097..581ea1fe 100644 --- a/crates/pgt_lsp/tests/server.rs +++ b/crates/pgt_lsp/tests/server.rs @@ -773,14 +773,15 @@ async fn test_execute_statement() -> Result<()> { .to_string(); let host = test_db.connect_options().get_host().to_string(); - let conf = PartialConfiguration { + let mut conf = PartialConfiguration::init(); + conf.merge_with(PartialConfiguration { db: Some(PartialDatabaseConfiguration { database: Some(database), host: Some(host), ..Default::default() }), ..Default::default() - }; + }); fs.insert( url!("postgrestools.jsonc").to_file_path().unwrap(), diff --git a/crates/pgt_workspace/src/settings.rs 
b/crates/pgt_workspace/src/settings.rs
index d4ea462a..f9275aa9 100644
--- a/crates/pgt_workspace/src/settings.rs
+++ b/crates/pgt_workspace/src/settings.rs
@@ -268,6 +268,7 @@ impl Default for LinterSettings {
 /// Database settings for the entire workspace
 #[derive(Debug)]
 pub struct DatabaseSettings {
+    pub enable_connection: bool,
     pub host: String,
     pub port: u16,
     pub username: String,
@@ -280,6 +281,7 @@ impl Default for DatabaseSettings {
     fn default() -> Self {
         Self {
+            enable_connection: false,
             host: "127.0.0.1".to_string(),
             port: 5432,
             username: "postgres".to_string(),
@@ -295,6 +297,13 @@ impl From<PartialDatabaseConfiguration> for DatabaseSettings {
     fn from(value: PartialDatabaseConfiguration) -> Self {
         let d = DatabaseSettings::default();
 
+        // "host" is the minimum required setting for database features
+        // to be enabled.
+        let enable_connection = value
+            .host
+            .as_ref()
+            .is_some_and(|_| value.disable_connection.is_none_or(|disabled| !disabled));
+
         let database = value.database.unwrap_or(d.database);
         let host = value.host.unwrap_or(d.host);
 
@@ -312,6 +321,8 @@
             .unwrap_or(false);
 
         Self {
+            enable_connection,
+
             port: value.port.unwrap_or(d.port),
             username: value.username.unwrap_or(d.username),
             password: value.password.unwrap_or(d.password),
diff --git a/crates/pgt_workspace/src/workspace.rs b/crates/pgt_workspace/src/workspace.rs
index 54f7200b..873dd83e 100644
--- a/crates/pgt_workspace/src/workspace.rs
+++ b/crates/pgt_workspace/src/workspace.rs
@@ -73,7 +73,6 @@ pub struct UpdateSettingsParams {
     pub vcs_base_path: Option<PathBuf>,
     pub gitignore_matches: Vec<String>,
     pub workspace_directory: Option<PathBuf>,
-    pub skip_db: bool,
 }
 
 #[derive(Debug, serde::Serialize, serde::Deserialize)]
diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs
index 3bf540cc..3c14f352 100644
--- a/crates/pgt_workspace/src/workspace/server.rs
+++ b/crates/pgt_workspace/src/workspace/server.rs
@@ -168,12 +168,10 @@ impl Workspace for WorkspaceServer {
 
         tracing::info!("Updated settings in workspace");
 
-        if !params.skip_db {
-            self.connection
-                .write()
-                .unwrap()
-                .set_conn_settings(&self.settings().as_ref().db);
-        }
+        self.connection
+            .write()
+            .unwrap()
+            .set_conn_settings(&self.settings().as_ref().db);
 
         tracing::info!("Updated Db connection settings");
 
diff --git a/crates/pgt_workspace/src/workspace/server/db_connection.rs b/crates/pgt_workspace/src/workspace/server/db_connection.rs
index d1be3131..d002c0a2 100644
--- a/crates/pgt_workspace/src/workspace/server/db_connection.rs
+++ b/crates/pgt_workspace/src/workspace/server/db_connection.rs
@@ -16,6 +16,11 @@ impl DbConnection {
     }
 
     pub(crate) fn set_conn_settings(&mut self, settings: &DatabaseSettings) {
+        if !settings.enable_connection {
+            tracing::info!("Database connection disabled.");
+            return;
+        }
+
         let config = PgConnectOptions::new()
             .host(&settings.host)
             .port(settings.port)
diff --git a/docs/schemas/0.0.0/schema.json b/docs/schemas/0.0.0/schema.json
index 086adf3d..faba3b5c 100644
--- a/docs/schemas/0.0.0/schema.json
+++ b/docs/schemas/0.0.0/schema.json
@@ -100,7 +100,7 @@
         ]
       },
       "host": {
-        "description": "The host of the database.",
+        "description": "The host of the database. Required if you want database-related features.
All else falls back to sensible defaults.", "type": [ "string", "null" diff --git a/docs/schemas/latest/schema.json b/docs/schemas/latest/schema.json index 086adf3d..faba3b5c 100644 --- a/docs/schemas/latest/schema.json +++ b/docs/schemas/latest/schema.json @@ -100,7 +100,7 @@ ] }, "host": { - "description": "The host of the database.", + "description": "The host of the database. Required if you want database-related features. All else falls back to sensible defaults.", "type": [ "string", "null" diff --git a/justfile b/justfile index 2495ae84..a55207ae 100644 --- a/justfile +++ b/justfile @@ -100,6 +100,8 @@ serve-docs: uv run mkdocs serve # When you finished coding, run this command. Note that you should have already committed your changes. +# If you haven't run `sqlx prepare` at least once, you need to run `docker compose up` +# to lint the queries. ready: git diff --exit-code --quiet cargo run -p xtask_codegen -- configuration diff --git a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts index 5c9a1ac4..a35dad81 100644 --- a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts +++ b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts @@ -179,21 +179,36 @@ export interface GetCompletionsParams { */ position: TextSize; } -export interface CompletionResult { +export interface CompletionsResult { items: CompletionItem[]; } export interface CompletionItem { + completion_text?: CompletionText; description: string; kind: CompletionItemKind; label: string; preselected: boolean; - score: number; + /** + * String used for sorting by LSP clients. + */ + sort_text: string; +} +/** + * The text that the editor should fill in. If `None`, the `label` should be used. Tables, for example, might have different completion_texts: + +label: "users", description: "Schema: auth", completion_text: "auth.users". + */ +export interface CompletionText { + /** + * A `range` is required because some editors replace the current token, others naively insert the text. Having a range where start == end makes it an insertion. + */ + range: TextRange; + text: string; } -export type CompletionItemKind = "table" | "function" | "column"; +export type CompletionItemKind = "table" | "function" | "column" | "schema"; export interface UpdateSettingsParams { configuration: PartialConfiguration; gitignore_matches: string[]; - skip_db: boolean; vcs_base_path?: string; workspace_directory?: string; } @@ -240,7 +255,7 @@ export interface PartialDatabaseConfiguration { */ database?: string; /** - * The host of the database. + * The host of the database. Required if you want database-related features. All else falls back to sensible defaults. 
 	 */
 	host?: string;
 	/**
@@ -414,7 +429,7 @@ export interface Workspace {
 	pullDiagnostics(
 		params: PullDiagnosticsParams,
 	): Promise<PullDiagnosticsResult>;
-	getCompletions(params: GetCompletionsParams): Promise<CompletionResult>;
+	getCompletions(params: GetCompletionsParams): Promise<CompletionsResult>;
 	updateSettings(params: UpdateSettingsParams): Promise<void>;
 	openFile(params: OpenFileParams): Promise<void>;
 	changeFile(params: ChangeFileParams): Promise<void>;
From 7b27cdc614c673609b052eaeddad1ab5b3d3649c Mon Sep 17 00:00:00 2001
From: Julian Domke <68325451+juleswritescode@users.noreply.github.com>
Date: Fri, 25 Apr 2025 20:18:31 +0200
Subject: [PATCH 039/114] fix(completions): improved completion in
 delete/update clauses (#371)

* ok
* fixie fixie
* Update crates/pgt_completions/src/relevance/filtering.rs
* set to workspace cargo.toml
* OCD
* seems valid
* just some help
* fixies
* hope this works
* ok
* takin shape
* ok
* why
* format too
* this
---
 crates/pgt_cli/src/execute/mod.rs             |  9 +-
 crates/pgt_completions/src/context.rs         | 87 ++++++++++++++---
 .../pgt_completions/src/providers/schemas.rs  | 72 ++++++++++----
 .../pgt_completions/src/providers/tables.rs   | 97 ++++++++++++++++++-
 .../src/relevance/filtering.rs                | 10 ++
 .../pgt_completions/src/relevance/scoring.rs  | 59 ++++++++---
 crates/pgt_completions/src/test_helper.rs     | 47 ++++++++-
 .../src/features/code_actions.rs              |  1 -
 8 files changed, 323 insertions(+), 59 deletions(-)

diff --git a/crates/pgt_cli/src/execute/mod.rs b/crates/pgt_cli/src/execute/mod.rs
index 90a5bb98..6cb01ca7 100644
--- a/crates/pgt_cli/src/execute/mod.rs
+++ b/crates/pgt_cli/src/execute/mod.rs
@@ -76,12 +76,11 @@ pub enum TraversalMode {
     Dummy,
     /// This mode is enabled when running the command `check`
     Check {
-        /// The type of fixes that should be applied when analyzing a file.
-        ///
-        /// It's [None] if the `check` command is called without `--apply` or `--apply-suggested`
-        /// arguments.
+        // The type of fixes that should be applied when analyzing a file.
+        //
+        // It's [None] if the `check` command is called without `--apply` or `--apply-suggested`
+        // arguments.
         // fix_file_mode: Option<FixFileMode>,
-
         /// An optional tuple.
         /// 1. The virtual path to the file
         /// 2. The content of the file
diff --git a/crates/pgt_completions/src/context.rs b/crates/pgt_completions/src/context.rs
index a4578df8..b16fd21c 100644
--- a/crates/pgt_completions/src/context.rs
+++ b/crates/pgt_completions/src/context.rs
@@ -30,7 +30,7 @@ impl TryFrom<&str> for ClauseType {
         match value {
             "select" => Ok(Self::Select),
             "where" => Ok(Self::Where),
-            "from" | "keyword_from" => Ok(Self::From),
+            "from" => Ok(Self::From),
             "update" => Ok(Self::Update),
             "delete" => Ok(Self::Delete),
             _ => {
@@ -49,8 +49,52 @@ impl TryFrom<String> for ClauseType {
     type Error = String;
 
-    fn try_from(value: String) -> Result<Self, Self::Error> {
-        ClauseType::try_from(value.as_str())
+    fn try_from(value: String) -> Result<Self, Self::Error> {
+        Self::try_from(value.as_str())
+    }
+}
+
+/// We can map a few nodes, such as the "update" node, to actual SQL clauses.
+/// That gives us a lot of insight for completions.
+/// Other nodes, such as the "relation" node, gives us less but still
+/// relevant information.
+/// `WrappingNode` maps to such nodes.
+///
+/// Note: This is not the direct parent of the `node_under_cursor`, but the closest
+/// *relevant* parent.
+#[derive(Debug, PartialEq, Eq)] +pub enum WrappingNode { + Relation, + BinaryExpression, + Assignment, +} + +impl TryFrom<&str> for WrappingNode { + type Error = String; + + fn try_from(value: &str) -> Result { + match value { + "relation" => Ok(Self::Relation), + "assignment" => Ok(Self::Assignment), + "binary_expression" => Ok(Self::BinaryExpression), + _ => { + let message = format!("Unimplemented Relation: {}", value); + + // Err on tests, so we notice that we're lacking an implementation immediately. + if cfg!(test) { + panic!("{}", message); + } + + Err(message) + } + } + } +} + +impl TryFrom for WrappingNode { + type Error = String; + fn try_from(value: String) -> Result { + Self::try_from(value.as_str()) } } @@ -64,6 +108,9 @@ pub(crate) struct CompletionContext<'a> { pub schema_name: Option, pub wrapping_clause_type: Option, + + pub wrapping_node_kind: Option, + pub is_invocation: bool, pub wrapping_statement_range: Option, @@ -80,6 +127,7 @@ impl<'a> CompletionContext<'a> { node_under_cursor: None, schema_name: None, wrapping_clause_type: None, + wrapping_node_kind: None, wrapping_statement_range: None, is_invocation: false, mentioned_relations: HashMap::new(), @@ -133,6 +181,15 @@ impl<'a> CompletionContext<'a> { }) } + pub fn get_node_under_cursor_content(&self) -> Option { + self.node_under_cursor + .and_then(|n| self.get_ts_node_content(n)) + .and_then(|txt| match txt { + NodeText::Replaced => None, + NodeText::Original(c) => Some(c.to_string()), + }) + } + fn gather_tree_context(&mut self) { let mut cursor = self.tree.root_node().walk(); @@ -163,15 +220,18 @@ impl<'a> CompletionContext<'a> { ) { let current_node = cursor.node(); + let parent_node_kind = parent_node.kind(); + let current_node_kind = current_node.kind(); + // prevent infinite recursion – this can happen if we only have a PROGRAM node - if current_node.kind() == parent_node.kind() { + if current_node_kind == parent_node_kind { self.node_under_cursor = Some(current_node); return; } - match parent_node.kind() { + match parent_node_kind { "statement" | "subquery" => { - self.wrapping_clause_type = current_node.kind().try_into().ok(); + self.wrapping_clause_type = current_node_kind.try_into().ok(); self.wrapping_statement_range = Some(parent_node.range()); } "invocation" => self.is_invocation = true, @@ -179,7 +239,7 @@ impl<'a> CompletionContext<'a> { _ => {} } - match current_node.kind() { + match current_node_kind { "object_reference" => { let content = self.get_ts_node_content(current_node); if let Some(node_txt) = content { @@ -195,13 +255,12 @@ impl<'a> CompletionContext<'a> { } } - // in Treesitter, the Where clause is nested inside other clauses - "where" => { - self.wrapping_clause_type = "where".try_into().ok(); + "where" | "update" | "select" | "delete" | "from" => { + self.wrapping_clause_type = current_node_kind.try_into().ok(); } - "keyword_from" => { - self.wrapping_clause_type = "keyword_from".try_into().ok(); + "relation" | "binary_expression" | "assignment" => { + self.wrapping_node_kind = current_node_kind.try_into().ok(); } _ => {} @@ -406,10 +465,6 @@ mod tests { ctx.get_ts_node_content(node), Some(NodeText::Original("from")) ); - assert_eq!( - ctx.wrapping_clause_type, - Some(crate::context::ClauseType::From) - ); } #[test] diff --git a/crates/pgt_completions/src/providers/schemas.rs b/crates/pgt_completions/src/providers/schemas.rs index eb493d0c..c28f831e 100644 --- a/crates/pgt_completions/src/providers/schemas.rs +++ b/crates/pgt_completions/src/providers/schemas.rs @@ -27,8 +27,8 @@ pub 
fn complete_schemas<'a>(ctx: &'a CompletionContext, builder: &mut Completion mod tests { use crate::{ - CompletionItemKind, complete, - test_helper::{CURSOR_POS, get_test_deps, get_test_params}, + CompletionItemKind, + test_helper::{CURSOR_POS, CompletionAssertion, assert_complete_results}, }; #[tokio::test] @@ -46,27 +46,59 @@ mod tests { ); "#; - let query = format!("select * from {}", CURSOR_POS); + assert_complete_results( + format!("select * from {}", CURSOR_POS).as_str(), + vec![ + CompletionAssertion::LabelAndKind("public".to_string(), CompletionItemKind::Schema), + CompletionAssertion::LabelAndKind("auth".to_string(), CompletionItemKind::Schema), + CompletionAssertion::LabelAndKind( + "internal".to_string(), + CompletionItemKind::Schema, + ), + CompletionAssertion::LabelAndKind( + "private".to_string(), + CompletionItemKind::Schema, + ), + CompletionAssertion::LabelAndKind( + "information_schema".to_string(), + CompletionItemKind::Schema, + ), + CompletionAssertion::LabelAndKind( + "pg_catalog".to_string(), + CompletionItemKind::Schema, + ), + CompletionAssertion::LabelAndKind( + "pg_toast".to_string(), + CompletionItemKind::Schema, + ), + CompletionAssertion::LabelAndKind("users".to_string(), CompletionItemKind::Table), + ], + setup, + ) + .await; + } - let (tree, cache) = get_test_deps(setup, query.as_str().into()).await; - let params = get_test_params(&tree, &cache, query.as_str().into()); - let items = complete(params); + #[tokio::test] + async fn suggests_tables_and_schemas_with_matching_keys() { + let setup = r#" + create schema ultimate; - assert!(!items.is_empty()); + -- add a table to compete against schemas + create table users ( + id serial primary key, + name text, + password text + ); + "#; - assert_eq!( - items - .into_iter() - .take(5) - .map(|i| (i.label, i.kind)) - .collect::>(), + assert_complete_results( + format!("select * from u{}", CURSOR_POS).as_str(), vec![ - ("public".to_string(), CompletionItemKind::Schema), - ("auth".to_string(), CompletionItemKind::Schema), - ("internal".to_string(), CompletionItemKind::Schema), - ("private".to_string(), CompletionItemKind::Schema), - ("users".to_string(), CompletionItemKind::Table), - ] - ); + CompletionAssertion::LabelAndKind("users".into(), CompletionItemKind::Table), + CompletionAssertion::LabelAndKind("ultimate".into(), CompletionItemKind::Schema), + ], + setup, + ) + .await; } } diff --git a/crates/pgt_completions/src/providers/tables.rs b/crates/pgt_completions/src/providers/tables.rs index 1da77e15..f9f922d1 100644 --- a/crates/pgt_completions/src/providers/tables.rs +++ b/crates/pgt_completions/src/providers/tables.rs @@ -31,7 +31,10 @@ mod tests { use crate::{ CompletionItem, CompletionItemKind, complete, - test_helper::{CURSOR_POS, get_test_deps, get_test_params}, + test_helper::{ + CURSOR_POS, CompletionAssertion, assert_complete_results, assert_no_complete_results, + get_test_deps, get_test_params, + }, }; #[tokio::test] @@ -178,4 +181,96 @@ mod tests { assert_eq!(label, "coos"); assert_eq!(kind, CompletionItemKind::Table); } + + #[tokio::test] + async fn suggests_tables_in_update() { + let setup = r#" + create table coos ( + id serial primary key, + name text + ); + "#; + + assert_complete_results( + format!("update {}", CURSOR_POS).as_str(), + vec![CompletionAssertion::LabelAndKind( + "public".into(), + CompletionItemKind::Schema, + )], + setup, + ) + .await; + + assert_complete_results( + format!("update public.{}", CURSOR_POS).as_str(), + vec![CompletionAssertion::LabelAndKind( + "coos".into(), + 
CompletionItemKind::Table, + )], + setup, + ) + .await; + + assert_no_complete_results(format!("update public.coos {}", CURSOR_POS).as_str(), setup) + .await; + + assert_complete_results( + format!("update coos set {}", CURSOR_POS).as_str(), + vec![ + CompletionAssertion::Label("id".into()), + CompletionAssertion::Label("name".into()), + ], + setup, + ) + .await; + + assert_complete_results( + format!("update coos set name = 'cool' where {}", CURSOR_POS).as_str(), + vec![ + CompletionAssertion::Label("id".into()), + CompletionAssertion::Label("name".into()), + ], + setup, + ) + .await; + } + + #[tokio::test] + async fn suggests_tables_in_delete() { + let setup = r#" + create table coos ( + id serial primary key, + name text + ); + "#; + + assert_no_complete_results(format!("delete {}", CURSOR_POS).as_str(), setup).await; + + assert_complete_results( + format!("delete from {}", CURSOR_POS).as_str(), + vec![ + CompletionAssertion::LabelAndKind("public".into(), CompletionItemKind::Schema), + CompletionAssertion::LabelAndKind("coos".into(), CompletionItemKind::Table), + ], + setup, + ) + .await; + + assert_complete_results( + format!("delete from public.{}", CURSOR_POS).as_str(), + vec![CompletionAssertion::Label("coos".into())], + setup, + ) + .await; + + assert_complete_results( + format!("delete from public.coos where {}", CURSOR_POS).as_str(), + vec![ + CompletionAssertion::Label("id".into()), + CompletionAssertion::Label("name".into()), + ], + setup, + ) + .await; + } } diff --git a/crates/pgt_completions/src/relevance/filtering.rs b/crates/pgt_completions/src/relevance/filtering.rs index 214fda56..69939e0b 100644 --- a/crates/pgt_completions/src/relevance/filtering.rs +++ b/crates/pgt_completions/src/relevance/filtering.rs @@ -35,6 +35,16 @@ impl CompletionFilter<'_> { return None; } + // No autocompletions if there are two identifiers without a separator. 
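+        // Example: in `update public.coos <cursor>`, the cursor token is an identifier
+        // that directly follows the complete `public.coos` reference, so we bail.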
+ if ctx.node_under_cursor.is_some_and(|n| { + n.prev_sibling().is_some_and(|p| { + (p.kind() == "identifier" || p.kind() == "object_reference") + && n.kind() == "identifier" + }) + }) { + return None; + } + Some(()) } diff --git a/crates/pgt_completions/src/relevance/scoring.rs b/crates/pgt_completions/src/relevance/scoring.rs index 7c3f3a06..2ef8edb6 100644 --- a/crates/pgt_completions/src/relevance/scoring.rs +++ b/crates/pgt_completions/src/relevance/scoring.rs @@ -1,4 +1,4 @@ -use crate::context::{ClauseType, CompletionContext, NodeText}; +use crate::context::{ClauseType, CompletionContext, WrappingNode}; use super::CompletionRelevanceData; @@ -28,20 +28,13 @@ impl CompletionScore<'_> { self.check_matches_query_input(ctx); self.check_is_invocation(ctx); self.check_matching_clause_type(ctx); + self.check_matching_wrapping_node(ctx); self.check_relations_in_stmt(ctx); } fn check_matches_query_input(&mut self, ctx: &CompletionContext) { - let node = match ctx.node_under_cursor { - Some(node) => node, - None => return, - }; - - let content = match ctx.get_ts_node_content(node) { - Some(c) => match c { - NodeText::Original(s) => s, - NodeText::Replaced => return, - }, + let content = match ctx.get_node_under_cursor_content() { + Some(c) => c, None => return, }; @@ -52,7 +45,7 @@ impl CompletionScore<'_> { CompletionRelevanceData::Schema(s) => s.name.as_str(), }; - if name.starts_with(content) { + if name.starts_with(content.as_str()) { let len: i32 = content .len() .try_into() @@ -69,12 +62,13 @@ impl CompletionScore<'_> { }; let has_mentioned_tables = !ctx.mentioned_relations.is_empty(); + let has_mentioned_schema = ctx.schema_name.is_some(); self.score += match self.data { CompletionRelevanceData::Table(_) => match clause_type { ClauseType::From => 5, - ClauseType::Update => 15, - ClauseType::Delete => 15, + ClauseType::Update => 10, + ClauseType::Delete => 10, _ => -50, }, CompletionRelevanceData::Function(_) => match clause_type { @@ -90,7 +84,42 @@ impl CompletionScore<'_> { _ => -15, }, CompletionRelevanceData::Schema(_) => match clause_type { - ClauseType::From => 10, + ClauseType::From if !has_mentioned_schema => 15, + ClauseType::Update if !has_mentioned_schema => 15, + ClauseType::Delete if !has_mentioned_schema => 15, + _ => -50, + }, + } + } + + fn check_matching_wrapping_node(&mut self, ctx: &CompletionContext) { + let wrapping_node = match ctx.wrapping_node_kind.as_ref() { + None => return, + Some(wn) => wn, + }; + + let has_mentioned_schema = ctx.schema_name.is_some(); + let has_node_text = ctx.get_node_under_cursor_content().is_some(); + + self.score += match self.data { + CompletionRelevanceData::Table(_) => match wrapping_node { + WrappingNode::Relation if has_mentioned_schema => 15, + WrappingNode::Relation if !has_mentioned_schema => 10, + WrappingNode::BinaryExpression => 5, + _ => -50, + }, + CompletionRelevanceData::Function(_) => match wrapping_node { + WrappingNode::Relation => 10, + _ => -50, + }, + CompletionRelevanceData::Column(_) => match wrapping_node { + WrappingNode::BinaryExpression => 15, + WrappingNode::Assignment => 15, + _ => -15, + }, + CompletionRelevanceData::Schema(_) => match wrapping_node { + WrappingNode::Relation if !has_mentioned_schema && !has_node_text => 15, + WrappingNode::Relation if !has_mentioned_schema && has_node_text => 0, _ => -50, }, } diff --git a/crates/pgt_completions/src/test_helper.rs b/crates/pgt_completions/src/test_helper.rs index b1c5b399..5eb5f53f 100644 --- a/crates/pgt_completions/src/test_helper.rs +++ 
b/crates/pgt_completions/src/test_helper.rs @@ -4,7 +4,7 @@ use pgt_schema_cache::SchemaCache; use pgt_test_utils::test_database::get_new_test_db; use sqlx::Executor; -use crate::CompletionParams; +use crate::{CompletionItem, CompletionItemKind, CompletionParams, complete}; pub static CURSOR_POS: char = '€'; @@ -141,3 +141,48 @@ mod tests { } } } + +#[derive(Debug, PartialEq, Eq)] +pub(crate) enum CompletionAssertion { + Label(String), + LabelAndKind(String, CompletionItemKind), +} + +impl CompletionAssertion { + fn assert_eq(self, item: CompletionItem) { + match self { + CompletionAssertion::Label(label) => { + assert_eq!(item.label, label); + } + CompletionAssertion::LabelAndKind(label, kind) => { + assert_eq!(item.label, label); + assert_eq!(item.kind, kind); + } + } + } +} + +pub(crate) async fn assert_complete_results( + query: &str, + assertions: Vec, + setup: &str, +) { + let (tree, cache) = get_test_deps(setup, query.into()).await; + let params = get_test_params(&tree, &cache, query.into()); + let items = complete(params); + + assertions + .into_iter() + .zip(items.into_iter()) + .for_each(|(assertion, result)| { + assertion.assert_eq(result); + }); +} + +pub(crate) async fn assert_no_complete_results(query: &str, setup: &str) { + let (tree, cache) = get_test_deps(setup, query.into()).await; + let params = get_test_params(&tree, &cache, query.into()); + let items = complete(params); + + assert_eq!(items.len(), 0) +} diff --git a/crates/pgt_workspace/src/features/code_actions.rs b/crates/pgt_workspace/src/features/code_actions.rs index 5e3cd883..22223dd3 100644 --- a/crates/pgt_workspace/src/features/code_actions.rs +++ b/crates/pgt_workspace/src/features/code_actions.rs @@ -46,7 +46,6 @@ pub struct CommandAction { #[derive(Debug, serde::Serialize, serde::Deserialize, strum::EnumIter)] #[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] - pub enum CommandActionCategory { ExecuteStatement(StatementId), } From 63e5bed4483c271676a0e66359b067fff87a6042 Mon Sep 17 00:00:00 2001 From: Jen-Chieh Shen Date: Tue, 29 Apr 2025 13:21:16 +0800 Subject: [PATCH 040/114] docs: Mention Emacs client (#378) --- docs/index.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/index.md b/docs/index.md index 9bb80102..14490385 100644 --- a/docs/index.md +++ b/docs/index.md @@ -64,6 +64,10 @@ The language server is available on the [VSCode Marketplace](https://marketplace You will have to install `nvim-lspconfig`, and follow the [instructions](https://github.com/neovim/nvim-lspconfig/blob/master/doc/configs.md#postgres_lsp). +### Emacs + +The language client is available through [lsp-mode](https://github.com/emacs-lsp/lsp-mode). For more details, refer to their [manual page](https://emacs-lsp.github.io/lsp-mode/page/lsp-postgres/). + ### Zed The language server is available as an Extension. It's published from [this repo](https://github.com/LoamStudios/zed-postgres-language-server). 
From 331f8d1662f3b883d75467e5bedb76312efc65b6 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Wed, 30 Apr 2025 11:50:53 +0200 Subject: [PATCH 041/114] chore(schema_cache): query policies (#379) --- ...b0a5d9c628c6a2704f5523fb9ee45414350c7.json | 62 +++++ crates/pgt_schema_cache/src/lib.rs | 1 + crates/pgt_schema_cache/src/policies.rs | 225 ++++++++++++++++++ .../pgt_schema_cache/src/queries/policies.sql | 11 + crates/pgt_schema_cache/src/schema_cache.rs | 8 +- 5 files changed, 305 insertions(+), 2 deletions(-) create mode 100644 .sqlx/query-47bbad9dc2cec0231ef726790a9b0a5d9c628c6a2704f5523fb9ee45414350c7.json create mode 100644 crates/pgt_schema_cache/src/policies.rs create mode 100644 crates/pgt_schema_cache/src/queries/policies.sql diff --git a/.sqlx/query-47bbad9dc2cec0231ef726790a9b0a5d9c628c6a2704f5523fb9ee45414350c7.json b/.sqlx/query-47bbad9dc2cec0231ef726790a9b0a5d9c628c6a2704f5523fb9ee45414350c7.json new file mode 100644 index 00000000..01d857f8 --- /dev/null +++ b/.sqlx/query-47bbad9dc2cec0231ef726790a9b0a5d9c628c6a2704f5523fb9ee45414350c7.json @@ -0,0 +1,62 @@ +{ + "db_name": "PostgreSQL", + "query": "select \n schemaname as \"schema_name!\", \n tablename as \"table_name!\", \n policyname as \"name!\", \n permissive as \"is_permissive!\", \n roles as \"role_names!\", \n cmd as \"command!\", \n qual as \"security_qualification\", \n with_check\nfrom \n pg_catalog.pg_policies;", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "schema_name!", + "type_info": "Name" + }, + { + "ordinal": 1, + "name": "table_name!", + "type_info": "Name" + }, + { + "ordinal": 2, + "name": "name!", + "type_info": "Name" + }, + { + "ordinal": 3, + "name": "is_permissive!", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "role_names!", + "type_info": "NameArray" + }, + { + "ordinal": 5, + "name": "command!", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "security_qualification", + "type_info": "Text" + }, + { + "ordinal": 7, + "name": "with_check", + "type_info": "Text" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "47bbad9dc2cec0231ef726790a9b0a5d9c628c6a2704f5523fb9ee45414350c7" +} diff --git a/crates/pgt_schema_cache/src/lib.rs b/crates/pgt_schema_cache/src/lib.rs index 28c5b641..fc717fbe 100644 --- a/crates/pgt_schema_cache/src/lib.rs +++ b/crates/pgt_schema_cache/src/lib.rs @@ -4,6 +4,7 @@ mod columns; mod functions; +mod policies; mod schema_cache; mod schemas; mod tables; diff --git a/crates/pgt_schema_cache/src/policies.rs b/crates/pgt_schema_cache/src/policies.rs new file mode 100644 index 00000000..46a3ab18 --- /dev/null +++ b/crates/pgt_schema_cache/src/policies.rs @@ -0,0 +1,225 @@ +use crate::schema_cache::SchemaCacheItem; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum PolicyCommand { + Select, + Insert, + Update, + Delete, + All, +} + +impl From<&str> for PolicyCommand { + fn from(value: &str) -> Self { + match value { + "SELECT" => PolicyCommand::Select, + "INSERT" => PolicyCommand::Insert, + "UPDATE" => PolicyCommand::Update, + "DELETE" => PolicyCommand::Delete, + "ALL" => PolicyCommand::All, + _ => panic!("Invalid Policy Command {}", value), + } + } +} +impl From for PolicyCommand { + fn from(value: String) -> Self { + PolicyCommand::from(value.as_str()) + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +struct PolicyQueried { + name: String, + table_name: String, + schema_name: String, + is_permissive: 
String, + command: String, + role_names: Option>, + security_qualification: Option, + with_check: Option, +} + +impl From for Policy { + fn from(value: PolicyQueried) -> Self { + Self { + name: value.name, + table_name: value.table_name, + schema_name: value.schema_name, + is_permissive: value.is_permissive == "PERMISSIVE", + command: PolicyCommand::from(value.command), + role_names: value.role_names.unwrap_or_default(), + security_qualification: value.security_qualification, + with_check: value.with_check, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Policy { + name: String, + table_name: String, + schema_name: String, + is_permissive: bool, + command: PolicyCommand, + role_names: Vec, + security_qualification: Option, + with_check: Option, +} + +impl SchemaCacheItem for Policy { + type Item = Policy; + + async fn load(pool: &sqlx::PgPool) -> Result, sqlx::Error> { + let policies = sqlx::query_file_as!(PolicyQueried, "src/queries/policies.sql") + .fetch_all(pool) + .await?; + + Ok(policies.into_iter().map(Policy::from).collect()) + } +} + +#[cfg(test)] +mod tests { + use pgt_test_utils::test_database::get_new_test_db; + use sqlx::Executor; + + use crate::{SchemaCache, policies::PolicyCommand}; + + #[tokio::test] + async fn loads_policies() { + let test_db = get_new_test_db().await; + + let setup = r#" + do $$ + begin + if not exists ( + select from pg_catalog.pg_roles + where rolname = 'admin' + ) then + create role admin; + end if; + end $$; + + + create table public.users ( + id serial primary key, + name varchar(255) not null + ); + + -- multiple policies to test various commands + create policy public_policy + on public.users + for select + to public + using (true); + + create policy public_policy_del + on public.users + for delete + to public + using (true); + + create policy public_policy_ins + on public.users + for insert + to public + with check (true); + + create policy admin_policy + on public.users + for all + to admin + with check (true); + + do $$ + begin + if not exists ( + select from pg_catalog.pg_roles + where rolname = 'owner' + ) then + create role owner; + end if; + end $$; + + create schema real_estate; + + create table real_estate.properties ( + id serial primary key, + owner_id int not null + ); + + create policy owner_policy + on real_estate.properties + for update + to owner + using (owner_id = current_user::int); + "#; + + test_db + .execute(setup) + .await + .expect("Failed to setup test database"); + + let cache = SchemaCache::load(&test_db) + .await + .expect("Failed to load Schema Cache"); + + let public_policies = cache + .policies + .iter() + .filter(|p| p.schema_name == "public") + .count(); + + assert_eq!(public_policies, 4); + + let real_estate_policies = cache + .policies + .iter() + .filter(|p| p.schema_name == "real_estate") + .count(); + + assert_eq!(real_estate_policies, 1); + + let public_policy = cache + .policies + .iter() + .find(|p| p.name == "public_policy") + .unwrap(); + assert_eq!(public_policy.table_name, "users"); + assert_eq!(public_policy.schema_name, "public"); + assert!(public_policy.is_permissive); + assert_eq!(public_policy.command, PolicyCommand::Select); + assert_eq!(public_policy.role_names, vec!["public"]); + assert_eq!(public_policy.security_qualification, Some("true".into())); + assert_eq!(public_policy.with_check, None); + + let admin_policy = cache + .policies + .iter() + .find(|p| p.name == "admin_policy") + .unwrap(); + assert_eq!(admin_policy.table_name, "users"); + assert_eq!(admin_policy.schema_name, 
"public"); + assert!(admin_policy.is_permissive); + assert_eq!(admin_policy.command, PolicyCommand::All); + assert_eq!(admin_policy.role_names, vec!["admin"]); + assert_eq!(admin_policy.security_qualification, None); + assert_eq!(admin_policy.with_check, Some("true".into())); + + let owner_policy = cache + .policies + .iter() + .find(|p| p.name == "owner_policy") + .unwrap(); + assert_eq!(owner_policy.table_name, "properties"); + assert_eq!(owner_policy.schema_name, "real_estate"); + assert!(owner_policy.is_permissive); + assert_eq!(owner_policy.command, PolicyCommand::Update); + assert_eq!(owner_policy.role_names, vec!["owner"]); + assert_eq!( + owner_policy.security_qualification, + Some("(owner_id = (CURRENT_USER)::integer)".into()) + ); + assert_eq!(owner_policy.with_check, None); + } +} diff --git a/crates/pgt_schema_cache/src/queries/policies.sql b/crates/pgt_schema_cache/src/queries/policies.sql new file mode 100644 index 00000000..2c0af39f --- /dev/null +++ b/crates/pgt_schema_cache/src/queries/policies.sql @@ -0,0 +1,11 @@ +select + schemaname as "schema_name!", + tablename as "table_name!", + policyname as "name!", + permissive as "is_permissive!", + roles as "role_names!", + cmd as "command!", + qual as "security_qualification", + with_check +from + pg_catalog.pg_policies; \ No newline at end of file diff --git a/crates/pgt_schema_cache/src/schema_cache.rs b/crates/pgt_schema_cache/src/schema_cache.rs index 913d8fff..8a5c1a93 100644 --- a/crates/pgt_schema_cache/src/schema_cache.rs +++ b/crates/pgt_schema_cache/src/schema_cache.rs @@ -2,6 +2,7 @@ use sqlx::postgres::PgPool; use crate::columns::Column; use crate::functions::Function; +use crate::policies::Policy; use crate::schemas::Schema; use crate::tables::Table; use crate::types::PostgresType; @@ -15,17 +16,19 @@ pub struct SchemaCache { pub types: Vec, pub versions: Vec, pub columns: Vec, + pub policies: Vec, } impl SchemaCache { pub async fn load(pool: &PgPool) -> Result { - let (schemas, tables, functions, types, versions, columns) = futures_util::try_join!( + let (schemas, tables, functions, types, versions, columns, policies) = futures_util::try_join!( Schema::load(pool), Table::load(pool), Function::load(pool), PostgresType::load(pool), Version::load(pool), - Column::load(pool) + Column::load(pool), + Policy::load(pool), )?; Ok(SchemaCache { @@ -35,6 +38,7 @@ impl SchemaCache { types, versions, columns, + policies, }) } From b8a09868f152387f0e8d82dc8d6746e1471952da Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Wed, 30 Apr 2025 21:35:25 +0200 Subject: [PATCH 042/114] fix?(splitter): allow newlines after commas (#380) --- crates/pgt_completions/src/builder.rs | 4 ++-- crates/pgt_statement_splitter/src/lib.rs | 17 +++++++++++++++++ .../pgt_statement_splitter/src/parser/common.rs | 12 +++++++++++- 3 files changed, 30 insertions(+), 3 deletions(-) diff --git a/crates/pgt_completions/src/builder.rs b/crates/pgt_completions/src/builder.rs index 127a3405..40d29db4 100644 --- a/crates/pgt_completions/src/builder.rs +++ b/crates/pgt_completions/src/builder.rs @@ -86,7 +86,7 @@ fn should_preselect_first_item(items: &Vec) -> bool { let second = items_iter.next(); first.is_some_and(|f| match second { - Some(s) => (f.score.get_score() - s.score.get_score()) > 10, + Some(s) => (f.score.get_score() - s.score.get_score()) > 15, None => true, - }) + }) && items.len() >= 10 } diff --git a/crates/pgt_statement_splitter/src/lib.rs b/crates/pgt_statement_splitter/src/lib.rs index 
5f6ca92f..e44f20f1 100644 --- a/crates/pgt_statement_splitter/src/lib.rs +++ b/crates/pgt_statement_splitter/src/lib.rs @@ -333,4 +333,21 @@ values ('insert', new.id, now());", "select 3", ]); } + + #[test] + fn commas_and_newlines() { + Tester::from( + " + select + email, + + + from + auth.users; + ", + ) + .expect_statements(vec![ + "select\n email,\n\n\n from\n auth.users;", + ]); + } } diff --git a/crates/pgt_statement_splitter/src/parser/common.rs b/crates/pgt_statement_splitter/src/parser/common.rs index 56d8d86a..d5398016 100644 --- a/crates/pgt_statement_splitter/src/parser/common.rs +++ b/crates/pgt_statement_splitter/src/parser/common.rs @@ -138,11 +138,21 @@ pub(crate) fn unknown(p: &mut Parser, exclude: &[SyntaxKind]) { break; } Token { - kind: SyntaxKind::Newline | SyntaxKind::Eof, + kind: SyntaxKind::Eof, .. } => { break; } + Token { + kind: SyntaxKind::Newline, + .. + } => { + if p.look_back().is_some_and(|t| t.kind == SyntaxKind::Ascii44) { + p.advance(); + } else { + break; + } + } Token { kind: SyntaxKind::Case, .. From 51c301a311b1551cbc6edc490b726887b894b2e1 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Wed, 30 Apr 2025 23:29:23 +0200 Subject: [PATCH 043/114] chore(completions): add tree sitter query for table aliases --- crates/pgt_completions/src/context.rs | 11 ++ crates/pgt_treesitter_queries/src/lib.rs | 72 +++++++++++- .../pgt_treesitter_queries/src/queries/mod.rs | 10 +- .../src/queries/table_aliases.rs | 106 ++++++++++++++++++ 4 files changed, 197 insertions(+), 2 deletions(-) create mode 100644 crates/pgt_treesitter_queries/src/queries/table_aliases.rs diff --git a/crates/pgt_completions/src/context.rs b/crates/pgt_completions/src/context.rs index b16fd21c..db21e498 100644 --- a/crates/pgt_completions/src/context.rs +++ b/crates/pgt_completions/src/context.rs @@ -115,6 +115,8 @@ pub(crate) struct CompletionContext<'a> { pub wrapping_statement_range: Option, pub mentioned_relations: HashMap, HashSet>, + + pub mentioned_table_aliases: HashMap, } impl<'a> CompletionContext<'a> { @@ -131,6 +133,7 @@ impl<'a> CompletionContext<'a> { wrapping_statement_range: None, is_invocation: false, mentioned_relations: HashMap::new(), + mentioned_table_aliases: HashMap::new(), }; ctx.gather_tree_context(); @@ -146,6 +149,7 @@ impl<'a> CompletionContext<'a> { let mut executor = TreeSitterQueriesExecutor::new(self.tree.root_node(), sql); executor.add_query_results::(); + executor.add_query_results::(); for relation_match in executor.get_iter(stmt_range) { match relation_match { @@ -166,6 +170,13 @@ impl<'a> CompletionContext<'a> { } }; } + + QueryResult::TableAliases(table_alias_match) => { + self.mentioned_table_aliases.insert( + table_alias_match.get_alias(sql), + table_alias_match.get_table(sql), + ); + } }; } } diff --git a/crates/pgt_treesitter_queries/src/lib.rs b/crates/pgt_treesitter_queries/src/lib.rs index 7d2ba61b..8d1719b0 100644 --- a/crates/pgt_treesitter_queries/src/lib.rs +++ b/crates/pgt_treesitter_queries/src/lib.rs @@ -68,7 +68,77 @@ impl<'a> Iterator for QueryResultIter<'a> { #[cfg(test)] mod tests { - use crate::{TreeSitterQueriesExecutor, queries::RelationMatch}; + use crate::{ + TreeSitterQueriesExecutor, + queries::{RelationMatch, TableAliasMatch}, + }; + + #[test] + fn finds_all_table_aliases() { + let sql = r#" +select + * +from + ( + select + something + from + public.cool_table pu + join private.cool_tableau pr on pu.id = pr.id + where + x = '123' + union + select + something_else + from + 
another_table puat + inner join private.another_tableau prat on puat.id = prat.id + union + select + x, + y + from + public.get_something_cool () + ) as cool + join users u on u.id = cool.something +where + col = 17; +"#; + + let mut parser = tree_sitter::Parser::new(); + parser.set_language(tree_sitter_sql::language()).unwrap(); + + let tree = parser.parse(sql, None).unwrap(); + + let mut executor = TreeSitterQueriesExecutor::new(tree.root_node(), sql); + + executor.add_query_results::(); + + let results: Vec<&TableAliasMatch> = executor + .get_iter(None) + .filter_map(|q| q.try_into().ok()) + .collect(); + + assert_eq!(results[0].get_schema(sql), Some("public".into())); + assert_eq!(results[0].get_table(sql), "cool_table"); + assert_eq!(results[0].get_alias(sql), "pu"); + + assert_eq!(results[1].get_schema(sql), Some("private".into())); + assert_eq!(results[1].get_table(sql), "cool_tableau"); + assert_eq!(results[1].get_alias(sql), "pr"); + + assert_eq!(results[2].get_schema(sql), None); + assert_eq!(results[2].get_table(sql), "another_table"); + assert_eq!(results[2].get_alias(sql), "puat"); + + assert_eq!(results[3].get_schema(sql), Some("private".into())); + assert_eq!(results[3].get_table(sql), "another_tableau"); + assert_eq!(results[3].get_alias(sql), "prat"); + + assert_eq!(results[4].get_schema(sql), None); + assert_eq!(results[4].get_table(sql), "users"); + assert_eq!(results[4].get_alias(sql), "u"); + } #[test] fn finds_all_relations_and_ignores_functions() { diff --git a/crates/pgt_treesitter_queries/src/queries/mod.rs b/crates/pgt_treesitter_queries/src/queries/mod.rs index 98b55e03..4e10ed60 100644 --- a/crates/pgt_treesitter_queries/src/queries/mod.rs +++ b/crates/pgt_treesitter_queries/src/queries/mod.rs @@ -1,16 +1,19 @@ mod relations; +mod table_aliases; pub use relations::*; +pub use table_aliases::*; #[derive(Debug)] pub enum QueryResult<'a> { Relation(RelationMatch<'a>), + TableAliases(TableAliasMatch<'a>), } impl QueryResult<'_> { pub fn within_range(&self, range: &tree_sitter::Range) -> bool { match self { - Self::Relation(rm) => { + QueryResult::Relation(rm) => { let start = match rm.schema { Some(s) => s.start_position(), None => rm.table.start_position(), @@ -20,6 +23,11 @@ impl QueryResult<'_> { start >= range.start_point && end <= range.end_point } + QueryResult::TableAliases(m) => { + let start = m.table.start_position(); + let end = m.alias.end_position(); + start >= range.start_point && end <= range.end_point + } } } } diff --git a/crates/pgt_treesitter_queries/src/queries/table_aliases.rs b/crates/pgt_treesitter_queries/src/queries/table_aliases.rs new file mode 100644 index 00000000..4297a218 --- /dev/null +++ b/crates/pgt_treesitter_queries/src/queries/table_aliases.rs @@ -0,0 +1,106 @@ +use std::sync::LazyLock; + +use crate::{Query, QueryResult}; + +use super::QueryTryFrom; + +static TS_QUERY: LazyLock = LazyLock::new(|| { + static QUERY_STR: &str = r#" + (relation + (object_reference + . + (identifier) @schema_or_table + "."? + (identifier)? @table + ) + (keyword_as)? 
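+      ; an optional AS keyword, then the alias identifier itself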
+ (identifier) @alias + ) +"#; + tree_sitter::Query::new(tree_sitter_sql::language(), QUERY_STR).expect("Invalid TS Query") +}); + +#[derive(Debug)] +pub struct TableAliasMatch<'a> { + pub(crate) table: tree_sitter::Node<'a>, + pub(crate) alias: tree_sitter::Node<'a>, + pub(crate) schema: Option>, +} + +impl TableAliasMatch<'_> { + pub fn get_alias(&self, sql: &str) -> String { + self.alias + .utf8_text(sql.as_bytes()) + .expect("Failed to get alias from TableAliasMatch") + .to_string() + } + + pub fn get_table(&self, sql: &str) -> String { + self.table + .utf8_text(sql.as_bytes()) + .expect("Failed to get table from TableAliasMatch") + .to_string() + } + + pub fn get_schema(&self, sql: &str) -> Option { + self.schema.as_ref().map(|n| { + n.utf8_text(sql.as_bytes()) + .expect("Failed to get table from TableAliasMatch") + .to_string() + }) + } +} + +impl<'a> TryFrom<&'a QueryResult<'a>> for &'a TableAliasMatch<'a> { + type Error = String; + + fn try_from(q: &'a QueryResult<'a>) -> Result { + match q { + QueryResult::TableAliases(t) => Ok(t), + + #[allow(unreachable_patterns)] + _ => Err("Invalid QueryResult type".into()), + } + } +} + +impl<'a> QueryTryFrom<'a> for TableAliasMatch<'a> { + type Ref = &'a TableAliasMatch<'a>; +} + +impl<'a> Query<'a> for TableAliasMatch<'a> { + fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec> { + let mut cursor = tree_sitter::QueryCursor::new(); + + let matches = cursor.matches(&TS_QUERY, root_node, stmt.as_bytes()); + + let mut to_return = vec![]; + + for m in matches { + if m.captures.len() == 3 { + let schema = m.captures[0].node; + let table = m.captures[1].node; + let alias = m.captures[2].node; + + to_return.push(QueryResult::TableAliases(TableAliasMatch { + table, + alias, + schema: Some(schema), + })); + } + + if m.captures.len() == 2 { + let table = m.captures[0].node; + let alias = m.captures[1].node; + + to_return.push(QueryResult::TableAliases(TableAliasMatch { + table, + alias, + schema: None, + })); + } + } + + to_return + } +} From 9cd19b0718ff90ea64d1d7582939bc55ec5fadad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Mon, 5 May 2025 08:29:38 +0200 Subject: [PATCH 044/114] fix: properly parse trigger with instead of (#383) --- crates/pgt_statement_splitter/src/lib.rs | 16 ++++++++++++++++ .../pgt_statement_splitter/src/parser/common.rs | 2 ++ 2 files changed, 18 insertions(+) diff --git a/crates/pgt_statement_splitter/src/lib.rs b/crates/pgt_statement_splitter/src/lib.rs index e44f20f1..e43a1095 100644 --- a/crates/pgt_statement_splitter/src/lib.rs +++ b/crates/pgt_statement_splitter/src/lib.rs @@ -180,6 +180,22 @@ mod tests { Tester::from("/* this is a test */\nselect 1").expect_statements(vec!["select 1"]); } + #[test] + fn trigger_instead_of() { + Tester::from( + "CREATE OR REPLACE TRIGGER my_trigger + INSTEAD OF INSERT ON my_table + FOR EACH ROW + EXECUTE FUNCTION my_table_trigger_fn();", + ) + .expect_statements(vec![ + "CREATE OR REPLACE TRIGGER my_trigger + INSTEAD OF INSERT ON my_table + FOR EACH ROW + EXECUTE FUNCTION my_table_trigger_fn();", + ]); + } + #[test] fn with_check() { Tester::from("create policy employee_insert on journey_execution for insert to authenticated with check ((select private.organisation_id()) = organisation_id);") diff --git a/crates/pgt_statement_splitter/src/parser/common.rs b/crates/pgt_statement_splitter/src/parser/common.rs index d5398016..a5d68df1 100644 --- a/crates/pgt_statement_splitter/src/parser/common.rs +++ 
b/crates/pgt_statement_splitter/src/parser/common.rs @@ -236,6 +236,8 @@ pub(crate) fn unknown(p: &mut Parser, exclude: &[SyntaxKind]) { SyntaxKind::For, // e.g. on insert or delete SyntaxKind::Or, + // e.g. INSTEAD OF INSERT + SyntaxKind::Of, // for create rule SyntaxKind::On, // for create rule From 3b7971d733e785c96f6a2ea1b75266d2d737b14a Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Mon, 5 May 2025 11:22:04 +0200 Subject: [PATCH 045/114] feat(completions): respect table aliases, complete in JOINs (#388) * add comment, rename * add filtering * feat(completions): recognize table aliases * non exhaustive * ok --- crates/pgt_completions/src/context.rs | 84 +++++++++++-- .../pgt_completions/src/providers/columns.rs | 110 +++++++++++++++++- .../pgt_completions/src/providers/helper.rs | 2 +- .../pgt_completions/src/providers/schemas.rs | 3 +- .../pgt_completions/src/providers/tables.rs | 33 ++++++ .../src/relevance/filtering.rs | 42 ++++--- .../pgt_completions/src/relevance/scoring.rs | 23 +++- crates/pgt_completions/src/test_helper.rs | 59 +++++++++- 8 files changed, 318 insertions(+), 38 deletions(-) diff --git a/crates/pgt_completions/src/context.rs b/crates/pgt_completions/src/context.rs index db21e498..6ace55b6 100644 --- a/crates/pgt_completions/src/context.rs +++ b/crates/pgt_completions/src/context.rs @@ -13,6 +13,7 @@ pub enum ClauseType { Select, Where, From, + Join, Update, Delete, } @@ -33,6 +34,7 @@ impl TryFrom<&str> for ClauseType { "from" => Ok(Self::From), "update" => Ok(Self::Update), "delete" => Ok(Self::Delete), + "join" => Ok(Self::Join), _ => { let message = format!("Unimplemented ClauseType: {}", value); @@ -106,7 +108,25 @@ pub(crate) struct CompletionContext<'a> { pub schema_cache: &'a SchemaCache, pub position: usize, - pub schema_name: Option, + /// If the cursor is on a node that uses dot notation + /// to specify an alias or schema, this will hold the schema's or + /// alias's name. + /// + /// Here, `auth` is a schema name: + /// ```sql + /// select * from auth.users; + /// ``` + /// + /// Here, `u` is an alias name: + /// ```sql + /// select + /// * + /// from + /// auth.users u + /// left join identities i + /// on u.id = i.user_id; + /// ``` + pub schema_or_alias_name: Option, pub wrapping_clause_type: Option, pub wrapping_node_kind: Option, @@ -114,6 +134,9 @@ pub(crate) struct CompletionContext<'a> { pub is_invocation: bool, pub wrapping_statement_range: Option, + /// Some incomplete statements can't be correctly parsed by TreeSitter. + pub is_in_error_node: bool, + pub mentioned_relations: HashMap, HashSet>, pub mentioned_table_aliases: HashMap, @@ -127,13 +150,14 @@ impl<'a> CompletionContext<'a> { schema_cache: params.schema, position: usize::from(params.position), node_under_cursor: None, - schema_name: None, + schema_or_alias_name: None, wrapping_clause_type: None, wrapping_node_kind: None, wrapping_statement_range: None, is_invocation: false, mentioned_relations: HashMap::new(), mentioned_table_aliases: HashMap::new(), + is_in_error_node: false, }; ctx.gather_tree_context(); @@ -246,19 +270,58 @@ impl<'a> CompletionContext<'a> { self.wrapping_statement_range = Some(parent_node.range()); } "invocation" => self.is_invocation = true, - _ => {} } + // try to gather context from the siblings if we're within an error node. 
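+        // e.g. `select * from users u join ` does not parse cleanly, so the cursor
+        // ends up in an ERROR node and we infer the clause from the sibling keywords.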
+ if self.is_in_error_node { + let mut next_sibling = current_node.next_named_sibling(); + while let Some(n) = next_sibling { + if n.kind().starts_with("keyword_") { + if let Some(txt) = self.get_ts_node_content(n).and_then(|txt| match txt { + NodeText::Original(txt) => Some(txt), + NodeText::Replaced => None, + }) { + match txt { + "where" | "update" | "select" | "delete" | "from" | "join" => { + self.wrapping_clause_type = txt.try_into().ok(); + break; + } + _ => {} + } + }; + } + next_sibling = n.next_named_sibling(); + } + let mut prev_sibling = current_node.prev_named_sibling(); + while let Some(n) = prev_sibling { + if n.kind().starts_with("keyword_") { + if let Some(txt) = self.get_ts_node_content(n).and_then(|txt| match txt { + NodeText::Original(txt) => Some(txt), + NodeText::Replaced => None, + }) { + match txt { + "where" | "update" | "select" | "delete" | "from" | "join" => { + self.wrapping_clause_type = txt.try_into().ok(); + break; + } + _ => {} + } + }; + } + prev_sibling = n.prev_named_sibling(); + } + } + match current_node_kind { - "object_reference" => { + "object_reference" | "field" => { let content = self.get_ts_node_content(current_node); if let Some(node_txt) = content { match node_txt { NodeText::Original(txt) => { let parts: Vec<&str> = txt.split('.').collect(); if parts.len() == 2 { - self.schema_name = Some(parts[0].to_string()); + self.schema_or_alias_name = Some(parts[0].to_string()); } } NodeText::Replaced => {} @@ -266,7 +329,7 @@ impl<'a> CompletionContext<'a> { } } - "where" | "update" | "select" | "delete" | "from" => { + "where" | "update" | "select" | "delete" | "from" | "join" => { self.wrapping_clause_type = current_node_kind.try_into().ok(); } @@ -274,6 +337,10 @@ impl<'a> CompletionContext<'a> { self.wrapping_node_kind = current_node_kind.try_into().ok(); } + "ERROR" => { + self.is_in_error_node = true; + } + _ => {} } @@ -380,7 +447,10 @@ mod tests { let ctx = CompletionContext::new(¶ms); - assert_eq!(ctx.schema_name, expected_schema.map(|f| f.to_string())); + assert_eq!( + ctx.schema_or_alias_name, + expected_schema.map(|f| f.to_string()) + ); } } diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs index 6ac3c989..770a2b61 100644 --- a/crates/pgt_completions/src/providers/columns.rs +++ b/crates/pgt_completions/src/providers/columns.rs @@ -28,7 +28,10 @@ pub fn complete_columns<'a>(ctx: &CompletionContext<'a>, builder: &mut Completio mod tests { use crate::{ CompletionItem, CompletionItemKind, complete, - test_helper::{CURSOR_POS, InputQuery, get_test_deps, get_test_params}, + test_helper::{ + CURSOR_POS, CompletionAssertion, InputQuery, assert_complete_results, get_test_deps, + get_test_params, + }, }; struct TestCase { @@ -168,9 +171,9 @@ mod tests { ("name", "Table: public.users"), ("narrator", "Table: public.audio_books"), ("narrator_id", "Table: private.audio_books"), + ("id", "Table: public.audio_books"), ("name", "Schema: pg_catalog"), ("nameconcatoid", "Schema: pg_catalog"), - ("nameeq", "Schema: pg_catalog"), ] .into_iter() .map(|(label, schema)| LabelAndDesc { @@ -325,4 +328,107 @@ mod tests { ); } } + + #[tokio::test] + async fn filters_out_by_aliases() { + let setup = r#" + create schema auth; + + create table auth.users ( + uid serial primary key, + name text not null, + email text unique not null + ); + + create table auth.posts ( + pid serial primary key, + user_id int not null references auth.users(uid), + title text not null, + content text, + created_at timestamp default 
now() + ); + "#; + + // test in SELECT clause + assert_complete_results( + format!( + "select u.id, p.{} from auth.users u join auth.posts p on u.id = p.user_id;", + CURSOR_POS + ) + .as_str(), + vec![ + CompletionAssertion::LabelNotExists("uid".to_string()), + CompletionAssertion::LabelNotExists("name".to_string()), + CompletionAssertion::LabelNotExists("email".to_string()), + CompletionAssertion::Label("content".to_string()), + CompletionAssertion::Label("created_at".to_string()), + CompletionAssertion::Label("pid".to_string()), + CompletionAssertion::Label("title".to_string()), + CompletionAssertion::Label("user_id".to_string()), + ], + setup, + ) + .await; + + // test in JOIN clause + assert_complete_results( + format!( + "select u.id, p.content from auth.users u join auth.posts p on u.id = p.{};", + CURSOR_POS + ) + .as_str(), + vec![ + CompletionAssertion::LabelNotExists("uid".to_string()), + CompletionAssertion::LabelNotExists("name".to_string()), + CompletionAssertion::LabelNotExists("email".to_string()), + // primary keys are preferred + CompletionAssertion::Label("pid".to_string()), + CompletionAssertion::Label("content".to_string()), + CompletionAssertion::Label("created_at".to_string()), + CompletionAssertion::Label("title".to_string()), + CompletionAssertion::Label("user_id".to_string()), + ], + setup, + ) + .await; + } + + #[tokio::test] + async fn does_not_complete_cols_in_join_clauses() { + let setup = r#" + create schema auth; + + create table auth.users ( + uid serial primary key, + name text not null, + email text unique not null + ); + + create table auth.posts ( + pid serial primary key, + user_id int not null references auth.users(uid), + title text not null, + content text, + created_at timestamp default now() + ); + "#; + + /* + * We are not in the "ON" part of the JOIN clause, so we should not complete columns. 
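+         * We expect the JOIN target instead: schemas and tables.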
+ */ + assert_complete_results( + format!( + "select u.id, p.content from auth.users u join auth.{}", + CURSOR_POS + ) + .as_str(), + vec![ + CompletionAssertion::KindNotExists(CompletionItemKind::Column), + CompletionAssertion::LabelAndKind("posts".to_string(), CompletionItemKind::Table), + CompletionAssertion::LabelAndKind("users".to_string(), CompletionItemKind::Table), + ], + setup, + ) + .await; + } } diff --git a/crates/pgt_completions/src/providers/helper.rs b/crates/pgt_completions/src/providers/helper.rs index 2e4ef8a9..c0fe5869 100644 --- a/crates/pgt_completions/src/providers/helper.rs +++ b/crates/pgt_completions/src/providers/helper.rs @@ -7,7 +7,7 @@ pub(crate) fn get_completion_text_with_schema( item_name: &str, item_schema_name: &str, ) -> Option { - if item_schema_name == "public" || ctx.schema_name.is_some() { + if item_schema_name == "public" || ctx.schema_or_alias_name.is_some() { None } else { let node = ctx.node_under_cursor.unwrap(); diff --git a/crates/pgt_completions/src/providers/schemas.rs b/crates/pgt_completions/src/providers/schemas.rs index c28f831e..aaa5ebe6 100644 --- a/crates/pgt_completions/src/providers/schemas.rs +++ b/crates/pgt_completions/src/providers/schemas.rs @@ -59,6 +59,8 @@ mod tests { "private".to_string(), CompletionItemKind::Schema, ), + // users table still preferred over system schemas + CompletionAssertion::LabelAndKind("users".to_string(), CompletionItemKind::Table), CompletionAssertion::LabelAndKind( "information_schema".to_string(), CompletionItemKind::Schema, @@ -71,7 +73,6 @@ mod tests { "pg_toast".to_string(), CompletionItemKind::Schema, ), - CompletionAssertion::LabelAndKind("users".to_string(), CompletionItemKind::Table), ], setup, ) diff --git a/crates/pgt_completions/src/providers/tables.rs b/crates/pgt_completions/src/providers/tables.rs index f9f922d1..cbedc55b 100644 --- a/crates/pgt_completions/src/providers/tables.rs +++ b/crates/pgt_completions/src/providers/tables.rs @@ -273,4 +273,37 @@ mod tests { ) .await; } + + #[tokio::test] + async fn suggests_tables_in_join() { + let setup = r#" + create schema auth; + + create table auth.users ( + uid serial primary key, + name text not null, + email text unique not null + ); + + create table auth.posts ( + pid serial primary key, + user_id int not null references auth.users(uid), + title text not null, + content text, + created_at timestamp default now() + ); + "#; + + assert_complete_results( + format!("select * from auth.users u join {}", CURSOR_POS).as_str(), + vec![ + CompletionAssertion::LabelAndKind("public".into(), CompletionItemKind::Schema), + CompletionAssertion::LabelAndKind("auth".into(), CompletionItemKind::Schema), + CompletionAssertion::LabelAndKind("posts".into(), CompletionItemKind::Table), // self-join + CompletionAssertion::LabelAndKind("users".into(), CompletionItemKind::Table), + ], + setup, + ) + .await; + } } diff --git a/crates/pgt_completions/src/relevance/filtering.rs b/crates/pgt_completions/src/relevance/filtering.rs index 69939e0b..2658216b 100644 --- a/crates/pgt_completions/src/relevance/filtering.rs +++ b/crates/pgt_completions/src/relevance/filtering.rs @@ -1,4 +1,4 @@ -use crate::context::{ClauseType, CompletionContext}; +use crate::context::{ClauseType, CompletionContext, WrappingNode}; use super::CompletionRelevanceData; @@ -18,7 +18,7 @@ impl CompletionFilter<'_> { self.completable_context(ctx)?; self.check_clause(ctx)?; self.check_invocation(ctx)?; - self.check_mentioned_schema(ctx)?; + self.check_mentioned_schema_or_alias(ctx)?; Some(()) } 
@@ -50,6 +50,7 @@ impl CompletionFilter<'_> { fn check_clause(&self, ctx: &CompletionContext) -> Option<()> { let clause = ctx.wrapping_clause_type.as_ref(); + let wrapping_node = ctx.wrapping_node_kind.as_ref(); match self.data { CompletionRelevanceData::Table(_) => { @@ -62,10 +63,20 @@ impl CompletionFilter<'_> { } CompletionRelevanceData::Column(_) => { let in_from_clause = clause.is_some_and(|c| c == &ClauseType::From); - if in_from_clause { return None; } + + // We can complete columns in JOIN cluases, but only if we are in the + // "ON u.id = posts.user_id" part. + let in_join_clause = clause.is_some_and(|c| c == &ClauseType::Join); + + let in_comparison_clause = + wrapping_node.is_some_and(|n| n == &WrappingNode::BinaryExpression); + + if in_join_clause && !in_comparison_clause { + return None; + } } _ => {} } @@ -86,27 +97,28 @@ impl CompletionFilter<'_> { Some(()) } - fn check_mentioned_schema(&self, ctx: &CompletionContext) -> Option<()> { - if ctx.schema_name.is_none() { + fn check_mentioned_schema_or_alias(&self, ctx: &CompletionContext) -> Option<()> { + if ctx.schema_or_alias_name.is_none() { return Some(()); } - let name = ctx.schema_name.as_ref().unwrap(); + let schema_or_alias = ctx.schema_or_alias_name.as_ref().unwrap(); + + let matches = match self.data { + CompletionRelevanceData::Table(table) => &table.schema == schema_or_alias, + CompletionRelevanceData::Function(f) => &f.schema == schema_or_alias, + CompletionRelevanceData::Column(col) => ctx + .mentioned_table_aliases + .get(schema_or_alias) + .is_some_and(|t| t == &col.table_name), - let does_not_match = match self.data { - CompletionRelevanceData::Table(table) => &table.schema != name, - CompletionRelevanceData::Function(f) => &f.schema != name, - CompletionRelevanceData::Column(_) => { - // columns belong to tables, not schemas - true - } CompletionRelevanceData::Schema(_) => { // we should never allow schema suggestions if there already was one. 
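+                // the same holds when the prefix is a table alias.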
- true + false } }; - if does_not_match { + if !matches { return None; } diff --git a/crates/pgt_completions/src/relevance/scoring.rs b/crates/pgt_completions/src/relevance/scoring.rs index 2ef8edb6..e67df658 100644 --- a/crates/pgt_completions/src/relevance/scoring.rs +++ b/crates/pgt_completions/src/relevance/scoring.rs @@ -62,13 +62,19 @@ impl CompletionScore<'_> { }; let has_mentioned_tables = !ctx.mentioned_relations.is_empty(); - let has_mentioned_schema = ctx.schema_name.is_some(); + let has_mentioned_schema = ctx.schema_or_alias_name.is_some(); + + let is_binary_exp = ctx + .wrapping_node_kind + .as_ref() + .is_some_and(|wn| wn == &WrappingNode::BinaryExpression); self.score += match self.data { CompletionRelevanceData::Table(_) => match clause_type { - ClauseType::From => 5, ClauseType::Update => 10, ClauseType::Delete => 10, + ClauseType::From => 5, + ClauseType::Join if !is_binary_exp => 5, _ => -50, }, CompletionRelevanceData::Function(_) => match clause_type { @@ -77,14 +83,19 @@ impl CompletionScore<'_> { ClauseType::From => 0, _ => -50, }, - CompletionRelevanceData::Column(_) => match clause_type { + CompletionRelevanceData::Column(col) => match clause_type { ClauseType::Select if has_mentioned_tables => 10, ClauseType::Select if !has_mentioned_tables => 0, ClauseType::Where => 10, + ClauseType::Join if is_binary_exp => { + // Users will probably join on primary keys + if col.is_primary_key { 20 } else { 10 } + } _ => -15, }, CompletionRelevanceData::Schema(_) => match clause_type { ClauseType::From if !has_mentioned_schema => 15, + ClauseType::Join if !has_mentioned_schema => 15, ClauseType::Update if !has_mentioned_schema => 15, ClauseType::Delete if !has_mentioned_schema => 15, _ => -50, @@ -98,7 +109,7 @@ impl CompletionScore<'_> { Some(wn) => wn, }; - let has_mentioned_schema = ctx.schema_name.is_some(); + let has_mentioned_schema = ctx.schema_or_alias_name.is_some(); let has_node_text = ctx.get_node_under_cursor_content().is_some(); self.score += match self.data { @@ -135,7 +146,7 @@ impl CompletionScore<'_> { } fn check_matches_schema(&mut self, ctx: &CompletionContext) { - let schema_name = match ctx.schema_name.as_ref() { + let schema_name = match ctx.schema_or_alias_name.as_ref() { None => return, Some(n) => n, }; @@ -199,7 +210,7 @@ impl CompletionScore<'_> { let system_schemas = ["pg_catalog", "information_schema", "pg_toast"]; if system_schemas.contains(&schema.as_str()) { - self.score -= 10; + self.score -= 20; } // "public" is the default postgres schema where users diff --git a/crates/pgt_completions/src/test_helper.rs b/crates/pgt_completions/src/test_helper.rs index 5eb5f53f..a6b57c55 100644 --- a/crates/pgt_completions/src/test_helper.rs +++ b/crates/pgt_completions/src/test_helper.rs @@ -146,17 +146,45 @@ mod tests { pub(crate) enum CompletionAssertion { Label(String), LabelAndKind(String, CompletionItemKind), + LabelNotExists(String), + KindNotExists(CompletionItemKind), } impl CompletionAssertion { - fn assert_eq(self, item: CompletionItem) { + fn assert(&self, item: &CompletionItem) { match self { CompletionAssertion::Label(label) => { - assert_eq!(item.label, label); + assert_eq!( + &item.label, label, + "Expected label to be {}, but got {}", + label, &item.label + ); } CompletionAssertion::LabelAndKind(label, kind) => { - assert_eq!(item.label, label); - assert_eq!(item.kind, kind); + assert_eq!( + &item.label, label, + "Expected label to be {}, but got {}", + label, &item.label + ); + assert_eq!( + &item.kind, kind, + "Expected kind to be {:?}, but 
got {:?}", + kind, &item.kind + ); + } + CompletionAssertion::LabelNotExists(label) => { + assert_ne!( + &item.label, label, + "Expected label {} not to exist, but found it", + label + ); + } + CompletionAssertion::KindNotExists(kind) => { + assert_ne!( + &item.kind, kind, + "Expected kind {:?} not to exist, but found it", + kind + ); } } } @@ -171,11 +199,30 @@ pub(crate) async fn assert_complete_results( let params = get_test_params(&tree, &cache, query.into()); let items = complete(params); - assertions + let (not_existing, existing): (Vec, Vec) = + assertions.into_iter().partition(|a| match a { + CompletionAssertion::LabelNotExists(_) | CompletionAssertion::KindNotExists(_) => true, + CompletionAssertion::Label(_) | CompletionAssertion::LabelAndKind(_, _) => false, + }); + + assert!( + items.len() >= existing.len(), + "Not enough items returned. Expected at least {} items, but got {}", + existing.len(), + items.len() + ); + + for item in &items { + for assertion in ¬_existing { + assertion.assert(item); + } + } + + existing .into_iter() .zip(items.into_iter()) .for_each(|(assertion, result)| { - assertion.assert_eq(result); + assertion.assert(&result); }); } From 8abc44d900b78a4330154856b9f1097adb97435d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Mon, 5 May 2025 11:56:09 +0200 Subject: [PATCH 046/114] chore: add contributors to changelog (#387) --- cliff.toml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cliff.toml b/cliff.toml index 3e040a17..3406b80c 100644 --- a/cliff.toml +++ b/cliff.toml @@ -27,10 +27,17 @@ body = """ {{ commit.message | upper_first }}\ {% endfor %} {% endfor %}\n + +## Contributors +{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %} + * @{{ contributor.username }} made their first contribution in #{{ contributor.pr_number }} 🎉 +{%- endfor -%} +{% for contributor in github.contributors | filter(attribute="is_first_time", value=false) %} + * @{{ contributor.username }} +{%- endfor -%}\n """ # template for the changelog footer footer = """ - """ # remove the leading and trailing s trim = true From 5945c1b9e2bd0d45b00e597c09a21a465f0e2474 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Tue, 6 May 2025 10:41:01 +0200 Subject: [PATCH 047/114] fix(completions): complete right columns right after JOIN ON (#390) --- crates/pgt_completions/src/context.rs | 178 ++++++++++-------- .../pgt_completions/src/providers/columns.rs | 53 ++++++ .../src/relevance/filtering.rs | 27 +-- .../pgt_completions/src/relevance/scoring.rs | 49 ++--- 4 files changed, 197 insertions(+), 110 deletions(-) diff --git a/crates/pgt_completions/src/context.rs b/crates/pgt_completions/src/context.rs index 6ace55b6..d96d0d53 100644 --- a/crates/pgt_completions/src/context.rs +++ b/crates/pgt_completions/src/context.rs @@ -9,11 +9,13 @@ use pgt_treesitter_queries::{ use crate::sanitization::SanitizedCompletionParams; #[derive(Debug, PartialEq, Eq)] -pub enum ClauseType { +pub enum WrappingClause<'a> { Select, Where, From, - Join, + Join { + on_node: Option>, + }, Update, Delete, } @@ -24,38 +26,6 @@ pub(crate) enum NodeText<'a> { Original(&'a str), } -impl TryFrom<&str> for ClauseType { - type Error = String; - - fn try_from(value: &str) -> Result { - match value { - "select" => Ok(Self::Select), - "where" => Ok(Self::Where), - "from" => Ok(Self::From), - "update" => Ok(Self::Update), - "delete" => Ok(Self::Delete), - "join" => Ok(Self::Join), - _ => 
{ - let message = format!("Unimplemented ClauseType: {}", value); - - // Err on tests, so we notice that we're lacking an implementation immediately. - if cfg!(test) { - panic!("{}", message); - } - - Err(message) - } - } - } -} - -impl TryFrom for ClauseType { - type Error = String; - fn try_from(value: String) -> Result { - Self::try_from(value.as_str()) - } -} - /// We can map a few nodes, such as the "update" node, to actual SQL clauses. /// That gives us a lot of insight for completions. /// Other nodes, such as the "relation" node, gives us less but still @@ -127,7 +97,7 @@ pub(crate) struct CompletionContext<'a> { /// on u.id = i.user_id; /// ``` pub schema_or_alias_name: Option, - pub wrapping_clause_type: Option, + pub wrapping_clause_type: Option>, pub wrapping_node_kind: Option, @@ -266,7 +236,9 @@ impl<'a> CompletionContext<'a> { match parent_node_kind { "statement" | "subquery" => { - self.wrapping_clause_type = current_node_kind.try_into().ok(); + self.wrapping_clause_type = + self.get_wrapping_clause_from_current_node(current_node, &mut cursor); + self.wrapping_statement_range = Some(parent_node.range()); } "invocation" => self.is_invocation = true, @@ -277,39 +249,21 @@ impl<'a> CompletionContext<'a> { if self.is_in_error_node { let mut next_sibling = current_node.next_named_sibling(); while let Some(n) = next_sibling { - if n.kind().starts_with("keyword_") { - if let Some(txt) = self.get_ts_node_content(n).and_then(|txt| match txt { - NodeText::Original(txt) => Some(txt), - NodeText::Replaced => None, - }) { - match txt { - "where" | "update" | "select" | "delete" | "from" | "join" => { - self.wrapping_clause_type = txt.try_into().ok(); - break; - } - _ => {} - } - }; + if let Some(clause_type) = self.get_wrapping_clause_from_keyword_node(n) { + self.wrapping_clause_type = Some(clause_type); + break; + } else { + next_sibling = n.next_named_sibling(); } - next_sibling = n.next_named_sibling(); } let mut prev_sibling = current_node.prev_named_sibling(); while let Some(n) = prev_sibling { - if n.kind().starts_with("keyword_") { - if let Some(txt) = self.get_ts_node_content(n).and_then(|txt| match txt { - NodeText::Original(txt) => Some(txt), - NodeText::Replaced => None, - }) { - match txt { - "where" | "update" | "select" | "delete" | "from" | "join" => { - self.wrapping_clause_type = txt.try_into().ok(); - break; - } - _ => {} - } - }; + if let Some(clause_type) = self.get_wrapping_clause_from_keyword_node(n) { + self.wrapping_clause_type = Some(clause_type); + break; + } else { + prev_sibling = n.prev_named_sibling(); } - prev_sibling = n.prev_named_sibling(); } } @@ -330,7 +284,8 @@ impl<'a> CompletionContext<'a> { } "where" | "update" | "select" | "delete" | "from" | "join" => { - self.wrapping_clause_type = current_node_kind.try_into().ok(); + self.wrapping_clause_type = + self.get_wrapping_clause_from_current_node(current_node, &mut cursor); } "relation" | "binary_expression" | "assignment" => { @@ -353,12 +308,67 @@ impl<'a> CompletionContext<'a> { cursor.goto_first_child_for_byte(self.position); self.gather_context_from_node(cursor, current_node); } + + fn get_wrapping_clause_from_keyword_node( + &self, + node: tree_sitter::Node<'a>, + ) -> Option> { + if node.kind().starts_with("keyword_") { + if let Some(txt) = self.get_ts_node_content(node).and_then(|txt| match txt { + NodeText::Original(txt) => Some(txt), + NodeText::Replaced => None, + }) { + match txt { + "where" => return Some(WrappingClause::Where), + "update" => return Some(WrappingClause::Update), + 
"select" => return Some(WrappingClause::Select), + "delete" => return Some(WrappingClause::Delete), + "from" => return Some(WrappingClause::From), + "join" => { + // TODO: not sure if we can infer it here. + return Some(WrappingClause::Join { on_node: None }); + } + _ => {} + } + }; + } + + None + } + + fn get_wrapping_clause_from_current_node( + &self, + node: tree_sitter::Node<'a>, + cursor: &mut tree_sitter::TreeCursor<'a>, + ) -> Option> { + match node.kind() { + "where" => Some(WrappingClause::Where), + "update" => Some(WrappingClause::Update), + "select" => Some(WrappingClause::Select), + "delete" => Some(WrappingClause::Delete), + "from" => Some(WrappingClause::From), + "join" => { + // sadly, we need to manually iterate over the children – + // `node.child_by_field_id(..)` does not work as expected + let mut on_node = None; + for child in node.children(cursor) { + // 28 is the id for "keyword_on" + if child.kind_id() == 28 { + on_node = Some(child); + } + } + cursor.goto_parent(); + Some(WrappingClause::Join { on_node }) + } + _ => None, + } + } } #[cfg(test)] mod tests { use crate::{ - context::{ClauseType, CompletionContext, NodeText}, + context::{CompletionContext, NodeText, WrappingClause}, sanitization::SanitizedCompletionParams, test_helper::{CURSOR_POS, get_text_and_position}, }; @@ -375,29 +385,41 @@ mod tests { #[test] fn identifies_clauses() { let test_cases = vec![ - (format!("Select {}* from users;", CURSOR_POS), "select"), - (format!("Select * from u{};", CURSOR_POS), "from"), + ( + format!("Select {}* from users;", CURSOR_POS), + WrappingClause::Select, + ), + ( + format!("Select * from u{};", CURSOR_POS), + WrappingClause::From, + ), ( format!("Select {}* from users where n = 1;", CURSOR_POS), - "select", + WrappingClause::Select, ), ( format!("Select * from users where {}n = 1;", CURSOR_POS), - "where", + WrappingClause::Where, ), ( format!("update users set u{} = 1 where n = 2;", CURSOR_POS), - "update", + WrappingClause::Update, ), ( format!("update users set u = 1 where n{} = 2;", CURSOR_POS), - "where", + WrappingClause::Where, + ), + ( + format!("delete{} from users;", CURSOR_POS), + WrappingClause::Delete, + ), + ( + format!("delete from {}users;", CURSOR_POS), + WrappingClause::From, ), - (format!("delete{} from users;", CURSOR_POS), "delete"), - (format!("delete from {}users;", CURSOR_POS), "from"), ( format!("select name, age, location from public.u{}sers", CURSOR_POS), - "from", + WrappingClause::From, ), ]; @@ -415,7 +437,7 @@ mod tests { let ctx = CompletionContext::new(¶ms); - assert_eq!(ctx.wrapping_clause_type, expected_clause.try_into().ok()); + assert_eq!(ctx.wrapping_clause_type, Some(expected_clause)); } } @@ -518,7 +540,7 @@ mod tests { assert_eq!( ctx.wrapping_clause_type, - Some(crate::context::ClauseType::Select) + Some(crate::context::WrappingClause::Select) ); } } @@ -596,6 +618,6 @@ mod tests { ctx.get_ts_node_content(node), Some(NodeText::Original("fro")) ); - assert_eq!(ctx.wrapping_clause_type, Some(ClauseType::Select)); + assert_eq!(ctx.wrapping_clause_type, Some(WrappingClause::Select)); } } diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs index 770a2b61..bd573430 100644 --- a/crates/pgt_completions/src/providers/columns.rs +++ b/crates/pgt_completions/src/providers/columns.rs @@ -431,4 +431,57 @@ mod tests { ) .await; } + + #[tokio::test] + async fn completes_in_join_on_clause() { + let setup = r#" + create schema auth; + + create table auth.users ( + uid serial primary 
key,
+                name text not null,
+                email text unique not null
+            );
+
+            create table auth.posts (
+                pid serial primary key,
+                user_id int not null references auth.users(uid),
+                title text not null,
+                content text,
+                created_at timestamp default now()
+            );
+        "#;
+
+        assert_complete_results(
+            format!(
+                "select u.id, auth.posts.content from auth.users u join auth.posts on u.{}",
+                CURSOR_POS
+            )
+            .as_str(),
+            vec![
+                CompletionAssertion::KindNotExists(CompletionItemKind::Table),
+                CompletionAssertion::LabelAndKind("uid".to_string(), CompletionItemKind::Column),
+                CompletionAssertion::LabelAndKind("email".to_string(), CompletionItemKind::Column),
+                CompletionAssertion::LabelAndKind("name".to_string(), CompletionItemKind::Column),
+            ],
+            setup,
+        )
+        .await;
+
+        assert_complete_results(
+            format!(
+                "select u.id, p.content from auth.users u join auth.posts p on p.user_id = u.{}",
+                CURSOR_POS
+            )
+            .as_str(),
+            vec![
+                CompletionAssertion::KindNotExists(CompletionItemKind::Table),
+                CompletionAssertion::LabelAndKind("uid".to_string(), CompletionItemKind::Column),
+                CompletionAssertion::LabelAndKind("email".to_string(), CompletionItemKind::Column),
+                CompletionAssertion::LabelAndKind("name".to_string(), CompletionItemKind::Column),
+            ],
+            setup,
+        )
+        .await;
+    }
 }
diff --git a/crates/pgt_completions/src/relevance/filtering.rs b/crates/pgt_completions/src/relevance/filtering.rs
index 2658216b..c74d8c35 100644
--- a/crates/pgt_completions/src/relevance/filtering.rs
+++ b/crates/pgt_completions/src/relevance/filtering.rs
@@ -1,4 +1,4 @@
-use crate::context::{ClauseType, CompletionContext, WrappingNode};
+use crate::context::{CompletionContext, WrappingClause};
 
 use super::CompletionRelevanceData;
 
@@ -50,31 +50,36 @@ impl CompletionFilter<'_> {
     fn check_clause(&self, ctx: &CompletionContext) -> Option<()> {
         let clause = ctx.wrapping_clause_type.as_ref();
-        let wrapping_node = ctx.wrapping_node_kind.as_ref();
 
         match self.data {
             CompletionRelevanceData::Table(_) => {
                 let in_select_clause = clause.is_some_and(|c| c == &WrappingClause::Select);
                 let in_where_clause = clause.is_some_and(|c| c == &WrappingClause::Where);
 
                 if in_select_clause || in_where_clause {
                     return None;
                 };
             }
             CompletionRelevanceData::Column(_) => {
-                let in_from_clause = clause.is_some_and(|c| c == &ClauseType::From);
+                let in_from_clause = clause.is_some_and(|c| c == &WrappingClause::From);
                 if in_from_clause {
                     return None;
                 }
 
-                // We can complete columns in JOIN cluases, but only if we are in the
-                // "ON u.id = posts.user_id" part.
-                let in_join_clause = clause.is_some_and(|c| c == &ClauseType::Join);
+                // We can complete columns in JOIN clauses, but only if we are after the
+                // ON node in the "ON u.id = posts.user_id" part.
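+                // For example: in `select * from users u join posts p on u.{}`, the cursor
+                // sits after the ON node, so column completions are kept; in
+                // `select * from users u join p{}`, it does not, so columns are filtered out.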
+ let in_join_clause_before_on_node = clause.is_some_and(|c| match c { + // we are in a JOIN, but definitely not after an ON + WrappingClause::Join { on_node: None } => true, - let in_comparison_clause = - wrapping_node.is_some_and(|n| n == &WrappingNode::BinaryExpression); + WrappingClause::Join { on_node: Some(on) } => ctx + .node_under_cursor + .is_some_and(|n| n.end_byte() < on.start_byte()), - if in_join_clause && !in_comparison_clause { + _ => false, + }); + + if in_join_clause_before_on_node { return None; } } diff --git a/crates/pgt_completions/src/relevance/scoring.rs b/crates/pgt_completions/src/relevance/scoring.rs index e67df658..baff3960 100644 --- a/crates/pgt_completions/src/relevance/scoring.rs +++ b/crates/pgt_completions/src/relevance/scoring.rs @@ -1,4 +1,4 @@ -use crate::context::{ClauseType, CompletionContext, WrappingNode}; +use crate::context::{CompletionContext, WrappingClause, WrappingNode}; use super::CompletionRelevanceData; @@ -64,40 +64,47 @@ impl CompletionScore<'_> { let has_mentioned_tables = !ctx.mentioned_relations.is_empty(); let has_mentioned_schema = ctx.schema_or_alias_name.is_some(); - let is_binary_exp = ctx - .wrapping_node_kind - .as_ref() - .is_some_and(|wn| wn == &WrappingNode::BinaryExpression); - self.score += match self.data { CompletionRelevanceData::Table(_) => match clause_type { - ClauseType::Update => 10, - ClauseType::Delete => 10, - ClauseType::From => 5, - ClauseType::Join if !is_binary_exp => 5, + WrappingClause::Update => 10, + WrappingClause::Delete => 10, + WrappingClause::From => 5, + WrappingClause::Join { on_node } + if on_node.is_none_or(|on| { + ctx.node_under_cursor + .is_none_or(|n| n.end_byte() < on.start_byte()) + }) => + { + 5 + } _ => -50, }, CompletionRelevanceData::Function(_) => match clause_type { - ClauseType::Select if !has_mentioned_tables => 15, - ClauseType::Select if has_mentioned_tables => 0, - ClauseType::From => 0, + WrappingClause::Select if !has_mentioned_tables => 15, + WrappingClause::Select if has_mentioned_tables => 0, + WrappingClause::From => 0, _ => -50, }, CompletionRelevanceData::Column(col) => match clause_type { - ClauseType::Select if has_mentioned_tables => 10, - ClauseType::Select if !has_mentioned_tables => 0, - ClauseType::Where => 10, - ClauseType::Join if is_binary_exp => { + WrappingClause::Select if has_mentioned_tables => 10, + WrappingClause::Select if !has_mentioned_tables => 0, + WrappingClause::Where => 10, + WrappingClause::Join { on_node } + if on_node.is_some_and(|on| { + ctx.node_under_cursor + .is_some_and(|n| n.start_byte() > on.end_byte()) + }) => + { // Users will probably join on primary keys if col.is_primary_key { 20 } else { 10 } } _ => -15, }, CompletionRelevanceData::Schema(_) => match clause_type { - ClauseType::From if !has_mentioned_schema => 15, - ClauseType::Join if !has_mentioned_schema => 15, - ClauseType::Update if !has_mentioned_schema => 15, - ClauseType::Delete if !has_mentioned_schema => 15, + WrappingClause::From if !has_mentioned_schema => 15, + WrappingClause::Join { .. 
} if !has_mentioned_schema => 15,
+                WrappingClause::Update if !has_mentioned_schema => 15,
+                WrappingClause::Delete if !has_mentioned_schema => 15,
                 _ => -50,
             },
         }

From 6469ce368aa4f42c38e16b9561c3f8d6680c7fca Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?=
Date: Tue, 6 May 2025 11:09:02 +0200
Subject: [PATCH 048/114] fix: properly handle multiple changes at once (#389)

* fix: multiple changes at once

* fix: also handle deletion case

* cleanup

* cleanup

* cleanup
---
 .../src/workspace/server/change.rs            | 108 +++++++++++++++++-
 1 file changed, 103 insertions(+), 5 deletions(-)

diff --git a/crates/pgt_workspace/src/workspace/server/change.rs b/crates/pgt_workspace/src/workspace/server/change.rs
index 6e86abcf..c8799922 100644
--- a/crates/pgt_workspace/src/workspace/server/change.rs
+++ b/crates/pgt_workspace/src/workspace/server/change.rs
@@ -63,11 +63,33 @@ impl Document {
         // very much not guaranteed to result in correct ranges
         self.diagnostics.clear();

-        let changes = change
-            .changes
-            .iter()
-            .flat_map(|c| self.apply_change(c))
-            .collect();
+        // when we receive more than one change, we need to push back each subsequent change by the
+        // accumulated size delta of the previous ones, because all ranges refer to the original state.
+        let mut changes = Vec::new();
+
+        let mut offset: i64 = 0;
+
+        for change in &change.changes {
+            let adjusted_change = if offset != 0 && change.range.is_some() {
+                &ChangeParams {
+                    text: change.text.clone(),
+                    range: change.range.map(|range| {
+                        let start = u32::from(range.start());
+                        let end = u32::from(range.end());
+                        TextRange::new(
+                            TextSize::from((start as i64 + offset).try_into().unwrap_or(0)),
+                            TextSize::from((end as i64 + offset).try_into().unwrap_or(0)),
+                        )
+                    }),
+                }
+            } else {
+                change
+            };
+
+            changes.extend(self.apply_change(adjusted_change));
+
+            offset += change.change_size();
+        }

         self.version = change.version;

@@ -356,6 +378,18 @@ impl Document {
 }

 impl ChangeParams {
+    /// For lack of a better name, this returns the difference in size between the new text
+    /// and the replaced range (negative for net deletions)
+    pub fn change_size(&self) -> i64 {
+        match self.range {
+            Some(range) => {
+                let range_length: usize = range.len().into();
+                let text_length = self.text.chars().count();
+                text_length as i64 - range_length as i64
+            }
+            None => i64::try_from(self.text.chars().count()).unwrap(),
+        }
+    }
+
     pub fn diff_size(&self) -> TextSize {
         match self.range {
             Some(range) => {
@@ -1522,6 +1556,70 @@ mod tests {
         assert_document_integrity(&doc);
     }

+    #[test]
+    fn multiple_deletions_at_once() {
+        let path = PgTPath::new("test.sql");
+
+        let mut doc = Document::new("\n\n\n\nALTER TABLE ONLY \"public\".\"sendout\"\n    ADD CONSTRAINT \"sendout_organisation_id_fkey\" FOREIGN
+KEY (\"organisation_id\") REFERENCES \"public\".\"organisation\"(\"id\") ON UPDATE RESTRICT ON DELETE CASCADE;\n".to_string(), 0);
+
+        let change = ChangeFileParams {
+            path: path.clone(),
+            version: 1,
+            changes: vec![
+                ChangeParams {
+                    range: Some(TextRange::new(31.into(), 38.into())),
+                    text: "te".to_string(),
+                },
+                ChangeParams {
+                    range: Some(TextRange::new(60.into(), 67.into())),
+                    text: "te".to_string(),
+                },
+            ],
+        };
+
+        let changed = doc.apply_file_change(&change);
+
+        assert_eq!(doc.content, "\n\n\n\nALTER TABLE ONLY \"public\".\"te\"\n    ADD CONSTRAINT \"te_organisation_id_fkey\" FOREIGN
+KEY (\"organisation_id\") REFERENCES \"public\".\"organisation\"(\"id\") ON UPDATE RESTRICT ON DELETE CASCADE;\n");
+
+        assert_eq!(changed.len(), 2);
+
+        assert_document_integrity(&doc);
+    }
+
+    #[test]
+    fn 
multiple_additions_at_once() { + let path = PgTPath::new("test.sql"); + + let mut doc = Document::new("\n\n\n\nALTER TABLE ONLY \"public\".\"sendout\"\n ADD CONSTRAINT \"sendout_organisation_id_fkey\" FOREIGN +KEY (\"organisation_id\") REFERENCES \"public\".\"organisation\"(\"id\") ON UPDATE RESTRICT ON DELETE CASCADE;\n".to_string(), 0); + + let change = ChangeFileParams { + path: path.clone(), + version: 1, + changes: vec![ + ChangeParams { + range: Some(TextRange::new(31.into(), 38.into())), + text: "omni_channel_message".to_string(), + }, + ChangeParams { + range: Some(TextRange::new(60.into(), 67.into())), + text: "omni_channel_message".to_string(), + }, + ], + }; + + let changed = doc.apply_file_change(&change); + + assert_eq!(doc.content, "\n\n\n\nALTER TABLE ONLY \"public\".\"omni_channel_message\"\n ADD CONSTRAINT \"omni_channel_message_organisation_id_fkey\" FOREIGN +KEY (\"organisation_id\") REFERENCES \"public\".\"organisation\"(\"id\") ON UPDATE RESTRICT ON DELETE CASCADE;\n"); + + assert_eq!(changed.len(), 2); + + assert_document_integrity(&doc); + } + #[test] fn remove_inbetween_whitespace() { let path = PgTPath::new("test.sql"); From 4ef2c75f153197cc6ddcec109eb85af26ab19eee Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Wed, 7 May 2025 10:16:19 +0200 Subject: [PATCH 049/114] fix(completions): use fuzzy matching for user input (#393) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes it such that we rank higher the `portfolio_settings` column if we type "sett" – before, it didn't get any boost. --- Cargo.lock | 10 +++ crates/pgt_completions/Cargo.toml | 1 + .../pgt_completions/src/providers/columns.rs | 76 ++++++++----------- .../pgt_completions/src/relevance/scoring.rs | 19 +++-- 4 files changed, 58 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 72ba810f..55db1b6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1506,6 +1506,15 @@ dependencies = [ "slab", ] +[[package]] +name = "fuzzy-matcher" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54614a3312934d066701a80f20f15fa3b56d67ac7722b39eea5b4c9dd1d66c94" +dependencies = [ + "thread_local", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -2567,6 +2576,7 @@ version = "0.0.0" dependencies = [ "async-std", "criterion", + "fuzzy-matcher", "pgt_schema_cache", "pgt_test_utils", "pgt_text_size", diff --git a/crates/pgt_completions/Cargo.toml b/crates/pgt_completions/Cargo.toml index a69ee75a..916a0020 100644 --- a/crates/pgt_completions/Cargo.toml +++ b/crates/pgt_completions/Cargo.toml @@ -17,6 +17,7 @@ async-std = "1.12.0" pgt_text_size.workspace = true +fuzzy-matcher = "0.3.7" pgt_schema_cache.workspace = true pgt_treesitter_queries.workspace = true schemars = { workspace = true, optional = true } diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs index bd573430..7524e134 100644 --- a/crates/pgt_completions/src/providers/columns.rs +++ b/crates/pgt_completions/src/providers/columns.rs @@ -273,60 +273,50 @@ mod tests { id1 serial primary key, name1 text, address1 text, - email1 text + email1 text, + user_settings jsonb ); create table public.users ( id2 serial primary key, name2 text, address2 text, - email2 text + email2 text, + settings jsonb ); "#; - { - let test_case = TestCase { - message: "", - query: format!(r#"select {} from users"#, CURSOR_POS), - label: "suggests from 
table", - description: "", - }; - - let (tree, cache) = get_test_deps(setup, test_case.get_input_query()).await; - let params = get_test_params(&tree, &cache, test_case.get_input_query()); - let results = complete(params); - - assert_eq!( - results - .into_iter() - .take(4) - .map(|item| item.label) - .collect::>(), - vec!["address2", "email2", "id2", "name2"] - ); - } - - { - let test_case = TestCase { - message: "", - query: format!(r#"select {} from private.users"#, CURSOR_POS), - label: "suggests from table", - description: "", - }; + assert_complete_results( + format!(r#"select {} from users"#, CURSOR_POS).as_str(), + vec![ + CompletionAssertion::Label("address2".into()), + CompletionAssertion::Label("email2".into()), + CompletionAssertion::Label("id2".into()), + CompletionAssertion::Label("name2".into()), + ], + setup, + ) + .await; - let (tree, cache) = get_test_deps(setup, test_case.get_input_query()).await; - let params = get_test_params(&tree, &cache, test_case.get_input_query()); - let results = complete(params); + assert_complete_results( + format!(r#"select {} from private.users"#, CURSOR_POS).as_str(), + vec![ + CompletionAssertion::Label("address1".into()), + CompletionAssertion::Label("email1".into()), + CompletionAssertion::Label("id1".into()), + CompletionAssertion::Label("name1".into()), + ], + setup, + ) + .await; - assert_eq!( - results - .into_iter() - .take(4) - .map(|item| item.label) - .collect::>(), - vec!["address1", "email1", "id1", "name1"] - ); - } + // asserts fuzzy finding for "settings" + assert_complete_results( + format!(r#"select sett{} from private.users"#, CURSOR_POS).as_str(), + vec![CompletionAssertion::Label("user_settings".into())], + setup, + ) + .await; } #[tokio::test] diff --git a/crates/pgt_completions/src/relevance/scoring.rs b/crates/pgt_completions/src/relevance/scoring.rs index baff3960..71c01023 100644 --- a/crates/pgt_completions/src/relevance/scoring.rs +++ b/crates/pgt_completions/src/relevance/scoring.rs @@ -1,3 +1,5 @@ +use fuzzy_matcher::{FuzzyMatcher, skim::SkimMatcherV2}; + use crate::context::{CompletionContext, WrappingClause, WrappingNode}; use super::CompletionRelevanceData; @@ -45,14 +47,21 @@ impl CompletionScore<'_> { CompletionRelevanceData::Schema(s) => s.name.as_str(), }; - if name.starts_with(content.as_str()) { - let len: i32 = content - .len() + let fz_matcher = SkimMatcherV2::default(); + + if let Some(score) = fz_matcher.fuzzy_match(name, content.as_str()) { + let scorei32: i32 = score .try_into() .expect("The length of the input exceeds i32 capacity"); - self.score += len * 10; - }; + // the scoring value isn't linear. + // here are a couple of samples: + // - item: bytea_string_agg_transfn, input: n, score: 15 + // - item: numeric_uplus, input: n, score: 31 + // - item: settings, input: sett, score: 91 + // - item: user_settings, input: sett, score: 82 + self.score += scorei32 / 2; + } } fn check_matching_clause_type(&mut self, ctx: &CompletionContext) { From 573e0ff5f95dd24392160b40c7ed0e428b315e75 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Wed, 7 May 2025 10:16:27 +0200 Subject: [PATCH 050/114] fix: do not complete right after asterisk (#394) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If we have `select * {}`, we shouldn't provide suggestions. It's annoying if you just want to write `from` 💩 If we have `select *, {}`, on the other hand, we should. 
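
A minimal pair for illustration (cursor marked `{}`; the `users` table is just
an example):

```sql
-- no suggestions here: the cursor directly follows the asterisk
select * {} from users;

-- suggestions again: the comma starts a new select item
select *, {} from users;
```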
---
 .../src/relevance/filtering.rs                | 41 +++++++++++++++++++
 1 file changed, 41 insertions(+)

diff --git a/crates/pgt_completions/src/relevance/filtering.rs b/crates/pgt_completions/src/relevance/filtering.rs
index c74d8c35..ec12201c 100644
--- a/crates/pgt_completions/src/relevance/filtering.rs
+++ b/crates/pgt_completions/src/relevance/filtering.rs
@@ -45,6 +45,15 @@ impl CompletionFilter<'_> {
             return None;
         }

+        // no completions if we're right after an asterisk:
+        // `select * {}`
+        if ctx.node_under_cursor.is_some_and(|n| {
+            n.prev_sibling()
+                .is_some_and(|p| (p.kind() == "all_fields") && n.kind() == "identifier")
+        }) {
+            return None;
+        }
+
         Some(())
     }

@@ -130,3 +139,35 @@ impl CompletionFilter<'_> {
         Some(())
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use crate::test_helper::{
+        CURSOR_POS, CompletionAssertion, assert_complete_results, assert_no_complete_results,
+    };
+
+    #[tokio::test]
+    async fn completion_after_asterisk() {
+        let setup = r#"
+            create table users (
+                id serial primary key,
+                email text,
+                address text
+            );
+        "#;
+
+        assert_no_complete_results(format!("select * {}", CURSOR_POS).as_str(), setup).await;
+
+        // if there's a COMMA after the asterisk, we're good
+        assert_complete_results(
+            format!("select *, {}", CURSOR_POS).as_str(),
+            vec![
+                CompletionAssertion::Label("address".into()),
+                CompletionAssertion::Label("email".into()),
+                CompletionAssertion::Label("id".into()),
+            ],
+            setup,
+        )
+        .await;
+    }
+}

From be9446133c4a85c7184297390f48cc66cdf56fb3 Mon Sep 17 00:00:00 2001
From: Julian Domke <68325451+juleswritescode@users.noreply.github.com>
Date: Wed, 7 May 2025 10:16:36 +0200
Subject: [PATCH 051/114] feat(completions): fill in alias for columns in join clauses (#392)

Given a query

```sql
select email, id
from auth.users u
join auth.identities i
on {}
```

if you pick `user_id` from the list, it will fill in `i.user_id` now.
---
 .../pgt_completions/src/providers/columns.rs  | 14 +++++++++++--
 .../src/providers/functions.rs                |  8 ++++++--
 .../pgt_completions/src/providers/helper.rs   | 20 +++++++++++++++----
 .../pgt_completions/src/providers/tables.rs   |  8 ++++++--
 4 files changed, 40 insertions(+), 10 deletions(-)

diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs
index 7524e134..331c4416 100644
--- a/crates/pgt_completions/src/providers/columns.rs
+++ b/crates/pgt_completions/src/providers/columns.rs
@@ -1,17 +1,19 @@
 use crate::{
     CompletionItemKind,
     builder::{CompletionBuilder, PossibleCompletionItem},
-    context::CompletionContext,
+    context::{CompletionContext, WrappingClause},
     relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore},
 };

+use super::helper::{find_matching_alias_for_table, get_completion_text_with_schema_or_alias};
+
 pub fn complete_columns<'a>(ctx: &CompletionContext<'a>, builder: &mut CompletionBuilder<'a>) {
     let available_columns = &ctx.schema_cache.columns;

     for col in available_columns {
         let relevance = CompletionRelevanceData::Column(col);

-        let item = PossibleCompletionItem {
+        let mut item = PossibleCompletionItem {
             label: col.name.clone(),
             score: CompletionScore::from(relevance.clone()),
             filter: CompletionFilter::from(relevance),
@@ -20,6 +22,14 @@ pub fn complete_columns<'a>(ctx: &CompletionContext<'a>, builder: &mut Completio
             completion_text: None,
         };

+        // autocomplete with the alias in a join clause if we find one
+        if matches!(ctx.wrapping_clause_type, Some(WrappingClause::Join { .. 
})) { + item.completion_text = find_matching_alias_for_table(ctx, col.table_name.as_str()) + .and_then(|alias| { + get_completion_text_with_schema_or_alias(ctx, col.name.as_str(), alias.as_str()) + }); + } + builder.add_item(item); } } diff --git a/crates/pgt_completions/src/providers/functions.rs b/crates/pgt_completions/src/providers/functions.rs index 4241da92..6bc04deb 100644 --- a/crates/pgt_completions/src/providers/functions.rs +++ b/crates/pgt_completions/src/providers/functions.rs @@ -5,7 +5,7 @@ use crate::{ relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore}, }; -use super::helper::get_completion_text_with_schema; +use super::helper::get_completion_text_with_schema_or_alias; pub fn complete_functions<'a>(ctx: &'a CompletionContext, builder: &mut CompletionBuilder<'a>) { let available_functions = &ctx.schema_cache.functions; @@ -19,7 +19,11 @@ pub fn complete_functions<'a>(ctx: &'a CompletionContext, builder: &mut Completi filter: CompletionFilter::from(relevance), description: format!("Schema: {}", func.schema), kind: CompletionItemKind::Function, - completion_text: get_completion_text_with_schema(ctx, &func.name, &func.schema), + completion_text: get_completion_text_with_schema_or_alias( + ctx, + &func.name, + &func.schema, + ), }; builder.add_item(item); diff --git a/crates/pgt_completions/src/providers/helper.rs b/crates/pgt_completions/src/providers/helper.rs index c0fe5869..999d6b37 100644 --- a/crates/pgt_completions/src/providers/helper.rs +++ b/crates/pgt_completions/src/providers/helper.rs @@ -2,12 +2,24 @@ use pgt_text_size::{TextRange, TextSize}; use crate::{CompletionText, context::CompletionContext}; -pub(crate) fn get_completion_text_with_schema( +pub(crate) fn find_matching_alias_for_table( + ctx: &CompletionContext, + table_name: &str, +) -> Option { + for (alias, table) in ctx.mentioned_table_aliases.iter() { + if table == table_name { + return Some(alias.to_string()); + } + } + None +} + +pub(crate) fn get_completion_text_with_schema_or_alias( ctx: &CompletionContext, item_name: &str, - item_schema_name: &str, + schema_or_alias_name: &str, ) -> Option { - if item_schema_name == "public" || ctx.schema_or_alias_name.is_some() { + if schema_or_alias_name == "public" || ctx.schema_or_alias_name.is_some() { None } else { let node = ctx.node_under_cursor.unwrap(); @@ -18,7 +30,7 @@ pub(crate) fn get_completion_text_with_schema( ); Some(CompletionText { - text: format!("{}.{}", item_schema_name, item_name), + text: format!("{}.{}", schema_or_alias_name, item_name), range, }) } diff --git a/crates/pgt_completions/src/providers/tables.rs b/crates/pgt_completions/src/providers/tables.rs index cbedc55b..57195da7 100644 --- a/crates/pgt_completions/src/providers/tables.rs +++ b/crates/pgt_completions/src/providers/tables.rs @@ -5,7 +5,7 @@ use crate::{ relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore}, }; -use super::helper::get_completion_text_with_schema; +use super::helper::get_completion_text_with_schema_or_alias; pub fn complete_tables<'a>(ctx: &'a CompletionContext, builder: &mut CompletionBuilder<'a>) { let available_tables = &ctx.schema_cache.tables; @@ -19,7 +19,11 @@ pub fn complete_tables<'a>(ctx: &'a CompletionContext, builder: &mut CompletionB filter: CompletionFilter::from(relevance), description: format!("Schema: {}", table.schema), kind: CompletionItemKind::Table, - completion_text: get_completion_text_with_schema(ctx, &table.name, &table.schema), + completion_text: 
get_completion_text_with_schema_or_alias( + ctx, + &table.name, + &table.schema, + ), }; builder.add_item(item); From 26e82c7ef9572c6ec912a17f1b949b5b5dc30a78 Mon Sep 17 00:00:00 2001 From: Will Ruggiano <11872440+willruggiano@users.noreply.github.com> Date: Wed, 7 May 2025 01:18:22 -0700 Subject: [PATCH 052/114] feat: allow configuration through workspace/didChangeConfiguration (#316) ## What kind of change does this PR introduce? It enables passing workspace settings via the workspace/didChangeConfiguration notification. This in turn enables the client to specify settings dynamically, rather than being limited to configuration files. This is _a_ solution to #302. See example usage (with lspconfig) here: https://github.com/willruggiano/neovim.drv/commit/9aa06ad7c889ed7b14d35ab34264e757d5ad1a7e. ## What is the current behavior? There is none. The payload of this handler is currently ignored. ## What is the new behavior? The configuration received by the handler is merged with the fs configuration. --------- Co-authored-by: Will Ruggiano --- biome.jsonc | 4 +-- crates/pgt_lsp/src/server.rs | 10 +++++--- crates/pgt_lsp/src/session.rs | 27 ++++++++++++++------ crates/pgt_workspace/src/workspace/server.rs | 1 + 4 files changed, 28 insertions(+), 14 deletions(-) diff --git a/biome.jsonc b/biome.jsonc index 36b60d9b..582bee9b 100644 --- a/biome.jsonc +++ b/biome.jsonc @@ -1,5 +1,5 @@ { - "$schema": "https://biomejs.dev/schemas/1.9.4/schema.json", + "$schema": "./node_modules/@biomejs/biome/configuration_schema.json", "vcs": { "enabled": false, "clientKind": "git", @@ -8,7 +8,7 @@ "files": { "ignoreUnknown": false, "ignore": [], - "include": ["packages/**/*"] + "include": ["/packages/**/*"] }, "formatter": { "enabled": true, diff --git a/crates/pgt_lsp/src/server.rs b/crates/pgt_lsp/src/server.rs index 9bf9b9c2..4c05c0e4 100644 --- a/crates/pgt_lsp/src/server.rs +++ b/crates/pgt_lsp/src/server.rs @@ -132,7 +132,7 @@ impl LanguageServer for LSPServer { ConfigName::pgt_jsonc() ); - futures::join!(self.session.load_workspace_settings()); + futures::join!(self.session.load_workspace_settings(None)); let msg = format!("Server initialized with PID: {}", std::process::id()); self.session @@ -152,8 +152,10 @@ impl LanguageServer for LSPServer { } #[tracing::instrument(level = "info", skip_all)] - async fn did_change_configuration(&self, _params: DidChangeConfigurationParams) { - self.session.load_workspace_settings().await; + async fn did_change_configuration(&self, params: DidChangeConfigurationParams) { + self.session + .load_workspace_settings(serde_json::from_value(params.settings).ok()) + .await; self.setup_capabilities().await; self.session.update_all_diagnostics().await; } @@ -174,7 +176,7 @@ impl LanguageServer for LSPServer { if ConfigName::file_names() .contains(&&*watched_file.display().to_string()) { - self.session.load_workspace_settings().await; + self.session.load_workspace_settings(None).await; self.setup_capabilities().await; // self.session.update_all_diagnostics().await; // for now we are only interested to the configuration file, diff --git a/crates/pgt_lsp/src/session.rs b/crates/pgt_lsp/src/session.rs index db17dfd1..7ccf2bab 100644 --- a/crates/pgt_lsp/src/session.rs +++ b/crates/pgt_lsp/src/session.rs @@ -3,10 +3,11 @@ use crate::diagnostics::LspError; use crate::documents::Document; use crate::utils; use anyhow::Result; +use biome_deserialize::Merge; use futures::StreamExt; use futures::stream::FuturesUnordered; use pgt_analyse::RuleCategoriesBuilder; -use 
pgt_configuration::ConfigurationPathHint; +use pgt_configuration::{ConfigurationPathHint, PartialConfiguration}; use pgt_diagnostics::{DiagnosticExt, Error}; use pgt_fs::{FileSystem, PgTPath}; use pgt_workspace::Workspace; @@ -386,11 +387,13 @@ impl Session { /// This function attempts to read the `postgrestools.jsonc` configuration file from /// the root URI and update the workspace settings accordingly #[tracing::instrument(level = "trace", skip(self))] - pub(crate) async fn load_workspace_settings(&self) { + pub(crate) async fn load_workspace_settings(&self, extra_config: Option) { // Providing a custom configuration path will not allow to support workspaces if let Some(config_path) = &self.config_path { let base_path = ConfigurationPathHint::FromUser(config_path.clone()); - let status = self.load_pgt_configuration_file(base_path).await; + let status = self + .load_pgt_configuration_file(base_path, extra_config) + .await; self.set_configuration_status(status); } else if let Some(folders) = self.get_workspace_folders() { info!("Detected workspace folder."); @@ -401,9 +404,10 @@ impl Session { match base_path { Ok(base_path) => { let status = self - .load_pgt_configuration_file(ConfigurationPathHint::FromWorkspace( - base_path, - )) + .load_pgt_configuration_file( + ConfigurationPathHint::FromWorkspace(base_path), + extra_config.clone(), + ) .await; self.set_configuration_status(status); } @@ -420,7 +424,9 @@ impl Session { None => ConfigurationPathHint::default(), Some(path) => ConfigurationPathHint::FromLsp(path), }; - let status = self.load_pgt_configuration_file(base_path).await; + let status = self + .load_pgt_configuration_file(base_path, extra_config) + .await; self.set_configuration_status(status); } } @@ -428,17 +434,22 @@ impl Session { async fn load_pgt_configuration_file( &self, base_path: ConfigurationPathHint, + extra_config: Option, ) -> ConfigurationStatus { match load_configuration(&self.fs, base_path.clone()) { Ok(loaded_configuration) => { let LoadedConfiguration { - configuration: fs_configuration, + configuration: mut fs_configuration, directory_path: configuration_path, .. 
} = loaded_configuration; info!("Configuration loaded successfully from disk."); info!("Update workspace settings."); + if let Some(ws_configuration) = extra_config { + fs_configuration.merge_with(ws_configuration); + } + let result = fs_configuration .retrieve_gitignore_matches(&self.fs, configuration_path.as_deref()); diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs index 3c14f352..5a7bfc44 100644 --- a/crates/pgt_workspace/src/workspace/server.rs +++ b/crates/pgt_workspace/src/workspace/server.rs @@ -167,6 +167,7 @@ impl Workspace for WorkspaceServer { )?; tracing::info!("Updated settings in workspace"); + tracing::debug!("Updated settings are {:#?}", self.settings()); self.connection .write() From 2ada420498c26923f62abdcc3fbd9c59cef8fd8e Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Fri, 16 May 2025 22:42:40 +0200 Subject: [PATCH 053/114] feat(completions): lower priority of already mentioned columns in SELECT (#399) --- crates/pgt_completions/src/context.rs | 50 +++-- .../pgt_completions/src/providers/columns.rs | 89 +++++++++ .../pgt_completions/src/providers/triggers.rs | 169 +++++++++++++++++ .../pgt_completions/src/relevance/scoring.rs | 37 ++++ crates/pgt_completions/src/test_helper.rs | 17 +- .../pgt_treesitter_queries/src/queries/mod.rs | 13 ++ .../src/queries/select_columns.rs | 172 ++++++++++++++++++ 7 files changed, 531 insertions(+), 16 deletions(-) create mode 100644 crates/pgt_completions/src/providers/triggers.rs create mode 100644 crates/pgt_treesitter_queries/src/queries/select_columns.rs diff --git a/crates/pgt_completions/src/context.rs b/crates/pgt_completions/src/context.rs index d96d0d53..a17cafa2 100644 --- a/crates/pgt_completions/src/context.rs +++ b/crates/pgt_completions/src/context.rs @@ -8,7 +8,7 @@ use pgt_treesitter_queries::{ use crate::sanitization::SanitizedCompletionParams; -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Hash)] pub enum WrappingClause<'a> { Select, Where, @@ -26,6 +26,12 @@ pub(crate) enum NodeText<'a> { Original(&'a str), } +#[derive(PartialEq, Eq, Hash, Debug)] +pub(crate) struct MentionedColumn { + pub(crate) column: String, + pub(crate) alias: Option, +} + /// We can map a few nodes, such as the "update" node, to actual SQL clauses. /// That gives us a lot of insight for completions. 
/// Other nodes, such as the "relation" node, gives us less but still @@ -108,8 +114,8 @@ pub(crate) struct CompletionContext<'a> { pub is_in_error_node: bool, pub mentioned_relations: HashMap, HashSet>, - pub mentioned_table_aliases: HashMap, + pub mentioned_columns: HashMap>, HashSet>, } impl<'a> CompletionContext<'a> { @@ -127,6 +133,7 @@ impl<'a> CompletionContext<'a> { is_invocation: false, mentioned_relations: HashMap::new(), mentioned_table_aliases: HashMap::new(), + mentioned_columns: HashMap::new(), is_in_error_node: false, }; @@ -144,6 +151,7 @@ impl<'a> CompletionContext<'a> { executor.add_query_results::(); executor.add_query_results::(); + executor.add_query_results::(); for relation_match in executor.get_iter(stmt_range) { match relation_match { @@ -151,26 +159,38 @@ impl<'a> CompletionContext<'a> { let schema_name = r.get_schema(sql); let table_name = r.get_table(sql); - let current = self.mentioned_relations.get_mut(&schema_name); - - match current { - Some(c) => { - c.insert(table_name); - } - None => { - let mut new = HashSet::new(); - new.insert(table_name); - self.mentioned_relations.insert(schema_name, new); - } - }; + if let Some(c) = self.mentioned_relations.get_mut(&schema_name) { + c.insert(table_name); + } else { + let mut new = HashSet::new(); + new.insert(table_name); + self.mentioned_relations.insert(schema_name, new); + } } - QueryResult::TableAliases(table_alias_match) => { self.mentioned_table_aliases.insert( table_alias_match.get_alias(sql), table_alias_match.get_table(sql), ); } + QueryResult::SelectClauseColumns(c) => { + let mentioned = MentionedColumn { + column: c.get_column(sql), + alias: c.get_alias(sql), + }; + + if let Some(cols) = self + .mentioned_columns + .get_mut(&Some(WrappingClause::Select)) + { + cols.insert(mentioned); + } else { + let mut new = HashSet::new(); + new.insert(mentioned); + self.mentioned_columns + .insert(Some(WrappingClause::Select), new); + } + } }; } } diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs index 331c4416..8109ba83 100644 --- a/crates/pgt_completions/src/providers/columns.rs +++ b/crates/pgt_completions/src/providers/columns.rs @@ -484,4 +484,93 @@ mod tests { ) .await; } + + #[tokio::test] + async fn prefers_not_mentioned_columns() { + let setup = r#" + create schema auth; + + create table public.one ( + id serial primary key, + a text, + b text, + z text + ); + + create table public.two ( + id serial primary key, + c text, + d text, + e text + ); + "#; + + assert_complete_results( + format!( + "select {} from public.one o join public.two on o.id = t.id;", + CURSOR_POS + ) + .as_str(), + vec![ + CompletionAssertion::Label("a".to_string()), + CompletionAssertion::Label("b".to_string()), + CompletionAssertion::Label("c".to_string()), + CompletionAssertion::Label("d".to_string()), + CompletionAssertion::Label("e".to_string()), + ], + setup, + ) + .await; + + // "a" is already mentioned, so it jumps down + assert_complete_results( + format!( + "select a, {} from public.one o join public.two on o.id = t.id;", + CURSOR_POS + ) + .as_str(), + vec![ + CompletionAssertion::Label("b".to_string()), + CompletionAssertion::Label("c".to_string()), + CompletionAssertion::Label("d".to_string()), + CompletionAssertion::Label("e".to_string()), + CompletionAssertion::Label("id".to_string()), + CompletionAssertion::Label("z".to_string()), + CompletionAssertion::Label("a".to_string()), + ], + setup, + ) + .await; + + // "id" of table one is mentioned, but table two isn't 
– + // its priority stays up + assert_complete_results( + format!( + "select o.id, a, b, c, d, e, {} from public.one o join public.two on o.id = t.id;", + CURSOR_POS + ) + .as_str(), + vec![ + CompletionAssertion::LabelAndDesc( + "id".to_string(), + "Table: public.two".to_string(), + ), + CompletionAssertion::Label("z".to_string()), + ], + setup, + ) + .await; + + // "id" is ambiguous, so both "id" columns are lowered in priority + assert_complete_results( + format!( + "select id, a, b, c, d, e, {} from public.one o join public.two on o.id = t.id;", + CURSOR_POS + ) + .as_str(), + vec![CompletionAssertion::Label("z".to_string())], + setup, + ) + .await; + } } diff --git a/crates/pgt_completions/src/providers/triggers.rs b/crates/pgt_completions/src/providers/triggers.rs new file mode 100644 index 00000000..6bc04deb --- /dev/null +++ b/crates/pgt_completions/src/providers/triggers.rs @@ -0,0 +1,169 @@ +use crate::{ + CompletionItemKind, + builder::{CompletionBuilder, PossibleCompletionItem}, + context::CompletionContext, + relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore}, +}; + +use super::helper::get_completion_text_with_schema_or_alias; + +pub fn complete_functions<'a>(ctx: &'a CompletionContext, builder: &mut CompletionBuilder<'a>) { + let available_functions = &ctx.schema_cache.functions; + + for func in available_functions { + let relevance = CompletionRelevanceData::Function(func); + + let item = PossibleCompletionItem { + label: func.name.clone(), + score: CompletionScore::from(relevance.clone()), + filter: CompletionFilter::from(relevance), + description: format!("Schema: {}", func.schema), + kind: CompletionItemKind::Function, + completion_text: get_completion_text_with_schema_or_alias( + ctx, + &func.name, + &func.schema, + ), + }; + + builder.add_item(item); + } +} + +#[cfg(test)] +mod tests { + use crate::{ + CompletionItem, CompletionItemKind, complete, + test_helper::{CURSOR_POS, get_test_deps, get_test_params}, + }; + + #[tokio::test] + async fn completes_fn() { + let setup = r#" + create or replace function cool() + returns trigger + language plpgsql + security invoker + as $$ + begin + raise exception 'dont matter'; + end; + $$; + "#; + + let query = format!("select coo{}", CURSOR_POS); + + let (tree, cache) = get_test_deps(setup, query.as_str().into()).await; + let params = get_test_params(&tree, &cache, query.as_str().into()); + let results = complete(params); + + let CompletionItem { label, .. } = results + .into_iter() + .next() + .expect("Should return at least one completion item"); + + assert_eq!(label, "cool"); + } + + #[tokio::test] + async fn prefers_fn_if_invocation() { + let setup = r#" + create table coos ( + id serial primary key, + name text + ); + + create or replace function cool() + returns trigger + language plpgsql + security invoker + as $$ + begin + raise exception 'dont matter'; + end; + $$; + "#; + + let query = format!(r#"select * from coo{}()"#, CURSOR_POS); + + let (tree, cache) = get_test_deps(setup, query.as_str().into()).await; + let params = get_test_params(&tree, &cache, query.as_str().into()); + let results = complete(params); + + let CompletionItem { label, kind, .. 
} = results + .into_iter() + .next() + .expect("Should return at least one completion item"); + + assert_eq!(label, "cool"); + assert_eq!(kind, CompletionItemKind::Function); + } + + #[tokio::test] + async fn prefers_fn_in_select_clause() { + let setup = r#" + create table coos ( + id serial primary key, + name text + ); + + create or replace function cool() + returns trigger + language plpgsql + security invoker + as $$ + begin + raise exception 'dont matter'; + end; + $$; + "#; + + let query = format!(r#"select coo{}"#, CURSOR_POS); + + let (tree, cache) = get_test_deps(setup, query.as_str().into()).await; + let params = get_test_params(&tree, &cache, query.as_str().into()); + let results = complete(params); + + let CompletionItem { label, kind, .. } = results + .into_iter() + .next() + .expect("Should return at least one completion item"); + + assert_eq!(label, "cool"); + assert_eq!(kind, CompletionItemKind::Function); + } + + #[tokio::test] + async fn prefers_function_in_from_clause_if_invocation() { + let setup = r#" + create table coos ( + id serial primary key, + name text + ); + + create or replace function cool() + returns trigger + language plpgsql + security invoker + as $$ + begin + raise exception 'dont matter'; + end; + $$; + "#; + + let query = format!(r#"select * from coo{}()"#, CURSOR_POS); + + let (tree, cache) = get_test_deps(setup, query.as_str().into()).await; + let params = get_test_params(&tree, &cache, query.as_str().into()); + let results = complete(params); + + let CompletionItem { label, kind, .. } = results + .into_iter() + .next() + .expect("Should return at least one completion item"); + + assert_eq!(label, "cool"); + assert_eq!(kind, CompletionItemKind::Function); + } +} diff --git a/crates/pgt_completions/src/relevance/scoring.rs b/crates/pgt_completions/src/relevance/scoring.rs index 71c01023..b0b0bf63 100644 --- a/crates/pgt_completions/src/relevance/scoring.rs +++ b/crates/pgt_completions/src/relevance/scoring.rs @@ -32,6 +32,7 @@ impl CompletionScore<'_> { self.check_matching_clause_type(ctx); self.check_matching_wrapping_node(ctx); self.check_relations_in_stmt(ctx); + self.check_columns_in_stmt(ctx); } fn check_matches_query_input(&mut self, ctx: &CompletionContext) { @@ -235,4 +236,40 @@ impl CompletionScore<'_> { self.score += 2; } } + + fn check_columns_in_stmt(&mut self, ctx: &CompletionContext) { + if let CompletionRelevanceData::Column(column) = self.data { + /* + * Columns can be mentioned in one of two ways: + * + * 1) With an alias: `select u.id`. + * If the currently investigated suggestion item is "id" of the "users" table, + * we want to check + * a) whether the name of the column matches. + * b) whether we know which table is aliased by "u" (if we don't, we ignore the alias). + * c) whether the aliased table matches the currently investigated suggestion item's table. + * + * 2) Without an alias: `select id`. + * In that case, we only check whether the mentioned column fits our currently investigated + * suggestion item's name. 
+ * + */ + if ctx + .mentioned_columns + .get(&ctx.wrapping_clause_type) + .is_some_and(|set| { + set.iter().any(|mentioned| match mentioned.alias.as_ref() { + Some(als) => { + let aliased_table = ctx.mentioned_table_aliases.get(als.as_str()); + column.name == mentioned.column + && aliased_table.is_none_or(|t| t == &column.table_name) + } + None => mentioned.column == column.name, + }) + }) + { + self.score -= 10; + } + } + } } diff --git a/crates/pgt_completions/src/test_helper.rs b/crates/pgt_completions/src/test_helper.rs index a6b57c55..937c11af 100644 --- a/crates/pgt_completions/src/test_helper.rs +++ b/crates/pgt_completions/src/test_helper.rs @@ -146,6 +146,7 @@ mod tests { pub(crate) enum CompletionAssertion { Label(String), LabelAndKind(String, CompletionItemKind), + LabelAndDesc(String, String), LabelNotExists(String), KindNotExists(CompletionItemKind), } @@ -186,6 +187,18 @@ impl CompletionAssertion { kind ); } + CompletionAssertion::LabelAndDesc(label, desc) => { + assert_eq!( + &item.label, label, + "Expected label to be {}, but got {}", + label, &item.label + ); + assert_eq!( + &item.description, desc, + "Expected desc to be {}, but got {}", + desc, &item.description + ); + } } } } @@ -202,7 +215,9 @@ pub(crate) async fn assert_complete_results( let (not_existing, existing): (Vec, Vec) = assertions.into_iter().partition(|a| match a { CompletionAssertion::LabelNotExists(_) | CompletionAssertion::KindNotExists(_) => true, - CompletionAssertion::Label(_) | CompletionAssertion::LabelAndKind(_, _) => false, + CompletionAssertion::Label(_) + | CompletionAssertion::LabelAndKind(_, _) + | CompletionAssertion::LabelAndDesc(_, _) => false, }); assert!( diff --git a/crates/pgt_treesitter_queries/src/queries/mod.rs b/crates/pgt_treesitter_queries/src/queries/mod.rs index 4e10ed60..e02d675b 100644 --- a/crates/pgt_treesitter_queries/src/queries/mod.rs +++ b/crates/pgt_treesitter_queries/src/queries/mod.rs @@ -1,13 +1,16 @@ mod relations; +mod select_columns; mod table_aliases; pub use relations::*; +pub use select_columns::*; pub use table_aliases::*; #[derive(Debug)] pub enum QueryResult<'a> { Relation(RelationMatch<'a>), TableAliases(TableAliasMatch<'a>), + SelectClauseColumns(SelectColumnMatch<'a>), } impl QueryResult<'_> { @@ -28,6 +31,16 @@ impl QueryResult<'_> { let end = m.alias.end_position(); start >= range.start_point && end <= range.end_point } + Self::SelectClauseColumns(cm) => { + let start = match cm.alias { + Some(n) => n.start_position(), + None => cm.column.start_position(), + }; + + let end = cm.column.end_position(); + + start >= range.start_point && end <= range.end_point + } } } } diff --git a/crates/pgt_treesitter_queries/src/queries/select_columns.rs b/crates/pgt_treesitter_queries/src/queries/select_columns.rs new file mode 100644 index 00000000..00b6977d --- /dev/null +++ b/crates/pgt_treesitter_queries/src/queries/select_columns.rs @@ -0,0 +1,172 @@ +use std::sync::LazyLock; + +use crate::{Query, QueryResult}; + +use super::QueryTryFrom; + +static TS_QUERY: LazyLock = LazyLock::new(|| { + static QUERY_STR: &str = r#" + (select_expression + (term + (field + (object_reference)? @alias + "."? + (identifier) @column + ) + ) + ","? 
+ ) +"#; + tree_sitter::Query::new(tree_sitter_sql::language(), QUERY_STR).expect("Invalid TS Query") +}); + +#[derive(Debug)] +pub struct SelectColumnMatch<'a> { + pub(crate) alias: Option>, + pub(crate) column: tree_sitter::Node<'a>, +} + +impl SelectColumnMatch<'_> { + pub fn get_alias(&self, sql: &str) -> Option { + let str = self + .alias + .as_ref()? + .utf8_text(sql.as_bytes()) + .expect("Failed to get alias from ColumnMatch"); + + Some(str.to_string()) + } + + pub fn get_column(&self, sql: &str) -> String { + self.column + .utf8_text(sql.as_bytes()) + .expect("Failed to get column from ColumnMatch") + .to_string() + } +} + +impl<'a> TryFrom<&'a QueryResult<'a>> for &'a SelectColumnMatch<'a> { + type Error = String; + + fn try_from(q: &'a QueryResult<'a>) -> Result { + match q { + QueryResult::SelectClauseColumns(c) => Ok(c), + + #[allow(unreachable_patterns)] + _ => Err("Invalid QueryResult type".into()), + } + } +} + +impl<'a> QueryTryFrom<'a> for SelectColumnMatch<'a> { + type Ref = &'a SelectColumnMatch<'a>; +} + +impl<'a> Query<'a> for SelectColumnMatch<'a> { + fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec> { + let mut cursor = tree_sitter::QueryCursor::new(); + + let matches = cursor.matches(&TS_QUERY, root_node, stmt.as_bytes()); + + let mut to_return = vec![]; + + for m in matches { + if m.captures.len() == 1 { + let capture = m.captures[0].node; + to_return.push(QueryResult::SelectClauseColumns(SelectColumnMatch { + alias: None, + column: capture, + })); + } + + if m.captures.len() == 2 { + let alias = m.captures[0].node; + let column = m.captures[1].node; + + to_return.push(QueryResult::SelectClauseColumns(SelectColumnMatch { + alias: Some(alias), + column, + })); + } + } + + to_return + } +} + +#[cfg(test)] +mod tests { + use crate::TreeSitterQueriesExecutor; + + use super::SelectColumnMatch; + + #[test] + fn finds_all_columns() { + let sql = r#"select aud, id, email from auth.users;"#; + + let mut parser = tree_sitter::Parser::new(); + parser.set_language(tree_sitter_sql::language()).unwrap(); + + let tree = parser.parse(sql, None).unwrap(); + + let mut executor = TreeSitterQueriesExecutor::new(tree.root_node(), sql); + + executor.add_query_results::(); + + let results: Vec<&SelectColumnMatch> = executor + .get_iter(None) + .filter_map(|q| q.try_into().ok()) + .collect(); + + assert_eq!(results[0].get_alias(sql), None); + assert_eq!(results[0].get_column(sql), "aud"); + + assert_eq!(results[1].get_alias(sql), None); + assert_eq!(results[1].get_column(sql), "id"); + + assert_eq!(results[2].get_alias(sql), None); + assert_eq!(results[2].get_column(sql), "email"); + } + + #[test] + fn finds_columns_with_aliases() { + let sql = r#" +select + u.id, + u.email, + cs.user_settings, + cs.client_id +from + auth.users u + join public.client_settings cs + on u.id = cs.user_id; + +"#; + + let mut parser = tree_sitter::Parser::new(); + parser.set_language(tree_sitter_sql::language()).unwrap(); + + let tree = parser.parse(sql, None).unwrap(); + + let mut executor = TreeSitterQueriesExecutor::new(tree.root_node(), sql); + + executor.add_query_results::(); + + let results: Vec<&SelectColumnMatch> = executor + .get_iter(None) + .filter_map(|q| q.try_into().ok()) + .collect(); + + assert_eq!(results[0].get_alias(sql), Some("u".into())); + assert_eq!(results[0].get_column(sql), "id"); + + assert_eq!(results[1].get_alias(sql), Some("u".into())); + assert_eq!(results[1].get_column(sql), "email"); + + assert_eq!(results[2].get_alias(sql), Some("cs".into())); + 
assert_eq!(results[2].get_column(sql), "user_settings"); + + assert_eq!(results[3].get_alias(sql), Some("cs".into())); + assert_eq!(results[3].get_column(sql), "client_id"); + } +} From 4e57995818c3dc846b5dc2f58a0e38cde46b259a Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Mon, 19 May 2025 09:28:56 +0200 Subject: [PATCH 054/114] chore(schema_cache): add query for triggers (#398) --- ...75ba8951faa1be2ea6b2bf6714b1aa9127a6f.json | 44 +++ Cargo.lock | 1 + crates/pgt_schema_cache/Cargo.toml | 1 + crates/pgt_schema_cache/src/columns.rs | 2 +- crates/pgt_schema_cache/src/functions.rs | 2 +- crates/pgt_schema_cache/src/lib.rs | 2 + crates/pgt_schema_cache/src/policies.rs | 2 +- .../pgt_schema_cache/src/queries/triggers.sql | 17 + crates/pgt_schema_cache/src/schema_cache.rs | 8 +- crates/pgt_schema_cache/src/schemas.rs | 2 +- crates/pgt_schema_cache/src/tables.rs | 2 +- crates/pgt_schema_cache/src/triggers.rs | 300 ++++++++++++++++++ crates/pgt_schema_cache/src/types.rs | 2 +- crates/pgt_schema_cache/src/versions.rs | 2 +- 14 files changed, 378 insertions(+), 9 deletions(-) create mode 100644 .sqlx/query-df57cc22f7d63847abce1d0d15675ba8951faa1be2ea6b2bf6714b1aa9127a6f.json create mode 100644 crates/pgt_schema_cache/src/queries/triggers.sql create mode 100644 crates/pgt_schema_cache/src/triggers.rs diff --git a/.sqlx/query-df57cc22f7d63847abce1d0d15675ba8951faa1be2ea6b2bf6714b1aa9127a6f.json b/.sqlx/query-df57cc22f7d63847abce1d0d15675ba8951faa1be2ea6b2bf6714b1aa9127a6f.json new file mode 100644 index 00000000..b6fd2fc8 --- /dev/null +++ b/.sqlx/query-df57cc22f7d63847abce1d0d15675ba8951faa1be2ea6b2bf6714b1aa9127a6f.json @@ -0,0 +1,44 @@ +{ + "db_name": "PostgreSQL", + "query": "-- we need to join tables from the pg_catalog since \"TRUNCATE\" triggers are \n-- not available in the information_schema.trigger table.\nselect \n t.tgname as \"name!\",\n c.relname as \"table_name!\",\n p.proname as \"proc_name!\",\n n.nspname as \"schema_name!\",\n t.tgtype as \"details_bitmask!\"\nfrom \n pg_catalog.pg_trigger t \n left join pg_catalog.pg_proc p on t.tgfoid = p.oid\n left join pg_catalog.pg_class c on t.tgrelid = c.oid\n left join pg_catalog.pg_namespace n on c.relnamespace = n.oid\nwhere \n -- triggers enforcing constraints (e.g. 
unique fields) should not be included.\n t.tgisinternal = false and \n t.tgconstraint = 0;\n", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "name!", + "type_info": "Name" + }, + { + "ordinal": 1, + "name": "table_name!", + "type_info": "Name" + }, + { + "ordinal": 2, + "name": "proc_name!", + "type_info": "Name" + }, + { + "ordinal": 3, + "name": "schema_name!", + "type_info": "Name" + }, + { + "ordinal": 4, + "name": "details_bitmask!", + "type_info": "Int2" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + true, + true, + true, + false + ] + }, + "hash": "df57cc22f7d63847abce1d0d15675ba8951faa1be2ea6b2bf6714b1aa9127a6f" +} diff --git a/Cargo.lock b/Cargo.lock index 55db1b6f..10f45b7c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2788,6 +2788,7 @@ dependencies = [ "serde", "serde_json", "sqlx", + "strum", "tokio", ] diff --git a/crates/pgt_schema_cache/Cargo.toml b/crates/pgt_schema_cache/Cargo.toml index 291f80ca..c5fadb3e 100644 --- a/crates/pgt_schema_cache/Cargo.toml +++ b/crates/pgt_schema_cache/Cargo.toml @@ -20,6 +20,7 @@ pgt_diagnostics.workspace = true serde.workspace = true serde_json.workspace = true sqlx.workspace = true +strum = { workspace = true } tokio.workspace = true [dev-dependencies] diff --git a/crates/pgt_schema_cache/src/columns.rs b/crates/pgt_schema_cache/src/columns.rs index 6e2e2adf..de7c2d4a 100644 --- a/crates/pgt_schema_cache/src/columns.rs +++ b/crates/pgt_schema_cache/src/columns.rs @@ -37,7 +37,7 @@ impl From for ColumnClassKind { } } -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq)] pub struct Column { pub name: String, diff --git a/crates/pgt_schema_cache/src/functions.rs b/crates/pgt_schema_cache/src/functions.rs index 36db011d..5e40709f 100644 --- a/crates/pgt_schema_cache/src/functions.rs +++ b/crates/pgt_schema_cache/src/functions.rs @@ -58,7 +58,7 @@ impl From> for FunctionArgs { } } -#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[derive(Debug, Default, Serialize, Deserialize)] pub struct Function { /// The Id (`oid`). pub id: i64, diff --git a/crates/pgt_schema_cache/src/lib.rs b/crates/pgt_schema_cache/src/lib.rs index fc717fbe..d978a94b 100644 --- a/crates/pgt_schema_cache/src/lib.rs +++ b/crates/pgt_schema_cache/src/lib.rs @@ -8,6 +8,7 @@ mod policies; mod schema_cache; mod schemas; mod tables; +mod triggers; mod types; mod versions; @@ -16,3 +17,4 @@ pub use functions::{Behavior, Function, FunctionArg, FunctionArgs}; pub use schema_cache::SchemaCache; pub use schemas::Schema; pub use tables::{ReplicaIdentity, Table}; +pub use triggers::{Trigger, TriggerAffected, TriggerEvent}; diff --git a/crates/pgt_schema_cache/src/policies.rs b/crates/pgt_schema_cache/src/policies.rs index 46a3ab18..641dad12 100644 --- a/crates/pgt_schema_cache/src/policies.rs +++ b/crates/pgt_schema_cache/src/policies.rs @@ -54,7 +54,7 @@ impl From for Policy { } } -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq)] pub struct Policy { name: String, table_name: String, diff --git a/crates/pgt_schema_cache/src/queries/triggers.sql b/crates/pgt_schema_cache/src/queries/triggers.sql new file mode 100644 index 00000000..c28cc39f --- /dev/null +++ b/crates/pgt_schema_cache/src/queries/triggers.sql @@ -0,0 +1,17 @@ +-- we need to join tables from the pg_catalog since "TRUNCATE" triggers are +-- not available in the information_schema.trigger table. 
+select
+    t.tgname as "name!",
+    c.relname as "table_name!",
+    p.proname as "proc_name!",
+    n.nspname as "schema_name!",
+    t.tgtype as "details_bitmask!"
+from
+    pg_catalog.pg_trigger t
+    left join pg_catalog.pg_proc p on t.tgfoid = p.oid
+    left join pg_catalog.pg_class c on t.tgrelid = c.oid
+    left join pg_catalog.pg_namespace n on c.relnamespace = n.oid
+where
+    -- triggers enforcing constraints (e.g. unique fields) should not be included.
+    t.tgisinternal = false and
+    t.tgconstraint = 0;
diff --git a/crates/pgt_schema_cache/src/schema_cache.rs b/crates/pgt_schema_cache/src/schema_cache.rs
index 8a5c1a93..b21d2baf 100644
--- a/crates/pgt_schema_cache/src/schema_cache.rs
+++ b/crates/pgt_schema_cache/src/schema_cache.rs
@@ -1,5 +1,6 @@
 use sqlx::postgres::PgPool;
 
+use crate::Trigger;
 use crate::columns::Column;
 use crate::functions::Function;
 use crate::policies::Policy;
@@ -8,7 +9,7 @@ use crate::tables::Table;
 use crate::types::PostgresType;
 use crate::versions::Version;
 
-#[derive(Debug, Clone, Default)]
+#[derive(Debug, Default)]
 pub struct SchemaCache {
     pub schemas: Vec<Schema>,
     pub tables: Vec<Table>,
@@ -17,11 +18,12 @@ pub struct SchemaCache {
     pub versions: Vec<Version>,
     pub columns: Vec<Column>,
     pub policies: Vec<Policy>,
+    pub triggers: Vec<Trigger>,
 }
 
 impl SchemaCache {
     pub async fn load(pool: &PgPool) -> Result<SchemaCache, sqlx::Error> {
-        let (schemas, tables, functions, types, versions, columns, policies) = futures_util::try_join!(
+        let (schemas, tables, functions, types, versions, columns, policies, triggers) = futures_util::try_join!(
             Schema::load(pool),
             Table::load(pool),
             Function::load(pool),
@@ -29,6 +31,7 @@ impl SchemaCache {
             Version::load(pool),
             Column::load(pool),
             Policy::load(pool),
+            Trigger::load(pool),
         )?;
 
         Ok(SchemaCache {
@@ -39,6 +42,7 @@ impl SchemaCache {
             versions,
             columns,
             policies,
+            triggers,
         })
     }
 
diff --git a/crates/pgt_schema_cache/src/schemas.rs b/crates/pgt_schema_cache/src/schemas.rs
index 41747194..5a007e51 100644
--- a/crates/pgt_schema_cache/src/schemas.rs
+++ b/crates/pgt_schema_cache/src/schemas.rs
@@ -2,7 +2,7 @@ use sqlx::PgPool;
 
 use crate::schema_cache::SchemaCacheItem;
 
-#[derive(Debug, Clone, Default)]
+#[derive(Debug, Default)]
 pub struct Schema {
     pub id: i64,
     pub name: String,
diff --git a/crates/pgt_schema_cache/src/tables.rs b/crates/pgt_schema_cache/src/tables.rs
index ea889ca9..99061384 100644
--- a/crates/pgt_schema_cache/src/tables.rs
+++ b/crates/pgt_schema_cache/src/tables.rs
@@ -23,7 +23,7 @@ impl From for ReplicaIdentity {
     }
 }
 
-#[derive(Debug, Clone, Default, PartialEq, Eq)]
+#[derive(Debug, Default, PartialEq, Eq)]
 pub struct Table {
     pub id: i64,
     pub schema: String,
diff --git a/crates/pgt_schema_cache/src/triggers.rs b/crates/pgt_schema_cache/src/triggers.rs
new file mode 100644
index 00000000..0a5241d6
--- /dev/null
+++ b/crates/pgt_schema_cache/src/triggers.rs
@@ -0,0 +1,300 @@
+use crate::schema_cache::SchemaCacheItem;
+use strum::{EnumIter, IntoEnumIterator};
+
+#[derive(Debug, PartialEq, Eq)]
+pub enum TriggerAffected {
+    Row,
+    Statement,
+}
+
+// pg_trigger.tgtype bit layout: 1 = ROW, 2 = BEFORE, 4 = INSERT,
+// 8 = DELETE, 16 = UPDATE, 32 = TRUNCATE, 64 = INSTEAD.
+impl From<i16> for TriggerAffected {
+    fn from(value: i16) -> Self {
+        let is_row = 0b0000_0001;
+        if value & is_row == is_row {
+            Self::Row
+        } else {
+            Self::Statement
+        }
+    }
+}
+
+#[derive(Debug, PartialEq, Eq, EnumIter)]
+pub enum TriggerEvent {
+    Insert,
+    Delete,
+    Update,
+    Truncate,
+}
+
+struct TriggerEvents(Vec<TriggerEvent>);
+
+impl From<i16> for TriggerEvents {
+    fn from(value: i16) -> Self {
+        Self(
+            TriggerEvent::iter()
+                .filter(|variant| {
+                    #[rustfmt::skip]
+                    let mask = match variant {
+                        TriggerEvent::Insert => 0b0000_0100,
+                        TriggerEvent::Delete => 
0b0000_1000,
+                        TriggerEvent::Update => 0b0001_0000,
+                        TriggerEvent::Truncate => 0b0010_0000,
+                    };
+                    mask & value == mask
+                })
+                .collect(),
+        )
+    }
+}
+
+#[derive(Debug, PartialEq, Eq, EnumIter)]
+pub enum TriggerTiming {
+    Before,
+    After,
+    Instead,
+}
+
+impl TryFrom<i16> for TriggerTiming {
+    type Error = ();
+    fn try_from(value: i16) -> Result<Self, Self::Error> {
+        TriggerTiming::iter()
+            .find(|variant| {
+                match variant {
+                    TriggerTiming::Instead => {
+                        let mask = 0b0100_0000;
+                        mask & value == mask
+                    }
+                    TriggerTiming::Before => {
+                        let mask = 0b0000_0010;
+                        mask & value == mask
+                    }
+                    TriggerTiming::After => {
+                        let mask = 0b1011_1101;
+                        // timing is "AFTER" if neither the INSTEAD nor the BEFORE bit is set.
+                        mask | value == mask
+                    }
+                }
+            })
+            .ok_or(())
+    }
+}
+
+pub struct TriggerQueried {
+    name: String,
+    table_name: String,
+    schema_name: String,
+    proc_name: String,
+    details_bitmask: i16,
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub struct Trigger {
+    name: String,
+    table_name: String,
+    schema_name: String,
+    proc_name: String,
+    affected: TriggerAffected,
+    timing: TriggerTiming,
+    events: Vec<TriggerEvent>,
+}
+
+impl From<TriggerQueried> for Trigger {
+    fn from(value: TriggerQueried) -> Self {
+        Self {
+            name: value.name,
+            table_name: value.table_name,
+            proc_name: value.proc_name,
+            schema_name: value.schema_name,
+            affected: value.details_bitmask.into(),
+            timing: value.details_bitmask.try_into().unwrap(),
+            events: TriggerEvents::from(value.details_bitmask).0,
+        }
+    }
+}
+
+impl SchemaCacheItem for Trigger {
+    type Item = Trigger;
+
+    async fn load(pool: &sqlx::PgPool) -> Result<Vec<Trigger>, sqlx::Error> {
+        let results = sqlx::query_file_as!(TriggerQueried, "src/queries/triggers.sql")
+            .fetch_all(pool)
+            .await?;
+
+        Ok(results.into_iter().map(|r| r.into()).collect())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use pgt_test_utils::test_database::get_new_test_db;
+    use sqlx::Executor;
+
+    use crate::{
+        SchemaCache,
+        triggers::{TriggerAffected, TriggerEvent, TriggerTiming},
+    };
+
+    #[tokio::test]
+    async fn loads_triggers() {
+        let test_db = get_new_test_db().await;
+
+        let setup = r#"
+            create table public.users (
+                id serial primary key,
+                name text
+            );
+
+            create or replace function public.log_user_insert()
+            returns trigger as $$
+            begin
+                -- dummy body
+                return new;
+            end;
+            $$ language plpgsql;
+
+            create trigger trg_users_insert
+            before insert on public.users
+            for each row
+            execute function public.log_user_insert();
+
+            create trigger trg_users_update
+            after update or insert on public.users
+            for each statement
+            execute function public.log_user_insert();
+
+            create trigger trg_users_delete
+            before delete on public.users
+            for each row
+            execute function public.log_user_insert();
+        "#;
+
+        test_db
+            .execute(setup)
+            .await
+            .expect("Failed to setup test database");
+
+        let cache = SchemaCache::load(&test_db)
+            .await
+            .expect("Failed to load Schema Cache");
+
+        let triggers: Vec<_> = cache
+            .triggers
+            .iter()
+            .filter(|t| t.table_name == "users")
+            .collect();
+        assert_eq!(triggers.len(), 3);
+
+        let insert_trigger = triggers
+            .iter()
+            .find(|t| t.name == "trg_users_insert")
+            .unwrap();
+        assert_eq!(insert_trigger.schema_name, "public");
+        assert_eq!(insert_trigger.table_name, "users");
+        assert_eq!(insert_trigger.timing, TriggerTiming::Before);
+        assert_eq!(insert_trigger.affected, TriggerAffected::Row);
+        assert!(insert_trigger.events.contains(&TriggerEvent::Insert));
+        assert_eq!(insert_trigger.proc_name, "log_user_insert");
+
+        let update_trigger = triggers
+            .iter()
+            .find(|t| t.name == "trg_users_update")
+            
.unwrap();
+        assert_eq!(update_trigger.schema_name, "public");
+        assert_eq!(update_trigger.table_name, "users");
+        assert_eq!(update_trigger.timing, TriggerTiming::After);
+        assert_eq!(update_trigger.affected, TriggerAffected::Statement);
+        assert!(update_trigger.events.contains(&TriggerEvent::Update));
+        assert!(update_trigger.events.contains(&TriggerEvent::Insert));
+        assert_eq!(update_trigger.proc_name, "log_user_insert");
+
+        let delete_trigger = triggers
+            .iter()
+            .find(|t| t.name == "trg_users_delete")
+            .unwrap();
+        assert_eq!(delete_trigger.schema_name, "public");
+        assert_eq!(delete_trigger.table_name, "users");
+        assert_eq!(delete_trigger.timing, TriggerTiming::Before);
+        assert_eq!(delete_trigger.affected, TriggerAffected::Row);
+        assert!(delete_trigger.events.contains(&TriggerEvent::Delete));
+        assert_eq!(delete_trigger.proc_name, "log_user_insert");
+    }
+
+    #[tokio::test]
+    async fn loads_instead_and_truncate_triggers() {
+        let test_db = get_new_test_db().await;
+
+        let setup = r#"
+            create table public.docs (
+                id serial primary key,
+                content text
+            );
+
+            create view public.docs_view as
+            select * from public.docs;
+
+            create or replace function public.docs_instead_of_update()
+            returns trigger as $$
+            begin
+                -- dummy body
+                return new;
+            end;
+            $$ language plpgsql;
+
+            create trigger trg_docs_instead_update
+            instead of update on public.docs_view
+            for each row
+            execute function public.docs_instead_of_update();
+
+            create or replace function public.docs_truncate()
+            returns trigger as $$
+            begin
+                -- dummy body
+                return null;
+            end;
+            $$ language plpgsql;
+
+            create trigger trg_docs_truncate
+            after truncate on public.docs
+            for each statement
+            execute function public.docs_truncate();
+        "#;
+
+        test_db
+            .execute(setup)
+            .await
+            .expect("Failed to setup test database");
+
+        let cache = SchemaCache::load(&test_db)
+            .await
+            .expect("Failed to load Schema Cache");
+
+        let triggers: Vec<_> = cache
+            .triggers
+            .iter()
+            .filter(|t| t.table_name == "docs" || t.table_name == "docs_view")
+            .collect();
+        assert_eq!(triggers.len(), 2);
+
+        let instead_trigger = triggers
+            .iter()
+            .find(|t| t.name == "trg_docs_instead_update")
+            .unwrap();
+        assert_eq!(instead_trigger.schema_name, "public");
+        assert_eq!(instead_trigger.table_name, "docs_view");
+        assert_eq!(instead_trigger.timing, TriggerTiming::Instead);
+        assert_eq!(instead_trigger.affected, TriggerAffected::Row);
+        assert!(instead_trigger.events.contains(&TriggerEvent::Update));
+        assert_eq!(instead_trigger.proc_name, "docs_instead_of_update");
+
+        let truncate_trigger = triggers
+            .iter()
+            .find(|t| t.name == "trg_docs_truncate")
+            .unwrap();
+        assert_eq!(truncate_trigger.schema_name, "public");
+        assert_eq!(truncate_trigger.table_name, "docs");
+        assert_eq!(truncate_trigger.timing, TriggerTiming::After);
+        assert_eq!(truncate_trigger.affected, TriggerAffected::Statement);
+        assert!(truncate_trigger.events.contains(&TriggerEvent::Truncate));
+        assert_eq!(truncate_trigger.proc_name, "docs_truncate");
+    }
+}
diff --git a/crates/pgt_schema_cache/src/types.rs b/crates/pgt_schema_cache/src/types.rs
index 8b2d04bb..8df6b0cb 100644
--- a/crates/pgt_schema_cache/src/types.rs
+++ b/crates/pgt_schema_cache/src/types.rs
@@ -36,7 +36,7 @@ impl From for Enums {
     }
 }
 
-#[derive(Debug, Clone, Default)]
+#[derive(Debug, Default)]
 pub struct PostgresType {
     pub id: i64,
     pub name: String,
diff --git a/crates/pgt_schema_cache/src/versions.rs b/crates/pgt_schema_cache/src/versions.rs
index cf2a140f..a4769c55 100644
--- 
a/crates/pgt_schema_cache/src/versions.rs +++ b/crates/pgt_schema_cache/src/versions.rs @@ -2,7 +2,7 @@ use sqlx::PgPool; use crate::schema_cache::SchemaCacheItem; -#[derive(Debug, Clone, Default)] +#[derive(Debug, Default)] pub struct Version { pub version: Option, pub version_num: Option, From 400715f21226562a24d406bfc484220295d2b4a8 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Thu, 22 May 2025 09:08:13 +0200 Subject: [PATCH 055/114] feat(completions): complete policies (#397) --- .github/workflows/pull_request.yml | 6 + crates/pgt_completions/src/complete.rs | 5 +- .../src/{context.rs => context/mod.rs} | 201 ++++-- .../src/context/policy_parser.rs | 617 ++++++++++++++++++ crates/pgt_completions/src/item.rs | 2 + .../pgt_completions/src/providers/helper.rs | 24 +- crates/pgt_completions/src/providers/mod.rs | 2 + .../pgt_completions/src/providers/policies.rs | 103 +++ crates/pgt_completions/src/relevance.rs | 1 + .../src/relevance/filtering.rs | 56 +- .../pgt_completions/src/relevance/scoring.rs | 24 +- crates/pgt_completions/src/sanitization.rs | 107 ++- crates/pgt_lsp/src/adapters/mod.rs | 21 + crates/pgt_lsp/src/handlers/completions.rs | 1 + crates/pgt_schema_cache/src/lib.rs | 1 + crates/pgt_schema_cache/src/policies.rs | 16 +- crates/pgt_text_size/src/range.rs | 33 + crates/pgt_workspace/src/workspace/server.rs | 13 +- 18 files changed, 1086 insertions(+), 147 deletions(-) rename crates/pgt_completions/src/{context.rs => context/mod.rs} (77%) create mode 100644 crates/pgt_completions/src/context/policy_parser.rs create mode 100644 crates/pgt_completions/src/providers/policies.rs diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 4600ac92..f79392b7 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -184,6 +184,8 @@ jobs: uses: ./.github/actions/free-disk-space - name: Install toolchain uses: moonrepo/setup-rust@v1 + with: + cache-base: main - name: Build main binary run: cargo build -p pgt_cli --release - name: Setup Bun @@ -222,6 +224,10 @@ jobs: cache-base: main env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Ensure RustFMT on nightly toolchain + run: rustup component add rustfmt --toolchain nightly + - name: echo toolchain + run: rustup show - name: Run the analyser codegen run: cargo run -p xtask_codegen -- analyser - name: Run the configuration codegen diff --git a/crates/pgt_completions/src/complete.rs b/crates/pgt_completions/src/complete.rs index 442ee546..5bc5d41c 100644 --- a/crates/pgt_completions/src/complete.rs +++ b/crates/pgt_completions/src/complete.rs @@ -4,7 +4,9 @@ use crate::{ builder::CompletionBuilder, context::CompletionContext, item::CompletionItem, - providers::{complete_columns, complete_functions, complete_schemas, complete_tables}, + providers::{ + complete_columns, complete_functions, complete_policies, complete_schemas, complete_tables, + }, sanitization::SanitizedCompletionParams, }; @@ -33,6 +35,7 @@ pub fn complete(params: CompletionParams) -> Vec { complete_functions(&ctx, &mut builder); complete_columns(&ctx, &mut builder); complete_schemas(&ctx, &mut builder); + complete_policies(&ctx, &mut builder); builder.finish() } diff --git a/crates/pgt_completions/src/context.rs b/crates/pgt_completions/src/context/mod.rs similarity index 77% rename from crates/pgt_completions/src/context.rs rename to crates/pgt_completions/src/context/mod.rs index a17cafa2..23a6fcae 100644 --- 
a/crates/pgt_completions/src/context.rs
+++ b/crates/pgt_completions/src/context/mod.rs
@@ -1,12 +1,19 @@
+mod policy_parser;
+
 use std::collections::{HashMap, HashSet};
 
 use pgt_schema_cache::SchemaCache;
+use pgt_text_size::TextRange;
 use pgt_treesitter_queries::{
     TreeSitterQueriesExecutor,
     queries::{self, QueryResult},
 };
 
-use crate::sanitization::SanitizedCompletionParams;
+use crate::{
+    NodeText,
+    context::policy_parser::{PolicyParser, PolicyStmtKind},
+    sanitization::SanitizedCompletionParams,
+};
 
 #[derive(Debug, PartialEq, Eq, Hash)]
 pub enum WrappingClause<'a> {
@@ -18,12 +25,8 @@ pub enum WrappingClause<'a> {
     },
     Update,
     Delete,
-}
-
-#[derive(PartialEq, Eq, Debug)]
-pub(crate) enum NodeText<'a> {
-    Replaced,
-    Original(&'a str),
+    PolicyName,
+    ToRoleAssignment,
 }
 
 #[derive(PartialEq, Eq, Hash, Debug)]
@@ -47,6 +50,45 @@ pub enum WrappingNode {
     Assignment,
 }
 
+#[derive(Debug)]
+pub(crate) enum NodeUnderCursor<'a> {
+    TsNode(tree_sitter::Node<'a>),
+    CustomNode {
+        text: NodeText,
+        range: TextRange,
+        kind: String,
+    },
+}
+
+impl NodeUnderCursor<'_> {
+    pub fn start_byte(&self) -> usize {
+        match self {
+            NodeUnderCursor::TsNode(node) => node.start_byte(),
+            NodeUnderCursor::CustomNode { range, .. } => range.start().into(),
+        }
+    }
+
+    pub fn end_byte(&self) -> usize {
+        match self {
+            NodeUnderCursor::TsNode(node) => node.end_byte(),
+            NodeUnderCursor::CustomNode { range, .. } => range.end().into(),
+        }
+    }
+
+    pub fn kind(&self) -> &str {
+        match self {
+            NodeUnderCursor::TsNode(node) => node.kind(),
+            NodeUnderCursor::CustomNode { kind, .. } => kind.as_str(),
+        }
+    }
+}
+
+impl<'a> From<tree_sitter::Node<'a>> for NodeUnderCursor<'a> {
+    fn from(node: tree_sitter::Node<'a>) -> Self {
+        NodeUnderCursor::TsNode(node)
+    }
+}
+
 impl TryFrom<&str> for WrappingNode {
     type Error = String;
 
@@ -77,7 +119,7 @@ impl TryFrom for WrappingNode {
 }
 
 pub(crate) struct CompletionContext<'a> {
-    pub node_under_cursor: Option<tree_sitter::Node<'a>>,
+    pub node_under_cursor: Option<NodeUnderCursor<'a>>,
 
     pub tree: &'a tree_sitter::Tree,
     pub text: &'a str,
@@ -137,12 +179,49 @@ impl<'a> CompletionContext<'a> {
             is_in_error_node: false,
         };
 
-        ctx.gather_tree_context();
-        ctx.gather_info_from_ts_queries();
+        // policy handling is important to Supabase, but policies are a PostgreSQL-specific
+        // extension, so the tree_sitter_sql language does not support them.
+        // We infer the context manually.
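+        // e.g. `create policy "my pol" on auth.users to authenticated` has no
+        // usable tree_sitter_sql parse tree, so we fall back to the hand-rolled
+        // PolicyParser below.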
+        if PolicyParser::looks_like_policy_stmt(&params.text) {
+            ctx.gather_policy_context();
+        } else {
+            ctx.gather_tree_context();
+            ctx.gather_info_from_ts_queries();
+        }
 
         ctx
     }
 
+    fn gather_policy_context(&mut self) {
+        let policy_context = PolicyParser::get_context(self.text, self.position);
+
+        self.node_under_cursor = Some(NodeUnderCursor::CustomNode {
+            text: policy_context.node_text.into(),
+            range: policy_context.node_range,
+            kind: policy_context.node_kind.clone(),
+        });
+
+        if policy_context.node_kind == "policy_table" {
+            self.schema_or_alias_name = policy_context.schema_name.clone();
+        }
+
+        if policy_context.table_name.is_some() {
+            let mut new = HashSet::new();
+            new.insert(policy_context.table_name.unwrap());
+            self.mentioned_relations
+                .insert(policy_context.schema_name, new);
+        }
+
+        self.wrapping_clause_type = match policy_context.node_kind.as_str() {
+            "policy_name" if policy_context.statement_kind != PolicyStmtKind::Create => {
+                Some(WrappingClause::PolicyName)
+            }
+            "policy_role" => Some(WrappingClause::ToRoleAssignment),
+            "policy_table" => Some(WrappingClause::From),
+            _ => None,
+        };
+    }
+
     fn gather_info_from_ts_queries(&mut self) {
         let stmt_range = self.wrapping_statement_range.as_ref();
         let sql = self.text;
@@ -195,24 +274,30 @@ impl<'a> CompletionContext<'a> {
         }
     }
 
-    pub fn get_ts_node_content(&self, ts_node: tree_sitter::Node<'a>) -> Option<NodeText<'a>> {
+    fn get_ts_node_content(&self, ts_node: &tree_sitter::Node<'a>) -> Option<NodeText> {
         let source = self.text;
         ts_node.utf8_text(source.as_bytes()).ok().map(|txt| {
             if SanitizedCompletionParams::is_sanitized_token(txt) {
                 NodeText::Replaced
             } else {
-                NodeText::Original(txt)
+                NodeText::Original(txt.into())
             }
         })
     }
 
     pub fn get_node_under_cursor_content(&self) -> Option<String> {
-        self.node_under_cursor
-            .and_then(|n| self.get_ts_node_content(n))
-            .and_then(|txt| match txt {
+        match self.node_under_cursor.as_ref()? {
+            NodeUnderCursor::TsNode(node) => {
+                self.get_ts_node_content(node).and_then(|nt| match nt {
+                    NodeText::Replaced => None,
+                    NodeText::Original(c) => Some(c.to_string()),
+                })
+            }
+            NodeUnderCursor::CustomNode { text, .. 
} => match text {
                 NodeText::Replaced => None,
                 NodeText::Original(c) => Some(c.to_string()),
-            })
+            },
+        }
     }
 
     fn gather_tree_context(&mut self) {
@@ -250,7 +335,7 @@ impl<'a> CompletionContext<'a> {
 
         // prevent infinite recursion – this can happen if we only have a PROGRAM node
         if current_node_kind == parent_node_kind {
-            self.node_under_cursor = Some(current_node);
+            self.node_under_cursor = Some(NodeUnderCursor::from(current_node));
             return;
         }
 
@@ -289,7 +374,7 @@ impl<'a> CompletionContext<'a> {
 
         match current_node_kind {
             "object_reference" | "field" => {
-                let content = self.get_ts_node_content(current_node);
+                let content = self.get_ts_node_content(&current_node);
                 if let Some(node_txt) = content {
                     match node_txt {
                         NodeText::Original(txt) => {
@@ -321,7 +406,7 @@ impl<'a> CompletionContext<'a> {
 
         // We have arrived at the leaf node
        if current_node.child_count() == 0 {
-            self.node_under_cursor = Some(current_node);
+            self.node_under_cursor = Some(NodeUnderCursor::from(current_node));
             return;
         }
 
@@ -334,11 +419,11 @@ impl<'a> CompletionContext<'a> {
         node: tree_sitter::Node<'a>,
     ) -> Option<WrappingClause<'a>> {
         if node.kind().starts_with("keyword_") {
-            if let Some(txt) = self.get_ts_node_content(node).and_then(|txt| match txt {
+            if let Some(txt) = self.get_ts_node_content(&node).and_then(|txt| match txt {
                 NodeText::Original(txt) => Some(txt),
                 NodeText::Replaced => None,
             }) {
-                match txt {
+                match txt.as_str() {
                     "where" => return Some(WrappingClause::Where),
                     "update" => return Some(WrappingClause::Update),
                     "select" => return Some(WrappingClause::Select),
@@ -388,11 +473,14 @@ impl<'a> CompletionContext<'a> {
 #[cfg(test)]
 mod tests {
     use crate::{
-        context::{CompletionContext, NodeText, WrappingClause},
+        NodeText,
+        context::{CompletionContext, WrappingClause},
         sanitization::SanitizedCompletionParams,
         test_helper::{CURSOR_POS, get_text_and_position},
     };
 
+    use super::NodeUnderCursor;
+
     fn get_tree(input: &str) -> tree_sitter::Tree {
         let mut parser = tree_sitter::Parser::new();
         parser
@@ -551,17 +639,22 @@ mod tests {
 
             let ctx = CompletionContext::new(&params);
 
-            let node = ctx.node_under_cursor.unwrap();
+            let node = ctx.node_under_cursor.as_ref().unwrap();
 
-            assert_eq!(
-                ctx.get_ts_node_content(node),
-                Some(NodeText::Original("select"))
-            );
+            match node {
+                NodeUnderCursor::TsNode(node) => {
+                    assert_eq!(
+                        ctx.get_ts_node_content(node),
+                        Some(NodeText::Original("select".into()))
+                    );
 
-            assert_eq!(
-                ctx.wrapping_clause_type,
-                Some(crate::context::WrappingClause::Select)
-            );
+                    assert_eq!(
+                        ctx.wrapping_clause_type,
+                        Some(crate::context::WrappingClause::Select)
+                    );
+                }
+                _ => unreachable!(),
+            }
         }
     }
 
@@ -582,12 +675,17 @@ mod tests {
 
         let ctx = CompletionContext::new(&params);
 
-        let node = ctx.node_under_cursor.unwrap();
+        let node = ctx.node_under_cursor.as_ref().unwrap();
 
-        assert_eq!(
-            ctx.get_ts_node_content(node),
-            Some(NodeText::Original("from"))
-        );
+        match node {
+            NodeUnderCursor::TsNode(node) => {
+                assert_eq!(
+                    ctx.get_ts_node_content(node),
+                    Some(NodeText::Original("from".into()))
+                );
+            }
+            _ => unreachable!(),
+        }
     }
 
     #[test]
@@ -607,10 +705,18 @@ mod tests {
 
         let ctx = CompletionContext::new(&params);
 
-        let node = ctx.node_under_cursor.unwrap();
+        let node = ctx.node_under_cursor.as_ref().unwrap();
 
-        assert_eq!(ctx.get_ts_node_content(node), Some(NodeText::Original("")));
-        assert_eq!(ctx.wrapping_clause_type, None);
+        match node {
+            NodeUnderCursor::TsNode(node) => {
+                assert_eq!(
+                    ctx.get_ts_node_content(node),
+                    Some(NodeText::Original("".into()))
+                );
+                assert_eq!(ctx.wrapping_clause_type, None);
+            }
+            _ 
=> unreachable!(),
+        }
     }
 
     #[test]
@@ -632,12 +738,17 @@ mod tests {
 
         let ctx = CompletionContext::new(&params);
 
-        let node = ctx.node_under_cursor.unwrap();
+        let node = ctx.node_under_cursor.as_ref().unwrap();
 
-        assert_eq!(
-            ctx.get_ts_node_content(node),
-            Some(NodeText::Original("fro"))
-        );
-        assert_eq!(ctx.wrapping_clause_type, Some(WrappingClause::Select));
+        match node {
+            NodeUnderCursor::TsNode(node) => {
+                assert_eq!(
+                    ctx.get_ts_node_content(node),
+                    Some(NodeText::Original("fro".into()))
+                );
+                assert_eq!(ctx.wrapping_clause_type, Some(WrappingClause::Select));
+            }
+            _ => unreachable!(),
+        }
     }
 }
diff --git a/crates/pgt_completions/src/context/policy_parser.rs b/crates/pgt_completions/src/context/policy_parser.rs
new file mode 100644
index 00000000..db37a13f
--- /dev/null
+++ b/crates/pgt_completions/src/context/policy_parser.rs
@@ -0,0 +1,617 @@
+use std::iter::Peekable;
+
+use pgt_text_size::{TextRange, TextSize};
+
+#[derive(Default, Debug, PartialEq, Eq)]
+pub(crate) enum PolicyStmtKind {
+    #[default]
+    Create,
+
+    Alter,
+    Drop,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+struct WordWithIndex {
+    word: String,
+    start: usize,
+    end: usize,
+}
+
+impl WordWithIndex {
+    fn is_under_cursor(&self, cursor_pos: usize) -> bool {
+        self.start <= cursor_pos && self.end > cursor_pos
+    }
+
+    fn get_range(&self) -> TextRange {
+        let start: u32 = self.start.try_into().expect("Text too long");
+        let end: u32 = self.end.try_into().expect("Text too long");
+        TextRange::new(TextSize::from(start), TextSize::from(end))
+    }
+}
+
+/// Note: A policy name within quotation marks will be considered a single word.
+fn sql_to_words(sql: &str) -> Result<Vec<WordWithIndex>, String> {
+    let mut words = vec![];
+
+    let mut start_of_word: Option<usize> = None;
+    let mut current_word = String::new();
+    let mut in_quotation_marks = false;
+
+    for (current_position, current_char) in sql.char_indices() {
+        if (current_char.is_ascii_whitespace() || current_char == ';')
+            && !current_word.is_empty()
+            && start_of_word.is_some()
+            && !in_quotation_marks
+        {
+            words.push(WordWithIndex {
+                word: current_word,
+                start: start_of_word.unwrap(),
+                end: current_position,
+            });
+
+            current_word = String::new();
+            start_of_word = None;
+        } else if (current_char.is_ascii_whitespace() || current_char == ';')
+            && current_word.is_empty()
+        {
+            // do nothing
+        } else if current_char == '"' && start_of_word.is_none() {
+            in_quotation_marks = true;
+            current_word.push(current_char);
+            start_of_word = Some(current_position);
+        } else if current_char == '"' && start_of_word.is_some() {
+            current_word.push(current_char);
+            words.push(WordWithIndex {
+                word: current_word,
+                start: start_of_word.unwrap(),
+                end: current_position + 1,
+            });
+            in_quotation_marks = false;
+            start_of_word = None;
+            current_word = String::new()
+        } else if start_of_word.is_some() {
+            current_word.push(current_char)
+        } else {
+            start_of_word = Some(current_position);
+            current_word.push(current_char);
+        }
+    }
+
+    if let Some(start_of_word) = start_of_word {
+        if !current_word.is_empty() {
+            words.push(WordWithIndex {
+                word: current_word,
+                start: start_of_word,
+                end: sql.len(),
+            });
+        }
+    }
+
+    if in_quotation_marks {
+        Err("String was not closed properly.".into())
+    } else {
+        Ok(words)
+    }
+}
+
+#[derive(Default, Debug, PartialEq, Eq)]
+pub(crate) struct PolicyContext {
+    pub policy_name: Option<String>,
+    pub table_name: Option<String>,
+    pub schema_name: Option<String>,
+    pub statement_kind: PolicyStmtKind,
+    pub node_text: String,
+    pub node_range: TextRange,
+    pub node_kind: String,
+}
+
+/// 
Simple parser that'll turn a policy-related statement into a context object required for
+/// completions.
+/// The parser will only work if the (trimmed) sql starts with `create policy`, `drop policy`, or `alter policy`.
+/// It can only parse policy statements.
+pub(crate) struct PolicyParser {
+    tokens: Peekable<std::vec::IntoIter<WordWithIndex>>,
+    previous_token: Option<WordWithIndex>,
+    current_token: Option<WordWithIndex>,
+    context: PolicyContext,
+    cursor_position: usize,
+}
+
+impl PolicyParser {
+    pub(crate) fn looks_like_policy_stmt(sql: &str) -> bool {
+        let lowercased = sql.to_ascii_lowercase();
+        let trimmed = lowercased.trim();
+        trimmed.starts_with("create policy")
+            || trimmed.starts_with("drop policy")
+            || trimmed.starts_with("alter policy")
+    }
+
+    pub(crate) fn get_context(sql: &str, cursor_position: usize) -> PolicyContext {
+        assert!(
+            Self::looks_like_policy_stmt(sql),
+            "PolicyParser should only be used for policy statements. Developer error!"
+        );
+
+        match sql_to_words(sql) {
+            Ok(tokens) => {
+                let parser = PolicyParser {
+                    tokens: tokens.into_iter().peekable(),
+                    context: PolicyContext::default(),
+                    previous_token: None,
+                    current_token: None,
+                    cursor_position,
+                };
+
+                parser.parse()
+            }
+            Err(_) => PolicyContext::default(),
+        }
+    }
+
+    fn parse(mut self) -> PolicyContext {
+        while let Some(token) = self.advance() {
+            if token.is_under_cursor(self.cursor_position) {
+                self.handle_token_under_cursor(token);
+            } else {
+                self.handle_token(token);
+            }
+        }
+
+        self.context
+    }
+
+    fn handle_token_under_cursor(&mut self, token: WordWithIndex) {
+        if self.previous_token.is_none() {
+            return;
+        }
+
+        let previous = self.previous_token.take().unwrap();
+
+        match previous.word.to_ascii_lowercase().as_str() {
+            "policy" => {
+                self.context.node_range = token.get_range();
+                self.context.node_kind = "policy_name".into();
+                self.context.node_text = token.word;
+            }
+            "on" => {
+                if token.word.contains('.') {
+                    let (schema_name, table_name) = self.schema_and_table_name(&token);
+
+                    let schema_name_len = schema_name.len();
+                    self.context.schema_name = Some(schema_name);
+
+                    let offset: u32 = schema_name_len.try_into().expect("Text too long");
+                    let range_without_schema = token
+                        .get_range()
+                        .checked_expand_start(
+                            TextSize::new(offset + 1), // kill the dot as well
+                        )
+                        .expect("Text too long");
+
+                    self.context.node_range = range_without_schema;
+                    self.context.node_kind = "policy_table".into();
+
+                    // In practice, we should always have a table name.
+                    // The completion sanitization will add a word after a `.` if nothing follows it;
+                    // the token_text will then look like `schema.REPLACED_TOKEN`.
+                    self.context.node_text = table_name.unwrap_or_default();
+                } else {
+                    self.context.node_range = token.get_range();
+                    self.context.node_text = token.word;
+                    self.context.node_kind = "policy_table".into();
+                }
+            }
+            "to" => {
+                self.context.node_range = token.get_range();
+                self.context.node_kind = "policy_role".into();
+                self.context.node_text = token.word;
+            }
+            _ => {
+                self.context.node_range = token.get_range();
+                self.context.node_text = token.word;
+            }
+        }
+    }
+
+    fn handle_token(&mut self, token: WordWithIndex) {
+        match token.word.to_ascii_lowercase().as_str() {
+            "create" if self.next_matches("policy") => {
+                self.context.statement_kind = PolicyStmtKind::Create;
+            }
+            "alter" if self.next_matches("policy") => {
+                self.context.statement_kind = PolicyStmtKind::Alter;
+            }
+            "drop" if self.next_matches("policy") => {
+                self.context.statement_kind = PolicyStmtKind::Drop;
+            }
+            "on" => self.table_with_schema(),
+
+            // skip the "to" so we don't parse it as the TO role name when it's under the cursor
+            "rename" if self.next_matches("to") => {
+                self.advance();
+            }
+
+            _ => {
+                if self.prev_matches("policy") {
+                    self.context.policy_name = Some(token.word);
+                }
+            }
+        }
+    }
+
+    fn next_matches(&mut self, it: &str) -> bool {
+        self.tokens.peek().is_some_and(|c| c.word.as_str() == it)
+    }
+
+    fn prev_matches(&self, it: &str) -> bool {
+        self.previous_token.as_ref().is_some_and(|t| t.word == it)
+    }
+
+    fn advance(&mut self) -> Option<WordWithIndex> {
+        // we can't peek back in an iterator, so we'll have to keep track manually.
+        self.previous_token = self.current_token.take();
+        self.current_token = self.tokens.next();
+        self.current_token.clone()
+    }
+
+    fn table_with_schema(&mut self) {
+        if let Some(token) = self.advance() {
+            if token.is_under_cursor(self.cursor_position) {
+                self.handle_token_under_cursor(token);
+            } else if token.word.contains('.') {
+                let (schema, maybe_table) = self.schema_and_table_name(&token);
+                self.context.schema_name = Some(schema);
+                self.context.table_name = maybe_table;
+            } else {
+                self.context.table_name = Some(token.word);
+            }
+        };
+    }
+
+    fn schema_and_table_name(&self, token: &WordWithIndex) -> (String, Option<String>) {
+        let mut parts = token.word.split('.');
+
+        (
+            parts.next().unwrap().into(),
+            parts.next().map(|tb| tb.into()),
+        )
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use pgt_text_size::{TextRange, TextSize};
+
+    use crate::{
+        context::policy_parser::{PolicyContext, PolicyStmtKind, WordWithIndex},
+        test_helper::CURSOR_POS,
+    };
+
+    use super::{PolicyParser, sql_to_words};
+
+    fn with_pos(query: String) -> (usize, String) {
+        let mut pos: Option<usize> = None;
+
+        for (p, c) in query.char_indices() {
+            if c == CURSOR_POS {
+                pos = Some(p);
+                break;
+            }
+        }
+
+        (
+            pos.expect("Please add cursor position!"),
+            query.replace(CURSOR_POS, "REPLACED_TOKEN").to_string(),
+        )
+    }
+
+    #[test]
+    fn infers_progressively() {
+        let (pos, query) = with_pos(format!(
+            r#"
+          create policy {}
+      "#,
+            CURSOR_POS
+        ));
+
+        let context = PolicyParser::get_context(query.as_str(), pos);
+
+        assert_eq!(
+            context,
+            PolicyContext {
+                policy_name: None,
+                table_name: None,
+                schema_name: None,
+                statement_kind: PolicyStmtKind::Create,
+                node_text: "REPLACED_TOKEN".into(),
+                node_range: TextRange::new(TextSize::new(25), TextSize::new(39)),
+                node_kind: "policy_name".into()
+            }
+        );
+
+        let (pos, query) = with_pos(format!(
+            r#"
+          create policy "my cool policy" {}
+      "#,
+            CURSOR_POS
+        ));
+
+        let context = PolicyParser::get_context(query.as_str(), pos);
+
+        assert_eq!(
+            context,
+            PolicyContext {
+                
policy_name: Some("\"my cool policy\"".into()), + table_name: None, + schema_name: None, + statement_kind: PolicyStmtKind::Create, + node_text: "REPLACED_TOKEN".into(), + node_kind: "".into(), + node_range: TextRange::new(TextSize::new(42), TextSize::new(56)), + } + ); + + let (pos, query) = with_pos(format!( + r#" + create policy "my cool policy" on {} + "#, + CURSOR_POS + )); + + let context = PolicyParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + PolicyContext { + policy_name: Some("\"my cool policy\"".into()), + table_name: None, + schema_name: None, + statement_kind: PolicyStmtKind::Create, + node_text: "REPLACED_TOKEN".into(), + node_kind: "policy_table".into(), + node_range: TextRange::new(TextSize::new(45), TextSize::new(59)), + } + ); + + let (pos, query) = with_pos(format!( + r#" + create policy "my cool policy" on auth.{} + "#, + CURSOR_POS + )); + + let context = PolicyParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + PolicyContext { + policy_name: Some("\"my cool policy\"".into()), + table_name: None, + schema_name: Some("auth".into()), + statement_kind: PolicyStmtKind::Create, + node_text: "REPLACED_TOKEN".into(), + node_kind: "policy_table".into(), + node_range: TextRange::new(TextSize::new(50), TextSize::new(64)), + } + ); + + let (pos, query) = with_pos(format!( + r#" + create policy "my cool policy" on auth.users + as {} + "#, + CURSOR_POS + )); + + let context = PolicyParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + PolicyContext { + policy_name: Some("\"my cool policy\"".into()), + table_name: Some("users".into()), + schema_name: Some("auth".into()), + statement_kind: PolicyStmtKind::Create, + node_text: "REPLACED_TOKEN".into(), + node_kind: "".into(), + node_range: TextRange::new(TextSize::new(72), TextSize::new(86)), + } + ); + + let (pos, query) = with_pos(format!( + r#" + create policy "my cool policy" on auth.users + as permissive + {} + "#, + CURSOR_POS + )); + + let context = PolicyParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + PolicyContext { + policy_name: Some("\"my cool policy\"".into()), + table_name: Some("users".into()), + schema_name: Some("auth".into()), + statement_kind: PolicyStmtKind::Create, + node_text: "REPLACED_TOKEN".into(), + node_kind: "".into(), + node_range: TextRange::new(TextSize::new(95), TextSize::new(109)), + } + ); + + let (pos, query) = with_pos(format!( + r#" + create policy "my cool policy" on auth.users + as permissive + to {} + "#, + CURSOR_POS + )); + + let context = PolicyParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + PolicyContext { + policy_name: Some("\"my cool policy\"".into()), + table_name: Some("users".into()), + schema_name: Some("auth".into()), + statement_kind: PolicyStmtKind::Create, + node_text: "REPLACED_TOKEN".into(), + node_kind: "policy_role".into(), + node_range: TextRange::new(TextSize::new(98), TextSize::new(112)), + } + ); + } + + #[test] + fn determines_on_table_node() { + let (pos, query) = with_pos(format!( + r#" + create policy "my cool policy" + on {} + to all + using (true); + "#, + CURSOR_POS + )); + + let context = PolicyParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + PolicyContext { + policy_name: Some(r#""my cool policy""#.into()), + table_name: None, + schema_name: None, + statement_kind: PolicyStmtKind::Create, + node_text: "REPLACED_TOKEN".into(), + node_range: TextRange::new(TextSize::new(57), TextSize::new(71)), + node_kind: "policy_table".into() + } + ) + } + + 
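#[test]
+    fn splits_words_at_semicolons_and_keeps_quoted_names_whole() {
+        // A small extra check on the tokenizer: words end at semicolons as
+        // well as at whitespace, and a quoted policy name stays a single word.
+        let words = sql_to_words(r#"drop policy "my pol" on users;"#).unwrap();
+
+        assert_eq!(words[0], to_word("drop", 0, 4));
+        assert_eq!(words[1], to_word("policy", 5, 11));
+        assert_eq!(words[2], to_word("\"my pol\"", 12, 20));
+        assert_eq!(words[3], to_word("on", 21, 23));
+        assert_eq!(words[4], to_word("users", 24, 29));
+    }
+
+    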
#[test]
+    fn determines_on_table_node_after_schema() {
+        let (pos, query) = with_pos(format!(
+            r#"
+          create policy "my cool policy"
+            on auth.{}
+            to all
+            using (true);
+      "#,
+            CURSOR_POS
+        ));
+
+        let context = PolicyParser::get_context(query.as_str(), pos);
+
+        assert_eq!(
+            context,
+            PolicyContext {
+                policy_name: Some(r#""my cool policy""#.into()),
+                table_name: None,
+                schema_name: Some("auth".into()),
+                statement_kind: PolicyStmtKind::Create,
+                node_text: "REPLACED_TOKEN".into(),
+                node_range: TextRange::new(TextSize::new(62), TextSize::new(76)),
+                node_kind: "policy_table".into()
+            }
+        )
+    }
+
+    #[test]
+    fn determines_we_are_on_policy_name() {
+        let (pos, query) = with_pos(format!(
+            r#"
+          drop policy {} on auth.users;
+      "#,
+            CURSOR_POS
+        ));
+
+        let context = PolicyParser::get_context(query.as_str(), pos);
+
+        assert_eq!(
+            context,
+            PolicyContext {
+                policy_name: None,
+                table_name: Some("users".into()),
+                schema_name: Some("auth".into()),
+                statement_kind: PolicyStmtKind::Drop,
+                node_text: "REPLACED_TOKEN".into(),
+                node_range: TextRange::new(TextSize::new(23), TextSize::new(37)),
+                node_kind: "policy_name".into()
+            }
+        );
+
+        // cursor within quotation marks.
+        let (pos, query) = with_pos(format!(
+            r#"
+          drop policy "{}" on auth.users;
+      "#,
+            CURSOR_POS
+        ));
+
+        let context = PolicyParser::get_context(query.as_str(), pos);
+
+        assert_eq!(
+            context,
+            PolicyContext {
+                policy_name: None,
+                table_name: Some("users".into()),
+                schema_name: Some("auth".into()),
+                statement_kind: PolicyStmtKind::Drop,
+                node_text: "\"REPLACED_TOKEN\"".into(),
+                node_range: TextRange::new(TextSize::new(23), TextSize::new(39)),
+                node_kind: "policy_name".into()
+            }
+        );
+    }
+
+    #[test]
+    fn single_quotation_mark_does_not_fail() {
+        let (pos, query) = with_pos(format!(
+            r#"
+          drop policy "{} on auth.users;
+      "#,
+            CURSOR_POS
+        ));
+
+        let context = PolicyParser::get_context(query.as_str(), pos);
+
+        assert_eq!(context, PolicyContext::default());
+    }
+
+    fn to_word(word: &str, start: usize, end: usize) -> WordWithIndex {
+        WordWithIndex {
+            word: word.into(),
+            start,
+            end,
+        }
+    }
+
+    #[test]
+    fn determines_positions_correctly() {
+        let query = "\ncreate policy \"my cool pol\"\n\ton auth.users\n\tas permissive\n\tfor select\n\t\tto public\n\t\tusing (true);".to_string();
+
+        let words = sql_to_words(query.as_str()).unwrap();
+
+        assert_eq!(words[0], to_word("create", 1, 7));
+        assert_eq!(words[1], to_word("policy", 8, 14));
+        assert_eq!(words[2], to_word("\"my cool pol\"", 15, 28));
+        assert_eq!(words[3], to_word("on", 30, 32));
+        assert_eq!(words[4], to_word("auth.users", 33, 43));
+        assert_eq!(words[5], to_word("as", 45, 47));
+        assert_eq!(words[6], to_word("permissive", 48, 58));
+        assert_eq!(words[7], to_word("for", 60, 63));
+        assert_eq!(words[8], to_word("select", 64, 70));
+        assert_eq!(words[9], to_word("to", 73, 75));
+        assert_eq!(words[10], to_word("public", 78, 84));
+        assert_eq!(words[11], to_word("using", 87, 92));
+        assert_eq!(words[12], to_word("(true)", 93, 99));
+    }
+}
diff --git a/crates/pgt_completions/src/item.rs b/crates/pgt_completions/src/item.rs
index f37d0efb..702fc766 100644
--- a/crates/pgt_completions/src/item.rs
+++ b/crates/pgt_completions/src/item.rs
@@ -11,6 +11,7 @@ pub enum CompletionItemKind {
     Function,
     Column,
     Schema,
+    Policy,
 }
 
 impl Display for CompletionItemKind {
@@ -20,6 +21,7 @@ impl Display for CompletionItemKind {
             CompletionItemKind::Function => "Function",
             CompletionItemKind::Column => "Column",
             CompletionItemKind::Schema => "Schema",
+            
CompletionItemKind::Policy => "Policy", }; write!(f, "{txt}") diff --git a/crates/pgt_completions/src/providers/helper.rs b/crates/pgt_completions/src/providers/helper.rs index 999d6b37..eacb8314 100644 --- a/crates/pgt_completions/src/providers/helper.rs +++ b/crates/pgt_completions/src/providers/helper.rs @@ -1,6 +1,6 @@ use pgt_text_size::{TextRange, TextSize}; -use crate::{CompletionText, context::CompletionContext}; +use crate::{CompletionText, context::CompletionContext, remove_sanitized_token}; pub(crate) fn find_matching_alias_for_table( ctx: &CompletionContext, @@ -14,6 +14,21 @@ pub(crate) fn find_matching_alias_for_table( None } +pub(crate) fn get_range_to_replace(ctx: &CompletionContext) -> TextRange { + match ctx.node_under_cursor.as_ref() { + Some(node) => { + let content = ctx.get_node_under_cursor_content().unwrap_or("".into()); + let length = remove_sanitized_token(content.as_str()).len(); + + let start = node.start_byte(); + let end = start + length; + + TextRange::new(start.try_into().unwrap(), end.try_into().unwrap()) + } + None => TextRange::empty(TextSize::new(0)), + } +} + pub(crate) fn get_completion_text_with_schema_or_alias( ctx: &CompletionContext, item_name: &str, @@ -22,12 +37,7 @@ pub(crate) fn get_completion_text_with_schema_or_alias( if schema_or_alias_name == "public" || ctx.schema_or_alias_name.is_some() { None } else { - let node = ctx.node_under_cursor.unwrap(); - - let range = TextRange::new( - TextSize::try_from(node.start_byte()).unwrap(), - TextSize::try_from(node.end_byte()).unwrap(), - ); + let range = get_range_to_replace(ctx); Some(CompletionText { text: format!("{}.{}", schema_or_alias_name, item_name), diff --git a/crates/pgt_completions/src/providers/mod.rs b/crates/pgt_completions/src/providers/mod.rs index 82e32cdf..7b07cee8 100644 --- a/crates/pgt_completions/src/providers/mod.rs +++ b/crates/pgt_completions/src/providers/mod.rs @@ -1,10 +1,12 @@ mod columns; mod functions; mod helper; +mod policies; mod schemas; mod tables; pub use columns::*; pub use functions::*; +pub use policies::*; pub use schemas::*; pub use tables::*; diff --git a/crates/pgt_completions/src/providers/policies.rs b/crates/pgt_completions/src/providers/policies.rs new file mode 100644 index 00000000..2421f1f1 --- /dev/null +++ b/crates/pgt_completions/src/providers/policies.rs @@ -0,0 +1,103 @@ +use pgt_text_size::{TextRange, TextSize}; + +use crate::{ + CompletionItemKind, CompletionText, + builder::{CompletionBuilder, PossibleCompletionItem}, + context::CompletionContext, + relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore}, +}; + +use super::helper::get_range_to_replace; + +pub fn complete_policies<'a>(ctx: &CompletionContext<'a>, builder: &mut CompletionBuilder<'a>) { + let available_policies = &ctx.schema_cache.policies; + + let surrounded_by_quotes = ctx + .get_node_under_cursor_content() + .is_some_and(|c| c.starts_with('"') && c.ends_with('"') && c != "\"\""); + + for pol in available_policies { + let completion_text = if surrounded_by_quotes { + // If we're within quotes, we want to change the content + // *within* the quotes. + // If we attempt to replace outside the quotes, the VSCode + // client won't show the suggestions. 
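+            // e.g. for `alter policy "re|" on t`, only `re` is replaced and the
+            // surrounding quotes are left in place.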
+            let range = get_range_to_replace(ctx);
+            Some(CompletionText {
+                text: pol.name.clone(),
+                range: TextRange::new(
+                    range.start() + TextSize::new(1),
+                    range.end() - TextSize::new(1),
+                ),
+            })
+        } else {
+            // If we aren't within quotes, we want to complete the
+            // full policy including quotation marks.
+            Some(CompletionText {
+                text: format!("\"{}\"", pol.name),
+                range: get_range_to_replace(ctx),
+            })
+        };
+
+        let relevance = CompletionRelevanceData::Policy(pol);
+
+        let item = PossibleCompletionItem {
+            label: pol.name.chars().take(35).collect::<String>(),
+            score: CompletionScore::from(relevance.clone()),
+            filter: CompletionFilter::from(relevance),
+            description: pol.table_name.to_string(),
+            kind: CompletionItemKind::Policy,
+            completion_text,
+        };
+
+        builder.add_item(item);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::test_helper::{CURSOR_POS, CompletionAssertion, assert_complete_results};
+
+    #[tokio::test]
+    async fn completes_within_quotation_marks() {
+        let setup = r#"
+            create schema private;
+
+            create table private.users (
+                id serial primary key,
+                email text
+            );
+
+            create policy "read for public users disallowed" on private.users
+            as restrictive
+            for select
+            to public
+            using (false);
+
+            create policy "write for public users allowed" on private.users
+            as restrictive
+            for insert
+            to public
+            with check (true);
+        "#;
+
+        assert_complete_results(
+            format!("alter policy \"{}\" on private.users;", CURSOR_POS).as_str(),
+            vec![
+                CompletionAssertion::Label("read for public users disallowed".into()),
+                CompletionAssertion::Label("write for public users allowed".into()),
+            ],
+            setup,
+        )
+        .await;
+
+        assert_complete_results(
+            format!("alter policy \"w{}\" on private.users;", CURSOR_POS).as_str(),
+            vec![CompletionAssertion::Label(
+                "write for public users allowed".into(),
+            )],
+            setup,
+        )
+        .await;
+    }
+}
diff --git a/crates/pgt_completions/src/relevance.rs b/crates/pgt_completions/src/relevance.rs
index 911a6433..f51c3c52 100644
--- a/crates/pgt_completions/src/relevance.rs
+++ b/crates/pgt_completions/src/relevance.rs
@@ -7,4 +7,5 @@ pub(crate) enum CompletionRelevanceData<'a> {
     Function(&'a pgt_schema_cache::Function),
     Column(&'a pgt_schema_cache::Column),
     Schema(&'a pgt_schema_cache::Schema),
+    Policy(&'a pgt_schema_cache::Policy),
 }
diff --git a/crates/pgt_completions/src/relevance/filtering.rs b/crates/pgt_completions/src/relevance/filtering.rs
index ec12201c..3b148336 100644
--- a/crates/pgt_completions/src/relevance/filtering.rs
+++ b/crates/pgt_completions/src/relevance/filtering.rs
@@ -1,4 +1,4 @@
-use crate::context::{CompletionContext, WrappingClause};
+use crate::context::{CompletionContext, NodeUnderCursor, WrappingClause};
 
 use super::CompletionRelevanceData;
 
@@ -24,7 +24,11 @@ impl CompletionFilter<'_> {
     }
 
     fn completable_context(&self, ctx: &CompletionContext) -> Option<()> {
-        let current_node_kind = ctx.node_under_cursor.map(|n| n.kind()).unwrap_or("");
+        let current_node_kind = ctx
+            .node_under_cursor
+            .as_ref()
+            .map(|n| n.kind())
+            .unwrap_or("");
 
         if current_node_kind.starts_with("keyword_")
             || current_node_kind == "="
@@ -36,20 +40,23 @@ impl CompletionFilter<'_> {
         }
 
         // No autocompletions if there are two identifiers without a separator.
-        if ctx.node_under_cursor.is_some_and(|n| {
-            n.prev_sibling().is_some_and(|p| {
+        if ctx.node_under_cursor.as_ref().is_some_and(|n| match n {
+            NodeUnderCursor::TsNode(node) => node.prev_sibling().is_some_and(|p| {
                 (p.kind() == "identifier" || p.kind() == "object_reference")
                     && n.kind() == "identifier"
-            })
+            }),
+            NodeUnderCursor::CustomNode { .. } => false,
         }) {
             return None;
         }
 
         // no completions if we're right after an asterisk:
         // `select * {}`
-        if ctx.node_under_cursor.is_some_and(|n| {
-            n.prev_sibling()
-                .is_some_and(|p| (p.kind() == "all_fields") && n.kind() == "identifier")
+        if ctx.node_under_cursor.as_ref().is_some_and(|n| match n {
+            NodeUnderCursor::TsNode(node) => node
+                .prev_sibling()
+                .is_some_and(|p| (p.kind() == "all_fields") && n.kind() == "identifier"),
+            NodeUnderCursor::CustomNode { .. } => false,
         }) {
             return None;
         }
@@ -60,18 +67,19 @@ impl CompletionFilter<'_> {
     fn check_clause(&self, ctx: &CompletionContext) -> Option<()> {
         let clause = ctx.wrapping_clause_type.as_ref();
 
+        let in_clause = |compare: WrappingClause| clause.is_some_and(|c| c == &compare);
+
         match self.data {
             CompletionRelevanceData::Table(_) => {
-                let in_select_clause = clause.is_some_and(|c| c == &WrappingClause::Select);
-                let in_where_clause = clause.is_some_and(|c| c == &WrappingClause::Where);
-
-                if in_select_clause || in_where_clause {
+                if in_clause(WrappingClause::Select)
+                    || in_clause(WrappingClause::Where)
+                    || in_clause(WrappingClause::PolicyName)
+                {
                     return None;
                 };
             }
             CompletionRelevanceData::Column(_) => {
-                let in_from_clause = clause.is_some_and(|c| c == &WrappingClause::From);
-                if in_from_clause {
+                if in_clause(WrappingClause::From) || in_clause(WrappingClause::PolicyName) {
                     return None;
                 }
 
@@ -83,6 +91,7 @@ impl CompletionFilter<'_> {
 
                     WrappingClause::Join { on_node: Some(on) } => ctx
                         .node_under_cursor
+                        .as_ref()
                        .is_some_and(|n| n.end_byte() < on.start_byte()),
 
                     _ => false,
@@ -92,7 +101,16 @@ impl CompletionFilter<'_> {
                     return None;
                 }
             }
-            _ => {}
+            CompletionRelevanceData::Policy(_) => {
+                if clause.is_none_or(|c| c != &WrappingClause::PolicyName) {
+                    return None;
+                }
+            }
+            _ => {
+                if in_clause(WrappingClause::PolicyName) {
+                    return None;
+                }
+            }
         }
 
         Some(())
@@ -126,10 +144,10 @@ impl CompletionFilter<'_> {
                     .get(schema_or_alias)
                     .is_some_and(|t| t == &col.table_name),
 
-                CompletionRelevanceData::Schema(_) => {
-                    // we should never allow schema suggestions if there already was one.
-                    false
-                }
+                // we should never allow schema suggestions if there already was one.
+                CompletionRelevanceData::Schema(_) => false,
+                // no policy completion if user typed a schema node first.
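+                // e.g. after typing `auth.`, we suggest relations in that schema,
+                // but never policies.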
+ CompletionRelevanceData::Policy(_) => false, }; if !matches { diff --git a/crates/pgt_completions/src/relevance/scoring.rs b/crates/pgt_completions/src/relevance/scoring.rs index b0b0bf63..2fe12511 100644 --- a/crates/pgt_completions/src/relevance/scoring.rs +++ b/crates/pgt_completions/src/relevance/scoring.rs @@ -37,20 +37,23 @@ impl CompletionScore<'_> { fn check_matches_query_input(&mut self, ctx: &CompletionContext) { let content = match ctx.get_node_under_cursor_content() { - Some(c) => c, + Some(c) => c.replace('"', ""), None => return, }; let name = match self.data { - CompletionRelevanceData::Function(f) => f.name.as_str(), - CompletionRelevanceData::Table(t) => t.name.as_str(), - CompletionRelevanceData::Column(c) => c.name.as_str(), - CompletionRelevanceData::Schema(s) => s.name.as_str(), + CompletionRelevanceData::Function(f) => f.name.as_str().to_ascii_lowercase(), + CompletionRelevanceData::Table(t) => t.name.as_str().to_ascii_lowercase(), + CompletionRelevanceData::Column(c) => c.name.as_str().to_ascii_lowercase(), + CompletionRelevanceData::Schema(s) => s.name.as_str().to_ascii_lowercase(), + CompletionRelevanceData::Policy(p) => p.name.as_str().to_ascii_lowercase(), }; let fz_matcher = SkimMatcherV2::default(); - if let Some(score) = fz_matcher.fuzzy_match(name, content.as_str()) { + if let Some(score) = + fz_matcher.fuzzy_match(name.as_str(), content.to_ascii_lowercase().as_str()) + { let scorei32: i32 = score .try_into() .expect("The length of the input exceeds i32 capacity"); @@ -82,6 +85,7 @@ impl CompletionScore<'_> { WrappingClause::Join { on_node } if on_node.is_none_or(|on| { ctx.node_under_cursor + .as_ref() .is_none_or(|n| n.end_byte() < on.start_byte()) }) => { @@ -102,6 +106,7 @@ impl CompletionScore<'_> { WrappingClause::Join { on_node } if on_node.is_some_and(|on| { ctx.node_under_cursor + .as_ref() .is_some_and(|n| n.start_byte() > on.end_byte()) }) => { @@ -117,6 +122,10 @@ impl CompletionScore<'_> { WrappingClause::Delete if !has_mentioned_schema => 15, _ => -50, }, + CompletionRelevanceData::Policy(_) => match clause_type { + WrappingClause::PolicyName => 25, + _ => -50, + }, } } @@ -150,6 +159,7 @@ impl CompletionScore<'_> { WrappingNode::Relation if !has_mentioned_schema && has_node_text => 0, _ => -50, }, + CompletionRelevanceData::Policy(_) => 0, } } @@ -183,6 +193,7 @@ impl CompletionScore<'_> { CompletionRelevanceData::Table(t) => t.schema.as_str(), CompletionRelevanceData::Column(c) => c.schema_name.as_str(), CompletionRelevanceData::Schema(s) => s.name.as_str(), + CompletionRelevanceData::Policy(p) => p.schema_name.as_str(), } } @@ -190,6 +201,7 @@ impl CompletionScore<'_> { match self.data { CompletionRelevanceData::Column(c) => Some(c.table_name.as_str()), CompletionRelevanceData::Table(t) => Some(t.name.as_str()), + CompletionRelevanceData::Policy(p) => Some(p.table_name.as_str()), _ => None, } } diff --git a/crates/pgt_completions/src/sanitization.rs b/crates/pgt_completions/src/sanitization.rs index 248a0ffa..6aa75a16 100644 --- a/crates/pgt_completions/src/sanitization.rs +++ b/crates/pgt_completions/src/sanitization.rs @@ -4,6 +4,8 @@ use pgt_text_size::TextSize; use crate::CompletionParams; +static SANITIZED_TOKEN: &str = "REPLACED_TOKEN"; + pub(crate) struct SanitizedCompletionParams<'a> { pub position: TextSize, pub text: String, @@ -16,13 +18,39 @@ pub fn benchmark_sanitization(params: CompletionParams) -> String { params.text } +pub(crate) fn remove_sanitized_token(it: &str) -> String { + it.replace(SANITIZED_TOKEN, "") +} + 
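+/// The text of the node under the cursor: either the user's original input,
+/// or the `REPLACED_TOKEN` placeholder injected during sanitization.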
+#[derive(PartialEq, Eq, Debug)]
+pub(crate) enum NodeText {
+    Replaced,
+    Original(String),
+}
+
+impl From<&str> for NodeText {
+    fn from(value: &str) -> Self {
+        if value == SANITIZED_TOKEN {
+            NodeText::Replaced
+        } else {
+            NodeText::Original(value.into())
+        }
+    }
+}
+
+impl From<String> for NodeText {
+    fn from(value: String) -> Self {
+        NodeText::from(value.as_str())
+    }
+}
+
 impl<'larger, 'smaller> From<CompletionParams<'larger>> for SanitizedCompletionParams<'smaller>
 where
     'larger: 'smaller,
 {
     fn from(params: CompletionParams<'larger>) -> Self {
-        if cursor_inbetween_nodes(params.tree, params.position)
-            || cursor_prepared_to_write_token_after_last_node(params.tree, params.position)
+        if cursor_inbetween_nodes(&params.text, params.position)
+            || cursor_prepared_to_write_token_after_last_node(&params.text, params.position)
             || cursor_before_semicolon(params.tree, params.position)
             || cursor_on_a_dot(&params.text, params.position)
         {
@@ -33,8 +61,6 @@ where
     }
 }
 
-static SANITIZED_TOKEN: &str = "REPLACED_TOKEN";
-
 impl<'larger, 'smaller> SanitizedCompletionParams<'smaller>
 where
     'larger: 'smaller,
@@ -102,37 +128,17 @@ where
 /// select |from users; -- cursor "touches" from node. returns false.
 /// select | from users; -- cursor is between select and from nodes. returns true.
 /// ```
-fn cursor_inbetween_nodes(tree: &tree_sitter::Tree, position: TextSize) -> bool {
-    let mut cursor = tree.walk();
-    let mut leaf_node = tree.root_node();
-
-    let byte = position.into();
-
-    // if the cursor escapes the root node, it can't be between nodes.
-    if byte < leaf_node.start_byte() || byte >= leaf_node.end_byte() {
-        return false;
-    }
+fn cursor_inbetween_nodes(sql: &str, position: TextSize) -> bool {
+    let position: usize = position.into();
+    let mut chars = sql.chars();
 
-    /*
-     * Get closer and closer to the leaf node, until
-     * a) there is no more child *for the node* or
-     * b) there is no more child *under the cursor*.
-     */
-    loop {
-        let child_idx = cursor.goto_first_child_for_byte(position.into());
-        if child_idx.is_none() {
-            break;
-        }
-        leaf_node = cursor.node();
-    }
+    // at position 0 there is no previous character, so the cursor cannot sit
+    // between two nodes (this also avoids an underflow in `nth` below).
+    if position == 0 {
+        return false;
+    }
 
-    let cursor_on_leafnode = byte >= leaf_node.start_byte() && leaf_node.end_byte() >= byte;
+    let previous_whitespace = chars
+        .nth(position - 1)
+        .is_some_and(|c| c.is_ascii_whitespace());
 
-    /*
-     * The cursor is inbetween nodes if it is not within the range
-     * of a leaf node.
-     */
-    !cursor_on_leafnode
+    let current_whitespace = chars.next().is_some_and(|c| c.is_ascii_whitespace());
+
+    previous_whitespace && current_whitespace
 }
 
 /// Checks if the cursor is positioned after the last node,
 /// ready to write the next token:
 ///
 /// ```sql
 /// select * from| -- user still needs to type a space
 /// select * from | -- too far off.
 /// ```
-fn cursor_prepared_to_write_token_after_last_node(
-    tree: &tree_sitter::Tree,
-    position: TextSize,
-) -> bool {
+fn cursor_prepared_to_write_token_after_last_node(sql: &str, position: TextSize) -> bool {
     let cursor_pos: usize = position.into();
-    cursor_pos == tree.root_node().end_byte() + 1
+    cursor_pos == sql.len() + 1
 }
 
 fn cursor_on_a_dot(sql: &str, position: TextSize) -> bool {
@@ -214,58 +217,44 @@ mod tests {
 
         // note: two spaces between select and from.
let input = "select from users;"; - let mut parser = tree_sitter::Parser::new(); - parser - .set_language(tree_sitter_sql::language()) - .expect("Error loading sql language"); - - let tree = parser.parse(input, None).unwrap(); - // select | from users; <-- just right, one space after select token, one space before from - assert!(cursor_inbetween_nodes(&tree, TextSize::new(7))); + assert!(cursor_inbetween_nodes(input, TextSize::new(7))); // select| from users; <-- still on select token - assert!(!cursor_inbetween_nodes(&tree, TextSize::new(6))); + assert!(!cursor_inbetween_nodes(input, TextSize::new(6))); // select |from users; <-- already on from token - assert!(!cursor_inbetween_nodes(&tree, TextSize::new(8))); + assert!(!cursor_inbetween_nodes(input, TextSize::new(8))); // select from users;| - assert!(!cursor_inbetween_nodes(&tree, TextSize::new(19))); + assert!(!cursor_inbetween_nodes(input, TextSize::new(19))); } #[test] fn test_cursor_after_nodes() { let input = "select * from"; - let mut parser = tree_sitter::Parser::new(); - parser - .set_language(tree_sitter_sql::language()) - .expect("Error loading sql language"); - - let tree = parser.parse(input, None).unwrap(); - // select * from| <-- still on previous token assert!(!cursor_prepared_to_write_token_after_last_node( - &tree, + input, TextSize::new(13) )); // select * from | <-- too far off, two spaces afterward assert!(!cursor_prepared_to_write_token_after_last_node( - &tree, + input, TextSize::new(15) )); // select * |from <-- it's within assert!(!cursor_prepared_to_write_token_after_last_node( - &tree, + input, TextSize::new(9) )); // select * from | <-- just right assert!(cursor_prepared_to_write_token_after_last_node( - &tree, + input, TextSize::new(14) )); } diff --git a/crates/pgt_lsp/src/adapters/mod.rs b/crates/pgt_lsp/src/adapters/mod.rs index 972dd576..a5375180 100644 --- a/crates/pgt_lsp/src/adapters/mod.rs +++ b/crates/pgt_lsp/src/adapters/mod.rs @@ -158,6 +158,27 @@ mod tests { assert!(offset.is_none()); } + #[test] + fn with_tabs() { + let line_index = LineIndex::new( + r#" +select + email, + id +from auth.users u +join public.client_identities c on u.id = c.user_id; +"# + .trim(), + ); + + // on `i` of `id` in the select + // 22 because of: + // selectemail,i = 13 + // 8 spaces, 2 newlines = 23 characters + // it's zero indexed => index 22 + check_conversion!(line_index: Position { line: 2, character: 4 } => TextSize::from(22)); + } + #[test] fn unicode() { let line_index = LineIndex::new("'Jan 1, 2018 – Jan 1, 2019'"); diff --git a/crates/pgt_lsp/src/handlers/completions.rs b/crates/pgt_lsp/src/handlers/completions.rs index e1a7508c..ee13b26e 100644 --- a/crates/pgt_lsp/src/handlers/completions.rs +++ b/crates/pgt_lsp/src/handlers/completions.rs @@ -65,5 +65,6 @@ fn to_lsp_types_completion_item_kind( pgt_completions::CompletionItemKind::Table => lsp_types::CompletionItemKind::CLASS, pgt_completions::CompletionItemKind::Column => lsp_types::CompletionItemKind::FIELD, pgt_completions::CompletionItemKind::Schema => lsp_types::CompletionItemKind::CLASS, + pgt_completions::CompletionItemKind::Policy => lsp_types::CompletionItemKind::CONSTANT, } } diff --git a/crates/pgt_schema_cache/src/lib.rs b/crates/pgt_schema_cache/src/lib.rs index d978a94b..e73901d0 100644 --- a/crates/pgt_schema_cache/src/lib.rs +++ b/crates/pgt_schema_cache/src/lib.rs @@ -14,6 +14,7 @@ mod versions; pub use columns::*; pub use functions::{Behavior, Function, FunctionArg, FunctionArgs}; +pub use policies::{Policy, PolicyCommand}; pub use 
 pub use schemas::Schema;
 pub use tables::{ReplicaIdentity, Table};
diff --git a/crates/pgt_schema_cache/src/policies.rs b/crates/pgt_schema_cache/src/policies.rs
index 641dad12..85cd7821 100644
--- a/crates/pgt_schema_cache/src/policies.rs
+++ b/crates/pgt_schema_cache/src/policies.rs
@@ -56,14 +56,14 @@ impl From<PolicyQueried> for Policy {
 
 #[derive(Debug, PartialEq, Eq)]
 pub struct Policy {
-    name: String,
-    table_name: String,
-    schema_name: String,
-    is_permissive: bool,
-    command: PolicyCommand,
-    role_names: Vec<String>,
-    security_qualification: Option<String>,
-    with_check: Option<String>,
+    pub name: String,
+    pub table_name: String,
+    pub schema_name: String,
+    pub is_permissive: bool,
+    pub command: PolicyCommand,
+    pub role_names: Vec<String>,
+    pub security_qualification: Option<String>,
+    pub with_check: Option<String>,
 }
 
 impl SchemaCacheItem for Policy {
diff --git a/crates/pgt_text_size/src/range.rs b/crates/pgt_text_size/src/range.rs
index 3cfc3c96..baab91e9 100644
--- a/crates/pgt_text_size/src/range.rs
+++ b/crates/pgt_text_size/src/range.rs
@@ -299,6 +299,39 @@ impl TextRange {
             end: self.end.checked_add(offset)?,
         })
     }
+
+    /// Expand the range's start by the given offset.
+    /// The start will never exceed the range's end.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// # use pgt_text_size::*;
+    /// assert_eq!(
+    ///     TextRange::new(2.into(), 12.into()).checked_expand_start(4.into()).unwrap(),
+    ///     TextRange::new(6.into(), 12.into()),
+    /// );
+    ///
+    /// assert_eq!(
+    ///     TextRange::new(2.into(), 12.into()).checked_expand_start(12.into()).unwrap(),
+    ///     TextRange::new(12.into(), 12.into()),
+    /// );
+    /// ```
+    #[inline]
+    pub fn checked_expand_start(self, offset: TextSize) -> Option<TextRange> {
+        let new_start = self.start.checked_add(offset)?;
+        let end = self.end;
+
+        if new_start > end {
+            Some(TextRange { start: end, end })
+        } else {
+            Some(TextRange {
+                start: new_start,
+                end,
+            })
+        }
+    }
+
     /// Subtract an offset from this range.
/// /// Note that this is not appropriate for changing where a `TextRange` is diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs index 5a7bfc44..2c0f2b75 100644 --- a/crates/pgt_workspace/src/workspace/server.rs +++ b/crates/pgt_workspace/src/workspace/server.rs @@ -488,8 +488,11 @@ impl Workspace for WorkspaceServer { let schema_cache = self.schema_cache.load(pool)?; match get_statement_for_completions(&parsed_doc, params.position) { - None => Ok(CompletionsResult::default()), - Some((_id, range, content, cst)) => { + None => { + tracing::debug!("No statement found."); + Ok(CompletionsResult::default()) + } + Some((id, range, content, cst)) => { let position = params.position - range.start(); let items = pgt_completions::complete(pgt_completions::CompletionParams { @@ -499,6 +502,12 @@ impl Workspace for WorkspaceServer { text: content, }); + tracing::debug!( + "Found {} completion items for statement with id {}", + items.len(), + id.raw() + ); + Ok(CompletionsResult { items }) } } From 7ffbca6267a60803538dd4f633f5e3b44bee57d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Thu, 22 May 2025 14:26:50 +0200 Subject: [PATCH 056/114] fix: sql fn params (#366) the idea is to replace the fn params with a default value based on their type: ```sql create or replace function users.select_no_ref (user_id int4) returns table ( first_name text ) language sql security invoker as $$ select first_name FROM users_hidden.users where id = user_id; $$; ``` will become ```sql create or replace function users.select_no_ref (user_id int4) returns table ( first_name text ) language sql security invoker as $$ select first_name FROM users_hidden.users where id = 0; -- <-- here $$; ``` ## Todo - [x] pass params to typechecker - [x] implement `apply_identifiers` fixes #353 fixes #352 --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- Cargo.lock | 1 + crates/pgt_completions/src/context/mod.rs | 1 + crates/pgt_schema_cache/src/lib.rs | 1 + crates/pgt_schema_cache/src/types.rs | 6 +- crates/pgt_treesitter_queries/src/lib.rs | 35 +- .../pgt_treesitter_queries/src/queries/mod.rs | 9 + .../src/queries/parameters.rs | 82 +++++ crates/pgt_typecheck/Cargo.toml | 19 +- crates/pgt_typecheck/src/diagnostics.rs | 21 +- crates/pgt_typecheck/src/lib.rs | 20 +- crates/pgt_typecheck/src/typed_identifier.rs | 342 ++++++++++++++++++ crates/pgt_typecheck/tests/diagnostics.rs | 22 +- crates/pgt_workspace/src/workspace/server.rs | 32 +- .../src/workspace/server/document.rs | 7 + .../src/workspace/server/parsed_document.rs | 37 +- .../workspace/server/schema_cache_manager.rs | 10 +- .../src/workspace/server/sql_function.rs | 173 +++++++-- .../workspace/server/statement_identifier.rs | 15 + 18 files changed, 747 insertions(+), 86 deletions(-) create mode 100644 crates/pgt_treesitter_queries/src/queries/parameters.rs create mode 100644 crates/pgt_typecheck/src/typed_identifier.rs diff --git a/Cargo.lock b/Cargo.lock index 10f45b7c..4771c8a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2876,6 +2876,7 @@ dependencies = [ "pgt_schema_cache", "pgt_test_utils", "pgt_text_size", + "pgt_treesitter_queries", "sqlx", "tokio", "tree-sitter", diff --git a/crates/pgt_completions/src/context/mod.rs b/crates/pgt_completions/src/context/mod.rs index 23a6fcae..fec2e2d9 100644 --- a/crates/pgt_completions/src/context/mod.rs +++ b/crates/pgt_completions/src/context/mod.rs @@ -270,6 +270,7 @@ impl<'a> CompletionContext<'a> { 
                    .insert(Some(WrappingClause::Select), new);
                 }
             }
+            _ => {}
         };
     }
 }
diff --git a/crates/pgt_schema_cache/src/lib.rs b/crates/pgt_schema_cache/src/lib.rs
index e73901d0..d9be527d 100644
--- a/crates/pgt_schema_cache/src/lib.rs
+++ b/crates/pgt_schema_cache/src/lib.rs
@@ -19,3 +19,4 @@ pub use schema_cache::SchemaCache;
 pub use schemas::Schema;
 pub use tables::{ReplicaIdentity, Table};
 pub use triggers::{Trigger, TriggerAffected, TriggerEvent};
+pub use types::{PostgresType, PostgresTypeAttribute};
diff --git a/crates/pgt_schema_cache/src/types.rs b/crates/pgt_schema_cache/src/types.rs
index 8df6b0cb..d540c363 100644
--- a/crates/pgt_schema_cache/src/types.rs
+++ b/crates/pgt_schema_cache/src/types.rs
@@ -6,13 +6,13 @@ use crate::schema_cache::SchemaCacheItem;
 
 #[derive(Debug, Clone, Default)]
 pub struct TypeAttributes {
-    attrs: Vec<PostgresTypeAttribute>,
+    pub attrs: Vec<PostgresTypeAttribute>,
 }
 
 #[derive(Debug, Clone, Default, Deserialize)]
 pub struct PostgresTypeAttribute {
-    name: String,
-    type_id: i64,
+    pub name: String,
+    pub type_id: i64,
 }
 
 impl From<Vec<PostgresTypeAttribute>> for TypeAttributes {
diff --git a/crates/pgt_treesitter_queries/src/lib.rs b/crates/pgt_treesitter_queries/src/lib.rs
index 8d1719b0..4bf71e74 100644
--- a/crates/pgt_treesitter_queries/src/lib.rs
+++ b/crates/pgt_treesitter_queries/src/lib.rs
@@ -70,7 +70,7 @@ mod tests {
 
     use crate::{
         TreeSitterQueriesExecutor,
-        queries::{RelationMatch, TableAliasMatch},
+        queries::{ParameterMatch, RelationMatch, TableAliasMatch},
     };
 
     #[test]
@@ -207,11 +207,11 @@ where
 select
   *
 from (
-    select * 
+    select *
     from (
         select *
         from private.something
-    ) as sq2 
+    ) as sq2
     join private.tableau pt1
     on sq2.id = pt1.id
 ) as sq1
@@ -255,4 +255,33 @@ on sq1.id = pt.id;
         assert_eq!(results[0].get_schema(sql), Some("private".into()));
         assert_eq!(results[0].get_table(sql), "something");
     }
+
+    #[test]
+    fn extracts_parameters() {
+        let sql = r#"select v_test + fn_name.custom_type.v_test2 + $3 + custom_type.v_test3;"#;
+
+        let mut parser = tree_sitter::Parser::new();
+        parser.set_language(tree_sitter_sql::language()).unwrap();
+
+        let tree = parser.parse(sql, None).unwrap();
+
+        let mut executor = TreeSitterQueriesExecutor::new(tree.root_node(), sql);
+
+        executor.add_query_results::<ParameterMatch>();
+
+        let results: Vec<&ParameterMatch> = executor
+            .get_iter(None)
+            .filter_map(|q| q.try_into().ok())
+            .collect();
+
+        assert_eq!(results.len(), 4);
+
+        assert_eq!(results[0].get_path(sql), "v_test");
+
+        assert_eq!(results[1].get_path(sql), "fn_name.custom_type.v_test2");
+
+        assert_eq!(results[2].get_path(sql), "$3");
+
+        assert_eq!(results[3].get_path(sql), "custom_type.v_test3");
+    }
 }
diff --git a/crates/pgt_treesitter_queries/src/queries/mod.rs b/crates/pgt_treesitter_queries/src/queries/mod.rs
index e02d675b..aec6ce1a 100644
--- a/crates/pgt_treesitter_queries/src/queries/mod.rs
+++ b/crates/pgt_treesitter_queries/src/queries/mod.rs
@@ -1,7 +1,9 @@
+mod parameters;
 mod relations;
 mod select_columns;
 mod table_aliases;
 
+pub use parameters::*;
 pub use relations::*;
 pub use select_columns::*;
 pub use table_aliases::*;
@@ -9,6 +11,7 @@ pub use table_aliases::*;
 #[derive(Debug)]
 pub enum QueryResult<'a> {
     Relation(RelationMatch<'a>),
+    Parameter(ParameterMatch<'a>),
     TableAliases(TableAliasMatch<'a>),
     SelectClauseColumns(SelectColumnMatch<'a>),
 }
@@ -26,6 +29,12 @@ impl QueryResult<'_> {
 
                 start >= range.start_point && end <= range.end_point
             }
+            Self::Parameter(pm) => {
+                let node_range = pm.node.range();
+
+                node_range.start_point >= range.start_point
+                    && node_range.end_point <= range.end_point
+            }
             QueryResult::TableAliases(m) => {
                let start = m.table.start_position();
                 let end = m.alias.end_position();
diff --git a/crates/pgt_treesitter_queries/src/queries/parameters.rs b/crates/pgt_treesitter_queries/src/queries/parameters.rs
new file mode 100644
index 00000000..85ea9ad2
--- /dev/null
+++ b/crates/pgt_treesitter_queries/src/queries/parameters.rs
@@ -0,0 +1,82 @@
+use std::sync::LazyLock;
+
+use crate::{Query, QueryResult};
+
+use super::QueryTryFrom;
+
+static TS_QUERY: LazyLock<tree_sitter::Query> = LazyLock::new(|| {
+    static QUERY_STR: &str = r#"
+[
+  (field
+    (identifier)) @reference
+  (field
+    (object_reference)
+    "." (identifier)) @reference
+  (parameter) @parameter
+]
+"#;
+    tree_sitter::Query::new(tree_sitter_sql::language(), QUERY_STR).expect("Invalid TS Query")
+});
+
+#[derive(Debug)]
+pub struct ParameterMatch<'a> {
+    pub(crate) node: tree_sitter::Node<'a>,
+}
+
+impl ParameterMatch<'_> {
+    pub fn get_path(&self, sql: &str) -> String {
+        self.node
+            .utf8_text(sql.as_bytes())
+            .expect("Failed to get path from ParameterMatch")
+            .to_string()
+    }
+
+    pub fn get_range(&self) -> tree_sitter::Range {
+        self.node.range()
+    }
+
+    pub fn get_byte_range(&self) -> std::ops::Range<usize> {
+        let range = self.node.range();
+        range.start_byte..range.end_byte
+    }
+}
+
+impl<'a> TryFrom<&'a QueryResult<'a>> for &'a ParameterMatch<'a> {
+    type Error = String;
+
+    fn try_from(q: &'a QueryResult<'a>) -> Result<Self, Self::Error> {
+        match q {
+            QueryResult::Parameter(r) => Ok(r),
+
+            #[allow(unreachable_patterns)]
+            _ => Err("Invalid QueryResult type".into()),
+        }
+    }
+}
+
+impl<'a> QueryTryFrom<'a> for ParameterMatch<'a> {
+    type Ref = &'a ParameterMatch<'a>;
+}
+
+impl<'a> Query<'a> for ParameterMatch<'a> {
+    fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec<QueryResult<'a>> {
+        let mut cursor = tree_sitter::QueryCursor::new();
+
+        let matches = cursor.matches(&TS_QUERY, root_node, stmt.as_bytes());
+
+        matches
+            .filter_map(|m| {
+                let captures = m.captures;
+
+                // We expect exactly one capture for a parameter
+                if captures.len() != 1 {
+                    return None;
+                }
+
+                Some(QueryResult::Parameter(ParameterMatch {
+                    node: captures[0].node,
+                }))
+            })
+            .collect()
+    }
+}
diff --git a/crates/pgt_typecheck/Cargo.toml b/crates/pgt_typecheck/Cargo.toml
index a097fa56..caacc6d1 100644
--- a/crates/pgt_typecheck/Cargo.toml
+++ b/crates/pgt_typecheck/Cargo.toml
@@ -12,15 +12,16 @@ version = "0.0.0"
 
 
 [dependencies]
-pgt_console.workspace = true
-pgt_diagnostics.workspace = true
-pgt_query_ext.workspace = true
-pgt_schema_cache.workspace = true
-pgt_text_size.workspace = true
-sqlx.workspace = true
-tokio.workspace = true
-tree-sitter.workspace = true
-tree_sitter_sql.workspace = true
+pgt_console.workspace = true
+pgt_diagnostics.workspace = true
+pgt_query_ext.workspace = true
+pgt_schema_cache.workspace = true
+pgt_text_size.workspace = true
+pgt_treesitter_queries.workspace = true
+sqlx.workspace = true
+tokio.workspace = true
+tree-sitter.workspace = true
+tree_sitter_sql.workspace = true
 
 [dev-dependencies]
 insta.workspace = true
diff --git a/crates/pgt_typecheck/src/diagnostics.rs b/crates/pgt_typecheck/src/diagnostics.rs
index 8fd92da2..2117adbe 100644
--- a/crates/pgt_typecheck/src/diagnostics.rs
+++ b/crates/pgt_typecheck/src/diagnostics.rs
@@ -97,6 +97,7 @@ impl Advices for TypecheckAdvices {
 pub(crate) fn create_type_error(
     pg_err: &PgDatabaseError,
     ts: &tree_sitter::Tree,
+    positions_valid: bool,
 ) -> TypecheckDiagnostic {
     let position = pg_err.position().and_then(|pos| match pos {
         sqlx::postgres::PgErrorPosition::Original(pos) => Some(pos - 1),
@@ -104,14 +105,18 @@ pub(crate) fn create_type_error(
     });
 
     let range = position.and_then(|pos| {
-        ts.root_node()
-            .named_descendant_for_byte_range(pos, pos)
-            .map(|node| {
-                TextRange::new(
-                    node.start_byte().try_into().unwrap(),
-                    node.end_byte().try_into().unwrap(),
-                )
-            })
+        if positions_valid {
+            ts.root_node()
+                .named_descendant_for_byte_range(pos, pos)
+                .map(|node| {
+                    TextRange::new(
+                        node.start_byte().try_into().unwrap(),
+                        node.end_byte().try_into().unwrap(),
+                    )
+                })
+        } else {
+            None
+        }
     });
 
     let severity = match pg_err.severity() {
diff --git a/crates/pgt_typecheck/src/lib.rs b/crates/pgt_typecheck/src/lib.rs
index f741c0e6..e1dcd259 100644
--- a/crates/pgt_typecheck/src/lib.rs
+++ b/crates/pgt_typecheck/src/lib.rs
@@ -1,4 +1,5 @@
 mod diagnostics;
+mod typed_identifier;
 
 pub use diagnostics::TypecheckDiagnostic;
 use diagnostics::create_type_error;
@@ -6,6 +7,8 @@ use pgt_text_size::TextRange;
 use sqlx::postgres::PgDatabaseError;
 pub use sqlx::postgres::PgSeverity;
 use sqlx::{Executor, PgPool};
+use typed_identifier::apply_identifiers;
+pub use typed_identifier::{IdentifierType, TypedIdentifier};
 
 #[derive(Debug)]
 pub struct TypecheckParams<'a> {
@@ -13,6 +16,8 @@ pub struct TypecheckParams<'a> {
     pub sql: &'a str,
     pub ast: &'a pgt_query_ext::NodeEnum,
     pub tree: &'a tree_sitter::Tree,
+    pub schema_cache: &'a pgt_schema_cache::SchemaCache,
+    pub identifiers: Vec<TypedIdentifier>,
 }
 
 #[derive(Debug, Clone)]
@@ -51,13 +56,24 @@ pub async fn check_sql(
     // each typecheck operation.
     conn.close_on_drop();
 
-    let res = conn.prepare(params.sql).await;
+    let (prepared, positions_valid) = apply_identifiers(
+        params.identifiers,
+        params.schema_cache,
+        params.tree,
+        params.sql,
+    );
+
+    let res = conn.prepare(&prepared).await;
 
     match res {
         Ok(_) => Ok(None),
         Err(sqlx::Error::Database(err)) => {
             let pg_err = err.downcast_ref::<PgDatabaseError>();
-            Ok(Some(create_type_error(pg_err, params.tree)))
+            Ok(Some(create_type_error(
+                pg_err,
+                params.tree,
+                positions_valid,
+            )))
         }
         Err(err) => Err(err),
     }
diff --git a/crates/pgt_typecheck/src/typed_identifier.rs b/crates/pgt_typecheck/src/typed_identifier.rs
new file mode 100644
index 00000000..5efe0421
--- /dev/null
+++ b/crates/pgt_typecheck/src/typed_identifier.rs
@@ -0,0 +1,342 @@
+use pgt_schema_cache::PostgresType;
+use pgt_treesitter_queries::{TreeSitterQueriesExecutor, queries::ParameterMatch};
+
+/// A typed identifier is a parameter that has a type associated with it.
+/// It is used to replace parameters within the SQL string.
+#[derive(Debug)]
+pub struct TypedIdentifier {
+    /// The path of the parameter, usually the name of the function.
+    /// This is because `fn_name.arg_name` is a valid reference within a SQL function.
+    pub path: String,
+    /// The name of the argument
+    pub name: Option<String>,
+    /// The type of the argument with schema and name
+    pub type_: IdentifierType,
+}
+
+#[derive(Debug, Clone)]
+pub struct IdentifierType {
+    pub schema: Option<String>,
+    pub name: String,
+    pub is_array: bool,
+}
+
+/// Applies the identifiers to the SQL string by replacing them with their default values.
+pub fn apply_identifiers<'a>(
+    identifiers: Vec<TypedIdentifier>,
+    schema_cache: &'a pgt_schema_cache::SchemaCache,
+    cst: &'a tree_sitter::Tree,
+    sql: &'a str,
+) -> (String, bool) {
+    let mut executor = TreeSitterQueriesExecutor::new(cst.root_node(), sql);
+
+    executor.add_query_results::<ParameterMatch>();
+
+    // Collect all replacements first to avoid modifying the string while iterating
+    let replacements: Vec<_> = executor
+        .get_iter(None)
+        .filter_map(|q| {
+            let m: &ParameterMatch = q.try_into().ok()?;
+            let path = m.get_path(sql);
+            let parts: Vec<_> = path.split('.').collect();
+
+            // Find the matching identifier and its position in the path
+            let (identifier, position) = find_matching_identifier(&parts, &identifiers)?;
+
+            // Resolve the type based on whether we're accessing a field of a composite type
+            let type_ = resolve_type(identifier, position, &parts, schema_cache)?;
+
+            Some((m.get_byte_range(), type_, identifier.type_.is_array))
+        })
+        .collect();
+
+    let mut result = sql.to_string();
+
+    let mut valid_positions = true;
+
+    // Apply replacements in reverse order to maintain correct byte offsets
+    for (range, type_, is_array) in replacements.into_iter().rev() {
+        let default_value = get_formatted_default_value(type_, is_array);
+
+        // if the default_value is shorter than "range", fill it up with spaces
+        let default_value = if default_value.len() < range.end - range.start {
+            format!("{: <1$}", default_value, range.end - range.start)
+        } else {
+            default_value
+        };
+
+        if default_value.len() > range.end - range.start {
+            valid_positions = false;
+        }
+
+        result.replace_range(range, &default_value);
+    }
+
+    (result, valid_positions)
+}
+
+/// Format the default value based on the type and whether it's an array
+fn get_formatted_default_value(pg_type: &PostgresType, is_array: bool) -> String {
+    // Get the base default value for this type
+    let default = resolve_default_value(pg_type);
+
+    let default = if default.len() > "NULL".len() {
+        // If the default value is longer than "NULL", use "NULL" instead
+        "NULL".to_string()
+    } else {
+        // Otherwise, use the default value
+        default
+    };
+
+    // For arrays, wrap the default in array syntax
+    if is_array {
+        format!("'{{{}}}'", default)
+    } else {
+        default
+    }
+}
+
+/// Resolve the default value for a given Postgres type.
+///
+/// * `pg_type`: The type to return the default value for.
+pub fn resolve_default_value(pg_type: &PostgresType) -> String {
+    // Handle ENUM types by returning the first variant
+    if !pg_type.enums.values.is_empty() {
+        return format!("'{}'", pg_type.enums.values[0]);
+    }
+
+    match pg_type.name.as_str() {
+        // Numeric types
+        "smallint" | "int2" | "integer" | "int" | "int4" | "bigint" | "int8" | "decimal"
+        | "numeric" | "real" | "float4" | "double precision" | "float8" | "smallserial"
+        | "serial2" | "serial" | "serial4" | "bigserial" | "serial8" => "0".to_string(),
+
+        // Boolean type
+        "boolean" | "bool" => "false".to_string(),
+
+        // Character types
+        "character" | "char" | "character varying" | "varchar" | "text" => "''".to_string(),
+
+        // Date/time types
+        "date" => "'1970-01-01'".to_string(),
+        "time" | "time without time zone" => "'00:00:00'".to_string(),
+        "time with time zone" | "timetz" => "'00:00:00+00'".to_string(),
+        "timestamp" | "timestamp without time zone" => "'1970-01-01 00:00:00'".to_string(),
+        "timestamp with time zone" | "timestamptz" => "'1970-01-01 00:00:00+00'".to_string(),
+        "interval" => "'0'".to_string(),
+
+        // JSON types
+        "json" | "jsonb" => "'null'".to_string(),
+
+        // UUID
+        "uuid" => "'00000000-0000-0000-0000-000000000000'".to_string(),
+
+        // Byte array
+        "bytea" => "'\\x'".to_string(),
+
+        // Network types
+        "inet" => "'0.0.0.0'".to_string(),
+        "cidr" => "'0.0.0.0/0'".to_string(),
+        "macaddr" => "'00:00:00:00:00:00'".to_string(),
+        "macaddr8" => "'00:00:00:00:00:00:00:00'".to_string(),
+
+        // Monetary type
+        "money" => "'0.00'".to_string(),
+
+        // Geometric types
+        "point" => "'(0,0)'".to_string(),
+        "line" => "'{0,0,0}'".to_string(),
+        "lseg" => "'[(0,0),(0,0)]'".to_string(),
+        "box" => "'((0,0),(0,0))'".to_string(),
+        "path" => "'((0,0),(0,0))'".to_string(),
+        "polygon" => "'((0,0),(0,0),(0,0))'".to_string(),
+        "circle" => "'<(0,0),0>'".to_string(),
+
+        // Text search types
+        "tsvector" => "''".to_string(),
+        "tsquery" => "''".to_string(),
+
+        // XML
+        "xml" => "''".to_string(),
+
+        // Log sequence number
+        "pg_lsn" => "'0/0'".to_string(),
+
+        // Snapshot types
+        "txid_snapshot" | "pg_snapshot" => "NULL".to_string(),
+
+        // Fallback for unrecognized types
+        _ => "NULL".to_string(),
+    }
+}
+
+// Helper function to find the matching identifier and its position in the path
+fn find_matching_identifier<'a>(
+    parts: &[&str],
+    identifiers: &'a [TypedIdentifier],
+) -> Option<(&'a TypedIdentifier, usize)> {
+    // Case 1: Parameter reference (e.g., $2)
+    if parts.len() == 1 && parts[0].starts_with('$') {
+        let idx = parts[0][1..].parse::<usize>().ok()?;
+        let identifier = identifiers.get(idx - 1)?;
+        return Some((identifier, idx));
+    }
+
+    // Case 2: Named reference (e.g., fn_name.custom_type.v_test2)
+    identifiers.iter().find_map(|identifier| {
+        let name = identifier.name.as_ref()?;
+
+        parts
+            .iter()
+            .enumerate()
+            .find(|(_idx, part)| **part == name)
+            .map(|(idx, _)| (identifier, idx))
+    })
+}
+
+// Helper function to resolve the type based on the identifier and path
+fn resolve_type<'a>(
+    identifier: &TypedIdentifier,
+    position: usize,
+    parts: &[&str],
+    schema_cache: &'a pgt_schema_cache::SchemaCache,
+) -> Option<&'a PostgresType> {
+    if position < parts.len() - 1 {
+        // Find the composite type
+        let schema_type = schema_cache.types.iter().find(|t| {
+            identifier
+                .type_
+                .schema
+                .as_ref()
+                .is_none_or(|s| t.schema == *s)
+                && t.name == *identifier.type_.name
+        })?;
+
+        // Find the field within the composite type
+        let field_name = parts.last().unwrap();
+        let field = schema_type
+            .attributes
+            .attrs
+ .iter() + .find(|a| a.name == *field_name)?; + + // Find the field's type + schema_cache.types.iter().find(|t| t.id == field.type_id) + } else { + // Direct type reference + schema_cache.find_type(&identifier.type_.name, identifier.type_.schema.as_deref()) + } +} + +#[cfg(test)] +mod tests { + use pgt_test_utils::test_database::get_new_test_db; + use sqlx::Executor; + + #[tokio::test] + async fn test_apply_identifiers() { + let input = "select v_test + fn_name.custom_type.v_test2 + $3 + custom_type.v_test3 + fn_name.v_test2 + enum_type"; + + let identifiers = vec![ + super::TypedIdentifier { + path: "fn_name".to_string(), + name: Some("v_test".to_string()), + type_: super::IdentifierType { + schema: None, + name: "int4".to_string(), + is_array: false, + }, + }, + super::TypedIdentifier { + path: "fn_name".to_string(), + name: Some("custom_type".to_string()), + type_: super::IdentifierType { + schema: Some("public".to_string()), + name: "custom_type".to_string(), + is_array: false, + }, + }, + super::TypedIdentifier { + path: "fn_name".to_string(), + name: Some("another".to_string()), + type_: super::IdentifierType { + schema: None, + name: "numeric".to_string(), + is_array: false, + }, + }, + super::TypedIdentifier { + path: "fn_name".to_string(), + name: Some("custom_type".to_string()), + type_: super::IdentifierType { + schema: Some("public".to_string()), + name: "custom_type".to_string(), + is_array: false, + }, + }, + super::TypedIdentifier { + path: "fn_name".to_string(), + name: Some("v_test2".to_string()), + type_: super::IdentifierType { + schema: None, + name: "int4".to_string(), + is_array: false, + }, + }, + super::TypedIdentifier { + path: "fn_name".to_string(), + name: Some("enum_type".to_string()), + type_: super::IdentifierType { + schema: Some("public".to_string()), + name: "enum_type".to_string(), + is_array: false, + }, + }, + ]; + + let test_db = get_new_test_db().await; + + let setup = r#" + CREATE TYPE "public"."custom_type" AS ( + v_test2 integer, + v_test3 integer + ); + + CREATE TYPE "public"."enum_type" AS ENUM ( + 'critical', + 'high', + 'default', + 'low', + 'very_low' + ); + "#; + + test_db + .execute(setup) + .await + .expect("Failed to setup test database"); + + let mut parser = tree_sitter::Parser::new(); + parser + .set_language(tree_sitter_sql::language()) + .expect("Error loading sql language"); + + let schema_cache = pgt_schema_cache::SchemaCache::load(&test_db) + .await + .expect("Failed to load Schema Cache"); + + let tree = parser.parse(input, None).unwrap(); + + let (sql_out, valid_pos) = + super::apply_identifiers(identifiers, &schema_cache, &tree, input); + + assert!(valid_pos); + assert_eq!( + sql_out, + // the numeric parameters are filled with 0; + // all values of the enums are longer than `NULL`, so we use `NULL` instead + "select 0 + 0 + 0 + 0 + 0 + NULL " + ); + } +} diff --git a/crates/pgt_typecheck/tests/diagnostics.rs b/crates/pgt_typecheck/tests/diagnostics.rs index 4c780d74..9628962d 100644 --- a/crates/pgt_typecheck/tests/diagnostics.rs +++ b/crates/pgt_typecheck/tests/diagnostics.rs @@ -7,19 +7,25 @@ use pgt_test_utils::test_database::get_new_test_db; use pgt_typecheck::{TypecheckParams, check_sql}; use sqlx::Executor; -async fn test(name: &str, query: &str, setup: &str) { +async fn test(name: &str, query: &str, setup: Option<&str>) { let test_db = get_new_test_db().await; - test_db - .execute(setup) - .await - .expect("Failed to setup test database"); + if let Some(setup) = setup { + test_db + .execute(setup) + .await + 
.expect("Failed to setup test database"); + } let mut parser = tree_sitter::Parser::new(); parser .set_language(tree_sitter_sql::language()) .expect("Error loading sql language"); + let schema_cache = pgt_schema_cache::SchemaCache::load(&test_db) + .await + .expect("Failed to load Schema Cache"); + let root = pgt_query_ext::parse(query).unwrap(); let tree = parser.parse(query, None).unwrap(); @@ -29,6 +35,8 @@ async fn test(name: &str, query: &str, setup: &str) { sql: query, ast: &root, tree: &tree, + schema_cache: &schema_cache, + identifiers: vec![], }) .await; @@ -55,7 +63,8 @@ async fn invalid_column() { test( "invalid_column", "select id, unknown from contacts;", - r#" + Some( + r#" create table public.contacts ( id serial primary key, name varchar(255) not null, @@ -63,6 +72,7 @@ async fn invalid_column() { middle_name varchar(255) ); "#, + ), ) .await; } diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs index 2c0f2b75..82e79e10 100644 --- a/crates/pgt_workspace/src/workspace/server.rs +++ b/crates/pgt_workspace/src/workspace/server.rs @@ -1,4 +1,9 @@ -use std::{fs, panic::RefUnwindSafe, path::Path, sync::RwLock}; +use std::{ + fs, + panic::RefUnwindSafe, + path::Path, + sync::{Arc, RwLock}, +}; use analyser::AnalyserVisitorBuilder; use async_helper::run_async; @@ -16,7 +21,7 @@ use pgt_diagnostics::{ Diagnostic, DiagnosticExt, Error, Severity, serde::Diagnostic as SDiagnostic, }; use pgt_fs::{ConfigName, PgTPath}; -use pgt_typecheck::TypecheckParams; +use pgt_typecheck::{IdentifierType, TypecheckParams, TypedIdentifier}; use schema_cache_manager::SchemaCacheManager; use sqlx::Executor; use tracing::info; @@ -366,12 +371,16 @@ impl Workspace for WorkspaceServer { .get_pool() { let path_clone = params.path.clone(); + let schema_cache = self.schema_cache.load(pool.clone())?; + let schema_cache_arc = schema_cache.get_arc(); let input = parser.iter(AsyncDiagnosticsMapper).collect::>(); + // sorry for the ugly code :( let async_results = run_async(async move { stream::iter(input) - .map(|(_id, range, content, ast, cst)| { + .map(|(_id, range, content, ast, cst, sign)| { let pool = pool.clone(); let path = path_clone.clone(); + let schema_cache = Arc::clone(&schema_cache_arc); async move { if let Some(ast) = ast { pgt_typecheck::check_sql(TypecheckParams { @@ -379,6 +388,23 @@ impl Workspace for WorkspaceServer { sql: &content, ast: &ast, tree: &cst, + schema_cache: schema_cache.as_ref(), + identifiers: sign + .map(|s| { + s.args + .iter() + .map(|a| TypedIdentifier { + path: s.name.clone(), + name: a.name.clone(), + type_: IdentifierType { + schema: a.type_.schema.clone(), + name: a.type_.name.clone(), + is_array: a.type_.is_array, + }, + }) + .collect::>() + }) + .unwrap_or_default(), }) .await .map(|d| { diff --git a/crates/pgt_workspace/src/workspace/server/document.rs b/crates/pgt_workspace/src/workspace/server/document.rs index 67ed991c..ed0ca40f 100644 --- a/crates/pgt_workspace/src/workspace/server/document.rs +++ b/crates/pgt_workspace/src/workspace/server/document.rs @@ -34,6 +34,13 @@ impl Document { } } + pub fn statement_content(&self, id: &StatementId) -> Option<&str> { + self.positions + .iter() + .find(|(statement_id, _)| statement_id == id) + .map(|(_, range)| &self.content[*range]) + } + /// Returns true if there is at least one fatal error in the diagnostics /// /// A fatal error is a scan error that prevents the document from being used diff --git a/crates/pgt_workspace/src/workspace/server/parsed_document.rs 
index 92f33926..2b81faba 100644
--- a/crates/pgt_workspace/src/workspace/server/parsed_document.rs
+++ b/crates/pgt_workspace/src/workspace/server/parsed_document.rs
@@ -12,7 +12,7 @@ use super::{
     change::StatementChange,
     document::{Document, StatementIterator},
     pg_query::PgQueryStore,
-    sql_function::SQLFunctionBodyStore,
+    sql_function::{SQLFunctionSignature, get_sql_fn_body, get_sql_fn_signature},
     statement_identifier::StatementId,
     tree_sitter::TreeSitterStore,
 };
@@ -24,7 +24,6 @@ pub struct ParsedDocument {
     doc: Document,
     ast_db: PgQueryStore,
     cst_db: TreeSitterStore,
-    sql_fn_db: SQLFunctionBodyStore,
     annotation_db: AnnotationStore,
 }
 
@@ -34,7 +33,6 @@ impl ParsedDocument {
 
         let cst_db = TreeSitterStore::new();
         let ast_db = PgQueryStore::new();
-        let sql_fn_db = SQLFunctionBodyStore::new();
         let annotation_db = AnnotationStore::new();
 
         doc.iter().for_each(|(stmt, _, content)| {
@@ -46,7 +44,6 @@ impl ParsedDocument {
             doc,
             ast_db,
             cst_db,
-            sql_fn_db,
             annotation_db,
         }
     }
@@ -72,7 +69,6 @@ impl ParsedDocument {
                     tracing::debug!("Deleting statement: id {:?}", s,);
                     self.cst_db.remove_statement(s);
                     self.ast_db.clear_statement(s);
-                    self.sql_fn_db.clear_statement(s);
                     self.annotation_db.clear_statement(s);
                 }
                 StatementChange::Modified(s) => {
@@ -88,7 +84,6 @@ impl ParsedDocument {
 
                     self.cst_db.modify_statement(s);
                     self.ast_db.clear_statement(&s.old_stmt);
-                    self.sql_fn_db.clear_statement(&s.old_stmt);
                     self.annotation_db.clear_statement(&s.old_stmt);
                 }
             }
@@ -197,11 +192,7 @@ where
             .as_ref()
             {
                 // Check if this is a SQL function definition with a body
-                if let Some(sub_statement) =
-                    self.parser
-                        .sql_fn_db
-                        .get_function_body(&root_id, ast, &content_owned)
-                {
+                if let Some(sub_statement) = get_sql_fn_body(ast, &content_owned) {
                     // Add sub-statements to our pending queue
                     self.pending_sub_statements.push((
                         root_id.create_child(),
@@ -274,6 +265,7 @@ impl<'a> StatementMapper<'a> for AsyncDiagnosticsMapper {
         String,
         Option<pgt_query_ext::NodeEnum>,
         Arc<tree_sitter::Tree>,
+        Option<SQLFunctionSignature>,
     );
 
     fn map(
@@ -293,7 +285,26 @@ impl<'a> StatementMapper<'a> for AsyncDiagnosticsMapper {
 
         let cst_result = parser.cst_db.get_or_cache_tree(&id, &content_owned);
 
-        (id, range, content_owned, ast_option, cst_result)
+        let sql_fn_sig = id
+            .parent()
+            .and_then(|root| {
+                let c = parser.doc.statement_content(&root)?;
+                Some((root, c))
+            })
+            .and_then(|(root, c)| {
+                let ast_option = parser
+                    .ast_db
+                    .get_or_cache_ast(&root, c)
+                    .as_ref()
+                    .clone()
+                    .ok();
+
+                let ast_option = ast_option.as_ref()?;
+
+                get_sql_fn_signature(ast_option)
+            });
+
+        (id, range, content_owned, ast_option, cst_result, sql_fn_sig)
     }
 }
 
@@ -413,7 +424,7 @@ mod tests {
 
     #[test]
     fn sql_function_body() {
-        let input = "CREATE FUNCTION add(integer, integer) RETURNS integer
+        let input = "CREATE FUNCTION add(test0 integer, test1 integer) RETURNS integer
 AS 'select $1 + $2;'
 LANGUAGE SQL
 IMMUTABLE
diff --git a/crates/pgt_workspace/src/workspace/server/schema_cache_manager.rs b/crates/pgt_workspace/src/workspace/server/schema_cache_manager.rs
index 57a37536..03cd6ded 100644
--- a/crates/pgt_workspace/src/workspace/server/schema_cache_manager.rs
+++ b/crates/pgt_workspace/src/workspace/server/schema_cache_manager.rs
@@ -1,4 +1,4 @@
-use std::sync::{RwLock, RwLockReadGuard};
+use std::sync::{Arc, RwLock, RwLockReadGuard};
 
 use pgt_schema_cache::SchemaCache;
 use sqlx::PgPool;
@@ -21,6 +21,10 @@ impl<'a> SchemaCacheHandle<'a> {
     pub(crate) fn wrap(inner: RwLockReadGuard<'a, SchemaCacheManagerInner>) -> Self {
         Self { inner }
     }
+
+    pub fn get_arc(&self) -> Arc<SchemaCache> {
+        Arc::clone(&self.inner.cache)
+    }
 }
 
 impl AsRef<SchemaCache> for SchemaCacheHandle<'_> {
@@ -31,7 +35,7 @@ impl AsRef<SchemaCache> for SchemaCacheHandle<'_> {
 
 #[derive(Default)]
 pub(crate) struct SchemaCacheManagerInner {
-    cache: SchemaCache,
+    cache: Arc<SchemaCache>,
     conn_str: String,
 }
 
@@ -62,7 +66,7 @@ impl SchemaCacheManager {
 
             // Double-check that we still need to refresh (another thread might have done it)
             if new_conn_str != inner.conn_str {
-                inner.cache = refreshed;
+                inner.cache = Arc::new(refreshed);
                 inner.conn_str = new_conn_str;
                 tracing::info!("Refreshed connection.");
             }
diff --git a/crates/pgt_workspace/src/workspace/server/sql_function.rs b/crates/pgt_workspace/src/workspace/server/sql_function.rs
index 777210d5..48f91ef4 100644
--- a/crates/pgt_workspace/src/workspace/server/sql_function.rs
+++ b/crates/pgt_workspace/src/workspace/server/sql_function.rs
@@ -1,56 +1,82 @@
-use std::sync::Arc;
-
-use dashmap::DashMap;
 use pgt_text_size::TextRange;
 
-use super::statement_identifier::StatementId;
+#[derive(Debug, Clone)]
+pub struct ArgType {
+    pub schema: Option<String>,
+    pub name: String,
+    pub is_array: bool,
+}
 
 #[derive(Debug, Clone)]
-pub struct SQLFunctionBody {
-    pub range: TextRange,
-    pub body: String,
+pub struct SQLFunctionArg {
+    pub name: Option<String>,
+    pub type_: ArgType,
 }
 
-pub struct SQLFunctionBodyStore {
-    db: DashMap<StatementId, Option<Arc<SQLFunctionBody>>>,
+#[derive(Debug, Clone)]
+pub struct SQLFunctionSignature {
+    pub schema: Option<String>,
+    pub name: String,
+    pub args: Vec<SQLFunctionArg>,
 }
 
-impl SQLFunctionBodyStore {
-    pub fn new() -> SQLFunctionBodyStore {
-        SQLFunctionBodyStore { db: DashMap::new() }
-    }
+#[derive(Debug, Clone)]
+pub struct SQLFunctionBody {
+    pub range: TextRange,
+    pub body: String,
+}
 
-    pub fn get_function_body(
-        &self,
-        statement: &StatementId,
-        ast: &pgt_query_ext::NodeEnum,
-        content: &str,
-    ) -> Option<Arc<SQLFunctionBody>> {
-        // First check if we already have this statement cached
-        if let Some(existing) = self.db.get(statement).map(|x| x.clone()) {
-            return existing;
-        }
+/// Extracts the function signature from a SQL function definition
+pub fn get_sql_fn_signature(ast: &pgt_query_ext::NodeEnum) -> Option<SQLFunctionSignature> {
+    let create_fn = match ast {
+        pgt_query_ext::NodeEnum::CreateFunctionStmt(cf) => cf,
+        _ => return None,
+    };
 
-        // If not cached, try to extract it from the AST
-        let fn_body = get_sql_fn(ast, content).map(Arc::new);
+    // Extract language from function options
+    let language = find_option_value(create_fn, "language")?;
 
-        // Cache the result and return it
-        self.db.insert(statement.clone(), fn_body.clone());
-        fn_body
+    // Only process SQL functions
+    if language != "sql" {
+        return None;
     }
 
-    pub fn clear_statement(&self, id: &StatementId) {
-        self.db.remove(id);
-
-        if let Some(child_id) = id.get_child_id() {
-            self.db.remove(&child_id);
+    let fn_name = parse_name(&create_fn.funcname)?;
+
+    // we return None if anything is not expected
+    let mut fn_args = Vec::new();
+    for arg in &create_fn.parameters {
+        if let Some(pgt_query_ext::NodeEnum::FunctionParameter(node)) = &arg.node {
+            let arg_name = (!node.name.is_empty()).then_some(node.name.clone());
+
+            let arg_type = node.arg_type.as_ref()?;
+            let type_name = parse_name(&arg_type.names)?;
+            fn_args.push(SQLFunctionArg {
+                name: arg_name,
+                type_: ArgType {
+                    schema: type_name.0,
+                    name: type_name.1,
+                    is_array: node
+                        .arg_type
+                        .as_ref()
+                        .map(|t| !t.array_bounds.is_empty())
+                        .unwrap_or(false),
+                },
+            });
+        } else {
+            return None;
         }
     }
+
+    Some(SQLFunctionSignature {
+        schema: fn_name.0,
+        name: fn_name.1,
+        args: fn_args,
+    })
 }
 
-/// Extracts SQL function body and its text range from a CreateFunctionStmt node.
-/// Returns None if the function is not an SQL function or if the body can't be found.
-fn get_sql_fn(ast: &pgt_query_ext::NodeEnum, content: &str) -> Option<SQLFunctionBody> {
+/// Extracts the SQL body from a function definition
+pub fn get_sql_fn_body(ast: &pgt_query_ext::NodeEnum, content: &str) -> Option<SQLFunctionBody> {
     let create_fn = match ast {
         pgt_query_ext::NodeEnum::CreateFunctionStmt(cf) => cf,
         _ => return None,
     };
@@ -120,3 +146,78 @@ fn find_option_value(
         }
     })
 }
+
+fn parse_name(nodes: &[pgt_query_ext::protobuf::Node]) -> Option<(Option<String>, String)> {
+    let names = nodes
+        .iter()
+        .map(|n| match &n.node {
+            Some(pgt_query_ext::NodeEnum::String(s)) => Some(s.sval.clone()),
+            _ => None,
+        })
+        .collect::<Vec<_>>();
+
+    match names.as_slice() {
+        [Some(schema), Some(name)] => Some((Some(schema.clone()), name.clone())),
+        [Some(name)] => Some((None, name.clone())),
+        _ => None,
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn sql_function_signature() {
+        let input = "CREATE FUNCTION add(test0 integer, test1 integer) RETURNS integer
+    AS 'select $1 + $2;'
+    LANGUAGE SQL
+    IMMUTABLE
+    RETURNS NULL ON NULL INPUT;";
+
+        let ast = pgt_query_ext::parse(input).unwrap();
+
+        let sig = get_sql_fn_signature(&ast);
+
+        assert!(sig.is_some());
+
+        let sig = sig.unwrap();
+
+        let arg1 = sig.args.first().unwrap();
+
+        assert_eq!(arg1.name, Some("test0".to_string()));
+        assert_eq!(arg1.type_.name, "int4");
+
+        let arg2 = sig.args.get(1).unwrap();
+        assert_eq!(arg2.name, Some("test1".to_string()));
+        assert_eq!(arg2.type_.name, "int4");
+    }
+
+    #[test]
+    fn array_type() {
+        let input = "CREATE FUNCTION add(test0 integer[], test1 integer) RETURNS integer
+    AS 'select $1 + $2;'
+    LANGUAGE SQL
+    IMMUTABLE
+    RETURNS NULL ON NULL INPUT;";
+
+        let ast = pgt_query_ext::parse(input).unwrap();
+
+        let sig = get_sql_fn_signature(&ast);
+
+        assert!(sig.is_some());
+
+        let sig = sig.unwrap();
+
+        assert!(
+            sig.args
+                .iter()
+                .find(|arg| arg.type_.is_array)
+                .map(|arg| {
+                    assert_eq!(arg.type_.name, "int4");
+                    assert!(arg.type_.is_array);
+                })
+                .is_some()
+        );
+    }
+}
diff --git a/crates/pgt_workspace/src/workspace/server/statement_identifier.rs b/crates/pgt_workspace/src/workspace/server/statement_identifier.rs
index 8c02814d..7c7d76f0 100644
--- a/crates/pgt_workspace/src/workspace/server/statement_identifier.rs
+++ b/crates/pgt_workspace/src/workspace/server/statement_identifier.rs
@@ -57,6 +57,21 @@ impl StatementId {
             StatementId::Child(s) => s.inner,
         }
     }
+
+    pub fn is_root(&self) -> bool {
+        matches!(self, StatementId::Root(_))
+    }
+
+    pub fn is_child(&self) -> bool {
+        matches!(self, StatementId::Child(_))
+    }
+
+    pub fn parent(&self) -> Option<StatementId> {
+        match self {
+            StatementId::Root(_) => None,
+            StatementId::Child(id) => Some(StatementId::Root(id.clone())),
+        }
+    }
 }
 
 /// Helper struct to generate unique statement ids
From a2e4640b9c997003c90bd93807fa31b00f42508b Mon Sep 17 00:00:00 2001
From: Julian Domke <68325451+juleswritescode@users.noreply.github.com>
Date: Sat, 24 May 2025 19:13:25 +0200
Subject: [PATCH 056/114] feat(completions): nicer function completions (#401)

---
 crates/pgt_completions/src/item.rs            |  2 +
 .../src/providers/functions.rs                | 44 ++++++++++++++++---
 .../pgt_completions/src/providers/helper.rs   |  5 ++-
 .../pgt_completions/src/providers/policies.rs |  2 +
 crates/pgt_lsp/src/handlers/completions.rs    |  9 +++-
 .../src/workspace/server/sql_function.rs      |  1 +
 6 files changed, 55 insertions(+), 8 deletions(-)

diff --git a/crates/pgt_completions/src/item.rs b/crates/pgt_completions/src/item.rs
index 702fc766..59b7b371 100644
--- a/crates/pgt_completions/src/item.rs
+++ b/crates/pgt_completions/src/item.rs
@@ -41,6 +41,8 @@ pub struct CompletionText {
     /// others naively insert the text.
     /// Having a range where start == end makes it an insertion.
     pub range: TextRange,
+
+    pub is_snippet: bool,
 }
 
 #[derive(Debug, Serialize, Deserialize)]
diff --git a/crates/pgt_completions/src/providers/functions.rs b/crates/pgt_completions/src/providers/functions.rs
index 6bc04deb..ed5afd6a 100644
--- a/crates/pgt_completions/src/providers/functions.rs
+++ b/crates/pgt_completions/src/providers/functions.rs
@@ -1,7 +1,10 @@
+use pgt_schema_cache::Function;
+
 use crate::{
-    CompletionItemKind,
+    CompletionItemKind, CompletionText,
     builder::{CompletionBuilder, PossibleCompletionItem},
     context::CompletionContext,
+    providers::helper::get_range_to_replace,
     relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore},
 };
 
@@ -19,17 +22,46 @@ pub fn complete_functions<'a>(ctx: &'a CompletionContext, builder: &mut Completi
             filter: CompletionFilter::from(relevance),
             description: format!("Schema: {}", func.schema),
             kind: CompletionItemKind::Function,
-            completion_text: get_completion_text_with_schema_or_alias(
-                ctx,
-                &func.name,
-                &func.schema,
-            ),
+            completion_text: Some(get_completion_text(ctx, func)),
         };
 
         builder.add_item(item);
     }
 }
 
+fn get_completion_text(ctx: &CompletionContext, func: &Function) -> CompletionText {
+    let range = get_range_to_replace(ctx);
+    let mut text = get_completion_text_with_schema_or_alias(ctx, &func.name, &func.schema)
+        .map(|ct| ct.text)
+        .unwrap_or(func.name.to_string());
+
+    if ctx.is_invocation {
+        CompletionText {
+            text,
+            range,
+            is_snippet: false,
+        }
+    } else {
+        text.push('(');
+
+        let num_args = func.args.args.len();
+        for (idx, arg) in func.args.args.iter().enumerate() {
+            text.push_str(format!(r#"${{{}:{}}}"#, idx + 1, arg.name).as_str());
+            if idx < num_args - 1 {
+                text.push_str(", ");
+            }
+        }
+
+        text.push(')');
+
+        CompletionText {
+            text,
+            range,
+            is_snippet: num_args > 0,
+        }
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use crate::{
diff --git a/crates/pgt_completions/src/providers/helper.rs b/crates/pgt_completions/src/providers/helper.rs
index eacb8314..811125bd 100644
--- a/crates/pgt_completions/src/providers/helper.rs
+++ b/crates/pgt_completions/src/providers/helper.rs
@@ -34,7 +34,9 @@ pub(crate) fn get_completion_text_with_schema_or_alias(
     item_name: &str,
     schema_or_alias_name: &str,
 ) -> Option<CompletionText> {
-    if schema_or_alias_name == "public" || ctx.schema_or_alias_name.is_some() {
+    let is_already_prefixed_with_schema_name = ctx.schema_or_alias_name.is_some();
+
+    if schema_or_alias_name == "public" || is_already_prefixed_with_schema_name {
         None
     } else {
         let range = get_range_to_replace(ctx);
@@ -42,6 +44,7 @@ pub(crate) fn get_completion_text_with_schema_or_alias(
         Some(CompletionText {
             text: format!("{}.{}", schema_or_alias_name, item_name),
             range,
+            is_snippet: false,
         })
     }
 }
diff --git a/crates/pgt_completions/src/providers/policies.rs b/crates/pgt_completions/src/providers/policies.rs
index 2421f1f1..53aff32d 100644
--- a/crates/pgt_completions/src/providers/policies.rs
+++ b/crates/pgt_completions/src/providers/policies.rs
@@ -25,6 +25,7 @@ pub fn complete_policies<'a>(ctx: &CompletionContext<'a>, builder: &mut Completi
         let range = get_range_to_replace(ctx);
         Some(CompletionText {
             text: pol.name.clone(),
+            is_snippet: false,
             range: TextRange::new(
                 range.start() + TextSize::new(1),
                 range.end() - TextSize::new(1),
@@ -34,6 +35,7 @@ pub fn complete_policies<'a>(ctx: &CompletionContext<'a>, builder: &mut Completi
         // If we aren't within quotes, we want to complete the
         // full policy including quotation marks.
         Some(CompletionText {
+            is_snippet: false,
             text: format!("\"{}\"", pol.name),
             range: get_range_to_replace(ctx),
         })
diff --git a/crates/pgt_lsp/src/handlers/completions.rs b/crates/pgt_lsp/src/handlers/completions.rs
index ee13b26e..1627632d 100644
--- a/crates/pgt_lsp/src/handlers/completions.rs
+++ b/crates/pgt_lsp/src/handlers/completions.rs
@@ -5,7 +5,9 @@ use crate::{
 };
 use anyhow::Result;
 use pgt_workspace::{WorkspaceError, features::completions::GetCompletionsParams};
-use tower_lsp::lsp_types::{self, CompletionItem, CompletionItemLabelDetails, TextEdit};
+use tower_lsp::lsp_types::{
+    self, CompletionItem, CompletionItemLabelDetails, InsertTextFormat, TextEdit,
+};
 
 #[tracing::instrument(level = "debug", skip(session), err)]
 pub fn get_completions(
@@ -43,6 +45,11 @@ pub fn get_completions(
             }),
             preselect: Some(i.preselected),
             sort_text: Some(i.sort_text),
+            insert_text_format: if i.completion_text.as_ref().is_some_and(|c| c.is_snippet) {
+                Some(InsertTextFormat::SNIPPET)
+            } else {
+                Some(InsertTextFormat::PLAIN_TEXT)
+            },
             text_edit: i.completion_text.map(|c| {
                 lsp_types::CompletionTextEdit::Edit(TextEdit {
                     new_text: c.text,
diff --git a/crates/pgt_workspace/src/workspace/server/sql_function.rs b/crates/pgt_workspace/src/workspace/server/sql_function.rs
index 48f91ef4..bc2c6c3b 100644
--- a/crates/pgt_workspace/src/workspace/server/sql_function.rs
+++ b/crates/pgt_workspace/src/workspace/server/sql_function.rs
@@ -15,6 +15,7 @@ pub struct SQLFunctionArg {
 
 #[derive(Debug, Clone)]
 pub struct SQLFunctionSignature {
+    #[allow(dead_code)]
     pub schema: Option<String>,
     pub name: String,
     pub args: Vec<SQLFunctionArg>,
From 2f08a674592353d2489441f09d6a186ea40f342a Mon Sep 17 00:00:00 2001
From: Julian Domke <68325451+juleswritescode@users.noreply.github.com>
Date: Sat, 24 May 2025 19:32:32 +0200
Subject: [PATCH 058/114] feat(completions): show data type for columns (#402)

---
 ...b0ce33a0002d170200d77afeee60a7977278.json} | 20 ++++++++++++-------
 crates/pgt_completions/src/builder.rs         |  2 ++
 crates/pgt_completions/src/item.rs            |  1 +
 .../pgt_completions/src/providers/columns.rs  |  1 +
 .../src/providers/functions.rs                |  1 +
 .../pgt_completions/src/providers/policies.rs |  1 +
 .../pgt_completions/src/providers/schemas.rs  |  1 +
 .../pgt_completions/src/providers/tables.rs   |  1 +
 crates/pgt_lsp/src/handlers/completions.rs    |  5 ++++-
 crates/pgt_schema_cache/src/columns.rs        |  1 +
 .../pgt_schema_cache/src/queries/columns.sql  |  2 ++
 11 files changed, 28 insertions(+), 8 deletions(-)
 rename .sqlx/{query-fc0a0aa6d2a06bf3103d26a0233ae86f456892fa9ce48854a8b960cdf2d11a45.json => query-97da37af0d64378cb622cab14bb3b0ce33a0002d170200d77afeee60a7977278.json} (67%)

diff --git a/.sqlx/query-fc0a0aa6d2a06bf3103d26a0233ae86f456892fa9ce48854a8b960cdf2d11a45.json b/.sqlx/query-97da37af0d64378cb622cab14bb3b0ce33a0002d170200d77afeee60a7977278.json
similarity index 67%
rename from .sqlx/query-fc0a0aa6d2a06bf3103d26a0233ae86f456892fa9ce48854a8b960cdf2d11a45.json
rename to .sqlx/query-97da37af0d64378cb622cab14bb3b0ce33a0002d170200d77afeee60a7977278.json
index 01043a69..924369cd 100644
--- a/.sqlx/query-fc0a0aa6d2a06bf3103d26a0233ae86f456892fa9ce48854a8b960cdf2d11a45.json
+++ b/.sqlx/query-97da37af0d64378cb622cab14bb3b0ce33a0002d170200d77afeee60a7977278.json
@@ -1,6 +1,6 @@
 {
   "db_name": "PostgreSQL",
- "query": "with\n available_tables as (\n
select\n c.relname as table_name,\n c.oid as table_oid,\n c.relkind as class_kind,\n n.nspname as schema_name\n from\n pg_catalog.pg_class c\n join pg_catalog.pg_namespace n on n.oid = c.relnamespace\n where\n -- r: normal tables\n -- v: views\n -- m: materialized views\n -- f: foreign tables\n -- p: partitioned tables\n c.relkind in ('r', 'v', 'm', 'f', 'p')\n ),\n available_indexes as (\n select\n unnest (ix.indkey) as attnum,\n ix.indisprimary as is_primary,\n ix.indisunique as is_unique,\n ix.indrelid as table_oid\n from\n pg_catalog.pg_class c\n join pg_catalog.pg_index ix on c.oid = ix.indexrelid\n where\n c.relkind = 'i'\n )\nselect\n atts.attname as name,\n ts.table_name,\n ts.table_oid :: int8 as \"table_oid!\",\n ts.class_kind :: char as \"class_kind!\",\n ts.schema_name,\n atts.atttypid :: int8 as \"type_id!\",\n not atts.attnotnull as \"is_nullable!\",\n nullif(\n information_schema._pg_char_max_length (atts.atttypid, atts.atttypmod),\n -1\n ) as varchar_length,\n pg_get_expr (def.adbin, def.adrelid) as default_expr,\n coalesce(ix.is_primary, false) as \"is_primary_key!\",\n coalesce(ix.is_unique, false) as \"is_unique!\",\n pg_catalog.col_description (ts.table_oid, atts.attnum) as comment\nfrom\n pg_catalog.pg_attribute atts\n join available_tables ts on atts.attrelid = ts.table_oid\n left join available_indexes ix on atts.attrelid = ix.table_oid\n and atts.attnum = ix.attnum\n left join pg_catalog.pg_attrdef def on atts.attrelid = def.adrelid\n and atts.attnum = def.adnum\nwhere\n -- system columns, such as `cmax` or `tableoid`, have negative `attnum`s\n atts.attnum >= 0\norder by\n schema_name desc,\n table_name,\n atts.attnum;", + "query": "with\n available_tables as (\n select\n c.relname as table_name,\n c.oid as table_oid,\n c.relkind as class_kind,\n n.nspname as schema_name\n from\n pg_catalog.pg_class c\n join pg_catalog.pg_namespace n on n.oid = c.relnamespace\n where\n -- r: normal tables\n -- v: views\n -- m: materialized views\n -- f: foreign tables\n -- p: partitioned tables\n c.relkind in ('r', 'v', 'm', 'f', 'p')\n ),\n available_indexes as (\n select\n unnest (ix.indkey) as attnum,\n ix.indisprimary as is_primary,\n ix.indisunique as is_unique,\n ix.indrelid as table_oid\n from\n pg_catalog.pg_class c\n join pg_catalog.pg_index ix on c.oid = ix.indexrelid\n where\n c.relkind = 'i'\n )\nselect\n atts.attname as name,\n ts.table_name,\n ts.table_oid :: int8 as \"table_oid!\",\n ts.class_kind :: char as \"class_kind!\",\n ts.schema_name,\n atts.atttypid :: int8 as \"type_id!\",\n tps.typname as \"type_name\",\n not atts.attnotnull as \"is_nullable!\",\n nullif(\n information_schema._pg_char_max_length (atts.atttypid, atts.atttypmod),\n -1\n ) as varchar_length,\n pg_get_expr (def.adbin, def.adrelid) as default_expr,\n coalesce(ix.is_primary, false) as \"is_primary_key!\",\n coalesce(ix.is_unique, false) as \"is_unique!\",\n pg_catalog.col_description (ts.table_oid, atts.attnum) as comment\nfrom\n pg_catalog.pg_attribute atts\n join available_tables ts on atts.attrelid = ts.table_oid\n left join available_indexes ix on atts.attrelid = ix.table_oid\n and atts.attnum = ix.attnum\n left join pg_catalog.pg_attrdef def on atts.attrelid = def.adrelid\n and atts.attnum = def.adnum\n left join pg_catalog.pg_type tps on tps.oid = atts.atttypid\nwhere\n -- system columns, such as `cmax` or `tableoid`, have negative `attnum`s\n atts.attnum >= 0\norder by\n schema_name desc,\n table_name,\n atts.attnum;", "describe": { "columns": [ { @@ -35,31 +35,36 @@ }, { "ordinal": 6, + 
"name": "type_name", + "type_info": "Name" + }, + { + "ordinal": 7, "name": "is_nullable!", "type_info": "Bool" }, { - "ordinal": 7, + "ordinal": 8, "name": "varchar_length", "type_info": "Int4" }, { - "ordinal": 8, + "ordinal": 9, "name": "default_expr", "type_info": "Text" }, { - "ordinal": 9, + "ordinal": 10, "name": "is_primary_key!", "type_info": "Bool" }, { - "ordinal": 10, + "ordinal": 11, "name": "is_unique!", "type_info": "Bool" }, { - "ordinal": 11, + "ordinal": 12, "name": "comment", "type_info": "Text" } @@ -74,6 +79,7 @@ null, false, null, + false, null, null, null, @@ -82,5 +88,5 @@ null ] }, - "hash": "fc0a0aa6d2a06bf3103d26a0233ae86f456892fa9ce48854a8b960cdf2d11a45" + "hash": "97da37af0d64378cb622cab14bb3b0ce33a0002d170200d77afeee60a7977278" } diff --git a/crates/pgt_completions/src/builder.rs b/crates/pgt_completions/src/builder.rs index 40d29db4..96576053 100644 --- a/crates/pgt_completions/src/builder.rs +++ b/crates/pgt_completions/src/builder.rs @@ -12,6 +12,7 @@ pub(crate) struct PossibleCompletionItem<'a> { pub score: CompletionScore<'a>, pub filter: CompletionFilter<'a>, pub completion_text: Option, + pub detail: Option, } pub(crate) struct CompletionBuilder<'a> { @@ -70,6 +71,7 @@ impl<'a> CompletionBuilder<'a> { kind: item.kind, label: item.label, preselected, + detail: item.detail, // wonderous Rust syntax ftw sort_text: format!("{:0>padding$}", idx, padding = max_padding), diff --git a/crates/pgt_completions/src/item.rs b/crates/pgt_completions/src/item.rs index 59b7b371..73e08cc0 100644 --- a/crates/pgt_completions/src/item.rs +++ b/crates/pgt_completions/src/item.rs @@ -54,6 +54,7 @@ pub struct CompletionItem { pub kind: CompletionItemKind, /// String used for sorting by LSP clients. pub sort_text: String, + pub detail: Option, pub completion_text: Option, } diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs index 8109ba83..1df6581d 100644 --- a/crates/pgt_completions/src/providers/columns.rs +++ b/crates/pgt_completions/src/providers/columns.rs @@ -20,6 +20,7 @@ pub fn complete_columns<'a>(ctx: &CompletionContext<'a>, builder: &mut Completio description: format!("Table: {}.{}", col.schema_name, col.table_name), kind: CompletionItemKind::Column, completion_text: None, + detail: Some(col.type_name.to_string()), }; // autocomplete with the alias in a join clause if we find one diff --git a/crates/pgt_completions/src/providers/functions.rs b/crates/pgt_completions/src/providers/functions.rs index ed5afd6a..f1b57e8c 100644 --- a/crates/pgt_completions/src/providers/functions.rs +++ b/crates/pgt_completions/src/providers/functions.rs @@ -22,6 +22,7 @@ pub fn complete_functions<'a>(ctx: &'a CompletionContext, builder: &mut Completi filter: CompletionFilter::from(relevance), description: format!("Schema: {}", func.schema), kind: CompletionItemKind::Function, + detail: None, completion_text: Some(get_completion_text(ctx, func)), }; diff --git a/crates/pgt_completions/src/providers/policies.rs b/crates/pgt_completions/src/providers/policies.rs index 53aff32d..a4d3a9bb 100644 --- a/crates/pgt_completions/src/providers/policies.rs +++ b/crates/pgt_completions/src/providers/policies.rs @@ -50,6 +50,7 @@ pub fn complete_policies<'a>(ctx: &CompletionContext<'a>, builder: &mut Completi description: pol.table_name.to_string(), kind: CompletionItemKind::Policy, completion_text, + detail: None, }; builder.add_item(item); diff --git a/crates/pgt_completions/src/providers/schemas.rs 
b/crates/pgt_completions/src/providers/schemas.rs index aaa5ebe6..02d2fd0c 100644 --- a/crates/pgt_completions/src/providers/schemas.rs +++ b/crates/pgt_completions/src/providers/schemas.rs @@ -16,6 +16,7 @@ pub fn complete_schemas<'a>(ctx: &'a CompletionContext, builder: &mut Completion kind: crate::CompletionItemKind::Schema, score: CompletionScore::from(relevance.clone()), filter: CompletionFilter::from(relevance), + detail: None, completion_text: None, }; diff --git a/crates/pgt_completions/src/providers/tables.rs b/crates/pgt_completions/src/providers/tables.rs index 57195da7..e028e22b 100644 --- a/crates/pgt_completions/src/providers/tables.rs +++ b/crates/pgt_completions/src/providers/tables.rs @@ -19,6 +19,7 @@ pub fn complete_tables<'a>(ctx: &'a CompletionContext, builder: &mut CompletionB filter: CompletionFilter::from(relevance), description: format!("Schema: {}", table.schema), kind: CompletionItemKind::Table, + detail: None, completion_text: get_completion_text_with_schema_or_alias( ctx, &table.name, diff --git a/crates/pgt_lsp/src/handlers/completions.rs b/crates/pgt_lsp/src/handlers/completions.rs index 1627632d..7e901c79 100644 --- a/crates/pgt_lsp/src/handlers/completions.rs +++ b/crates/pgt_lsp/src/handlers/completions.rs @@ -41,7 +41,10 @@ pub fn get_completions( label: i.label, label_details: Some(CompletionItemLabelDetails { description: Some(i.description), - detail: Some(format!(" {}", i.kind)), + detail: i + .detail + .map(|s| format!(" {}", s)) + .or(Some(format!(" {}", i.kind))), }), preselect: Some(i.preselected), sort_text: Some(i.sort_text), diff --git a/crates/pgt_schema_cache/src/columns.rs b/crates/pgt_schema_cache/src/columns.rs index de7c2d4a..0eb64cc6 100644 --- a/crates/pgt_schema_cache/src/columns.rs +++ b/crates/pgt_schema_cache/src/columns.rs @@ -48,6 +48,7 @@ pub struct Column { pub schema_name: String, pub type_id: i64, + pub type_name: String, pub is_nullable: bool, pub is_primary_key: bool, diff --git a/crates/pgt_schema_cache/src/queries/columns.sql b/crates/pgt_schema_cache/src/queries/columns.sql index 86df7cf4..d0c09cd0 100644 --- a/crates/pgt_schema_cache/src/queries/columns.sql +++ b/crates/pgt_schema_cache/src/queries/columns.sql @@ -35,6 +35,7 @@ select ts.class_kind :: char as "class_kind!", ts.schema_name, atts.atttypid :: int8 as "type_id!", + tps.typname as "type_name", not atts.attnotnull as "is_nullable!", nullif( information_schema._pg_char_max_length (atts.atttypid, atts.atttypmod), @@ -51,6 +52,7 @@ from and atts.attnum = ix.attnum left join pg_catalog.pg_attrdef def on atts.attrelid = def.adrelid and atts.attnum = def.adnum + left join pg_catalog.pg_type tps on tps.oid = atts.atttypid where -- system columns, such as `cmax` or `tableoid`, have negative `attnum`s atts.attnum >= 0 From 74f913ece6587a7adc370ecaae60a437214cc2f0 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Sat, 24 May 2025 19:32:43 +0200 Subject: [PATCH 059/114] chore(schema_cache): add query for roles (#404) --- ...300a7e131d2fb17fc74bd7f40a60b68df56c0.json | 44 ++++++++++ crates/pgt_schema_cache/src/lib.rs | 2 + crates/pgt_schema_cache/src/queries/roles.sql | 7 ++ crates/pgt_schema_cache/src/roles.rs | 85 +++++++++++++++++++ crates/pgt_schema_cache/src/schema_cache.rs | 7 +- 5 files changed, 143 insertions(+), 2 deletions(-) create mode 100644 .sqlx/query-5e12c1d242ea9fcc68c20807b72300a7e131d2fb17fc74bd7f40a60b68df56c0.json create mode 100644 crates/pgt_schema_cache/src/queries/roles.sql create mode 100644 
crates/pgt_schema_cache/src/roles.rs diff --git a/.sqlx/query-5e12c1d242ea9fcc68c20807b72300a7e131d2fb17fc74bd7f40a60b68df56c0.json b/.sqlx/query-5e12c1d242ea9fcc68c20807b72300a7e131d2fb17fc74bd7f40a60b68df56c0.json new file mode 100644 index 00000000..fa456c71 --- /dev/null +++ b/.sqlx/query-5e12c1d242ea9fcc68c20807b72300a7e131d2fb17fc74bd7f40a60b68df56c0.json @@ -0,0 +1,44 @@ +{ + "db_name": "PostgreSQL", + "query": "select \n rolname as \"name!\", \n rolsuper as \"is_super_user!\", \n rolcreatedb as \"can_create_db!\", \n rolcanlogin as \"can_login!\",\n rolbypassrls as \"can_bypass_rls!\"\nfrom pg_catalog.pg_roles;", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "name!", + "type_info": "Name" + }, + { + "ordinal": 1, + "name": "is_super_user!", + "type_info": "Bool" + }, + { + "ordinal": 2, + "name": "can_create_db!", + "type_info": "Bool" + }, + { + "ordinal": 3, + "name": "can_login!", + "type_info": "Bool" + }, + { + "ordinal": 4, + "name": "can_bypass_rls!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + true, + true, + true, + true + ] + }, + "hash": "5e12c1d242ea9fcc68c20807b72300a7e131d2fb17fc74bd7f40a60b68df56c0" +} diff --git a/crates/pgt_schema_cache/src/lib.rs b/crates/pgt_schema_cache/src/lib.rs index d9be527d..186fbdb9 100644 --- a/crates/pgt_schema_cache/src/lib.rs +++ b/crates/pgt_schema_cache/src/lib.rs @@ -5,6 +5,7 @@ mod columns; mod functions; mod policies; +mod roles; mod schema_cache; mod schemas; mod tables; @@ -15,6 +16,7 @@ mod versions; pub use columns::*; pub use functions::{Behavior, Function, FunctionArg, FunctionArgs}; pub use policies::{Policy, PolicyCommand}; +pub use roles::*; pub use schema_cache::SchemaCache; pub use schemas::Schema; pub use tables::{ReplicaIdentity, Table}; diff --git a/crates/pgt_schema_cache/src/queries/roles.sql b/crates/pgt_schema_cache/src/queries/roles.sql new file mode 100644 index 00000000..da5d0bfc --- /dev/null +++ b/crates/pgt_schema_cache/src/queries/roles.sql @@ -0,0 +1,7 @@ +select + rolname as "name!", + rolsuper as "is_super_user!", + rolcreatedb as "can_create_db!", + rolcanlogin as "can_login!", + rolbypassrls as "can_bypass_rls!" 
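+-- editor's note (added, not part of the original patch): pg_catalog.pg_roles
+-- is the publicly readable view over pg_authid, so this query needs no
+-- superuser rights; the view blanks out password fields, which is why none
+-- are selected here.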
+from pg_catalog.pg_roles; \ No newline at end of file diff --git a/crates/pgt_schema_cache/src/roles.rs b/crates/pgt_schema_cache/src/roles.rs new file mode 100644 index 00000000..c212b791 --- /dev/null +++ b/crates/pgt_schema_cache/src/roles.rs @@ -0,0 +1,85 @@ +use crate::schema_cache::SchemaCacheItem; + +#[derive(Debug, PartialEq, Eq)] +pub struct Role { + pub name: String, + pub is_super_user: bool, + pub can_create_db: bool, + pub can_login: bool, + pub can_bypass_rls: bool, +} + +impl SchemaCacheItem for Role { + type Item = Role; + + async fn load(pool: &sqlx::PgPool) -> Result, sqlx::Error> { + sqlx::query_file_as!(Role, "src/queries/roles.sql") + .fetch_all(pool) + .await + } +} + +#[cfg(test)] +mod tests { + use crate::SchemaCache; + use pgt_test_utils::test_database::get_new_test_db; + use sqlx::Executor; + + #[tokio::test] + async fn loads_roles() { + let test_db = get_new_test_db().await; + + let setup = r#" + do $$ + begin + if not exists ( + select from pg_catalog.pg_roles + where rolname = 'test_super' + ) then + create role test_super superuser createdb login bypassrls; + end if; + if not exists ( + select from pg_catalog.pg_roles + where rolname = 'test_nologin' + ) then + create role test_nologin; + end if; + if not exists ( + select from pg_catalog.pg_roles + where rolname = 'test_login' + ) then + create role test_login login; + end if; + end $$; + "#; + + test_db + .execute(setup) + .await + .expect("Failed to setup test database"); + + let cache = SchemaCache::load(&test_db) + .await + .expect("Failed to load Schema Cache"); + + let roles = &cache.roles; + + let super_role = roles.iter().find(|r| r.name == "test_super").unwrap(); + assert!(super_role.is_super_user); + assert!(super_role.can_create_db); + assert!(super_role.can_login); + assert!(super_role.can_bypass_rls); + + let nologin_role = roles.iter().find(|r| r.name == "test_nologin").unwrap(); + assert!(!nologin_role.is_super_user); + assert!(!nologin_role.can_create_db); + assert!(!nologin_role.can_login); + assert!(!nologin_role.can_bypass_rls); + + let login_role = roles.iter().find(|r| r.name == "test_login").unwrap(); + assert!(!login_role.is_super_user); + assert!(!login_role.can_create_db); + assert!(login_role.can_login); + assert!(!login_role.can_bypass_rls); + } +} diff --git a/crates/pgt_schema_cache/src/schema_cache.rs b/crates/pgt_schema_cache/src/schema_cache.rs index b21d2baf..516b37e6 100644 --- a/crates/pgt_schema_cache/src/schema_cache.rs +++ b/crates/pgt_schema_cache/src/schema_cache.rs @@ -1,6 +1,5 @@ use sqlx::postgres::PgPool; -use crate::Trigger; use crate::columns::Column; use crate::functions::Function; use crate::policies::Policy; @@ -8,6 +7,7 @@ use crate::schemas::Schema; use crate::tables::Table; use crate::types::PostgresType; use crate::versions::Version; +use crate::{Role, Trigger}; #[derive(Debug, Default)] pub struct SchemaCache { @@ -19,11 +19,12 @@ pub struct SchemaCache { pub columns: Vec, pub policies: Vec, pub triggers: Vec, + pub roles: Vec, } impl SchemaCache { pub async fn load(pool: &PgPool) -> Result { - let (schemas, tables, functions, types, versions, columns, policies, triggers) = futures_util::try_join!( + let (schemas, tables, functions, types, versions, columns, policies, triggers, roles) = futures_util::try_join!( Schema::load(pool), Table::load(pool), Function::load(pool), @@ -32,6 +33,7 @@ impl SchemaCache { Column::load(pool), Policy::load(pool), Trigger::load(pool), + Role::load(pool) )?; Ok(SchemaCache { @@ -43,6 +45,7 @@ impl SchemaCache { columns, 
policies, triggers, + roles, }) } From 62ce45d4cf3e1850a08487e323e283901e00a38c Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Sat, 24 May 2025 19:33:00 +0200 Subject: [PATCH 060/114] feats(completions): complete insert, drop/alter table, ignore many situations, improve WHERE (#400) --- crates/pgt_completions/src/context/mod.rs | 348 ++++++++++++++---- .../pgt_completions/src/providers/columns.rs | 160 +++++++- .../pgt_completions/src/providers/tables.rs | 119 ++++++ .../src/relevance/filtering.rs | 155 +++++--- crates/pgt_completions/src/sanitization.rs | 137 ++++++- crates/pgt_lsp/src/capabilities.rs | 2 +- .../src/queries/insert_columns.rs | 150 ++++++++ .../pgt_treesitter_queries/src/queries/mod.rs | 21 ++ .../src/queries/relations.rs | 106 ++++++ .../src/queries/where_columns.rs | 96 +++++ 10 files changed, 1163 insertions(+), 131 deletions(-) create mode 100644 crates/pgt_treesitter_queries/src/queries/insert_columns.rs create mode 100644 crates/pgt_treesitter_queries/src/queries/where_columns.rs diff --git a/crates/pgt_completions/src/context/mod.rs b/crates/pgt_completions/src/context/mod.rs index fec2e2d9..0bb190a9 100644 --- a/crates/pgt_completions/src/context/mod.rs +++ b/crates/pgt_completions/src/context/mod.rs @@ -1,7 +1,9 @@ +use std::{ + cmp, + collections::{HashMap, HashSet}, +}; mod policy_parser; -use std::collections::{HashMap, HashSet}; - use pgt_schema_cache::SchemaCache; use pgt_text_size::TextRange; use pgt_treesitter_queries::{ @@ -15,7 +17,7 @@ use crate::{ sanitization::SanitizedCompletionParams, }; -#[derive(Debug, PartialEq, Eq, Hash)] +#[derive(Debug, PartialEq, Eq, Hash, Clone)] pub enum WrappingClause<'a> { Select, Where, @@ -25,11 +27,15 @@ pub enum WrappingClause<'a> { }, Update, Delete, + ColumnDefinitions, + Insert, + AlterTable, + DropTable, PolicyName, ToRoleAssignment, } -#[derive(PartialEq, Eq, Hash, Debug)] +#[derive(PartialEq, Eq, Hash, Debug, Clone)] pub(crate) struct MentionedColumn { pub(crate) column: String, pub(crate) alias: Option, @@ -48,6 +54,7 @@ pub enum WrappingNode { Relation, BinaryExpression, Assignment, + List, } #[derive(Debug)] @@ -97,6 +104,7 @@ impl TryFrom<&str> for WrappingNode { "relation" => Ok(Self::Relation), "assignment" => Ok(Self::Assignment), "binary_expression" => Ok(Self::BinaryExpression), + "list" => Ok(Self::List), _ => { let message = format!("Unimplemented Relation: {}", value); @@ -118,6 +126,7 @@ impl TryFrom for WrappingNode { } } +#[derive(Debug)] pub(crate) struct CompletionContext<'a> { pub node_under_cursor: Option>, @@ -152,9 +161,6 @@ pub(crate) struct CompletionContext<'a> { pub is_invocation: bool, pub wrapping_statement_range: Option, - /// Some incomplete statements can't be correctly parsed by TreeSitter. 
- pub is_in_error_node: bool, - pub mentioned_relations: HashMap, HashSet>, pub mentioned_table_aliases: HashMap, pub mentioned_columns: HashMap>, HashSet>, @@ -176,7 +182,6 @@ impl<'a> CompletionContext<'a> { mentioned_relations: HashMap::new(), mentioned_table_aliases: HashMap::new(), mentioned_columns: HashMap::new(), - is_in_error_node: false, }; // policy handling is important to Supabase, but they are a PostgreSQL specific extension, @@ -231,6 +236,8 @@ impl<'a> CompletionContext<'a> { executor.add_query_results::(); executor.add_query_results::(); executor.add_query_results::(); + executor.add_query_results::(); + executor.add_query_results::(); for relation_match in executor.get_iter(stmt_range) { match relation_match { @@ -238,37 +245,61 @@ impl<'a> CompletionContext<'a> { let schema_name = r.get_schema(sql); let table_name = r.get_table(sql); - if let Some(c) = self.mentioned_relations.get_mut(&schema_name) { - c.insert(table_name); - } else { - let mut new = HashSet::new(); - new.insert(table_name); - self.mentioned_relations.insert(schema_name, new); - } + self.mentioned_relations + .entry(schema_name) + .and_modify(|s| { + s.insert(table_name.clone()); + }) + .or_insert(HashSet::from([table_name])); } + QueryResult::TableAliases(table_alias_match) => { self.mentioned_table_aliases.insert( table_alias_match.get_alias(sql), table_alias_match.get_table(sql), ); } + QueryResult::SelectClauseColumns(c) => { let mentioned = MentionedColumn { column: c.get_column(sql), alias: c.get_alias(sql), }; - if let Some(cols) = self - .mentioned_columns - .get_mut(&Some(WrappingClause::Select)) - { - cols.insert(mentioned); - } else { - let mut new = HashSet::new(); - new.insert(mentioned); - self.mentioned_columns - .insert(Some(WrappingClause::Select), new); - } + self.mentioned_columns + .entry(Some(WrappingClause::Select)) + .and_modify(|s| { + s.insert(mentioned.clone()); + }) + .or_insert(HashSet::from([mentioned])); + } + + QueryResult::WhereClauseColumns(c) => { + let mentioned = MentionedColumn { + column: c.get_column(sql), + alias: c.get_alias(sql), + }; + + self.mentioned_columns + .entry(Some(WrappingClause::Where)) + .and_modify(|s| { + s.insert(mentioned.clone()); + }) + .or_insert(HashSet::from([mentioned])); + } + + QueryResult::InsertClauseColumns(c) => { + let mentioned = MentionedColumn { + column: c.get_column(sql), + alias: None, + }; + + self.mentioned_columns + .entry(Some(WrappingClause::Insert)) + .and_modify(|s| { + s.insert(mentioned.clone()); + }) + .or_insert(HashSet::from([mentioned])); } _ => {} }; @@ -317,10 +348,20 @@ impl<'a> CompletionContext<'a> { * `select * from use {}` becomes `select * from use{}`. 
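* Editor's note, concretely: if the character at the cursor position is
* neither whitespace nor one of `;`/`)`, the code below nudges the position
* one byte to the right (clamped to the text length) before descending with
* `goto_first_child_for_byte`, so the cursor stays attached to the token it
* is touching.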
*/ let current_node = cursor.node(); - while cursor.goto_first_child_for_byte(self.position).is_none() && self.position > 0 { - self.position -= 1; + + let mut chars = self.text.chars(); + + if chars + .nth(self.position) + .is_some_and(|c| !c.is_ascii_whitespace() && !&[';', ')'].contains(&c)) + { + self.position = cmp::min(self.position + 1, self.text.len()); + } else { + self.position = cmp::min(self.position, self.text.len()); } + cursor.goto_first_child_for_byte(self.position); + self.gather_context_from_node(cursor, current_node); } @@ -334,8 +375,9 @@ impl<'a> CompletionContext<'a> { let parent_node_kind = parent_node.kind(); let current_node_kind = current_node.kind(); - // prevent infinite recursion – this can happen if we only have a PROGRAM node - if current_node_kind == parent_node_kind { + // prevent infinite recursion – this can happen with ERROR nodes + if current_node_kind == parent_node_kind && ["ERROR", "program"].contains(&parent_node_kind) + { self.node_under_cursor = Some(NodeUnderCursor::from(current_node)); return; } @@ -352,25 +394,17 @@ impl<'a> CompletionContext<'a> { } // try to gather context from the siblings if we're within an error node. - if self.is_in_error_node { - let mut next_sibling = current_node.next_named_sibling(); - while let Some(n) = next_sibling { - if let Some(clause_type) = self.get_wrapping_clause_from_keyword_node(n) { - self.wrapping_clause_type = Some(clause_type); - break; - } else { - next_sibling = n.next_named_sibling(); - } + if parent_node_kind == "ERROR" { + if let Some(clause_type) = self.get_wrapping_clause_from_error_node_child(current_node) + { + self.wrapping_clause_type = Some(clause_type); } - let mut prev_sibling = current_node.prev_named_sibling(); - while let Some(n) = prev_sibling { - if let Some(clause_type) = self.get_wrapping_clause_from_keyword_node(n) { - self.wrapping_clause_type = Some(clause_type); - break; - } else { - prev_sibling = n.prev_named_sibling(); - } + if let Some(wrapping_node) = self.get_wrapping_node_from_error_node_child(current_node) + { + self.wrapping_node_kind = Some(wrapping_node) } + + self.get_info_from_error_node_child(current_node); } match current_node_kind { @@ -389,7 +423,8 @@ impl<'a> CompletionContext<'a> { } } - "where" | "update" | "select" | "delete" | "from" | "join" => { + "where" | "update" | "select" | "delete" | "from" | "join" | "column_definitions" + | "drop_table" | "alter_table" => { self.wrapping_clause_type = self.get_wrapping_clause_from_current_node(current_node, &mut cursor); } @@ -398,8 +433,13 @@ impl<'a> CompletionContext<'a> { self.wrapping_node_kind = current_node_kind.try_into().ok(); } - "ERROR" => { - self.is_in_error_node = true; + "list" => { + if current_node + .prev_sibling() + .is_none_or(|n| n.kind() != "keyword_values") + { + self.wrapping_node_kind = current_node_kind.try_into().ok(); + } } _ => {} @@ -415,31 +455,165 @@ impl<'a> CompletionContext<'a> { self.gather_context_from_node(cursor, current_node); } - fn get_wrapping_clause_from_keyword_node( + fn get_first_sibling(&self, node: tree_sitter::Node<'a>) -> tree_sitter::Node<'a> { + let mut first_sibling = node; + while let Some(n) = first_sibling.prev_sibling() { + first_sibling = n; + } + first_sibling + } + + fn get_wrapping_node_from_error_node_child( + &self, + node: tree_sitter::Node<'a>, + ) -> Option { + self.wrapping_clause_type + .as_ref() + .and_then(|clause| match clause { + WrappingClause::Insert => { + let mut first_sib = self.get_first_sibling(node); + + let mut after_opening_bracket 
= false; + let mut before_closing_bracket = false; + + while let Some(next_sib) = first_sib.next_sibling() { + if next_sib.kind() == "(" + && next_sib.end_position() <= node.start_position() + { + after_opening_bracket = true; + } + + if next_sib.kind() == ")" + && next_sib.start_position() >= node.end_position() + { + before_closing_bracket = true; + } + + first_sib = next_sib; + } + + if after_opening_bracket && before_closing_bracket { + Some(WrappingNode::List) + } else { + None + } + } + _ => None, + }) + } + + fn get_wrapping_clause_from_error_node_child( &self, node: tree_sitter::Node<'a>, ) -> Option> { - if node.kind().starts_with("keyword_") { - if let Some(txt) = self.get_ts_node_content(&node).and_then(|txt| match txt { - NodeText::Original(txt) => Some(txt), - NodeText::Replaced => None, - }) { - match txt.as_str() { - "where" => return Some(WrappingClause::Where), - "update" => return Some(WrappingClause::Update), - "select" => return Some(WrappingClause::Select), - "delete" => return Some(WrappingClause::Delete), - "from" => return Some(WrappingClause::From), - "join" => { - // TODO: not sure if we can infer it here. - return Some(WrappingClause::Join { on_node: None }); + let clause_combinations: Vec<(WrappingClause, &[&'static str])> = vec![ + (WrappingClause::Where, &["where"]), + (WrappingClause::Update, &["update"]), + (WrappingClause::Select, &["select"]), + (WrappingClause::Delete, &["delete"]), + (WrappingClause::Insert, &["insert", "into"]), + (WrappingClause::From, &["from"]), + (WrappingClause::Join { on_node: None }, &["join"]), + (WrappingClause::AlterTable, &["alter", "table"]), + ( + WrappingClause::AlterTable, + &["alter", "table", "if", "exists"], + ), + (WrappingClause::DropTable, &["drop", "table"]), + ( + WrappingClause::DropTable, + &["drop", "table", "if", "exists"], + ), + ]; + + let first_sibling = self.get_first_sibling(node); + + /* + * For each clause, we'll iterate from first_sibling to the next ones, + * either until the end or until we land on the node under the cursor. + * We'll score the `WrappingClause` by how many tokens it matches in order. 
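+ * Worked example (editor's note): with the tokens `alter table if exists`
+ * before the cursor, ["alter", "table"] scores 2 and
+ * ["alter", "table", "if", "exists"] scores 4; both map to
+ * WrappingClause::AlterTable, and the highest score wins after the sort below.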
+ */ + let mut clauses_with_score: Vec<(WrappingClause, usize)> = clause_combinations + .into_iter() + .map(|(clause, tokens)| { + let mut idx = 0; + + let mut sibling = Some(first_sibling); + while let Some(sib) = sibling { + if sib.end_byte() >= node.end_byte() || idx >= tokens.len() { + break; + } + + if let Some(sibling_content) = + self.get_ts_node_content(&sib).and_then(|txt| match txt { + NodeText::Original(txt) => Some(txt), + NodeText::Replaced => None, + }) + { + if sibling_content == tokens[idx] { + idx += 1; + } + } else { + break; } - _ => {} + + sibling = sib.next_sibling(); } - }; - } - None + (clause, idx) + }) + .collect(); + + clauses_with_score.sort_by(|(_, score_a), (_, score_b)| score_b.cmp(score_a)); + clauses_with_score + .iter() + .find(|(_, score)| *score > 0) + .map(|c| c.0.clone()) + } + + fn get_info_from_error_node_child(&mut self, node: tree_sitter::Node<'a>) { + let mut first_sibling = self.get_first_sibling(node); + + if let Some(clause) = self.wrapping_clause_type.as_ref() { + if clause == &WrappingClause::Insert { + while let Some(sib) = first_sibling.next_sibling() { + match sib.kind() { + "object_reference" => { + if let Some(NodeText::Original(txt)) = self.get_ts_node_content(&sib) { + let mut iter = txt.split('.').rev(); + let table = iter.next().unwrap().to_string(); + let schema = iter.next().map(|s| s.to_string()); + self.mentioned_relations + .entry(schema) + .and_modify(|s| { + s.insert(table.clone()); + }) + .or_insert(HashSet::from([table])); + } + } + "column" => { + if let Some(NodeText::Original(txt)) = self.get_ts_node_content(&sib) { + let entry = MentionedColumn { + column: txt, + alias: None, + }; + + self.mentioned_columns + .entry(Some(WrappingClause::Insert)) + .and_modify(|s| { + s.insert(entry.clone()); + }) + .or_insert(HashSet::from([entry])); + } + } + + _ => {} + } + + first_sibling = sib; + } + } + } } fn get_wrapping_clause_from_current_node( @@ -453,6 +627,10 @@ impl<'a> CompletionContext<'a> { "select" => Some(WrappingClause::Select), "delete" => Some(WrappingClause::Delete), "from" => Some(WrappingClause::From), + "drop_table" => Some(WrappingClause::DropTable), + "alter_table" => Some(WrappingClause::AlterTable), + "column_definitions" => Some(WrappingClause::ColumnDefinitions), + "insert" => Some(WrappingClause::Insert), "join" => { // sadly, we need to manually iterate over the children – // `node.child_by_field_id(..)` does not work as expected @@ -469,6 +647,38 @@ impl<'a> CompletionContext<'a> { _ => None, } } + + pub(crate) fn parent_matches_one_of_kind(&self, kinds: &[&'static str]) -> bool { + self.node_under_cursor + .as_ref() + .is_some_and(|under_cursor| match under_cursor { + NodeUnderCursor::TsNode(node) => node + .parent() + .is_some_and(|parent| kinds.contains(&parent.kind())), + + NodeUnderCursor::CustomNode { .. } => false, + }) + } + pub(crate) fn before_cursor_matches_kind(&self, kinds: &[&'static str]) -> bool { + self.node_under_cursor.as_ref().is_some_and(|under_cursor| { + match under_cursor { + NodeUnderCursor::TsNode(node) => { + let mut current = *node; + + // move up to the parent until we're at top OR we have a prev sibling + while current.prev_sibling().is_none() && current.parent().is_some() { + current = current.parent().unwrap(); + } + + current + .prev_sibling() + .is_some_and(|sib| kinds.contains(&sib.kind())) + } + + NodeUnderCursor::CustomNode { .. 
} => false, + } + }) + } } #[cfg(test)] diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs index 1df6581d..d18f0938 100644 --- a/crates/pgt_completions/src/providers/columns.rs +++ b/crates/pgt_completions/src/providers/columns.rs @@ -24,7 +24,12 @@ pub fn complete_columns<'a>(ctx: &CompletionContext<'a>, builder: &mut Completio }; // autocomplete with the alias in a join clause if we find one - if matches!(ctx.wrapping_clause_type, Some(WrappingClause::Join { .. })) { + if matches!( + ctx.wrapping_clause_type, + Some(WrappingClause::Join { .. }) + | Some(WrappingClause::Where) + | Some(WrappingClause::Select) + ) { item.completion_text = find_matching_alias_for_table(ctx, col.table_name.as_str()) .and_then(|alias| { get_completion_text_with_schema_or_alias(ctx, col.name.as_str(), alias.as_str()) @@ -37,11 +42,13 @@ pub fn complete_columns<'a>(ctx: &CompletionContext<'a>, builder: &mut Completio #[cfg(test)] mod tests { + use std::vec; + use crate::{ CompletionItem, CompletionItemKind, complete, test_helper::{ - CURSOR_POS, CompletionAssertion, InputQuery, assert_complete_results, get_test_deps, - get_test_params, + CURSOR_POS, CompletionAssertion, InputQuery, assert_complete_results, + assert_no_complete_results, get_test_deps, get_test_params, }, }; @@ -574,4 +581,151 @@ mod tests { ) .await; } + + #[tokio::test] + async fn suggests_columns_in_insert_clause() { + let setup = r#" + create table instruments ( + id bigint primary key generated always as identity, + name text not null, + z text + ); + + create table others ( + id serial primary key, + a text, + b text + ); + "#; + + // We should prefer the instrument columns, even though they + // are lower in the alphabet + + assert_complete_results( + format!("insert into instruments ({})", CURSOR_POS).as_str(), + vec![ + CompletionAssertion::Label("id".to_string()), + CompletionAssertion::Label("name".to_string()), + CompletionAssertion::Label("z".to_string()), + ], + setup, + ) + .await; + + assert_complete_results( + format!("insert into instruments (id, {})", CURSOR_POS).as_str(), + vec![ + CompletionAssertion::Label("name".to_string()), + CompletionAssertion::Label("z".to_string()), + ], + setup, + ) + .await; + + assert_complete_results( + format!("insert into instruments (id, {}, name)", CURSOR_POS).as_str(), + vec![CompletionAssertion::Label("z".to_string())], + setup, + ) + .await; + + // works with completed statement + assert_complete_results( + format!( + "insert into instruments (name, {}) values ('my_bass');", + CURSOR_POS + ) + .as_str(), + vec![ + CompletionAssertion::Label("id".to_string()), + CompletionAssertion::Label("z".to_string()), + ], + setup, + ) + .await; + + // no completions in the values list! 
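+ // editor's note: inside `values (...)` only value expressions are
+ // expected, so the column provider is intentionally silent here: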
+ assert_no_complete_results(
+ format!("insert into instruments (id, name) values ({})", CURSOR_POS).as_str(),
+ setup,
+ )
+ .await;
+ }
+
+ #[tokio::test]
+ async fn suggests_columns_in_where_clause() {
+ let setup = r#"
+ create table instruments (
+ id bigint primary key generated always as identity,
+ name text not null,
+ z text,
+ created_at timestamp with time zone default now()
+ );
+
+ create table others (
+ a text,
+ b text,
+ c text
+ );
+ "#;
+
+ assert_complete_results(
+ format!("select name from instruments where {} ", CURSOR_POS).as_str(),
+ vec![
+ CompletionAssertion::Label("created_at".into()),
+ CompletionAssertion::Label("id".into()),
+ CompletionAssertion::Label("name".into()),
+ CompletionAssertion::Label("z".into()),
+ ],
+ setup,
+ )
+ .await;
+
+ assert_complete_results(
+ format!(
+ "select name from instruments where z = 'something' and created_at > {}",
+ CURSOR_POS
+ )
+ .as_str(),
+ // simply do not complete columns + schemas; functions etc. are ok
+ vec![
+ CompletionAssertion::KindNotExists(CompletionItemKind::Column),
+ CompletionAssertion::KindNotExists(CompletionItemKind::Schema),
+ ],
+ setup,
+ )
+ .await;
+
+ // prefers not mentioned columns
+ assert_complete_results(
+ format!(
+ "select name from instruments where id = 'something' and {}",
+ CURSOR_POS
+ )
+ .as_str(),
+ vec![
+ CompletionAssertion::Label("created_at".into()),
+ CompletionAssertion::Label("name".into()),
+ CompletionAssertion::Label("z".into()),
+ ],
+ setup,
+ )
+ .await;
+
+ // uses aliases
+ assert_complete_results(
+ format!(
+ "select name from instruments i join others o on i.z = o.a where i.{}",
+ CURSOR_POS
+ )
+ .as_str(),
+ vec![
+ CompletionAssertion::Label("created_at".into()),
+ CompletionAssertion::Label("id".into()),
+ CompletionAssertion::Label("name".into()),
+ ],
+ setup,
+ )
+ .await;
+ }
+}
diff --git a/crates/pgt_completions/src/providers/tables.rs b/crates/pgt_completions/src/providers/tables.rs
index e028e22b..6ed3760e 100644
--- a/crates/pgt_completions/src/providers/tables.rs
+++ b/crates/pgt_completions/src/providers/tables.rs
@@ -311,4 +311,123 @@ mod tests {
 )
 .await;
 }
+
+ #[tokio::test]
+ async fn suggests_tables_in_alter_and_drop_statements() {
+ let setup = r#"
+ create schema auth;
+
+ create table auth.users (
+ uid serial primary key,
+ name text not null,
+ email text unique not null
+ );
+
+ create table auth.posts (
+ pid serial primary key,
+ user_id int not null references auth.users(uid),
+ title text not null,
+ content text,
+ created_at timestamp default now()
+ );
+ "#;
+
+ assert_complete_results(
+ format!("alter table {}", CURSOR_POS).as_str(),
+ vec![
+ CompletionAssertion::LabelAndKind("public".into(), CompletionItemKind::Schema),
+ CompletionAssertion::LabelAndKind("auth".into(), CompletionItemKind::Schema),
+ CompletionAssertion::LabelAndKind("posts".into(), CompletionItemKind::Table),
+ CompletionAssertion::LabelAndKind("users".into(), CompletionItemKind::Table),
+ ],
+ setup,
+ )
+ .await;
+
+ assert_complete_results(
+ format!("alter table if exists {}", CURSOR_POS).as_str(),
+ vec![
+ CompletionAssertion::LabelAndKind("public".into(), CompletionItemKind::Schema),
+ CompletionAssertion::LabelAndKind("auth".into(), CompletionItemKind::Schema),
+ CompletionAssertion::LabelAndKind("posts".into(), CompletionItemKind::Table),
+ CompletionAssertion::LabelAndKind("users".into(), CompletionItemKind::Table),
+ ],
+ setup,
+ )
+ .await;
+
+ assert_complete_results(
+ format!("drop table {}", CURSOR_POS).as_str(),
+ vec![
+ CompletionAssertion::LabelAndKind("public".into(), CompletionItemKind::Schema),
+ CompletionAssertion::LabelAndKind("auth".into(), CompletionItemKind::Schema),
+ CompletionAssertion::LabelAndKind("posts".into(), CompletionItemKind::Table),
+ CompletionAssertion::LabelAndKind("users".into(), CompletionItemKind::Table),
+ ],
+ setup,
+ )
+ .await;
+
+ assert_complete_results(
+ format!("drop table if exists {}", CURSOR_POS).as_str(),
+ vec![
+ CompletionAssertion::LabelAndKind("public".into(), CompletionItemKind::Schema),
+ CompletionAssertion::LabelAndKind("auth".into(), CompletionItemKind::Schema),
+ CompletionAssertion::LabelAndKind("posts".into(), CompletionItemKind::Table),
+ CompletionAssertion::LabelAndKind("users".into(), CompletionItemKind::Table),
+ ],
+ setup,
+ )
+ .await;
+ }
+
+ #[tokio::test]
+ async fn suggests_tables_in_insert_into() {
+ let setup = r#"
+ create schema auth;
+
+ create table auth.users (
+ uid serial primary key,
+ name text not null,
+ email text unique not null
+ );
+ "#;
+
+ assert_complete_results(
+ format!("insert into {}", CURSOR_POS).as_str(),
+ vec![
+ CompletionAssertion::LabelAndKind("public".into(), CompletionItemKind::Schema),
+ CompletionAssertion::LabelAndKind("auth".into(), CompletionItemKind::Schema),
+ CompletionAssertion::LabelAndKind("users".into(), CompletionItemKind::Table),
+ ],
+ setup,
+ )
+ .await;
+
+ assert_complete_results(
+ format!("insert into auth.{}", CURSOR_POS).as_str(),
+ vec![CompletionAssertion::LabelAndKind(
+ "users".into(),
+ CompletionItemKind::Table,
+ )],
+ setup,
+ )
+ .await;
+
+ // works with complete statement.
+ assert_complete_results(
+ format!(
+ "insert into {} (name, email) values ('jules', 'a@b.com');",
+ CURSOR_POS
+ )
+ .as_str(),
+ vec![
+ CompletionAssertion::LabelAndKind("public".into(), CompletionItemKind::Schema),
+ CompletionAssertion::LabelAndKind("auth".into(), CompletionItemKind::Schema),
+ CompletionAssertion::LabelAndKind("users".into(), CompletionItemKind::Table),
+ ],
+ setup,
+ )
+ .await;
+ }
+}
diff --git a/crates/pgt_completions/src/relevance/filtering.rs b/crates/pgt_completions/src/relevance/filtering.rs
index 3b148336..5323e2bc 100644
--- a/crates/pgt_completions/src/relevance/filtering.rs
+++ b/crates/pgt_completions/src/relevance/filtering.rs
@@ -1,4 +1,4 @@
-use crate::context::{CompletionContext, NodeUnderCursor, WrappingClause};
+use crate::context::{CompletionContext, NodeUnderCursor, WrappingClause, WrappingNode};

 use super::CompletionRelevanceData;

@@ -24,6 +24,10 @@ impl CompletionFilter<'_> {
 }

 fn completable_context(&self, ctx: &CompletionContext) -> Option<()> {
+ if ctx.wrapping_node_kind.is_none() && ctx.wrapping_clause_type.is_none() {
+ return None;
+ }
+
 let current_node_kind = ctx
 .node_under_cursor
 .as_ref()
@@ -65,55 +69,109 @@ impl CompletionFilter<'_> {
 }

 fn check_clause(&self, ctx: &CompletionContext) -> Option<()> {
- let clause = ctx.wrapping_clause_type.as_ref();
+ ctx.wrapping_clause_type
+ .as_ref()
+ .map(|clause| {
+ match self.data {
+ CompletionRelevanceData::Table(_) => match clause {
+ WrappingClause::Select
+ | WrappingClause::Where
+ | WrappingClause::ColumnDefinitions => false,

- let in_clause = |compare: WrappingClause| clause.is_some_and(|c| c == &compare);
+ WrappingClause::Insert => {
+ ctx.wrapping_node_kind
+ .as_ref()
+ .is_none_or(|n| n != &WrappingNode::List)
+ && (ctx.before_cursor_matches_kind(&["keyword_into"])
+ || (ctx.before_cursor_matches_kind(&["."])
+ && ctx.parent_matches_one_of_kind(&["object_reference"])))
+ }

- match self.data {
- CompletionRelevanceData::Table(_) => {
- if in_clause(WrappingClause::Select)
- || in_clause(WrappingClause::Where)
- || in_clause(WrappingClause::PolicyName)
- {
- return None;
- };
- }
- CompletionRelevanceData::Column(_) => {
- if in_clause(WrappingClause::From) || in_clause(WrappingClause::PolicyName) {
- return None;
- }
+ WrappingClause::DropTable | WrappingClause::AlterTable => ctx
+ .before_cursor_matches_kind(&[
+ "keyword_exists",
+ "keyword_only",
+ "keyword_table",
+ ]),

- // We can complete columns in JOIN cluases, but only if we are after the
- // ON node in the "ON u.id = posts.user_id" part.
- let in_join_clause_before_on_node = clause.is_some_and(|c| match c {
- // we are in a JOIN, but definitely not after an ON
- WrappingClause::Join { on_node: None } => true,
+ _ => true,
+ },

- WrappingClause::Join { on_node: Some(on) } => ctx
- .node_under_cursor
- .as_ref()
- .is_some_and(|n| n.end_byte() < on.start_byte()),
+ CompletionRelevanceData::Column(_) => {
+ match clause {
+ WrappingClause::From
+ | WrappingClause::ColumnDefinitions
+ | WrappingClause::AlterTable
+ | WrappingClause::DropTable => false,

- _ => false,
- });
+ // We can complete columns in JOIN clauses, but only if we are after the
+ // ON node in the "ON u.id = posts.user_id" part.
+ WrappingClause::Join { on_node: Some(on) } => ctx
+ .node_under_cursor
+ .as_ref()
+ .is_some_and(|cn| cn.start_byte() >= on.end_byte()),

- if in_join_clause_before_on_node {
- return None;
- }
- }
- CompletionRelevanceData::Policy(_) => {
- if clause.is_none_or(|c| c != &WrappingClause::PolicyName) {
- return None;
- }
- }
- _ => {
- if in_clause(WrappingClause::PolicyName) {
- return None;
- }
- }
- }
+ // we are in a JOIN, but definitely not after an ON
+ WrappingClause::Join { on_node: None } => false,

- Some(())
+ WrappingClause::Insert => ctx
+ .wrapping_node_kind
+ .as_ref()
+ .is_some_and(|n| n == &WrappingNode::List),
+
+ // only autocomplete left side of binary expression
+ WrappingClause::Where => {
+ ctx.before_cursor_matches_kind(&["keyword_and", "keyword_where"])
+ || (ctx.before_cursor_matches_kind(&["."])
+ && ctx.parent_matches_one_of_kind(&["field"]))
+ }
+
+ _ => true,
+ }
+ }
+
+ CompletionRelevanceData::Function(_) => matches!(
+ clause,
+ WrappingClause::From
+ | WrappingClause::Select
+ | WrappingClause::Where
+ | WrappingClause::Join { .. }
+ ),
+
+ CompletionRelevanceData::Schema(_) => match clause {
+ WrappingClause::Select
+ | WrappingClause::From
+ | WrappingClause::Join { ..
} + | WrappingClause::Update + | WrappingClause::Delete => true, + + WrappingClause::Where => { + ctx.before_cursor_matches_kind(&["keyword_and", "keyword_where"]) + } + + WrappingClause::DropTable | WrappingClause::AlterTable => ctx + .before_cursor_matches_kind(&[ + "keyword_exists", + "keyword_only", + "keyword_table", + ]), + + WrappingClause::Insert => { + ctx.wrapping_node_kind + .as_ref() + .is_none_or(|n| n != &WrappingNode::List) + && ctx.before_cursor_matches_kind(&["keyword_into"]) + } + + _ => false, + }, + + CompletionRelevanceData::Policy(_) => { + matches!(clause, WrappingClause::PolicyName) + } + } + }) + .and_then(|is_ok| if is_ok { Some(()) } else { None }) } fn check_invocation(&self, ctx: &CompletionContext) -> Option<()> { @@ -188,4 +246,15 @@ mod tests { ) .await; } + + #[tokio::test] + async fn completion_after_create_table() { + assert_no_complete_results(format!("create table {}", CURSOR_POS).as_str(), "").await; + } + + #[tokio::test] + async fn completion_in_column_definitions() { + let query = format!(r#"create table instruments ( {} )"#, CURSOR_POS); + assert_no_complete_results(query.as_str(), "").await; + } } diff --git a/crates/pgt_completions/src/sanitization.rs b/crates/pgt_completions/src/sanitization.rs index 6aa75a16..40dea7e6 100644 --- a/crates/pgt_completions/src/sanitization.rs +++ b/crates/pgt_completions/src/sanitization.rs @@ -53,6 +53,7 @@ where || cursor_prepared_to_write_token_after_last_node(¶ms.text, params.position) || cursor_before_semicolon(params.tree, params.position) || cursor_on_a_dot(¶ms.text, params.position) + || cursor_between_parentheses(¶ms.text, params.position) { SanitizedCompletionParams::with_adjusted_sql(params) } else { @@ -192,24 +193,81 @@ fn cursor_before_semicolon(tree: &tree_sitter::Tree, position: TextSize) -> bool return false; } - // not okay to be on the semi. - if byte == leaf_node.start_byte() { - return false; - } - leaf_node .prev_named_sibling() .map(|n| n.end_byte() < byte) .unwrap_or(false) } +fn cursor_between_parentheses(sql: &str, position: TextSize) -> bool { + let position: usize = position.into(); + + let mut level = 0; + let mut tracking_open_idx = None; + + let mut matching_open_idx = None; + let mut matching_close_idx = None; + + for (idx, char) in sql.chars().enumerate() { + if char == '(' { + tracking_open_idx = Some(idx); + level += 1; + } + + if char == ')' { + level -= 1; + + if tracking_open_idx.is_some_and(|it| it < position) && idx >= position { + matching_open_idx = tracking_open_idx; + matching_close_idx = Some(idx) + } + } + } + + // invalid statement + if level != 0 { + return false; + } + + // early check: '(|)' + // however, we want to check this after the level nesting. 
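+ // editor's note: e.g. `insert into t (|)` – both neighbours of the
+ // cursor are the bare brackets, so we can return true right away.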
+ let mut chars = sql.chars(); + if chars.nth(position - 1).is_some_and(|c| c == '(') && chars.next().is_some_and(|c| c == ')') { + return true; + } + + // not *within* parentheses + if matching_open_idx.is_none() || matching_close_idx.is_none() { + return false; + } + + // use string indexing, because we can't `.rev()` after `.take()` + let before = sql[..position] + .to_string() + .chars() + .rev() + .find(|c| !c.is_whitespace()) + .unwrap_or_default(); + + let after = sql + .chars() + .skip(position) + .find(|c| !c.is_whitespace()) + .unwrap_or_default(); + + let before_matches = before == ',' || before == '('; + let after_matches = after == ',' || after == ')'; + + before_matches && after_matches +} + #[cfg(test)] mod tests { use pgt_text_size::TextSize; use crate::sanitization::{ - cursor_before_semicolon, cursor_inbetween_nodes, cursor_on_a_dot, - cursor_prepared_to_write_token_after_last_node, + cursor_before_semicolon, cursor_between_parentheses, cursor_inbetween_nodes, + cursor_on_a_dot, cursor_prepared_to_write_token_after_last_node, }; #[test] @@ -292,18 +350,67 @@ mod tests { // select * from| ; <-- still touches the from assert!(!cursor_before_semicolon(&tree, TextSize::new(13))); - // not okay to be ON the semi. - // select * from |; - assert!(!cursor_before_semicolon(&tree, TextSize::new(18))); - // anything is fine here - // select * from | ; - // select * from | ; - // select * from | ; - // select * from |; + // select * from | ; + // select * from | ; + // select * from | ; + // select * from | ; + // select * from |; assert!(cursor_before_semicolon(&tree, TextSize::new(14))); assert!(cursor_before_semicolon(&tree, TextSize::new(15))); assert!(cursor_before_semicolon(&tree, TextSize::new(16))); assert!(cursor_before_semicolon(&tree, TextSize::new(17))); + assert!(cursor_before_semicolon(&tree, TextSize::new(18))); + } + + #[test] + fn between_parentheses() { + let input = "insert into instruments ()"; + + // insert into (|) <- right in the parentheses + assert!(cursor_between_parentheses(input, TextSize::new(25))); + + // insert into ()| <- too late + assert!(!cursor_between_parentheses(input, TextSize::new(26))); + + // insert into |() <- too early + assert!(!cursor_between_parentheses(input, TextSize::new(24))); + + let input = "insert into instruments (name, id, )"; + + // insert into instruments (name, id, |) <-- we should sanitize the next column + assert!(cursor_between_parentheses(input, TextSize::new(35))); + + // insert into instruments (name, id|, ) <-- we are still on the previous token. + assert!(!cursor_between_parentheses(input, TextSize::new(33))); + + let input = "insert into instruments (name, , id)"; + + // insert into instruments (name, |, id) <-- we can sanitize! + assert!(cursor_between_parentheses(input, TextSize::new(31))); + + // insert into instruments (name, ,| id) <-- we are already on the next token + assert!(!cursor_between_parentheses(input, TextSize::new(32))); + + let input = "insert into instruments (, name, id)"; + + // insert into instruments (|, name, id) <-- we can sanitize! 
+ assert!(cursor_between_parentheses(input, TextSize::new(25))); + + // insert into instruments (,| name, id) <-- already on next token + assert!(!cursor_between_parentheses(input, TextSize::new(26))); + + // bails on invalidly nested statements + assert!(!cursor_between_parentheses( + "insert into (instruments ()", + TextSize::new(26) + )); + + // can find its position in nested statements + // "insert into instruments (name) values (a_function(name, |))", + assert!(cursor_between_parentheses( + "insert into instruments (name) values (a_function(name, ))", + TextSize::new(56) + )); } } diff --git a/crates/pgt_lsp/src/capabilities.rs b/crates/pgt_lsp/src/capabilities.rs index b3e35b69..acfc60ed 100644 --- a/crates/pgt_lsp/src/capabilities.rs +++ b/crates/pgt_lsp/src/capabilities.rs @@ -37,7 +37,7 @@ pub(crate) fn server_capabilities(capabilities: &ClientCapabilities) -> ServerCa // The request is used to get more information about a simple CompletionItem. resolve_provider: None, - trigger_characters: Some(vec![".".to_owned(), " ".to_owned()]), + trigger_characters: Some(vec![".".to_owned(), " ".to_owned(), "(".to_owned()]), // No character will lead to automatically inserting the selected completion-item all_commit_characters: None, diff --git a/crates/pgt_treesitter_queries/src/queries/insert_columns.rs b/crates/pgt_treesitter_queries/src/queries/insert_columns.rs new file mode 100644 index 00000000..3e88d998 --- /dev/null +++ b/crates/pgt_treesitter_queries/src/queries/insert_columns.rs @@ -0,0 +1,150 @@ +use std::sync::LazyLock; + +use crate::{Query, QueryResult}; + +use super::QueryTryFrom; + +static TS_QUERY: LazyLock = LazyLock::new(|| { + static QUERY_STR: &str = r#" + (insert + (object_reference) + (list + "("? + (column) @column + ","? + ")"? 
+ ) + ) +"#; + tree_sitter::Query::new(tree_sitter_sql::language(), QUERY_STR).expect("Invalid TS Query") +}); + +#[derive(Debug)] +pub struct InsertColumnMatch<'a> { + pub(crate) column: tree_sitter::Node<'a>, +} + +impl InsertColumnMatch<'_> { + pub fn get_column(&self, sql: &str) -> String { + self.column + .utf8_text(sql.as_bytes()) + .expect("Failed to get column from ColumnMatch") + .to_string() + } +} + +impl<'a> TryFrom<&'a QueryResult<'a>> for &'a InsertColumnMatch<'a> { + type Error = String; + + fn try_from(q: &'a QueryResult<'a>) -> Result { + match q { + QueryResult::InsertClauseColumns(c) => Ok(c), + + #[allow(unreachable_patterns)] + _ => Err("Invalid QueryResult type".into()), + } + } +} + +impl<'a> QueryTryFrom<'a> for InsertColumnMatch<'a> { + type Ref = &'a InsertColumnMatch<'a>; +} + +impl<'a> Query<'a> for InsertColumnMatch<'a> { + fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec> { + let mut cursor = tree_sitter::QueryCursor::new(); + + let matches = cursor.matches(&TS_QUERY, root_node, stmt.as_bytes()); + + let mut to_return = vec![]; + + for m in matches { + if m.captures.len() == 1 { + let capture = m.captures[0].node; + to_return.push(QueryResult::InsertClauseColumns(InsertColumnMatch { + column: capture, + })); + } + } + + to_return + } +} +#[cfg(test)] +mod tests { + use super::InsertColumnMatch; + use crate::TreeSitterQueriesExecutor; + + #[test] + fn finds_all_insert_columns() { + let sql = r#"insert into users (id, email, name) values (1, 'a@b.com', 'Alice');"#; + + let mut parser = tree_sitter::Parser::new(); + parser.set_language(tree_sitter_sql::language()).unwrap(); + + let tree = parser.parse(sql, None).unwrap(); + + let mut executor = TreeSitterQueriesExecutor::new(tree.root_node(), sql); + + executor.add_query_results::(); + + let results: Vec<&InsertColumnMatch> = executor + .get_iter(None) + .filter_map(|q| q.try_into().ok()) + .collect(); + + let columns: Vec = results.iter().map(|c| c.get_column(sql)).collect(); + + assert_eq!(columns, vec!["id", "email", "name"]); + } + + #[test] + fn finds_insert_columns_with_whitespace_and_commas() { + let sql = r#" + insert into users ( + id, + email, + name + ) values (1, 'a@b.com', 'Alice'); + "#; + + let mut parser = tree_sitter::Parser::new(); + parser.set_language(tree_sitter_sql::language()).unwrap(); + + let tree = parser.parse(sql, None).unwrap(); + + let mut executor = TreeSitterQueriesExecutor::new(tree.root_node(), sql); + + executor.add_query_results::(); + + let results: Vec<&InsertColumnMatch> = executor + .get_iter(None) + .filter_map(|q| q.try_into().ok()) + .collect(); + + let columns: Vec = results.iter().map(|c| c.get_column(sql)).collect(); + + assert_eq!(columns, vec!["id", "email", "name"]); + } + + #[test] + fn returns_empty_for_insert_without_columns() { + let sql = r#"insert into users values (1, 'a@b.com', 'Alice');"#; + + let mut parser = tree_sitter::Parser::new(); + parser.set_language(tree_sitter_sql::language()).unwrap(); + + let tree = parser.parse(sql, None).unwrap(); + + let mut executor = TreeSitterQueriesExecutor::new(tree.root_node(), sql); + + executor.add_query_results::(); + + let results: Vec<&InsertColumnMatch> = executor + .get_iter(None) + .filter_map(|q| q.try_into().ok()) + .collect(); + + assert!(results.is_empty()); + } +} diff --git a/crates/pgt_treesitter_queries/src/queries/mod.rs b/crates/pgt_treesitter_queries/src/queries/mod.rs index aec6ce1a..b9f39aed 100644 --- a/crates/pgt_treesitter_queries/src/queries/mod.rs +++ 
b/crates/pgt_treesitter_queries/src/queries/mod.rs @@ -1,12 +1,16 @@ +mod insert_columns; mod parameters; mod relations; mod select_columns; mod table_aliases; +mod where_columns; +pub use insert_columns::*; pub use parameters::*; pub use relations::*; pub use select_columns::*; pub use table_aliases::*; +pub use where_columns::*; #[derive(Debug)] pub enum QueryResult<'a> { @@ -14,6 +18,8 @@ pub enum QueryResult<'a> { Parameter(ParameterMatch<'a>), TableAliases(TableAliasMatch<'a>), SelectClauseColumns(SelectColumnMatch<'a>), + InsertClauseColumns(InsertColumnMatch<'a>), + WhereClauseColumns(WhereColumnMatch<'a>), } impl QueryResult<'_> { @@ -50,6 +56,21 @@ impl QueryResult<'_> { start >= range.start_point && end <= range.end_point } + Self::WhereClauseColumns(cm) => { + let start = match cm.alias { + Some(n) => n.start_position(), + None => cm.column.start_position(), + }; + + let end = cm.column.end_position(); + + start >= range.start_point && end <= range.end_point + } + Self::InsertClauseColumns(cm) => { + let start = cm.column.start_position(); + let end = cm.column.end_position(); + start >= range.start_point && end <= range.end_point + } } } } diff --git a/crates/pgt_treesitter_queries/src/queries/relations.rs b/crates/pgt_treesitter_queries/src/queries/relations.rs index f9061ce8..38fd0513 100644 --- a/crates/pgt_treesitter_queries/src/queries/relations.rs +++ b/crates/pgt_treesitter_queries/src/queries/relations.rs @@ -14,6 +14,14 @@ static TS_QUERY: LazyLock = LazyLock::new(|| { (identifier)? @table )+ ) + (insert + (object_reference + . + (identifier) @schema_or_table + "."? + (identifier)? @table + )+ + ) "#; tree_sitter::Query::new(tree_sitter_sql::language(), QUERY_STR).expect("Invalid TS Query") }); @@ -91,3 +99,101 @@ impl<'a> Query<'a> for RelationMatch<'a> { to_return } } + +#[cfg(test)] +mod tests { + use super::RelationMatch; + use crate::TreeSitterQueriesExecutor; + + #[test] + fn finds_table_without_schema() { + let sql = r#"select * from users;"#; + + let mut parser = tree_sitter::Parser::new(); + parser.set_language(tree_sitter_sql::language()).unwrap(); + + let tree = parser.parse(sql, None).unwrap(); + + let mut executor = TreeSitterQueriesExecutor::new(tree.root_node(), sql); + + executor.add_query_results::(); + + let results: Vec<&RelationMatch> = executor + .get_iter(None) + .filter_map(|q| q.try_into().ok()) + .collect(); + + assert_eq!(results.len(), 1); + assert_eq!(results[0].get_schema(sql), None); + assert_eq!(results[0].get_table(sql), "users"); + } + + #[test] + fn finds_table_with_schema() { + let sql = r#"select * from public.users;"#; + + let mut parser = tree_sitter::Parser::new(); + parser.set_language(tree_sitter_sql::language()).unwrap(); + + let tree = parser.parse(sql, None).unwrap(); + + let mut executor = TreeSitterQueriesExecutor::new(tree.root_node(), sql); + + executor.add_query_results::(); + + let results: Vec<&RelationMatch> = executor + .get_iter(None) + .filter_map(|q| q.try_into().ok()) + .collect(); + + assert_eq!(results.len(), 1); + assert_eq!(results[0].get_schema(sql), Some("public".to_string())); + assert_eq!(results[0].get_table(sql), "users"); + } + + #[test] + fn finds_insert_into_with_schema_and_table() { + let sql = r#"insert into auth.accounts (id, email) values (1, 'a@b.com');"#; + + let mut parser = tree_sitter::Parser::new(); + parser.set_language(tree_sitter_sql::language()).unwrap(); + + let tree = parser.parse(sql, None).unwrap(); + + let mut executor = TreeSitterQueriesExecutor::new(tree.root_node(), sql); + + 
executor.add_query_results::(); + + let results: Vec<&RelationMatch> = executor + .get_iter(None) + .filter_map(|q| q.try_into().ok()) + .collect(); + + assert_eq!(results.len(), 1); + assert_eq!(results[0].get_schema(sql), Some("auth".to_string())); + assert_eq!(results[0].get_table(sql), "accounts"); + } + + #[test] + fn finds_insert_into_without_schema() { + let sql = r#"insert into users (id, email) values (1, 'a@b.com');"#; + + let mut parser = tree_sitter::Parser::new(); + parser.set_language(tree_sitter_sql::language()).unwrap(); + + let tree = parser.parse(sql, None).unwrap(); + + let mut executor = TreeSitterQueriesExecutor::new(tree.root_node(), sql); + + executor.add_query_results::(); + + let results: Vec<&RelationMatch> = executor + .get_iter(None) + .filter_map(|q| q.try_into().ok()) + .collect(); + + assert_eq!(results.len(), 1); + assert_eq!(results[0].get_schema(sql), None); + assert_eq!(results[0].get_table(sql), "users"); + } +} diff --git a/crates/pgt_treesitter_queries/src/queries/where_columns.rs b/crates/pgt_treesitter_queries/src/queries/where_columns.rs new file mode 100644 index 00000000..8e19590d --- /dev/null +++ b/crates/pgt_treesitter_queries/src/queries/where_columns.rs @@ -0,0 +1,96 @@ +use std::sync::LazyLock; + +use crate::{Query, QueryResult}; + +use super::QueryTryFrom; + +static TS_QUERY: LazyLock = LazyLock::new(|| { + static QUERY_STR: &str = r#" + (where + (binary_expression + (binary_expression + (field + (object_reference)? @alias + "."? + (identifier) @column + ) + ) + ) + ) +"#; + tree_sitter::Query::new(tree_sitter_sql::language(), QUERY_STR).expect("Invalid TS Query") +}); + +#[derive(Debug)] +pub struct WhereColumnMatch<'a> { + pub(crate) alias: Option>, + pub(crate) column: tree_sitter::Node<'a>, +} + +impl WhereColumnMatch<'_> { + pub fn get_alias(&self, sql: &str) -> Option { + let str = self + .alias + .as_ref()? 
+ .utf8_text(sql.as_bytes())
+ .expect("Failed to get alias from ColumnMatch");
+
+ Some(str.to_string())
+ }
+
+ pub fn get_column(&self, sql: &str) -> String {
+ self.column
+ .utf8_text(sql.as_bytes())
+ .expect("Failed to get column from ColumnMatch")
+ .to_string()
+ }
+}
+
+impl<'a> TryFrom<&'a QueryResult<'a>> for &'a WhereColumnMatch<'a> {
+ type Error = String;
+
+ fn try_from(q: &'a QueryResult<'a>) -> Result<Self, Self::Error> {
+ match q {
+ QueryResult::WhereClauseColumns(c) => Ok(c),
+
+ #[allow(unreachable_patterns)]
+ _ => Err("Invalid QueryResult type".into()),
+ }
+ }
+}
+
+impl<'a> QueryTryFrom<'a> for WhereColumnMatch<'a> {
+ type Ref = &'a WhereColumnMatch<'a>;
+}
+
+impl<'a> Query<'a> for WhereColumnMatch<'a> {
+ fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec<QueryResult<'a>> {
+ let mut cursor = tree_sitter::QueryCursor::new();
+
+ let matches = cursor.matches(&TS_QUERY, root_node, stmt.as_bytes());
+
+ let mut to_return = vec![];
+
+ for m in matches {
+ if m.captures.len() == 1 {
+ let capture = m.captures[0].node;
+ to_return.push(QueryResult::WhereClauseColumns(WhereColumnMatch {
+ alias: None,
+ column: capture,
+ }));
+ }
+
+ if m.captures.len() == 2 {
+ let alias = m.captures[0].node;
+ let column = m.captures[1].node;
+
+ to_return.push(QueryResult::WhereClauseColumns(WhereColumnMatch {
+ alias: Some(alias),
+ column,
+ }));
+ }
+ }
+
+ to_return
+ }
+}
From f24ddf6822975d1a0f30fafa995f21a62a7da5ff Mon Sep 17 00:00:00 2001
From: Julian Domke <68325451+juleswritescode@users.noreply.github.com>
Date: Sat, 24 May 2025 21:18:54 +0200
Subject: [PATCH 061/114] fix: nullable column query (#406)

---
 ...eeace15f64083f4d944b9d45eb6c60d7eece8878ed26b3530484.json} | 4 ++--
 crates/pgt_completions/src/providers/columns.rs | 2 +-
 crates/pgt_schema_cache/src/columns.rs | 2 +-
 crates/pgt_schema_cache/src/queries/columns.sql | 4 ++--
 4 files changed, 6 insertions(+), 6 deletions(-)
 rename .sqlx/{query-97da37af0d64378cb622cab14bb3b0ce33a0002d170200d77afeee60a7977278.json => query-fa065a78ad10eeace15f64083f4d944b9d45eb6c60d7eece8878ed26b3530484.json} (91%)

diff --git a/.sqlx/query-97da37af0d64378cb622cab14bb3b0ce33a0002d170200d77afeee60a7977278.json b/.sqlx/query-fa065a78ad10eeace15f64083f4d944b9d45eb6c60d7eece8878ed26b3530484.json
similarity index 91%
rename from .sqlx/query-97da37af0d64378cb622cab14bb3b0ce33a0002d170200d77afeee60a7977278.json
rename to .sqlx/query-fa065a78ad10eeace15f64083f4d944b9d45eb6c60d7eece8878ed26b3530484.json
index 924369cd..36723330 100644
--- a/.sqlx/query-97da37af0d64378cb622cab14bb3b0ce33a0002d170200d77afeee60a7977278.json
+++ b/.sqlx/query-fa065a78ad10eeace15f64083f4d944b9d45eb6c60d7eece8878ed26b3530484.json
@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
- "query": "with\n available_tables as (\n select\n c.relname as table_name,\n c.oid as table_oid,\n c.relkind as class_kind,\n n.nspname as schema_name\n from\n pg_catalog.pg_class c\n join pg_catalog.pg_namespace n on n.oid = c.relnamespace\n where\n -- r: normal tables\n -- v: views\n -- m: materialized views\n -- f: foreign tables\n -- p: partitioned tables\n c.relkind in ('r', 'v', 'm', 'f', 'p')\n ),\n available_indexes as (\n select\n unnest (ix.indkey) as attnum,\n ix.indisprimary as is_primary,\n ix.indisunique as is_unique,\n ix.indrelid as table_oid\n from\n pg_catalog.pg_class c\n join pg_catalog.pg_index ix on c.oid = ix.indexrelid\n where\n c.relkind = 'i'\n )\nselect\n atts.attname as name,\n ts.table_name,\n ts.table_oid :: int8 as \"table_oid!\",\n ts.class_kind :: char as \"class_kind!\",\n 
ts.schema_name,\n atts.atttypid :: int8 as \"type_id!\",\n tps.typname as \"type_name\",\n not atts.attnotnull as \"is_nullable!\",\n nullif(\n information_schema._pg_char_max_length (atts.atttypid, atts.atttypmod),\n -1\n ) as varchar_length,\n pg_get_expr (def.adbin, def.adrelid) as default_expr,\n coalesce(ix.is_primary, false) as \"is_primary_key!\",\n coalesce(ix.is_unique, false) as \"is_unique!\",\n pg_catalog.col_description (ts.table_oid, atts.attnum) as comment\nfrom\n pg_catalog.pg_attribute atts\n join available_tables ts on atts.attrelid = ts.table_oid\n left join available_indexes ix on atts.attrelid = ix.table_oid\n and atts.attnum = ix.attnum\n left join pg_catalog.pg_attrdef def on atts.attrelid = def.adrelid\n and atts.attnum = def.adnum\n left join pg_catalog.pg_type tps on tps.oid = atts.atttypid\nwhere\n -- system columns, such as `cmax` or `tableoid`, have negative `attnum`s\n atts.attnum >= 0\norder by\n schema_name desc,\n table_name,\n atts.attnum;", + "query": "with\n available_tables as (\n select\n c.relname as table_name,\n c.oid as table_oid,\n c.relkind as class_kind,\n n.nspname as schema_name\n from\n pg_catalog.pg_class c\n join pg_catalog.pg_namespace n on n.oid = c.relnamespace\n where\n -- r: normal tables\n -- v: views\n -- m: materialized views\n -- f: foreign tables\n -- p: partitioned tables\n c.relkind in ('r', 'v', 'm', 'f', 'p')\n ),\n available_indexes as (\n select\n unnest (ix.indkey) as attnum,\n ix.indisprimary as is_primary,\n ix.indisunique as is_unique,\n ix.indrelid as table_oid\n from\n pg_catalog.pg_class c\n join pg_catalog.pg_index ix on c.oid = ix.indexrelid\n where\n c.relkind = 'i'\n )\nselect\n atts.attname as name,\n ts.table_name,\n ts.table_oid :: int8 as \"table_oid!\",\n ts.class_kind :: char as \"class_kind!\",\n ts.schema_name,\n atts.atttypid :: int8 as \"type_id!\",\n tps.typname as \"type_name\",\n not atts.attnotnull as \"is_nullable!\",\n nullif(\n information_schema._pg_char_max_length (atts.atttypid, atts.atttypmod),\n -1\n ) as varchar_length,\n pg_get_expr (def.adbin, def.adrelid) as default_expr,\n coalesce(ix.is_primary, false) as \"is_primary_key!\",\n coalesce(ix.is_unique, false) as \"is_unique!\",\n pg_catalog.col_description (ts.table_oid, atts.attnum) as comment\nfrom\n pg_catalog.pg_attribute atts\n join available_tables ts on atts.attrelid = ts.table_oid\n left join available_indexes ix on atts.attrelid = ix.table_oid\n and atts.attnum = ix.attnum\n left join pg_catalog.pg_attrdef def on atts.attrelid = def.adrelid\n and atts.attnum = def.adnum\n left join pg_catalog.pg_type tps on atts.atttypid = tps.oid\nwhere\n -- system columns, such as `cmax` or `tableoid`, have negative `attnum`s\n atts.attnum >= 0 and atts.atttypid is not null and tps.oid is not null\norder by\n schema_name desc,\n table_name,\n atts.attnum;", "describe": { "columns": [ { @@ -88,5 +88,5 @@ null ] }, - "hash": "97da37af0d64378cb622cab14bb3b0ce33a0002d170200d77afeee60a7977278" + "hash": "fa065a78ad10eeace15f64083f4d944b9d45eb6c60d7eece8878ed26b3530484" } diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs index d18f0938..d4767f14 100644 --- a/crates/pgt_completions/src/providers/columns.rs +++ b/crates/pgt_completions/src/providers/columns.rs @@ -20,7 +20,7 @@ pub fn complete_columns<'a>(ctx: &CompletionContext<'a>, builder: &mut Completio description: format!("Table: {}.{}", col.schema_name, col.table_name), kind: CompletionItemKind::Column, completion_text: None, - detail: 
Some(col.type_name.to_string()),
+ detail: col.type_name.as_ref().map(|t| t.to_string()),
 };

 // autocomplete with the alias in a join clause if we find one
diff --git a/crates/pgt_schema_cache/src/columns.rs b/crates/pgt_schema_cache/src/columns.rs
index 0eb64cc6..60d422fd 100644
--- a/crates/pgt_schema_cache/src/columns.rs
+++ b/crates/pgt_schema_cache/src/columns.rs
@@ -48,7 +48,7 @@ pub struct Column {
 pub schema_name: String,

 pub type_id: i64,
- pub type_name: String,
+ pub type_name: Option<String>,

 pub is_nullable: bool,

 pub is_primary_key: bool,
diff --git a/crates/pgt_schema_cache/src/queries/columns.sql b/crates/pgt_schema_cache/src/queries/columns.sql
index d0c09cd0..14b32cb2 100644
--- a/crates/pgt_schema_cache/src/queries/columns.sql
+++ b/crates/pgt_schema_cache/src/queries/columns.sql
@@ -52,10 +52,10 @@ from
 and atts.attnum = ix.attnum
 left join pg_catalog.pg_attrdef def on atts.attrelid = def.adrelid
 and atts.attnum = def.adnum
- left join pg_catalog.pg_type tps on tps.oid = atts.atttypid
+ left join pg_catalog.pg_type tps on atts.atttypid = tps.oid
 where
 -- system columns, such as `cmax` or `tableoid`, have negative `attnum`s
- atts.attnum >= 0
+ atts.attnum >= 0 and atts.atttypid is not null and tps.oid is not null
 order by
 schema_name desc,
 table_name,

From a435c42eaeb3c33d4a6789db6aa1efd6746d4e70 Mon Sep 17 00:00:00 2001
From: Julian Domke <68325451+juleswritescode@users.noreply.github.com>
Date: Tue, 27 May 2025 18:13:54 +0200
Subject: [PATCH 062/114] feat(completions): complete (materialized) views
 (#409)

---
 ...cf4c296b594fe9e6cebbdc382acde73f4fb9.json} | 24 +++--
 .../pgt_completions/src/providers/columns.rs | 21 ++--
 .../pgt_completions/src/providers/tables.rs | 12 ++-
 crates/pgt_schema_cache/src/lib.rs | 2 +-
 .../pgt_schema_cache/src/queries/tables.sql | 3 +-
 crates/pgt_schema_cache/src/tables.rs | 102 ++++++++++++++++++
 6 files changed, 139 insertions(+), 25 deletions(-)
 rename .sqlx/{query-2a964a12383b977bbbbd6fe7298dfce00358ecbe878952e8d4915c06cc5c9e0f.json => query-66d92238c94b5f1c99fbf068a0b5cf4c296b594fe9e6cebbdc382acde73f4fb9.json} (50%)

diff --git a/.sqlx/query-2a964a12383b977bbbbd6fe7298dfce00358ecbe878952e8d4915c06cc5c9e0f.json b/.sqlx/query-66d92238c94b5f1c99fbf068a0b5cf4c296b594fe9e6cebbdc382acde73f4fb9.json
similarity index 50%
rename from .sqlx/query-2a964a12383b977bbbbd6fe7298dfce00358ecbe878952e8d4915c06cc5c9e0f.json
rename to .sqlx/query-66d92238c94b5f1c99fbf068a0b5cf4c296b594fe9e6cebbdc382acde73f4fb9.json
index 96439422..447ba93b 100644
--- a/.sqlx/query-2a964a12383b977bbbbd6fe7298dfce00358ecbe878952e8d4915c06cc5c9e0f.json
+++ b/.sqlx/query-66d92238c94b5f1c99fbf068a0b5cf4c296b594fe9e6cebbdc382acde73f4fb9.json
@@ -1,6 +1,6 @@
 {
   "db_name": "PostgreSQL",
-  "query": "select\n c.oid :: int8 as \"id!\",\n nc.nspname as schema,\n c.relname as name,\n c.relrowsecurity as rls_enabled,\n c.relforcerowsecurity as rls_forced,\n case\n when c.relreplident = 'd' then 'DEFAULT'\n when c.relreplident = 'i' then 'INDEX'\n when c.relreplident = 'f' then 'FULL'\n else 'NOTHING'\n end as \"replica_identity!\",\n pg_total_relation_size(format('%I.%I', nc.nspname, c.relname)) :: int8 as \"bytes!\",\n pg_size_pretty(\n pg_total_relation_size(format('%I.%I', nc.nspname, c.relname))\n ) as \"size!\",\n pg_stat_get_live_tuples(c.oid) as \"live_rows_estimate!\",\n pg_stat_get_dead_tuples(c.oid) as \"dead_rows_estimate!\",\n obj_description(c.oid) as comment\nfrom\n pg_namespace nc\n join pg_class c on nc.oid = c.relnamespace\nwhere\n c.relkind in ('r', 'p')\n and not 
pg_is_other_temp_schema(nc.oid)\n and (\n pg_has_role(c.relowner, 'USAGE')\n or has_table_privilege(\n c.oid,\n 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER'\n )\n or has_any_column_privilege(c.oid, 'SELECT, INSERT, UPDATE, REFERENCES')\n )\ngroup by\n c.oid,\n c.relname,\n c.relrowsecurity,\n c.relforcerowsecurity,\n c.relreplident,\n nc.nspname;", + "query": "select\n c.oid :: int8 as \"id!\",\n nc.nspname as schema,\n c.relname as name,\n c.relkind as table_kind,\n c.relrowsecurity as rls_enabled,\n c.relforcerowsecurity as rls_forced,\n case\n when c.relreplident = 'd' then 'DEFAULT'\n when c.relreplident = 'i' then 'INDEX'\n when c.relreplident = 'f' then 'FULL'\n else 'NOTHING'\n end as \"replica_identity!\",\n pg_total_relation_size(format('%I.%I', nc.nspname, c.relname)) :: int8 as \"bytes!\",\n pg_size_pretty(\n pg_total_relation_size(format('%I.%I', nc.nspname, c.relname))\n ) as \"size!\",\n pg_stat_get_live_tuples(c.oid) as \"live_rows_estimate!\",\n pg_stat_get_dead_tuples(c.oid) as \"dead_rows_estimate!\",\n obj_description(c.oid) as comment\nfrom\n pg_namespace nc\n join pg_class c on nc.oid = c.relnamespace\nwhere\n c.relkind in ('r', 'p', 'v', 'm')\n and not pg_is_other_temp_schema(nc.oid)\n and (\n pg_has_role(c.relowner, 'USAGE')\n or has_table_privilege(\n c.oid,\n 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER'\n )\n or has_any_column_privilege(c.oid, 'SELECT, INSERT, UPDATE, REFERENCES')\n )\ngroup by\n c.oid,\n c.relname,\n c.relrowsecurity,\n c.relforcerowsecurity,\n c.relreplident,\n nc.nspname;", "describe": { "columns": [ { @@ -20,41 +20,46 @@ }, { "ordinal": 3, + "name": "table_kind", + "type_info": "Char" + }, + { + "ordinal": 4, "name": "rls_enabled", "type_info": "Bool" }, { - "ordinal": 4, + "ordinal": 5, "name": "rls_forced", "type_info": "Bool" }, { - "ordinal": 5, + "ordinal": 6, "name": "replica_identity!", "type_info": "Text" }, { - "ordinal": 6, + "ordinal": 7, "name": "bytes!", "type_info": "Int8" }, { - "ordinal": 7, + "ordinal": 8, "name": "size!", "type_info": "Text" }, { - "ordinal": 8, + "ordinal": 9, "name": "live_rows_estimate!", "type_info": "Int8" }, { - "ordinal": 9, + "ordinal": 10, "name": "dead_rows_estimate!", "type_info": "Int8" }, { - "ordinal": 10, + "ordinal": 11, "name": "comment", "type_info": "Text" } @@ -68,6 +73,7 @@ false, false, false, + false, null, null, null, @@ -76,5 +82,5 @@ null ] }, - "hash": "2a964a12383b977bbbbd6fe7298dfce00358ecbe878952e8d4915c06cc5c9e0f" + "hash": "66d92238c94b5f1c99fbf068a0b5cf4c296b594fe9e6cebbdc382acde73f4fb9" } diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs index d4767f14..da6d23bc 100644 --- a/crates/pgt_completions/src/providers/columns.rs +++ b/crates/pgt_completions/src/providers/columns.rs @@ -17,7 +17,7 @@ pub fn complete_columns<'a>(ctx: &CompletionContext<'a>, builder: &mut Completio label: col.name.clone(), score: CompletionScore::from(relevance.clone()), filter: CompletionFilter::from(relevance), - description: format!("Table: {}.{}", col.schema_name, col.table_name), + description: format!("{}.{}", col.schema_name, col.table_name), kind: CompletionItemKind::Column, completion_text: None, detail: col.type_name.as_ref().map(|t| t.to_string()), @@ -92,7 +92,7 @@ mod tests { message: "correctly prefers the columns of present tables", query: format!(r#"select na{} from public.audio_books;"#, CURSOR_POS), label: "narrator", - description: "Table: public.audio_books", + description: 
"public.audio_books", }, TestCase { message: "correctly handles nested queries", @@ -110,13 +110,13 @@ mod tests { CURSOR_POS ), label: "narrator_id", - description: "Table: private.audio_books", + description: "private.audio_books", }, TestCase { message: "works without a schema", query: format!(r#"select na{} from users;"#, CURSOR_POS), label: "name", - description: "Table: public.users", + description: "public.users", }, ]; @@ -186,10 +186,10 @@ mod tests { .collect(); let expected = vec![ - ("name", "Table: public.users"), - ("narrator", "Table: public.audio_books"), - ("narrator_id", "Table: private.audio_books"), - ("id", "Table: public.audio_books"), + ("name", "public.users"), + ("narrator", "public.audio_books"), + ("narrator_id", "private.audio_books"), + ("id", "public.audio_books"), ("name", "Schema: pg_catalog"), ("nameconcatoid", "Schema: pg_catalog"), ] @@ -559,10 +559,7 @@ mod tests { ) .as_str(), vec![ - CompletionAssertion::LabelAndDesc( - "id".to_string(), - "Table: public.two".to_string(), - ), + CompletionAssertion::LabelAndDesc("id".to_string(), "public.two".to_string()), CompletionAssertion::Label("z".to_string()), ], setup, diff --git a/crates/pgt_completions/src/providers/tables.rs b/crates/pgt_completions/src/providers/tables.rs index 6ed3760e..2102d41c 100644 --- a/crates/pgt_completions/src/providers/tables.rs +++ b/crates/pgt_completions/src/providers/tables.rs @@ -13,13 +13,21 @@ pub fn complete_tables<'a>(ctx: &'a CompletionContext, builder: &mut CompletionB for table in available_tables { let relevance = CompletionRelevanceData::Table(table); + let detail: Option = match table.table_kind { + pgt_schema_cache::TableKind::Ordinary | pgt_schema_cache::TableKind::Partitioned => { + None + } + pgt_schema_cache::TableKind::View => Some("View".into()), + pgt_schema_cache::TableKind::MaterializedView => Some("MView".into()), + }; + let item = PossibleCompletionItem { label: table.name.clone(), score: CompletionScore::from(relevance.clone()), filter: CompletionFilter::from(relevance), - description: format!("Schema: {}", table.schema), + description: table.schema.to_string(), kind: CompletionItemKind::Table, - detail: None, + detail, completion_text: get_completion_text_with_schema_or_alias( ctx, &table.name, diff --git a/crates/pgt_schema_cache/src/lib.rs b/crates/pgt_schema_cache/src/lib.rs index 186fbdb9..9beb2f8a 100644 --- a/crates/pgt_schema_cache/src/lib.rs +++ b/crates/pgt_schema_cache/src/lib.rs @@ -19,6 +19,6 @@ pub use policies::{Policy, PolicyCommand}; pub use roles::*; pub use schema_cache::SchemaCache; pub use schemas::Schema; -pub use tables::{ReplicaIdentity, Table}; +pub use tables::{ReplicaIdentity, Table, TableKind}; pub use triggers::{Trigger, TriggerAffected, TriggerEvent}; pub use types::{PostgresType, PostgresTypeAttribute}; diff --git a/crates/pgt_schema_cache/src/queries/tables.sql b/crates/pgt_schema_cache/src/queries/tables.sql index bcce4fcc..6e6865a2 100644 --- a/crates/pgt_schema_cache/src/queries/tables.sql +++ b/crates/pgt_schema_cache/src/queries/tables.sql @@ -2,6 +2,7 @@ select c.oid :: int8 as "id!", nc.nspname as schema, c.relname as name, + c.relkind as table_kind, c.relrowsecurity as rls_enabled, c.relforcerowsecurity as rls_forced, case @@ -21,7 +22,7 @@ from pg_namespace nc join pg_class c on nc.oid = c.relnamespace where - c.relkind in ('r', 'p') + c.relkind in ('r', 'p', 'v', 'm') and not pg_is_other_temp_schema(nc.oid) and ( pg_has_role(c.relowner, 'USAGE') diff --git a/crates/pgt_schema_cache/src/tables.rs 
b/crates/pgt_schema_cache/src/tables.rs
index 99061384..a0a40d6a 100644
--- a/crates/pgt_schema_cache/src/tables.rs
+++ b/crates/pgt_schema_cache/src/tables.rs
@@ -23,6 +23,34 @@ impl From<String> for ReplicaIdentity {
 }
 }

+#[derive(Debug, Clone, PartialEq, Eq, Default)]
+pub enum TableKind {
+ #[default]
+ Ordinary,
+ View,
+ MaterializedView,
+ Partitioned,
+}
+
+impl From<char> for TableKind {
+ fn from(s: char) -> Self {
+ match s {
+ 'r' => Self::Ordinary,
+ 'p' => Self::Partitioned,
+ 'v' => Self::View,
+ 'm' => Self::MaterializedView,
+ _ => panic!("Invalid table kind"),
+ }
+ }
+}
+
+impl From<i8> for TableKind {
+ fn from(s: i8) -> Self {
+ let c = char::from(u8::try_from(s).unwrap());
+ c.into()
+ }
+}
+
 #[derive(Debug, Default, PartialEq, Eq)]
 pub struct Table {
 pub id: i64,
@@ -31,6 +59,7 @@ pub struct Table {
 pub rls_enabled: bool,
 pub rls_forced: bool,
 pub replica_identity: ReplicaIdentity,
+ pub table_kind: TableKind,
 pub bytes: i64,
 pub size: String,
 pub live_rows_estimate: i64,
@@ -47,3 +76,76 @@ impl SchemaCacheItem for Table {
 .await
 }
 }
+
+#[cfg(test)]
+mod tests {
+ use crate::{SchemaCache, tables::TableKind};
+ use pgt_test_utils::test_database::get_new_test_db;
+ use sqlx::Executor;
+
+ #[tokio::test]
+ async fn includes_views_in_query() {
+ let test_db = get_new_test_db().await;
+
+ let setup = r#"
+ create table public.base_table (
+ id serial primary key,
+ value text
+ );
+
+ create view public.my_view as
+ select * from public.base_table;
+ "#;
+
+ test_db
+ .execute(setup)
+ .await
+ .expect("Failed to setup test database");
+
+ let cache = SchemaCache::load(&test_db)
+ .await
+ .expect("Failed to load Schema Cache");
+
+ let view = cache
+ .tables
+ .iter()
+ .find(|t| t.name == "my_view")
+ .expect("View not found");
+
+ assert_eq!(view.table_kind, TableKind::View);
+ assert_eq!(view.schema, "public");
+ }
+
+ #[tokio::test]
+ async fn includes_materialized_views_in_query() {
+ let test_db = get_new_test_db().await;
+
+ let setup = r#"
+ create table public.base_table (
+ id serial primary key,
+ value text
+ );
+
+ create materialized view public.my_mat_view as
+ select * from public.base_table;
+ "#;
+
+ test_db
+ .execute(setup)
+ .await
+ .expect("Failed to setup test database");
+
+ let cache = SchemaCache::load(&test_db)
+ .await
+ .expect("Failed to load Schema Cache");
+
+ let mat_view = cache
+ .tables
+ .iter()
+ .find(|t| t.name == "my_mat_view")
+ .expect("Materialized view not found");
+
+ assert_eq!(mat_view.table_kind, TableKind::MaterializedView);
+ assert_eq!(mat_view.schema, "public");
+ }
+}

From f6b752c4482c5150af6dd8e7f3ded000a3669978 Mon Sep 17 00:00:00 2001
From: Julian Domke <68325451+juleswritescode@users.noreply.github.com>
Date: Sat, 31 May 2025 17:29:59 +0200
Subject: [PATCH 063/114] refactor(test_db): use sqlx::test instead (#411)

---
 .github/workflows/pull_request.yml | 3 +
 ...053db65ea6a7529e2cb97b2d3432a18aff6ba.json | 20 +++
 .../pgt_completions/src/providers/columns.rs | 137 +++++++++++-------
 .../src/providers/functions.rs | 26 ++--
 .../pgt_completions/src/providers/policies.rs | 14 +-
 .../pgt_completions/src/providers/schemas.rs | 16 +-
 .../pgt_completions/src/providers/tables.rs | 113 ++++++++++-----
 .../src/relevance/filtering.rs | 26 ++--
 .../pgt_completions/src/relevance/scoring.rs | 27 +++-
 crates/pgt_completions/src/test_helper.rs | 29 ++--
 crates/pgt_lsp/tests/server.rs | 32 ++--
 crates/pgt_schema_cache/src/columns.rs | 11 +-
 crates/pgt_schema_cache/src/policies.rs | 61 +++-----
 crates/pgt_schema_cache/src/roles.rs | 41 +----- 
crates/pgt_schema_cache/src/schema_cache.rs | 8 +-
 crates/pgt_schema_cache/src/tables.rs | 16 +-
 crates/pgt_schema_cache/src/triggers.rs | 16 +-
 crates/pgt_test_utils/src/lib.rs | 2 +-
 crates/pgt_test_utils/src/test_database.rs | 42 ------
 .../testdb_migrations/0001_setup-roles.sql | 32 ++++
 crates/pgt_typecheck/src/typed_identifier.rs | 9 +-
 crates/pgt_typecheck/tests/diagnostics.rs | 14 +-
 22 files changed, 370 insertions(+), 325 deletions(-)
 create mode 100644 .sqlx/query-b0504a4340264403ad43d05c60d053db65ea6a7529e2cb97b2d3432a18aff6ba.json
 delete mode 100644 crates/pgt_test_utils/src/test_database.rs
 create mode 100644 crates/pgt_test_utils/testdb_migrations/0001_setup-roles.sql

diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml
index f79392b7..8aa24265 100644
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -157,7 +157,10 @@ jobs:
 # running containers via `services` only works on linux
 # https://github.com/actions/runner/issues/1866
 - name: Setup postgres
+ id: postgres
 uses: ikalnytskyi/action-setup-postgres@v7
+ - name: Print Roles
+ run: psql ${{ steps.postgres.outputs.connection-uri }} -c "select rolname from pg_roles;"

 - name: Run tests
 run: cargo test --workspace
diff --git a/.sqlx/query-b0504a4340264403ad43d05c60d053db65ea6a7529e2cb97b2d3432a18aff6ba.json b/.sqlx/query-b0504a4340264403ad43d05c60d053db65ea6a7529e2cb97b2d3432a18aff6ba.json
new file mode 100644
index 00000000..dfc842b7
--- /dev/null
+++ b/.sqlx/query-b0504a4340264403ad43d05c60d053db65ea6a7529e2cb97b2d3432a18aff6ba.json
@@ -0,0 +1,20 @@
+{
+ "db_name": "PostgreSQL",
+ "query": "select rolname from pg_catalog.pg_roles;",
+ "describe": {
+ "columns": [
+ {
+ "ordinal": 0,
+ "name": "rolname",
+ "type_info": "Name"
+ }
+ ],
+ "parameters": {
+ "Left": []
+ },
+ "nullable": [
+ true
+ ]
+ },
+ "hash": "b0504a4340264403ad43d05c60d053db65ea6a7529e2cb97b2d3432a18aff6ba"
+}
diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs
index da6d23bc..b1dcbdf7 100644
--- a/crates/pgt_completions/src/providers/columns.rs
+++ b/crates/pgt_completions/src/providers/columns.rs
@@ -44,6 +44,8 @@ pub fn complete_columns<'a>(ctx: &CompletionContext<'a>, builder: &mut Completio
 mod tests {
 use std::vec;

+ use sqlx::{Executor, PgPool};
+
 use crate::{
 CompletionItem, CompletionItemKind, complete,
 test_helper::{
@@ -66,8 +68,8 @@ mod tests {
 }
 }

- #[tokio::test]
- async fn completes_columns() {
+ #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")]
+ async fn completes_columns(pool: PgPool) {
 let setup = r#"
 create schema private;

@@ -87,6 +89,8 @@ mod tests {
 );
 "#;

+ pool.execute(setup).await.unwrap();
+
 let queries: Vec<TestCase> = vec![
 TestCase {
 message: "correctly prefers the columns of present tables",
 query: format!(r#"select na{} from public.audio_books;"#, CURSOR_POS),
@@ -121,7 +125,7 @@ mod tests {
 ];

 for q in queries {
- let (tree, cache) = get_test_deps(setup, q.get_input_query()).await;
+ let (tree, cache) = get_test_deps(None, q.get_input_query(), &pool).await;
 let params = get_test_params(&tree, &cache, q.get_input_query());
 let results = complete(params);

@@ -137,8 +141,8 @@ mod tests {
 }
 }

- #[tokio::test]
- async fn shows_multiple_columns_if_no_relation_specified() {
+ #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")]
+ async fn shows_multiple_columns_if_no_relation_specified(pool: PgPool) {
 let setup = r#"
 create schema private;

@@ -158,6 +162,8 @@ mod tests {
 );
 "#;

+ pool.execute(setup).await.unwrap();
+
 let case = TestCase {
 query: format!(r#"select n{};"#, 
CURSOR_POS), description: "", @@ -165,11 +171,11 @@ mod tests { message: "", }; - let (tree, cache) = get_test_deps(setup, case.get_input_query()).await; + let (tree, cache) = get_test_deps(None, case.get_input_query(), &pool).await; let params = get_test_params(&tree, &cache, case.get_input_query()); let mut items = complete(params); - let _ = items.split_off(6); + let _ = items.split_off(4); #[derive(Eq, PartialEq, Debug)] struct LabelAndDesc { @@ -190,8 +196,6 @@ mod tests { ("narrator", "public.audio_books"), ("narrator_id", "private.audio_books"), ("id", "public.audio_books"), - ("name", "Schema: pg_catalog"), - ("nameconcatoid", "Schema: pg_catalog"), ] .into_iter() .map(|(label, schema)| LabelAndDesc { @@ -203,8 +207,8 @@ mod tests { assert_eq!(labels, expected); } - #[tokio::test] - async fn suggests_relevant_columns_without_letters() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn suggests_relevant_columns_without_letters(pool: PgPool) { let setup = r#" create table users ( id serial primary key, @@ -221,7 +225,7 @@ mod tests { description: "", }; - let (tree, cache) = get_test_deps(setup, test_case.get_input_query()).await; + let (tree, cache) = get_test_deps(Some(setup), test_case.get_input_query(), &pool).await; let params = get_test_params(&tree, &cache, test_case.get_input_query()); let results = complete(params); @@ -251,8 +255,8 @@ mod tests { ); } - #[tokio::test] - async fn ignores_cols_in_from_clause() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn ignores_cols_in_from_clause(pool: PgPool) { let setup = r#" create schema private; @@ -271,7 +275,7 @@ mod tests { description: "", }; - let (tree, cache) = get_test_deps(setup, test_case.get_input_query()).await; + let (tree, cache) = get_test_deps(Some(setup), test_case.get_input_query(), &pool).await; let params = get_test_params(&tree, &cache, test_case.get_input_query()); let results = complete(params); @@ -282,8 +286,8 @@ mod tests { ); } - #[tokio::test] - async fn prefers_columns_of_mentioned_tables() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn prefers_columns_of_mentioned_tables(pool: PgPool) { let setup = r#" create schema private; @@ -304,6 +308,8 @@ mod tests { ); "#; + pool.execute(setup).await.unwrap(); + assert_complete_results( format!(r#"select {} from users"#, CURSOR_POS).as_str(), vec![ @@ -312,7 +318,8 @@ mod tests { CompletionAssertion::Label("id2".into()), CompletionAssertion::Label("name2".into()), ], - setup, + None, + &pool, ) .await; @@ -324,7 +331,8 @@ mod tests { CompletionAssertion::Label("id1".into()), CompletionAssertion::Label("name1".into()), ], - setup, + None, + &pool, ) .await; @@ -332,13 +340,14 @@ mod tests { assert_complete_results( format!(r#"select sett{} from private.users"#, CURSOR_POS).as_str(), vec![CompletionAssertion::Label("user_settings".into())], - setup, + None, + &pool, ) .await; } - #[tokio::test] - async fn filters_out_by_aliases() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn filters_out_by_aliases(pool: PgPool) { let setup = r#" create schema auth; @@ -357,6 +366,8 @@ mod tests { ); "#; + pool.execute(setup).await.unwrap(); + // test in SELECT clause assert_complete_results( format!( @@ -374,7 +385,8 @@ mod tests { CompletionAssertion::Label("title".to_string()), CompletionAssertion::Label("user_id".to_string()), ], - setup, + None, + &pool, ) .await; @@ -396,13 +408,14 @@ mod tests { CompletionAssertion::Label("title".to_string()), 
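// note: assert_complete_results checks these labels against the top-ranked completion items, in order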
CompletionAssertion::Label("user_id".to_string()), ], - setup, + None, + &pool, ) .await; } - #[tokio::test] - async fn does_not_complete_cols_in_join_clauses() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn does_not_complete_cols_in_join_clauses(pool: PgPool) { let setup = r#" create schema auth; @@ -435,13 +448,14 @@ mod tests { CompletionAssertion::LabelAndKind("posts".to_string(), CompletionItemKind::Table), CompletionAssertion::LabelAndKind("users".to_string(), CompletionItemKind::Table), ], - setup, + Some(setup), + &pool, ) .await; } - #[tokio::test] - async fn completes_in_join_on_clause() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn completes_in_join_on_clause(pool: PgPool) { let setup = r#" create schema auth; @@ -460,6 +474,8 @@ mod tests { ); "#; + pool.execute(setup).await.unwrap(); + assert_complete_results( format!( "select u.id, auth.posts.content from auth.users u join auth.posts on u.{}", @@ -472,7 +488,8 @@ mod tests { CompletionAssertion::LabelAndKind("email".to_string(), CompletionItemKind::Column), CompletionAssertion::LabelAndKind("name".to_string(), CompletionItemKind::Column), ], - setup, + None, + &pool, ) .await; @@ -488,13 +505,14 @@ mod tests { CompletionAssertion::LabelAndKind("email".to_string(), CompletionItemKind::Column), CompletionAssertion::LabelAndKind("name".to_string(), CompletionItemKind::Column), ], - setup, + None, + &pool, ) .await; } - #[tokio::test] - async fn prefers_not_mentioned_columns() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn prefers_not_mentioned_columns(pool: PgPool) { let setup = r#" create schema auth; @@ -513,6 +531,8 @@ mod tests { ); "#; + pool.execute(setup).await.unwrap(); + assert_complete_results( format!( "select {} from public.one o join public.two on o.id = t.id;", @@ -526,7 +546,8 @@ mod tests { CompletionAssertion::Label("d".to_string()), CompletionAssertion::Label("e".to_string()), ], - setup, + None, + &pool, ) .await; @@ -546,7 +567,8 @@ mod tests { CompletionAssertion::Label("z".to_string()), CompletionAssertion::Label("a".to_string()), ], - setup, + None, + &pool, ) .await; @@ -562,7 +584,8 @@ mod tests { CompletionAssertion::LabelAndDesc("id".to_string(), "public.two".to_string()), CompletionAssertion::Label("z".to_string()), ], - setup, + None, + &pool, ) .await; @@ -574,13 +597,14 @@ mod tests { ) .as_str(), vec![CompletionAssertion::Label("z".to_string())], - setup, + None, + &pool, ) .await; } - #[tokio::test] - async fn suggests_columns_in_insert_clause() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn suggests_columns_in_insert_clause(pool: PgPool) { let setup = r#" create table instruments ( id bigint primary key generated always as identity, @@ -595,6 +619,8 @@ mod tests { ); "#; + pool.execute(setup).await.unwrap(); + // We should prefer the instrument columns, even though they // are lower in the alphabet @@ -605,7 +631,8 @@ mod tests { CompletionAssertion::Label("name".to_string()), CompletionAssertion::Label("z".to_string()), ], - setup, + None, + &pool, ) .await; @@ -615,14 +642,16 @@ mod tests { CompletionAssertion::Label("name".to_string()), CompletionAssertion::Label("z".to_string()), ], - setup, + None, + &pool, ) .await; assert_complete_results( format!("insert into instruments (id, {}, name)", CURSOR_POS).as_str(), vec![CompletionAssertion::Label("z".to_string())], - setup, + None, + &pool, ) .await; @@ -637,20 +666,22 @@ mod tests { CompletionAssertion::Label("id".to_string()), 
CompletionAssertion::Label("z".to_string()), ], - setup, + None, + &pool, ) .await; // no completions in the values list! assert_no_complete_results( format!("insert into instruments (id, name) values ({})", CURSOR_POS).as_str(), - setup, + None, + &pool, ) .await; } - #[tokio::test] - async fn suggests_columns_in_where_clause() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn suggests_columns_in_where_clause(pool: PgPool) { let setup = r#" create table instruments ( id bigint primary key generated always as identity, @@ -666,6 +697,8 @@ mod tests { ); "#; + pool.execute(setup).await.unwrap(); + assert_complete_results( format!("select name from instruments where {} ", CURSOR_POS).as_str(), vec![ @@ -674,7 +707,8 @@ mod tests { CompletionAssertion::Label("name".into()), CompletionAssertion::Label("z".into()), ], - setup, + None, + &pool, ) .await; @@ -689,7 +723,8 @@ mod tests { CompletionAssertion::KindNotExists(CompletionItemKind::Column), CompletionAssertion::KindNotExists(CompletionItemKind::Schema), ], - setup, + None, + &pool, ) .await; @@ -705,7 +740,8 @@ mod tests { CompletionAssertion::Label("name".into()), CompletionAssertion::Label("z".into()), ], - setup, + None, + &pool, ) .await; @@ -721,7 +757,8 @@ mod tests { CompletionAssertion::Label("id".into()), CompletionAssertion::Label("name".into()), ], - setup, + None, + &pool, ) .await; } diff --git a/crates/pgt_completions/src/providers/functions.rs b/crates/pgt_completions/src/providers/functions.rs index f1b57e8c..2bc4f331 100644 --- a/crates/pgt_completions/src/providers/functions.rs +++ b/crates/pgt_completions/src/providers/functions.rs @@ -65,13 +65,15 @@ fn get_completion_text(ctx: &CompletionContext, func: &Function) -> CompletionTe #[cfg(test)] mod tests { + use sqlx::PgPool; + use crate::{ CompletionItem, CompletionItemKind, complete, test_helper::{CURSOR_POS, get_test_deps, get_test_params}, }; - #[tokio::test] - async fn completes_fn() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn completes_fn(pool: PgPool) { let setup = r#" create or replace function cool() returns trigger @@ -86,7 +88,7 @@ mod tests { let query = format!("select coo{}", CURSOR_POS); - let (tree, cache) = get_test_deps(setup, query.as_str().into()).await; + let (tree, cache) = get_test_deps(Some(setup), query.as_str().into(), &pool).await; let params = get_test_params(&tree, &cache, query.as_str().into()); let results = complete(params); @@ -98,8 +100,8 @@ mod tests { assert_eq!(label, "cool"); } - #[tokio::test] - async fn prefers_fn_if_invocation() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn prefers_fn_if_invocation(pool: PgPool) { let setup = r#" create table coos ( id serial primary key, @@ -119,7 +121,7 @@ mod tests { let query = format!(r#"select * from coo{}()"#, CURSOR_POS); - let (tree, cache) = get_test_deps(setup, query.as_str().into()).await; + let (tree, cache) = get_test_deps(Some(setup), query.as_str().into(), &pool).await; let params = get_test_params(&tree, &cache, query.as_str().into()); let results = complete(params); @@ -132,8 +134,8 @@ mod tests { assert_eq!(kind, CompletionItemKind::Function); } - #[tokio::test] - async fn prefers_fn_in_select_clause() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn prefers_fn_in_select_clause(pool: PgPool) { let setup = r#" create table coos ( id serial primary key, @@ -153,7 +155,7 @@ mod tests { let query = format!(r#"select coo{}"#, CURSOR_POS); - let (tree, cache) = get_test_deps(setup, 
query.as_str().into()).await; + let (tree, cache) = get_test_deps(Some(setup), query.as_str().into(), &pool).await; let params = get_test_params(&tree, &cache, query.as_str().into()); let results = complete(params); @@ -166,8 +168,8 @@ mod tests { assert_eq!(kind, CompletionItemKind::Function); } - #[tokio::test] - async fn prefers_function_in_from_clause_if_invocation() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn prefers_function_in_from_clause_if_invocation(pool: PgPool) { let setup = r#" create table coos ( id serial primary key, @@ -187,7 +189,7 @@ mod tests { let query = format!(r#"select * from coo{}()"#, CURSOR_POS); - let (tree, cache) = get_test_deps(setup, query.as_str().into()).await; + let (tree, cache) = get_test_deps(Some(setup), query.as_str().into(), &pool).await; let params = get_test_params(&tree, &cache, query.as_str().into()); let results = complete(params); diff --git a/crates/pgt_completions/src/providers/policies.rs b/crates/pgt_completions/src/providers/policies.rs index a4d3a9bb..216fcefa 100644 --- a/crates/pgt_completions/src/providers/policies.rs +++ b/crates/pgt_completions/src/providers/policies.rs @@ -59,10 +59,12 @@ pub fn complete_policies<'a>(ctx: &CompletionContext<'a>, builder: &mut Completi #[cfg(test)] mod tests { + use sqlx::{Executor, PgPool}; + use crate::test_helper::{CURSOR_POS, CompletionAssertion, assert_complete_results}; - #[tokio::test] - async fn completes_within_quotation_marks() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn completes_within_quotation_marks(pool: PgPool) { let setup = r#" create schema private; @@ -84,13 +86,16 @@ mod tests { with check (true); "#; + pool.execute(setup).await.unwrap(); + assert_complete_results( format!("alter policy \"{}\" on private.users;", CURSOR_POS).as_str(), vec![ CompletionAssertion::Label("read for public users disallowed".into()), CompletionAssertion::Label("write for public users allowed".into()), ], - setup, + None, + &pool, ) .await; @@ -99,7 +104,8 @@ mod tests { vec![CompletionAssertion::Label( "write for public users allowed".into(), )], - setup, + None, + &pool, ) .await; } diff --git a/crates/pgt_completions/src/providers/schemas.rs b/crates/pgt_completions/src/providers/schemas.rs index 02d2fd0c..561da0f8 100644 --- a/crates/pgt_completions/src/providers/schemas.rs +++ b/crates/pgt_completions/src/providers/schemas.rs @@ -27,13 +27,15 @@ pub fn complete_schemas<'a>(ctx: &'a CompletionContext, builder: &mut Completion #[cfg(test)] mod tests { + use sqlx::PgPool; + use crate::{ CompletionItemKind, test_helper::{CURSOR_POS, CompletionAssertion, assert_complete_results}, }; - #[tokio::test] - async fn autocompletes_schemas() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn autocompletes_schemas(pool: PgPool) { let setup = r#" create schema private; create schema auth; @@ -75,13 +77,14 @@ mod tests { CompletionItemKind::Schema, ), ], - setup, + Some(setup), + &pool, ) .await; } - #[tokio::test] - async fn suggests_tables_and_schemas_with_matching_keys() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn suggests_tables_and_schemas_with_matching_keys(pool: PgPool) { let setup = r#" create schema ultimate; @@ -99,7 +102,8 @@ mod tests { CompletionAssertion::LabelAndKind("users".into(), CompletionItemKind::Table), CompletionAssertion::LabelAndKind("ultimate".into(), CompletionItemKind::Schema), ], - setup, + Some(setup), + &pool, ) .await; } diff --git a/crates/pgt_completions/src/providers/tables.rs 
b/crates/pgt_completions/src/providers/tables.rs index 2102d41c..3fbee8f1 100644 --- a/crates/pgt_completions/src/providers/tables.rs +++ b/crates/pgt_completions/src/providers/tables.rs @@ -42,6 +42,8 @@ pub fn complete_tables<'a>(ctx: &'a CompletionContext, builder: &mut CompletionB #[cfg(test)] mod tests { + use sqlx::{Executor, PgPool}; + use crate::{ CompletionItem, CompletionItemKind, complete, test_helper::{ @@ -50,8 +52,8 @@ mod tests { }, }; - #[tokio::test] - async fn autocompletes_simple_table() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn autocompletes_simple_table(pool: PgPool) { let setup = r#" create table users ( id serial primary key, @@ -62,7 +64,7 @@ mod tests { let query = format!("select * from u{}", CURSOR_POS); - let (tree, cache) = get_test_deps(setup, query.as_str().into()).await; + let (tree, cache) = get_test_deps(Some(setup), query.as_str().into(), &pool).await; let params = get_test_params(&tree, &cache, query.as_str().into()); let items = complete(params); @@ -77,8 +79,8 @@ mod tests { ) } - #[tokio::test] - async fn autocompletes_table_alphanumerically() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn autocompletes_table_alphanumerically(pool: PgPool) { let setup = r#" create table addresses ( id serial primary key @@ -93,6 +95,8 @@ mod tests { ); "#; + pool.execute(setup).await.unwrap(); + let test_cases = vec![ (format!("select * from u{}", CURSOR_POS), "users"), (format!("select * from e{}", CURSOR_POS), "emails"), @@ -100,7 +104,7 @@ mod tests { ]; for (query, expected_label) in test_cases { - let (tree, cache) = get_test_deps(setup, query.as_str().into()).await; + let (tree, cache) = get_test_deps(None, query.as_str().into(), &pool).await; let params = get_test_params(&tree, &cache, query.as_str().into()); let items = complete(params); @@ -116,8 +120,8 @@ mod tests { } } - #[tokio::test] - async fn autocompletes_table_with_schema() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn autocompletes_table_with_schema(pool: PgPool) { let setup = r#" create schema customer_support; create schema private; @@ -135,6 +139,8 @@ mod tests { ); "#; + pool.execute(setup).await.unwrap(); + let test_cases = vec![ (format!("select * from u{}", CURSOR_POS), "user_y"), // user_y is preferred alphanumerically (format!("select * from private.u{}", CURSOR_POS), "user_z"), @@ -145,7 +151,7 @@ mod tests { ]; for (query, expected_label) in test_cases { - let (tree, cache) = get_test_deps(setup, query.as_str().into()).await; + let (tree, cache) = get_test_deps(None, query.as_str().into(), &pool).await; let params = get_test_params(&tree, &cache, query.as_str().into()); let items = complete(params); @@ -161,8 +167,8 @@ mod tests { } } - #[tokio::test] - async fn prefers_table_in_from_clause() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn prefers_table_in_from_clause(pool: PgPool) { let setup = r#" create table coos ( id serial primary key, @@ -182,7 +188,7 @@ mod tests { let query = format!(r#"select * from coo{}"#, CURSOR_POS); - let (tree, cache) = get_test_deps(setup, query.as_str().into()).await; + let (tree, cache) = get_test_deps(Some(setup), query.as_str().into(), &pool).await; let params = get_test_params(&tree, &cache, query.as_str().into()); let items = complete(params); @@ -195,8 +201,8 @@ mod tests { assert_eq!(kind, CompletionItemKind::Table); } - #[tokio::test] - async fn suggests_tables_in_update() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn 
suggests_tables_in_update(pool: PgPool) { let setup = r#" create table coos ( id serial primary key, @@ -204,13 +210,16 @@ mod tests { ); "#; + pool.execute(setup).await.unwrap(); + assert_complete_results( format!("update {}", CURSOR_POS).as_str(), vec![CompletionAssertion::LabelAndKind( "public".into(), CompletionItemKind::Schema, )], - setup, + None, + &pool, ) .await; @@ -220,12 +229,17 @@ mod tests { "coos".into(), CompletionItemKind::Table, )], - setup, + None, + &pool, ) .await; - assert_no_complete_results(format!("update public.coos {}", CURSOR_POS).as_str(), setup) - .await; + assert_no_complete_results( + format!("update public.coos {}", CURSOR_POS).as_str(), + None, + &pool, + ) + .await; assert_complete_results( format!("update coos set {}", CURSOR_POS).as_str(), @@ -233,7 +247,8 @@ mod tests { CompletionAssertion::Label("id".into()), CompletionAssertion::Label("name".into()), ], - setup, + None, + &pool, ) .await; @@ -243,13 +258,14 @@ mod tests { CompletionAssertion::Label("id".into()), CompletionAssertion::Label("name".into()), ], - setup, + None, + &pool, ) .await; } - #[tokio::test] - async fn suggests_tables_in_delete() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn suggests_tables_in_delete(pool: PgPool) { let setup = r#" create table coos ( id serial primary key, @@ -257,7 +273,9 @@ mod tests { ); "#; - assert_no_complete_results(format!("delete {}", CURSOR_POS).as_str(), setup).await; + pool.execute(setup).await.unwrap(); + + assert_no_complete_results(format!("delete {}", CURSOR_POS).as_str(), None, &pool).await; assert_complete_results( format!("delete from {}", CURSOR_POS).as_str(), @@ -265,14 +283,16 @@ mod tests { CompletionAssertion::LabelAndKind("public".into(), CompletionItemKind::Schema), CompletionAssertion::LabelAndKind("coos".into(), CompletionItemKind::Table), ], - setup, + None, + &pool, ) .await; assert_complete_results( format!("delete from public.{}", CURSOR_POS).as_str(), vec![CompletionAssertion::Label("coos".into())], - setup, + None, + &pool, ) .await; @@ -282,13 +302,14 @@ mod tests { CompletionAssertion::Label("id".into()), CompletionAssertion::Label("name".into()), ], - setup, + None, + &pool, ) .await; } - #[tokio::test] - async fn suggests_tables_in_join() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn suggests_tables_in_join(pool: PgPool) { let setup = r#" create schema auth; @@ -315,13 +336,14 @@ mod tests { CompletionAssertion::LabelAndKind("posts".into(), CompletionItemKind::Table), // self-join CompletionAssertion::LabelAndKind("users".into(), CompletionItemKind::Table), ], - setup, + Some(setup), + &pool, ) .await; } - #[tokio::test] - async fn suggests_tables_in_alter_and_drop_statements() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn suggests_tables_in_alter_and_drop_statements(pool: PgPool) { let setup = r#" create schema auth; @@ -340,6 +362,8 @@ mod tests { ); "#; + pool.execute(setup).await.unwrap(); + assert_complete_results( format!("alter table {}", CURSOR_POS).as_str(), vec![ @@ -348,7 +372,8 @@ mod tests { CompletionAssertion::LabelAndKind("posts".into(), CompletionItemKind::Table), CompletionAssertion::LabelAndKind("users".into(), CompletionItemKind::Table), ], - setup, + None, + &pool, ) .await; @@ -360,7 +385,8 @@ mod tests { CompletionAssertion::LabelAndKind("posts".into(), CompletionItemKind::Table), CompletionAssertion::LabelAndKind("users".into(), CompletionItemKind::Table), ], - setup, + None, + &pool, ) .await; @@ -372,7 +398,8 @@ mod tests { 
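// both the schema and the table it contains are valid completions after "delete from"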
CompletionAssertion::LabelAndKind("posts".into(), CompletionItemKind::Table), CompletionAssertion::LabelAndKind("users".into(), CompletionItemKind::Table), ], - setup, + None, + &pool, ) .await; @@ -384,13 +411,14 @@ mod tests { CompletionAssertion::LabelAndKind("posts".into(), CompletionItemKind::Table), // self-join CompletionAssertion::LabelAndKind("users".into(), CompletionItemKind::Table), ], - setup, + None, + &pool, ) .await; } - #[tokio::test] - async fn suggests_tables_in_insert_into() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn suggests_tables_in_insert_into(pool: PgPool) { let setup = r#" create schema auth; @@ -401,6 +429,8 @@ mod tests { ); "#; + pool.execute(setup).await.unwrap(); + assert_complete_results( format!("insert into {}", CURSOR_POS).as_str(), vec![ @@ -408,7 +438,8 @@ mod tests { CompletionAssertion::LabelAndKind("auth".into(), CompletionItemKind::Schema), CompletionAssertion::LabelAndKind("users".into(), CompletionItemKind::Table), ], - setup, + None, + &pool, ) .await; @@ -418,7 +449,8 @@ mod tests { "users".into(), CompletionItemKind::Table, )], - setup, + None, + &pool, ) .await; @@ -434,7 +466,8 @@ mod tests { CompletionAssertion::LabelAndKind("auth".into(), CompletionItemKind::Schema), CompletionAssertion::LabelAndKind("users".into(), CompletionItemKind::Table), ], - setup, + None, + &pool, ) .await; } diff --git a/crates/pgt_completions/src/relevance/filtering.rs b/crates/pgt_completions/src/relevance/filtering.rs index 5323e2bc..0be9e48a 100644 --- a/crates/pgt_completions/src/relevance/filtering.rs +++ b/crates/pgt_completions/src/relevance/filtering.rs @@ -218,12 +218,14 @@ impl CompletionFilter<'_> { #[cfg(test)] mod tests { + use sqlx::{Executor, PgPool}; + use crate::test_helper::{ CURSOR_POS, CompletionAssertion, assert_complete_results, assert_no_complete_results, }; - #[tokio::test] - async fn completion_after_asterisk() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn completion_after_asterisk(pool: PgPool) { let setup = r#" create table users ( id serial primary key, @@ -232,7 +234,9 @@ mod tests { ); "#; - assert_no_complete_results(format!("select * {}", CURSOR_POS).as_str(), setup).await; + pool.execute(setup).await.unwrap(); + + assert_no_complete_results(format!("select * {}", CURSOR_POS).as_str(), None, &pool).await; // if there s a COMMA after the asterisk, we're good assert_complete_results( @@ -242,19 +246,21 @@ mod tests { CompletionAssertion::Label("email".into()), CompletionAssertion::Label("id".into()), ], - setup, + None, + &pool, ) .await; } - #[tokio::test] - async fn completion_after_create_table() { - assert_no_complete_results(format!("create table {}", CURSOR_POS).as_str(), "").await; + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn completion_after_create_table(pool: PgPool) { + assert_no_complete_results(format!("create table {}", CURSOR_POS).as_str(), None, &pool) + .await; } - #[tokio::test] - async fn completion_in_column_definitions() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn completion_in_column_definitions(pool: PgPool) { let query = format!(r#"create table instruments ( {} )"#, CURSOR_POS); - assert_no_complete_results(query.as_str(), "").await; + assert_no_complete_results(query.as_str(), None, &pool).await; } } diff --git a/crates/pgt_completions/src/relevance/scoring.rs b/crates/pgt_completions/src/relevance/scoring.rs index 2fe12511..a8c89f50 100644 --- a/crates/pgt_completions/src/relevance/scoring.rs +++ 
b/crates/pgt_completions/src/relevance/scoring.rs
@@ -187,6 +187,16 @@ impl CompletionScore<'_> {
 }
 }

+ fn get_item_name(&self) -> &str {
+ match self.data {
+ CompletionRelevanceData::Table(t) => t.name.as_str(),
+ CompletionRelevanceData::Function(f) => f.name.as_str(),
+ CompletionRelevanceData::Column(c) => c.name.as_str(),
+ CompletionRelevanceData::Schema(s) => s.name.as_str(),
+ CompletionRelevanceData::Policy(p) => p.name.as_str(),
+ }
+ }
+
 fn get_schema_name(&self) -> &str {
 match self.data {
 CompletionRelevanceData::Function(f) => f.schema.as_str(),
@@ -234,19 +244,30 @@ impl CompletionScore<'_> {
 }

 fn check_is_user_defined(&mut self) {
- let schema = self.get_schema_name().to_string();
+ let schema_name = self.get_schema_name().to_string();
 let system_schemas = ["pg_catalog", "information_schema", "pg_toast"];

- if system_schemas.contains(&schema.as_str()) {
+ if system_schemas.contains(&schema_name.as_str()) {
 self.score -= 20;
 }

 // "public" is the default postgres schema where users
 // create objects. Prefer it by a slight bit.
- if schema.as_str() == "public" {
+ if schema_name.as_str() == "public" {
 self.score += 2;
 }
+
+ let item_name = self.get_item_name().to_string();
+ let table_name = self.get_table_name();
+
+ // migrations shouldn't pop up on top
+ if item_name.contains("migrations")
+ || table_name.is_some_and(|t| t.contains("migrations"))
+ || schema_name.contains("migrations")
+ {
+ self.score -= 15;
+ }
 }

 fn check_columns_in_stmt(&mut self, ctx: &CompletionContext) {
diff --git a/crates/pgt_completions/src/test_helper.rs b/crates/pgt_completions/src/test_helper.rs
index 937c11af..1bd5229c 100644
--- a/crates/pgt_completions/src/test_helper.rs
+++ b/crates/pgt_completions/src/test_helper.rs
@@ -1,8 +1,7 @@
 use std::fmt::Display;

 use pgt_schema_cache::SchemaCache;
-use pgt_test_utils::test_database::get_new_test_db;
-use sqlx::Executor;
+use sqlx::{Executor, PgPool};

 use crate::{CompletionItem, CompletionItemKind, CompletionParams, complete};

@@ -34,17 +33,18 @@ impl Display for InputQuery {
 }

 pub(crate) async fn get_test_deps(
- setup: &str,
+ setup: Option<&str>,
 input: InputQuery,
+ test_db: &PgPool,
 ) -> (tree_sitter::Tree, pgt_schema_cache::SchemaCache) {
- let test_db = get_new_test_db().await;
-
- test_db
- .execute(setup)
- .await
- .expect("Failed to execute setup query");
+ if let Some(setup) = setup {
+ test_db
+ .execute(setup)
+ .await
+ .expect("Failed to execute setup query");
+ }

- let schema_cache = SchemaCache::load(&test_db)
+ let schema_cache = SchemaCache::load(test_db)
 .await
 .expect("Failed to load Schema Cache");

@@ -206,9 +206,10 @@ impl CompletionAssertion {
 pub(crate) async fn assert_complete_results(
 query: &str,
 assertions: Vec<CompletionAssertion>,
- setup: &str,
+ setup: Option<&str>,
+ pool: &PgPool,
 ) {
- let (tree, cache) = get_test_deps(setup, query.into()).await;
+ let (tree, cache) = get_test_deps(setup, query.into(), pool).await;
 let params = get_test_params(&tree, &cache, query.into());
 let items = complete(params);

@@ -241,8 +242,8 @@ pub(crate) async fn assert_complete_results(
 });
}

-pub(crate) async fn assert_no_complete_results(query: &str, setup: &str) {
- let (tree, cache) = get_test_deps(setup, query.into()).await;
+pub(crate) async fn assert_no_complete_results(query: &str, setup: Option<&str>, pool: &PgPool) {
+ let (tree, cache) = get_test_deps(setup, query.into(), pool).await;
 let params = get_test_params(&tree, &cache, query.into());
 let items = complete(params);

diff --git a/crates/pgt_lsp/tests/server.rs 
b/crates/pgt_lsp/tests/server.rs index 581ea1fe..19b65b06 100644 --- a/crates/pgt_lsp/tests/server.rs +++ b/crates/pgt_lsp/tests/server.rs @@ -13,13 +13,13 @@ use pgt_configuration::database::PartialDatabaseConfiguration; use pgt_fs::MemoryFileSystem; use pgt_lsp::LSPServer; use pgt_lsp::ServerFactory; -use pgt_test_utils::test_database::get_new_test_db; use pgt_workspace::DynRef; use serde::Serialize; use serde::de::DeserializeOwned; use serde_json::Value; use serde_json::{from_value, to_value}; use sqlx::Executor; +use sqlx::PgPool; use std::any::type_name; use std::fmt::Display; use std::time::Duration; @@ -345,11 +345,10 @@ async fn basic_lifecycle() -> Result<()> { Ok(()) } -#[tokio::test] -async fn test_database_connection() -> Result<()> { +#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] +async fn test_database_connection(test_db: PgPool) -> Result<()> { let factory = ServerFactory::default(); let mut fs = MemoryFileSystem::default(); - let test_db = get_new_test_db().await; let setup = r#" create table public.users ( @@ -457,11 +456,10 @@ async fn server_shutdown() -> Result<()> { Ok(()) } -#[tokio::test] -async fn test_completions() -> Result<()> { +#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] +async fn test_completions(test_db: PgPool) -> Result<()> { let factory = ServerFactory::default(); let mut fs = MemoryFileSystem::default(); - let test_db = get_new_test_db().await; let setup = r#" create table public.users ( @@ -558,11 +556,10 @@ async fn test_completions() -> Result<()> { Ok(()) } -#[tokio::test] -async fn test_issue_271() -> Result<()> { +#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] +async fn test_issue_271(test_db: PgPool) -> Result<()> { let factory = ServerFactory::default(); let mut fs = MemoryFileSystem::default(); - let test_db = get_new_test_db().await; let setup = r#" create table public.users ( @@ -760,11 +757,10 @@ async fn test_issue_271() -> Result<()> { Ok(()) } -#[tokio::test] -async fn test_execute_statement() -> Result<()> { +#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] +async fn test_execute_statement(test_db: PgPool) -> Result<()> { let factory = ServerFactory::default(); let mut fs = MemoryFileSystem::default(); - let test_db = get_new_test_db().await; let database = test_db .connect_options() @@ -899,11 +895,10 @@ async fn test_execute_statement() -> Result<()> { Ok(()) } -#[tokio::test] -async fn test_issue_281() -> Result<()> { +#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] +async fn test_issue_281(test_db: PgPool) -> Result<()> { let factory = ServerFactory::default(); let mut fs = MemoryFileSystem::default(); - let test_db = get_new_test_db().await; let setup = r#" create table public.users ( @@ -983,11 +978,10 @@ async fn test_issue_281() -> Result<()> { Ok(()) } -#[tokio::test] -async fn test_issue_303() -> Result<()> { +#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] +async fn test_issue_303(test_db: PgPool) -> Result<()> { let factory = ServerFactory::default(); let mut fs = MemoryFileSystem::default(); - let test_db = get_new_test_db().await; let setup = r#" create table public.users ( diff --git a/crates/pgt_schema_cache/src/columns.rs b/crates/pgt_schema_cache/src/columns.rs index 60d422fd..01f9b41c 100644 --- a/crates/pgt_schema_cache/src/columns.rs +++ b/crates/pgt_schema_cache/src/columns.rs @@ -82,15 +82,12 @@ impl SchemaCacheItem for Column { #[cfg(test)] mod tests { - use pgt_test_utils::test_database::get_new_test_db; - use sqlx::Executor; + use sqlx::{Executor, PgPool}; use 
crate::{SchemaCache, columns::ColumnClassKind}; - #[tokio::test] - async fn loads_columns() { - let test_db = get_new_test_db().await; - + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn loads_columns(test_db: PgPool) { let setup = r#" create table public.users ( id serial primary key, @@ -129,7 +126,7 @@ mod tests { let public_schema_columns = cache .columns .iter() - .filter(|c| c.schema_name.as_str() == "public") + .filter(|c| c.schema_name.as_str() == "public" && !c.table_name.contains("migrations")) .count(); assert_eq!(public_schema_columns, 4); diff --git a/crates/pgt_schema_cache/src/policies.rs b/crates/pgt_schema_cache/src/policies.rs index 85cd7821..8e2ee4d7 100644 --- a/crates/pgt_schema_cache/src/policies.rs +++ b/crates/pgt_schema_cache/src/policies.rs @@ -80,27 +80,14 @@ impl SchemaCacheItem for Policy { #[cfg(test)] mod tests { - use pgt_test_utils::test_database::get_new_test_db; - use sqlx::Executor; - use crate::{SchemaCache, policies::PolicyCommand}; + use sqlx::{Executor, PgPool}; - #[tokio::test] - async fn loads_policies() { - let test_db = get_new_test_db().await; + use crate::{SchemaCache, policies::PolicyCommand}; + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn loads_policies(test_db: PgPool) { let setup = r#" - do $$ - begin - if not exists ( - select from pg_catalog.pg_roles - where rolname = 'admin' - ) then - create role admin; - end if; - end $$; - - create table public.users ( id serial primary key, name varchar(255) not null @@ -125,22 +112,12 @@ mod tests { to public with check (true); - create policy admin_policy + create policy owner_policy on public.users for all - to admin + to owner with check (true); - do $$ - begin - if not exists ( - select from pg_catalog.pg_roles - where rolname = 'owner' - ) then - create role owner; - end if; - end $$; - create schema real_estate; create table real_estate.properties ( @@ -148,10 +125,10 @@ mod tests { owner_id int not null ); - create policy owner_policy + create policy test_nologin_policy on real_estate.properties for update - to owner + to test_nologin using (owner_id = current_user::int); "#; @@ -193,29 +170,29 @@ mod tests { assert_eq!(public_policy.security_qualification, Some("true".into())); assert_eq!(public_policy.with_check, None); - let admin_policy = cache + let owner_policy = cache .policies .iter() - .find(|p| p.name == "admin_policy") + .find(|p| p.name == "owner_policy") .unwrap(); - assert_eq!(admin_policy.table_name, "users"); - assert_eq!(admin_policy.schema_name, "public"); - assert!(admin_policy.is_permissive); - assert_eq!(admin_policy.command, PolicyCommand::All); - assert_eq!(admin_policy.role_names, vec!["admin"]); - assert_eq!(admin_policy.security_qualification, None); - assert_eq!(admin_policy.with_check, Some("true".into())); + assert_eq!(owner_policy.table_name, "users"); + assert_eq!(owner_policy.schema_name, "public"); + assert!(owner_policy.is_permissive); + assert_eq!(owner_policy.command, PolicyCommand::All); + assert_eq!(owner_policy.role_names, vec!["owner"]); + assert_eq!(owner_policy.security_qualification, None); + assert_eq!(owner_policy.with_check, Some("true".into())); let owner_policy = cache .policies .iter() - .find(|p| p.name == "owner_policy") + .find(|p| p.name == "test_nologin_policy") .unwrap(); assert_eq!(owner_policy.table_name, "properties"); assert_eq!(owner_policy.schema_name, "real_estate"); assert!(owner_policy.is_permissive); assert_eq!(owner_policy.command, PolicyCommand::Update); - 
assert_eq!(owner_policy.role_names, vec!["owner"]); + assert_eq!(owner_policy.role_names, vec!["test_nologin"]); assert_eq!( owner_policy.security_qualification, Some("(owner_id = (CURRENT_USER)::integer)".into()) diff --git a/crates/pgt_schema_cache/src/roles.rs b/crates/pgt_schema_cache/src/roles.rs index c212b791..7ced66f9 100644 --- a/crates/pgt_schema_cache/src/roles.rs +++ b/crates/pgt_schema_cache/src/roles.rs @@ -21,50 +21,19 @@ impl SchemaCacheItem for Role { #[cfg(test)] mod tests { - use crate::SchemaCache; - use pgt_test_utils::test_database::get_new_test_db; - use sqlx::Executor; - - #[tokio::test] - async fn loads_roles() { - let test_db = get_new_test_db().await; - - let setup = r#" - do $$ - begin - if not exists ( - select from pg_catalog.pg_roles - where rolname = 'test_super' - ) then - create role test_super superuser createdb login bypassrls; - end if; - if not exists ( - select from pg_catalog.pg_roles - where rolname = 'test_nologin' - ) then - create role test_nologin; - end if; - if not exists ( - select from pg_catalog.pg_roles - where rolname = 'test_login' - ) then - create role test_login login; - end if; - end $$; - "#; + use sqlx::PgPool; - test_db - .execute(setup) - .await - .expect("Failed to setup test database"); + use crate::SchemaCache; + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn loads_roles(test_db: PgPool) { let cache = SchemaCache::load(&test_db) .await .expect("Failed to load Schema Cache"); let roles = &cache.roles; - let super_role = roles.iter().find(|r| r.name == "test_super").unwrap(); + let super_role = roles.iter().find(|r| r.name == "owner").unwrap(); assert!(super_role.is_super_user); assert!(super_role.can_create_db); assert!(super_role.can_login); diff --git a/crates/pgt_schema_cache/src/schema_cache.rs b/crates/pgt_schema_cache/src/schema_cache.rs index 516b37e6..8fb9683b 100644 --- a/crates/pgt_schema_cache/src/schema_cache.rs +++ b/crates/pgt_schema_cache/src/schema_cache.rs @@ -93,14 +93,12 @@ pub trait SchemaCacheItem { #[cfg(test)] mod tests { - use pgt_test_utils::test_database::get_new_test_db; + use sqlx::PgPool; use crate::SchemaCache; - #[tokio::test] - async fn it_loads() { - let test_db = get_new_test_db().await; - + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn it_loads(test_db: PgPool) { SchemaCache::load(&test_db) .await .expect("Couldnt' load Schema Cache"); diff --git a/crates/pgt_schema_cache/src/tables.rs b/crates/pgt_schema_cache/src/tables.rs index a0a40d6a..16b86c54 100644 --- a/crates/pgt_schema_cache/src/tables.rs +++ b/crates/pgt_schema_cache/src/tables.rs @@ -79,14 +79,12 @@ impl SchemaCacheItem for Table { #[cfg(test)] mod tests { - use crate::{SchemaCache, tables::TableKind}; - use pgt_test_utils::test_database::get_new_test_db; - use sqlx::Executor; + use sqlx::{Executor, PgPool}; - #[tokio::test] - async fn includes_views_in_query() { - let test_db = get_new_test_db().await; + use crate::{SchemaCache, tables::TableKind}; + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn includes_views_in_query(test_db: PgPool) { let setup = r#" create table public.base_table ( id serial primary key, @@ -116,10 +114,8 @@ mod tests { assert_eq!(view.schema, "public"); } - #[tokio::test] - async fn includes_materialized_views_in_query() { - let test_db = get_new_test_db().await; - + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn includes_materialized_views_in_query(test_db: PgPool) { let setup = r#" create table public.base_table ( id serial 
primary key, diff --git a/crates/pgt_schema_cache/src/triggers.rs b/crates/pgt_schema_cache/src/triggers.rs index 0a5241d6..2b2a3aff 100644 --- a/crates/pgt_schema_cache/src/triggers.rs +++ b/crates/pgt_schema_cache/src/triggers.rs @@ -126,18 +126,16 @@ impl SchemaCacheItem for Trigger { #[cfg(test)] mod tests { - use pgt_test_utils::test_database::get_new_test_db; - use sqlx::Executor; + + use sqlx::{Executor, PgPool}; use crate::{ SchemaCache, triggers::{TriggerAffected, TriggerEvent, TriggerTiming}, }; - #[tokio::test] - async fn loads_triggers() { - let test_db = get_new_test_db().await; - + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn loads_triggers(test_db: PgPool) { let setup = r#" create table public.users ( id serial primary key, @@ -219,10 +217,8 @@ mod tests { assert_eq!(delete_trigger.proc_name, "log_user_insert"); } - #[tokio::test] - async fn loads_instead_and_truncate_triggers() { - let test_db = get_new_test_db().await; - + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn loads_instead_and_truncate_triggers(test_db: PgPool) { let setup = r#" create table public.docs ( id serial primary key, diff --git a/crates/pgt_test_utils/src/lib.rs b/crates/pgt_test_utils/src/lib.rs index 4d6d3070..e21c6ce4 100644 --- a/crates/pgt_test_utils/src/lib.rs +++ b/crates/pgt_test_utils/src/lib.rs @@ -1 +1 @@ -pub mod test_database; +pub static MIGRATIONS: sqlx::migrate::Migrator = sqlx::migrate!("./testdb_migrations"); diff --git a/crates/pgt_test_utils/src/test_database.rs b/crates/pgt_test_utils/src/test_database.rs deleted file mode 100644 index 67415c4a..00000000 --- a/crates/pgt_test_utils/src/test_database.rs +++ /dev/null @@ -1,42 +0,0 @@ -use sqlx::{Executor, PgPool, postgres::PgConnectOptions}; -use uuid::Uuid; - -// TODO: Work with proper config objects instead of a connection_string. -// With the current implementation, we can't parse the password from the connection string. -pub async fn get_new_test_db() -> PgPool { - dotenv::dotenv().expect("Unable to load .env file for tests"); - - let connection_string = std::env::var("DATABASE_URL").expect("DATABASE_URL not set"); - let password = std::env::var("DB_PASSWORD").unwrap_or("postgres".into()); - - let options_from_conn_str: PgConnectOptions = connection_string - .parse() - .expect("Invalid Connection String"); - - let host = options_from_conn_str.get_host(); - assert!( - host == "localhost" || host == "127.0.0.1", - "Running tests against non-local database!" 
- ); - - let options_without_db_name = PgConnectOptions::new() - .host(host) - .port(options_from_conn_str.get_port()) - .username(options_from_conn_str.get_username()) - .password(&password); - - let postgres = sqlx::PgPool::connect_with(options_without_db_name.clone()) - .await - .expect("Unable to connect to test postgres instance"); - - let database_name = Uuid::new_v4().to_string(); - - postgres - .execute(format!(r#"create database "{}";"#, database_name).as_str()) - .await - .expect("Failed to create test database."); - - sqlx::PgPool::connect_with(options_without_db_name.database(&database_name)) - .await - .expect("Could not connect to test database") -} diff --git a/crates/pgt_test_utils/testdb_migrations/0001_setup-roles.sql b/crates/pgt_test_utils/testdb_migrations/0001_setup-roles.sql new file mode 100644 index 00000000..1f1d50b3 --- /dev/null +++ b/crates/pgt_test_utils/testdb_migrations/0001_setup-roles.sql @@ -0,0 +1,32 @@ +do $$ +begin + +begin + create role owner superuser createdb login bypassrls; +exception + when duplicate_object then + null; + when unique_violation then + null; +end; + +begin + create role test_login login; +exception + when duplicate_object then + null; + when unique_violation then + null; +end; + +begin + create role test_nologin; +exception + when duplicate_object then + null; + when unique_violation then + null; +end; + +end +$$; \ No newline at end of file diff --git a/crates/pgt_typecheck/src/typed_identifier.rs b/crates/pgt_typecheck/src/typed_identifier.rs index 5efe0421..710b2fe9 100644 --- a/crates/pgt_typecheck/src/typed_identifier.rs +++ b/crates/pgt_typecheck/src/typed_identifier.rs @@ -231,11 +231,10 @@ fn resolve_type<'a>( #[cfg(test)] mod tests { - use pgt_test_utils::test_database::get_new_test_db; - use sqlx::Executor; + use sqlx::{Executor, PgPool}; - #[tokio::test] - async fn test_apply_identifiers() { + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn test_apply_identifiers(test_db: PgPool) { let input = "select v_test + fn_name.custom_type.v_test2 + $3 + custom_type.v_test3 + fn_name.v_test2 + enum_type"; let identifiers = vec![ @@ -295,8 +294,6 @@ mod tests { }, ]; - let test_db = get_new_test_db().await; - let setup = r#" CREATE TYPE "public"."custom_type" AS ( v_test2 integer, diff --git a/crates/pgt_typecheck/tests/diagnostics.rs b/crates/pgt_typecheck/tests/diagnostics.rs index 9628962d..a7448503 100644 --- a/crates/pgt_typecheck/tests/diagnostics.rs +++ b/crates/pgt_typecheck/tests/diagnostics.rs @@ -3,13 +3,10 @@ use pgt_console::{ markup, }; use pgt_diagnostics::PrintDiagnostic; -use pgt_test_utils::test_database::get_new_test_db; use pgt_typecheck::{TypecheckParams, check_sql}; -use sqlx::Executor; - -async fn test(name: &str, query: &str, setup: Option<&str>) { - let test_db = get_new_test_db().await; +use sqlx::{Executor, PgPool}; +async fn test(name: &str, query: &str, setup: Option<&str>, test_db: &PgPool) { if let Some(setup) = setup { test_db .execute(setup) @@ -22,7 +19,7 @@ async fn test(name: &str, query: &str, setup: Option<&str>) { .set_language(tree_sitter_sql::language()) .expect("Error loading sql language"); - let schema_cache = pgt_schema_cache::SchemaCache::load(&test_db) + let schema_cache = pgt_schema_cache::SchemaCache::load(test_db) .await .expect("Failed to load Schema Cache"); @@ -58,8 +55,8 @@ async fn test(name: &str, query: &str, setup: Option<&str>) { }); } -#[tokio::test] -async fn invalid_column() { +#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] +async fn 
invalid_column(pool: PgPool) { test( "invalid_column", "select id, unknown from contacts;", @@ -73,6 +70,7 @@ async fn invalid_column() { ); "#, ), + &pool, ) .await; } From 3698af51008081638932adbb8bda79db5e9dac56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Sun, 1 Jun 2025 21:40:44 +0200 Subject: [PATCH 064/114] feat: workspace support (#408) --- Cargo.lock | 14 + Cargo.toml | 2 + crates/pgt_cli/src/commands/mod.rs | 8 +- crates/pgt_cli/src/diagnostics.rs | 2 +- crates/pgt_configuration/Cargo.toml | 1 + crates/pgt_configuration/src/diagnostics.rs | 53 ++ crates/pgt_configuration/src/lib.rs | 6 + crates/pgt_diagnostics/Cargo.toml | 1 + crates/pgt_diagnostics/src/adapters.rs | 25 + crates/pgt_fs/Cargo.toml | 1 + crates/pgt_fs/src/fs.rs | 15 + crates/pgt_fs/src/fs/memory.rs | 10 + crates/pgt_fs/src/fs/os.rs | 23 +- crates/pgt_lsp/src/server.rs | 52 +- crates/pgt_lsp/src/session.rs | 42 +- crates/pgt_lsp/tests/server.rs | 451 +++++++++++++++++- crates/pgt_workspace/Cargo.toml | 1 + crates/pgt_workspace/src/configuration.rs | 318 +++++++++++- crates/pgt_workspace/src/diagnostics.rs | 7 + crates/pgt_workspace/src/lib.rs | 1 + crates/pgt_workspace/src/settings.rs | 234 ++++++--- crates/pgt_workspace/src/workspace.rs | 103 ++++ crates/pgt_workspace/src/workspace/client.rs | 19 +- crates/pgt_workspace/src/workspace/server.rs | 229 ++++++--- .../src/workspace/server/connection_key.rs | 44 ++ .../workspace/server/connection_manager.rs | 95 ++++ .../src/workspace/server/db_connection.rs | 40 -- .../workspace/server/schema_cache_manager.rs | 106 ++-- crates/pgt_workspace/src/workspace_types.rs | 3 +- docs/schemas/0.0.0/schema.json | 11 + docs/schemas/latest/schema.json | 11 + justfile | 15 +- .../backend-jsonrpc/src/workspace.ts | 32 +- .../backend-jsonrpc/tests/workspace.test.mjs | 5 +- 34 files changed, 1688 insertions(+), 292 deletions(-) create mode 100644 crates/pgt_workspace/src/workspace/server/connection_key.rs create mode 100644 crates/pgt_workspace/src/workspace/server/connection_manager.rs delete mode 100644 crates/pgt_workspace/src/workspace/server/db_connection.rs diff --git a/Cargo.lock b/Cargo.lock index 4771c8a1..875d4d9f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2599,6 +2599,7 @@ dependencies = [ "biome_deserialize_macros 0.6.0", "bpaf", "indexmap 2.7.0", + "oxc_resolver", "pgt_analyse", "pgt_analyser", "pgt_console", @@ -2631,6 +2632,7 @@ dependencies = [ "backtrace", "bpaf", "enumflags2", + "oxc_resolver", "pgt_console", "pgt_diagnostics_categories", "pgt_diagnostics_macros", @@ -2676,6 +2678,7 @@ dependencies = [ "crossbeam", "directories", "enumflags2", + "oxc_resolver", "parking_lot", "pgt_diagnostics", "rayon", @@ -2912,6 +2915,7 @@ dependencies = [ "schemars", "serde", "serde_json", + "slotmap", "sqlx", "strum", "tempfile", @@ -3701,6 +3705,16 @@ dependencies = [ "autocfg", ] +[[package]] +name = "slotmap" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbff4acf519f630b3a3ddcfaea6c06b42174d9a44bc70c620e9ed1649d58b82a" +dependencies = [ + "serde", + "version_check", +] + [[package]] name = "smallvec" version = "1.13.2" diff --git a/Cargo.toml b/Cargo.toml index aaaa9035..fe00d7ca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,6 +28,7 @@ enumflags2 = "0.7.11" ignore = "0.4.23" indexmap = { version = "2.6.0", features = ["serde"] } insta = "1.31.0" +oxc_resolver = "1.12.0" pg_query = "6.1.0" proc-macro2 = "1.0.66" quote = "1.0.33" @@ -38,6 +39,7 @@ schemars = { version = "0.8.22", features = 
["indexmap2", "small serde = "1.0.195" serde_json = "1.0.114" similar = "2.6.0" +slotmap = "1.0.7" smallvec = { version = "1.13.2", features = ["union", "const_new", "serde"] } strum = { version = "0.27.1", features = ["derive"] } # this will use tokio if available, otherwise async-std diff --git a/crates/pgt_cli/src/commands/mod.rs b/crates/pgt_cli/src/commands/mod.rs index ebd16e3d..19dc56ad 100644 --- a/crates/pgt_cli/src/commands/mod.rs +++ b/crates/pgt_cli/src/commands/mod.rs @@ -9,9 +9,9 @@ use bpaf::Bpaf; use pgt_configuration::{PartialConfiguration, partial_configuration}; use pgt_console::Console; use pgt_fs::FileSystem; +use pgt_workspace::PartialConfigurationExt; use pgt_workspace::configuration::{LoadedConfiguration, load_configuration}; -use pgt_workspace::settings::PartialConfigurationExt; -use pgt_workspace::workspace::UpdateSettingsParams; +use pgt_workspace::workspace::{RegisterProjectFolderParams, UpdateSettingsParams}; use pgt_workspace::{DynRef, Workspace, WorkspaceError}; use std::ffi::OsString; use std::path::PathBuf; @@ -301,6 +301,10 @@ pub(crate) trait CommandRunner: Sized { let (vcs_base_path, gitignore_matches) = configuration.retrieve_gitignore_matches(fs, vcs_base_path.as_deref())?; let paths = self.get_files_to_process(fs, &configuration)?; + workspace.register_project_folder(RegisterProjectFolderParams { + path: fs.working_directory(), + set_as_current_workspace: true, + })?; workspace.update_settings(UpdateSettingsParams { workspace_directory: fs.working_directory(), diff --git a/crates/pgt_cli/src/diagnostics.rs b/crates/pgt_cli/src/diagnostics.rs index d24d02e9..20d32113 100644 --- a/crates/pgt_cli/src/diagnostics.rs +++ b/crates/pgt_cli/src/diagnostics.rs @@ -455,7 +455,7 @@ mod test { fn termination_diagnostic_size() { assert_eq!( std::mem::size_of::(), - 80, + 96, "you successfully decreased the size of the diagnostic!" ) } diff --git a/crates/pgt_configuration/Cargo.toml b/crates/pgt_configuration/Cargo.toml index 61da458b..3bd685fa 100644 --- a/crates/pgt_configuration/Cargo.toml +++ b/crates/pgt_configuration/Cargo.toml @@ -16,6 +16,7 @@ biome_deserialize = { workspace = true, features = ["schema"] } biome_deserialize_macros = { workspace = true } bpaf = { workspace = true } indexmap = { workspace = true } +oxc_resolver = { workspace = true } pgt_analyse = { workspace = true } pgt_analyser = { workspace = true } pgt_console = { workspace = true } diff --git a/crates/pgt_configuration/src/diagnostics.rs b/crates/pgt_configuration/src/diagnostics.rs index dc835ed7..79fd7714 100644 --- a/crates/pgt_configuration/src/diagnostics.rs +++ b/crates/pgt_configuration/src/diagnostics.rs @@ -1,5 +1,7 @@ use pgt_console::fmt::Display; use pgt_console::{MarkupBuf, markup}; +use pgt_diagnostics::adapters::ResolveError; + use pgt_diagnostics::{Advices, Diagnostic, Error, LogCategory, MessageAndDescription, Visit}; use serde::{Deserialize, Serialize}; use std::fmt::{Debug, Formatter}; @@ -21,6 +23,12 @@ pub enum ConfigurationDiagnostic { /// Thrown when the pattern inside the `ignore` field errors InvalidIgnorePattern(InvalidIgnorePattern), + + /// Thrown when there's something wrong with the files specified inside `"extends"` + CantLoadExtendFile(CantLoadExtendFile), + + /// Thrown when a configuration file can't be resolved from `node_modules` + CantResolve(CantResolve), } impl ConfigurationDiagnostic { @@ -72,6 +80,18 @@ impl ConfigurationDiagnostic { message: MessageAndDescription::from(markup! 
{{message}}.to_owned()),
         })
     }
+
+    pub fn cant_resolve(path: impl Display, source: oxc_resolver::ResolveError) -> Self {
+        Self::CantResolve(CantResolve {
+            message: MessageAndDescription::from(
+                markup! {
+                    "Failed to resolve the configuration from "{{path}}
+                }
+                .to_owned(),
+            ),
+            source: Some(Error::from(ResolveError::from(source))),
+        })
+    }
 }
 
 impl Debug for ConfigurationDiagnostic {
@@ -168,3 +188,36 @@ pub struct CantResolve {
     #[source]
     source: Option<Error>,
 }
+
+#[derive(Debug, Serialize, Deserialize, Diagnostic)]
+#[diagnostic(
+    category = "configuration",
+    severity = Error,
+)]
+pub struct CantLoadExtendFile {
+    #[location(resource)]
+    file_path: String,
+    #[message]
+    #[description]
+    message: MessageAndDescription,
+
+    #[verbose_advice]
+    verbose_advice: ConfigurationAdvices,
+}
+
+impl CantLoadExtendFile {
+    pub fn new(file_path: impl Into<String>, message: impl Display) -> Self {
+        Self {
+            file_path: file_path.into(),
+            message: MessageAndDescription::from(markup! {{message}}.to_owned()),
+            verbose_advice: ConfigurationAdvices::default(),
+        }
+    }
+
+    pub fn with_verbose_advice(mut self, message: impl Display) -> Self {
+        self.verbose_advice
+            .messages
+            .push(markup! {{message}}.to_owned());
+        self
+    }
+}
diff --git a/crates/pgt_configuration/src/lib.rs b/crates/pgt_configuration/src/lib.rs
index fcf0b5c6..b862dce4 100644
--- a/crates/pgt_configuration/src/lib.rs
+++ b/crates/pgt_configuration/src/lib.rs
@@ -22,6 +22,7 @@ pub use analyser::{
     RulePlainConfiguration, RuleSelector, RuleWithFixOptions, RuleWithOptions, Rules,
     partial_linter_configuration,
 };
+use biome_deserialize::StringSet;
 use biome_deserialize_macros::{Merge, Partial};
 use bpaf::Bpaf;
 use database::{
@@ -50,6 +51,10 @@ pub struct Configuration {
     #[partial(bpaf(hide))]
     pub schema: String,
 
+    /// A list of paths to other JSON files, used to extend the current configuration.
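+    ///
+    /// Illustrative example (editorial note, not part of this patch): with
+    /// `"extends": ["../postgrestools.jsonc"]`, the referenced file is loaded
+    /// first and this file is merged on top of it, so local values win.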
+    #[partial(bpaf(hide))]
+    pub extends: StringSet,
+
     /// The configuration of the VCS integration
     #[partial(type, bpaf(external(partial_vcs_configuration), optional, hide_usage))]
     pub vcs: VcsConfiguration,
@@ -85,6 +90,7 @@ impl PartialConfiguration {
     pub fn init() -> Self {
         Self {
             schema: Some(format!("https://pgtools.dev/schemas/{VERSION}/schema.json")),
+            extends: Some(StringSet::default()),
             files: Some(PartialFilesConfiguration {
                 ignore: Some(Default::default()),
                 ..Default::default()
diff --git a/crates/pgt_diagnostics/Cargo.toml b/crates/pgt_diagnostics/Cargo.toml
index 190b25f0..06c6f8dc 100644
--- a/crates/pgt_diagnostics/Cargo.toml
+++ b/crates/pgt_diagnostics/Cargo.toml
@@ -15,6 +15,7 @@ version = "0.0.0"
 backtrace = "0.3.74"
 bpaf = { workspace = true }
 enumflags2 = { workspace = true }
+oxc_resolver = { workspace = true }
 pgt_console = { workspace = true, features = ["serde"] }
 pgt_diagnostics_categories = { workspace = true, features = ["serde"] }
 pgt_diagnostics_macros = { workspace = true }
diff --git a/crates/pgt_diagnostics/src/adapters.rs b/crates/pgt_diagnostics/src/adapters.rs
index ca627d3b..5c3dcdd5 100644
--- a/crates/pgt_diagnostics/src/adapters.rs
+++ b/crates/pgt_diagnostics/src/adapters.rs
@@ -134,3 +134,28 @@ impl Diagnostic for SerdeJsonError {
         fmt.write_markup(markup!({ AsConsoleDisplay(&self.error) }))
     }
 }
+
+#[derive(Debug)]
+pub struct ResolveError {
+    error: oxc_resolver::ResolveError,
+}
+
+impl From<oxc_resolver::ResolveError> for ResolveError {
+    fn from(error: oxc_resolver::ResolveError) -> Self {
+        Self { error }
+    }
+}
+
+impl Diagnostic for ResolveError {
+    fn category(&self) -> Option<&'static Category> {
+        Some(category!("internalError/io"))
+    }
+
+    fn description(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(fmt, "{}", self.error)
+    }
+
+    fn message(&self, fmt: &mut fmt::Formatter<'_>) -> io::Result<()> {
+        fmt.write_markup(markup!({ AsConsoleDisplay(&self.error) }))
+    }
+}
diff --git a/crates/pgt_fs/Cargo.toml b/crates/pgt_fs/Cargo.toml
index 1e4a7b4f..40478934 100644
--- a/crates/pgt_fs/Cargo.toml
+++ b/crates/pgt_fs/Cargo.toml
@@ -15,6 +15,7 @@ version = "0.0.0"
 crossbeam = { workspace = true }
 directories = "5.0.1"
 enumflags2 = { workspace = true }
+oxc_resolver = { workspace = true }
 parking_lot = { version = "0.12.3", features = ["arc_lock"] }
 pgt_diagnostics = { workspace = true }
 rayon = { workspace = true }
diff --git a/crates/pgt_fs/src/fs.rs b/crates/pgt_fs/src/fs.rs
index b73aef6e..2bfd2e51 100644
--- a/crates/pgt_fs/src/fs.rs
+++ b/crates/pgt_fs/src/fs.rs
@@ -1,6 +1,7 @@
 use crate::{PathInterner, PgTPath};
 pub use memory::{ErrorEntry, MemoryFileSystem};
 pub use os::OsFileSystem;
+use oxc_resolver::{Resolution, ResolveError};
 use pgt_diagnostics::{Advices, Diagnostic, LogCategory, Visit, console};
 use pgt_diagnostics::{Error, Severity};
 use serde::{Deserialize, Serialize};
@@ -164,6 +165,12 @@ pub trait FileSystem: Send + Sync + RefUnwindSafe {
     fn get_changed_files(&self, base: &str) -> io::Result<Vec<String>>;
 
     fn get_staged_files(&self) -> io::Result<Vec<String>>;
+
+    fn resolve_configuration(
+        &self,
+        specifier: &str,
+        path: &Path,
+    ) -> Result<Resolution, ResolveError>;
 }
 
 /// Result of the auto search
@@ -355,6 +362,14 @@ where
     fn get_staged_files(&self) -> io::Result<Vec<String>> {
         T::get_staged_files(self)
     }
+
+    fn resolve_configuration(
+        &self,
+        specifier: &str,
+        path: &Path,
+    ) -> Result<Resolution, ResolveError> {
+        T::resolve_configuration(self, specifier, path)
+    }
 }
 
 #[derive(Debug, Diagnostic, Deserialize, Serialize)]
diff --git a/crates/pgt_fs/src/fs/memory.rs b/crates/pgt_fs/src/fs/memory.rs
index baffe0ab..a744e575 100644
--- a/crates/pgt_fs/src/fs/memory.rs
+++ b/crates/pgt_fs/src/fs/memory.rs
@@ -1,3 +1,4 @@
+use oxc_resolver::{Resolution, ResolveError};
 use rustc_hash::FxHashMap;
 use std::collections::hash_map::{Entry, IntoIter};
 use std::io;
@@ -227,6 +228,15 @@ impl FileSystem for MemoryFileSystem {
 
         Ok(cb())
     }
+
+    fn resolve_configuration(
+        &self,
+        _specifier: &str,
+        _path: &Path,
+    ) -> Result<Resolution, ResolveError> {
+        // not needed for the memory file system
+        todo!()
+    }
 }
 
 struct MemoryFile {
diff --git a/crates/pgt_fs/src/fs/os.rs b/crates/pgt_fs/src/fs/os.rs
index a2e40695..5033f296 100644
--- a/crates/pgt_fs/src/fs/os.rs
+++ b/crates/pgt_fs/src/fs/os.rs
@@ -5,9 +5,11 @@ use crate::{
     FileSystem, PgTPath,
     fs::{TraversalContext, TraversalScope},
 };
+use oxc_resolver::{Resolution, ResolveError, ResolveOptions, Resolver};
 use pgt_diagnostics::{DiagnosticExt, Error, Severity, adapters::IoError};
 use rayon::{Scope, scope};
 use std::fs::{DirEntry, FileType};
+use std::panic::AssertUnwindSafe;
 use std::process::Command;
 use std::{
     env, fs,
@@ -21,12 +23,18 @@ const MAX_SYMLINK_DEPTH: u8 = 3;
 /// Implementation of [FileSystem] that directly calls through to the underlying OS
 pub struct OsFileSystem {
     pub working_directory: Option<PathBuf>,
+    pub configuration_resolver: AssertUnwindSafe<Resolver>,
 }
 
 impl OsFileSystem {
     pub fn new(working_directory: PathBuf) -> Self {
         Self {
             working_directory: Some(working_directory),
+            configuration_resolver: AssertUnwindSafe(Resolver::new(ResolveOptions {
+                condition_names: vec!["node".to_string(), "import".to_string()],
+                extensions: vec![".json".to_string(), ".jsonc".to_string()],
+                ..ResolveOptions::default()
+            })),
         }
     }
 }
@@ -35,6 +43,11 @@ impl Default for OsFileSystem {
     fn default() -> Self {
         Self {
             working_directory: env::current_dir().ok(),
+            configuration_resolver: AssertUnwindSafe(Resolver::new(ResolveOptions {
+                condition_names: vec!["node".to_string(), "import".to_string()],
+                extensions: vec![".json".to_string(), ".jsonc".to_string()],
+                ..ResolveOptions::default()
+            })),
         }
     }
 }
@@ -116,6 +129,14 @@ impl FileSystem for OsFileSystem {
             .map(|l| l.to_string())
             .collect())
     }
+
+    fn resolve_configuration(
+        &self,
+        specifier: &str,
+        path: &Path,
+    ) -> Result<Resolution, ResolveError> {
+        self.configuration_resolver.resolve(path, specifier)
+    }
 }
 
 struct OsFile {
@@ -387,8 +408,6 @@ fn follow_symlink(
     path: &Path,
     ctx: &dyn TraversalContext,
 ) -> Result<(PathBuf, FileType), SymlinkExpansionError> {
-    tracing::info!("Translating symlink: {path:?}");
-
     let target_path = fs::read_link(path).map_err(|err| {
         ctx.push_diagnostic(IoError::from(err).with_file_path(path.to_string_lossy().to_string()));
         SymlinkExpansionError
diff --git a/crates/pgt_lsp/src/server.rs b/crates/pgt_lsp/src/server.rs
index 4c05c0e4..6420c511 100644
--- a/crates/pgt_lsp/src/server.rs
+++ b/crates/pgt_lsp/src/server.rs
@@ -1,10 +1,13 @@
 use crate::capabilities::server_capabilities;
 use crate::handlers;
-use crate::session::{CapabilitySet, CapabilityStatus, Session, SessionHandle, SessionKey};
+use crate::session::{
+    CapabilitySet, CapabilityStatus, ClientInformation, Session, SessionHandle, SessionKey,
+};
 use crate::utils::{into_lsp_error, panic_to_lsp_error};
 use futures::FutureExt;
 use futures::future::ready;
 use pgt_fs::{ConfigName, FileSystem, OsFileSystem};
+use pgt_workspace::workspace::{RegisterProjectFolderParams, UnregisterProjectFolderParams};
 use pgt_workspace::{DynRef, Workspace, workspace};
 use rustc_hash::FxHashMap;
 use serde_json::json;
@@ -107,6 +110,10 @@ impl LanguageServer for LSPServer {
 
         self.session.initialize(
            params.capabilities,
+            params.client_info.map(|client_info| ClientInformation {
+                name: client_info.name,
+                version: client_info.version,
+            }),
             params.root_uri,
             params.workspace_folders,
         );
@@ -217,6 +224,47 @@ impl LanguageServer for LSPServer {
             .ok();
     }
 
+    async fn did_change_workspace_folders(&self, params: DidChangeWorkspaceFoldersParams) {
+        for removed in &params.event.removed {
+            if let Ok(project_path) = self.session.file_path(&removed.uri) {
+                let result = self
+                    .session
+                    .workspace
+                    .unregister_project_folder(UnregisterProjectFolderParams { path: project_path })
+                    .map_err(into_lsp_error);
+
+                if let Err(err) = result {
+                    error!("Failed to remove project from the workspace: {}", err);
+                    self.session
+                        .client
+                        .log_message(MessageType::ERROR, err)
+                        .await;
+                }
+            }
+        }
+
+        for added in &params.event.added {
+            if let Ok(project_path) = self.session.file_path(&added.uri) {
+                let result = self
+                    .session
+                    .workspace
+                    .register_project_folder(RegisterProjectFolderParams {
+                        path: Some(project_path.to_path_buf()),
+                        set_as_current_workspace: true,
+                    })
+                    .map_err(into_lsp_error);
+
+                if let Err(err) = result {
+                    error!("Failed to add project to the workspace: {}", err);
+                    self.session
+                        .client
+                        .log_message(MessageType::ERROR, err)
+                        .await;
+                }
+            }
+        }
+    }
+
     #[tracing::instrument(level = "trace", skip_all)]
     async fn completion(&self, params: CompletionParams) -> LspResult<Option<CompletionResponse>> {
         match handlers::completions::get_completions(&self.session, params) {
@@ -398,6 +446,8 @@ impl ServerFactory {
         workspace_method!(builder, close_file);
         workspace_method!(builder, pull_diagnostics);
         workspace_method!(builder, get_completions);
+        workspace_method!(builder, register_project_folder);
+        workspace_method!(builder, unregister_project_folder);
 
         let (service, socket) = builder.finish();
         ServerConnection { socket, service }
diff --git a/crates/pgt_lsp/src/session.rs b/crates/pgt_lsp/src/session.rs
index 7ccf2bab..fd5af2da 100644
--- a/crates/pgt_lsp/src/session.rs
+++ b/crates/pgt_lsp/src/session.rs
@@ -10,11 +10,11 @@ use pgt_analyse::RuleCategoriesBuilder;
 use pgt_configuration::{ConfigurationPathHint, PartialConfiguration};
 use pgt_diagnostics::{DiagnosticExt, Error};
 use pgt_fs::{FileSystem, PgTPath};
+use pgt_workspace::PartialConfigurationExt;
 use pgt_workspace::Workspace;
 use pgt_workspace::configuration::{LoadedConfiguration, load_configuration};
 use pgt_workspace::features;
-use pgt_workspace::settings::PartialConfigurationExt;
-use pgt_workspace::workspace::UpdateSettingsParams;
+use pgt_workspace::workspace::{RegisterProjectFolderParams, UpdateSettingsParams};
 use pgt_workspace::{DynRef, WorkspaceError};
 use rustc_hash::FxHashMap;
 use serde_json::Value;
@@ -31,6 +31,14 @@ use tower_lsp::lsp_types::{MessageType, Registration};
 use tower_lsp::lsp_types::{Unregistration, WorkspaceFolder};
 use tracing::{error, info};
 
+pub(crate) struct ClientInformation {
+    /// The name of the client
+    pub(crate) name: String,
+
+    /// The version of the client
+    pub(crate) version: Option<String>,
+}
+
 /// Key, uniquely identifying a LSP session.
#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug)]
 pub(crate) struct SessionKey(pub u64);
 
@@ -68,6 +76,7 @@ pub(crate) struct Session {
 struct InitializeParams {
     /// The capabilities provided by the client as part of [`lsp_types::InitializeParams`]
     client_capabilities: lsp_types::ClientCapabilities,
+    client_information: Option<ClientInformation>,
     root_uri: Option<Url>,
     #[allow(unused)]
     workspace_folders: Option<Vec<WorkspaceFolder>>,
 }
@@ -164,11 +173,13 @@ impl Session {
     pub(crate) fn initialize(
         &self,
         client_capabilities: lsp_types::ClientCapabilities,
+        client_information: Option<ClientInformation>,
         root_uri: Option<Url>,
         workspace_folders: Option<Vec<WorkspaceFolder>>,
     ) {
         let result = self.initialize_params.set(InitializeParams {
             client_capabilities,
+            client_information,
             root_uri,
             workspace_folders,
         });
@@ -446,6 +457,8 @@ impl Session {
         info!("Configuration loaded successfully from disk.");
         info!("Update workspace settings.");
 
+        let fs = &self.fs;
+
         if let Some(ws_configuration) = extra_config {
             fs_configuration.merge_with(ws_configuration);
         }
@@ -455,6 +468,31 @@ impl Session {
 
         match result {
             Ok((vcs_base_path, gitignore_matches)) => {
+                let register_result =
+                    if let ConfigurationPathHint::FromWorkspace(path) = &base_path {
+                        // We don't need the key
+                        self.workspace
+                            .register_project_folder(RegisterProjectFolderParams {
+                                path: Some(path.clone()),
+                                // This is naive, but we don't know if the user has a file already open or not, so we register every project as the current one.
+                                // The correct one is actually set when the LSP calls `textDocument/didOpen`
+                                set_as_current_workspace: true,
+                            })
+                            .err()
+                    } else {
+                        self.workspace
+                            .register_project_folder(RegisterProjectFolderParams {
+                                path: fs.working_directory(),
+                                set_as_current_workspace: true,
+                            })
+                            .err()
+                    };
+                if let Some(error) = register_result {
+                    error!("Failed to register the project folder: {}", error);
+                    self.client.log_message(MessageType::ERROR, &error).await;
+                    return ConfigurationStatus::Error;
+                }
+
                 let result = self.workspace.update_settings(UpdateSettingsParams {
                     workspace_directory: self.fs.working_directory(),
                     configuration: fs_configuration,
diff --git a/crates/pgt_lsp/tests/server.rs b/crates/pgt_lsp/tests/server.rs
index 19b65b06..438c7298 100644
--- a/crates/pgt_lsp/tests/server.rs
+++ b/crates/pgt_lsp/tests/server.rs
@@ -3,6 +3,7 @@ use anyhow::Error;
 use anyhow::Result;
 use anyhow::bail;
 use biome_deserialize::Merge;
+use biome_deserialize::StringSet;
 use futures::Sink;
 use futures::SinkExt;
 use futures::Stream;
@@ -40,6 +41,7 @@ use tower_lsp::lsp_types::Position;
 use tower_lsp::lsp_types::Range;
 use tower_lsp::lsp_types::TextDocumentPositionParams;
 use tower_lsp::lsp_types::WorkDoneProgressParams;
+use tower_lsp::lsp_types::WorkspaceFolder;
 use tower_lsp::lsp_types::{
     ClientCapabilities, DidChangeConfigurationParams, DidChangeTextDocumentParams,
     DidCloseTextDocumentParams, DidOpenTextDocumentParams, InitializeResult, InitializedParams,
@@ -164,6 +166,42 @@ impl Server {
         Ok(())
     }
 
+    /// It creates two workspaces, one at folder `test_one` and the other in `test_two`.
+    ///
+    /// Hence, the two roots will be `/workspace/test_one` and `/workspace/test_two`
+    #[allow(deprecated)]
+    async fn initialize_workspaces(&mut self) -> Result<()> {
+        let _res: InitializeResult = self
+            .request(
+                "initialize",
+                "_init",
+                InitializeParams {
+                    process_id: None,
+                    root_path: None,
+                    root_uri: Some(url!("/")),
+                    initialization_options: None,
+                    capabilities: ClientCapabilities::default(),
+                    trace: None,
+                    workspace_folders: Some(vec![
+                        WorkspaceFolder {
+                            name: "test_one".to_string(),
+                            uri: url!("test_one"),
+                        },
+                        WorkspaceFolder {
+                            name: "test_two".to_string(),
+                            uri: url!("test_two"),
+                        },
+                    ]),
+                    client_info: None,
+                    locale: None,
+                },
+            )
+            .await?
+            .context("initialize returned None")?;
+
+        Ok(())
+    }
+
     /// Basic implementation of the `initialized` notification for tests
     async fn initialized(&mut self) -> Result<()> {
         self.notify("initialized", InitializedParams {}).await
@@ -204,13 +242,18 @@ impl Server {
     }
 
     /// Opens a document with given contents and given name. The name must contain the extension too
-    async fn open_named_document(&mut self, text: impl Display, document_name: Url) -> Result<()> {
+    async fn open_named_document(
+        &mut self,
+        text: impl Display,
+        document_name: Url,
+        language: impl Display,
+    ) -> Result<()> {
         self.notify(
             "textDocument/didOpen",
             DidOpenTextDocumentParams {
                 text_document: TextDocumentItem {
                     uri: document_name,
-                    language_id: String::from("sql"),
+                    language_id: language.to_string(),
                     version: 0,
                     text: text.to_string(),
                 },
@@ -230,24 +273,31 @@ impl Server {
             .await
     }
 
-    async fn change_document(
+    async fn change_named_document(
         &mut self,
+        uri: Url,
         version: i32,
        content_changes: Vec<TextDocumentContentChangeEvent>,
     ) -> Result<()> {
         self.notify(
             "textDocument/didChange",
             DidChangeTextDocumentParams {
-                text_document: VersionedTextDocumentIdentifier {
-                    uri: url!("document.sql"),
-                    version,
-                },
+                text_document: VersionedTextDocumentIdentifier { uri, version },
                 content_changes,
             },
         )
         .await
     }
 
+    async fn change_document(
+        &mut self,
+        version: i32,
+        content_changes: Vec<TextDocumentContentChangeEvent>,
+    ) -> Result<()> {
+        self.change_named_document(url!("document.sql"), version, content_changes)
+            .await
+    }
+
     #[allow(unused)]
     async fn close_document(&mut self) -> Result<()> {
         self.notify(
@@ -831,7 +881,7 @@ async fn test_execute_statement(test_db: PgPool) -> Result<()> {
     let doc_url = url!("test.sql");
 
     server
-        .open_named_document(doc_content.to_string(), doc_url.clone())
+        .open_named_document(doc_content.to_string(), doc_url.clone(), "sql")
         .await?;
 
     let code_actions_response = server
@@ -1107,3 +1157,388 @@ async fn test_issue_303(test_db: PgPool) -> Result<()> {
 
     Ok(())
 }
+
+#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")]
+async fn multiple_projects(test_db: PgPool) -> Result<()> {
+    let factory = ServerFactory::default();
+    let mut fs = MemoryFileSystem::default();
+
+    let setup = r#"
+        create table public.users (
+            id serial primary key,
+            name varchar(255) not null
+        );
+    "#;
+
+    test_db
+        .execute(setup)
+        .await
+        .expect("Failed to setup test database");
+
+    // Setup configurations
+    // - test_one with db connection
+    let mut conf_with_db = PartialConfiguration::init();
+    conf_with_db.merge_with(PartialConfiguration {
+        db: Some(PartialDatabaseConfiguration {
+            database: Some(
+                test_db
+                    .connect_options()
+                    .get_database()
+                    .unwrap()
+                    .to_string(),
+            ),
+            ..Default::default()
+        }),
+        ..Default::default()
+    });
+    fs.insert(
+        url!("test_one/postgrestools.jsonc").to_file_path().unwrap(),
+        serde_json::to_string_pretty(&conf_with_db).unwrap(),
+    );
+
+    // --
test_two without db connection + let mut conf_without_db = PartialConfiguration::init(); + conf_without_db.merge_with(PartialConfiguration { + db: Some(PartialDatabaseConfiguration { + disable_connection: Some(true), + ..Default::default() + }), + ..Default::default() + }); + fs.insert( + url!("test_two/postgrestools.jsonc").to_file_path().unwrap(), + serde_json::to_string_pretty(&conf_without_db).unwrap(), + ); + + let (service, client) = factory + .create_with_fs(None, DynRef::Owned(Box::new(fs))) + .into_inner(); + + let (stream, sink) = client.split(); + let mut server = Server::new(service); + + let (sender, _) = channel(CHANNEL_BUFFER_SIZE); + let reader = tokio::spawn(client_handler(stream, sink, sender)); + + server.initialize_workspaces().await?; + server.initialized().await?; + + server.load_configuration().await?; + + // do the same change in both workspaces and request completions in both workspaces + + server + .open_named_document( + "select from public.users;\n", + url!("test_one/document.sql"), + "sql", + ) + .await?; + + server + .change_named_document( + url!("test_one/document.sql"), + 3, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 0, + character: 7, + }, + end: Position { + line: 0, + character: 7, + }, + }), + range_length: Some(0), + text: " ".to_string(), + }], + ) + .await?; + + let res_ws_one = server + .get_completion(CompletionParams { + work_done_progress_params: WorkDoneProgressParams::default(), + partial_result_params: PartialResultParams::default(), + context: None, + text_document_position: TextDocumentPositionParams { + text_document: TextDocumentIdentifier { + uri: url!("test_one/document.sql"), + }, + position: Position { + line: 0, + character: 8, + }, + }, + }) + .await? + .unwrap(); + + server + .open_named_document( + "select from public.users;\n", + url!("test_two/document.sql"), + "sql", + ) + .await?; + + server + .change_named_document( + url!("test_two/document.sql"), + 3, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 0, + character: 7, + }, + end: Position { + line: 0, + character: 7, + }, + }), + range_length: Some(0), + text: " ".to_string(), + }], + ) + .await?; + + let res_ws_two = server + .get_completion(CompletionParams { + work_done_progress_params: WorkDoneProgressParams::default(), + partial_result_params: PartialResultParams::default(), + context: None, + text_document_position: TextDocumentPositionParams { + text_document: TextDocumentIdentifier { + uri: url!("test_two/document.sql"), + }, + position: Position { + line: 0, + character: 8, + }, + }, + }) + .await? 
+ .unwrap(); + + // only the first one has a db connection and should return completion items + assert!(!match res_ws_one { + CompletionResponse::Array(a) => a.is_empty(), + CompletionResponse::List(l) => l.items.is_empty(), + }); + assert!(match res_ws_two { + CompletionResponse::Array(a) => a.is_empty(), + CompletionResponse::List(l) => l.items.is_empty(), + }); + + server.shutdown().await?; + reader.abort(); + + Ok(()) +} + +#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] +async fn extends_config(test_db: PgPool) -> Result<()> { + let factory = ServerFactory::default(); + let mut fs = MemoryFileSystem::default(); + + let setup = r#" + create table public.extends_config_test ( + id serial primary key, + name varchar(255) not null + ); + "#; + + test_db + .execute(setup) + .await + .expect("Failed to setup test database"); + + // shared config with default db connection + let conf_with_db = PartialConfiguration::init(); + fs.insert( + url!("postgrestools.jsonc").to_file_path().unwrap(), + serde_json::to_string_pretty(&conf_with_db).unwrap(), + ); + + let relative_path = if cfg!(windows) { + "..\\postgrestools.jsonc" + } else { + "../postgrestools.jsonc" + }; + + // test_one extends the shared config but sets our test db + let mut conf_with_db = PartialConfiguration::init(); + conf_with_db.merge_with(PartialConfiguration { + extends: Some(StringSet::from_iter([relative_path.to_string()])), + db: Some(PartialDatabaseConfiguration { + database: Some( + test_db + .connect_options() + .get_database() + .unwrap() + .to_string(), + ), + ..Default::default() + }), + ..Default::default() + }); + + fs.insert( + url!("test_one/postgrestools.jsonc").to_file_path().unwrap(), + serde_json::to_string_pretty(&conf_with_db).unwrap(), + ); + + // test_two extends it but keeps the default one + let mut conf_without_db = PartialConfiguration::init(); + conf_without_db.merge_with(PartialConfiguration { + extends: Some(StringSet::from_iter([relative_path.to_string()])), + ..Default::default() + }); + fs.insert( + url!("test_two/postgrestools.jsonc").to_file_path().unwrap(), + serde_json::to_string_pretty(&conf_without_db).unwrap(), + ); + + let (service, client) = factory + .create_with_fs(None, DynRef::Owned(Box::new(fs))) + .into_inner(); + + let (stream, sink) = client.split(); + let mut server = Server::new(service); + + let (sender, _) = channel(CHANNEL_BUFFER_SIZE); + let reader = tokio::spawn(client_handler(stream, sink, sender)); + + server.initialize_workspaces().await?; + server.initialized().await?; + + server.load_configuration().await?; + + server + .open_named_document( + "select from public.extends_config_test;\n", + url!("test_one/document.sql"), + "sql", + ) + .await?; + + server + .change_named_document( + url!("test_one/document.sql"), + 3, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 0, + character: 7, + }, + end: Position { + line: 0, + character: 7, + }, + }), + range_length: Some(0), + text: " ".to_string(), + }], + ) + .await?; + + let res_ws_one = server + .get_completion(CompletionParams { + work_done_progress_params: WorkDoneProgressParams::default(), + partial_result_params: PartialResultParams::default(), + context: None, + text_document_position: TextDocumentPositionParams { + text_document: TextDocumentIdentifier { + uri: url!("test_one/document.sql"), + }, + position: Position { + line: 0, + character: 8, + }, + }, + }) + .await? 
+ .unwrap(); + + server + .open_named_document( + "select from public.users;\n", + url!("test_two/document.sql"), + "sql", + ) + .await?; + + server + .change_named_document( + url!("test_two/document.sql"), + 3, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 0, + character: 7, + }, + end: Position { + line: 0, + character: 7, + }, + }), + range_length: Some(0), + text: " ".to_string(), + }], + ) + .await?; + + let res_ws_two = server + .get_completion(CompletionParams { + work_done_progress_params: WorkDoneProgressParams::default(), + partial_result_params: PartialResultParams::default(), + context: None, + text_document_position: TextDocumentPositionParams { + text_document: TextDocumentIdentifier { + uri: url!("test_two/document.sql"), + }, + position: Position { + line: 0, + character: 8, + }, + }, + }) + .await? + .unwrap(); + + let items_one = match res_ws_one { + CompletionResponse::Array(ref a) => a, + CompletionResponse::List(ref l) => &l.items, + }; + + // test one should have our test db connection and should return the completion items for the `extends_config_test` table + assert!(items_one.iter().any(|item| { + item.label_details.clone().is_some_and(|details| { + details + .description + .is_some_and(|desc| desc.contains("public.extends_config_test")) + }) + })); + + let items_two = match res_ws_two { + CompletionResponse::Array(ref a) => a, + CompletionResponse::List(ref l) => &l.items, + }; + + // test two should not have a db connection and should not return the completion items for the `extends_config_test` table + assert!(!items_two.iter().any(|item| { + item.label_details.clone().is_some_and(|details| { + details + .description + .is_some_and(|desc| desc.contains("public.extends_config_test")) + }) + })); + + server.shutdown().await?; + reader.abort(); + + Ok(()) +} diff --git a/crates/pgt_workspace/Cargo.toml b/crates/pgt_workspace/Cargo.toml index 5f598b2d..bfa413e3 100644 --- a/crates/pgt_workspace/Cargo.toml +++ b/crates/pgt_workspace/Cargo.toml @@ -35,6 +35,7 @@ rustc-hash = { workspace = true } schemars = { workspace = true, optional = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true, features = ["raw_value"] } +slotmap = { workspace = true, features = ["serde"] } sqlx.workspace = true strum = { workspace = true } tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } diff --git a/crates/pgt_workspace/src/configuration.rs b/crates/pgt_workspace/src/configuration.rs index 88c04eec..1baebcc7 100644 --- a/crates/pgt_workspace/src/configuration.rs +++ b/crates/pgt_workspace/src/configuration.rs @@ -1,14 +1,17 @@ use std::{ + ffi::OsStr, io::ErrorKind, ops::Deref, path::{Path, PathBuf}, }; +use biome_deserialize::Merge; use pgt_analyse::AnalyserRules; use pgt_configuration::{ ConfigurationDiagnostic, ConfigurationPathHint, ConfigurationPayload, PartialConfiguration, - VERSION, push_to_analyser_rules, + VERSION, diagnostics::CantLoadExtendFile, push_to_analyser_rules, }; +use pgt_console::markup; use pgt_fs::{AutoSearchResult, ConfigName, FileSystem, OpenOptions}; use crate::{DynRef, WorkspaceError, settings::Settings}; @@ -28,34 +31,41 @@ pub struct LoadedConfiguration { } impl LoadedConfiguration { - /// Return the path of the **directory** where the configuration is - pub fn directory_path(&self) -> Option<&Path> { - self.directory_path.as_deref() - } - - /// Return the path of the **file** where the configuration is - pub fn file_path(&self) -> Option<&Path> { - 
self.file_path.as_deref()
-    }
-}
-
-impl From<Option<ConfigurationPayload>> for LoadedConfiguration {
-    fn from(value: Option<ConfigurationPayload>) -> Self {
+    fn try_from_payload(
+        value: Option<ConfigurationPayload>,
+        fs: &DynRef<'_, dyn FileSystem>,
+    ) -> Result<Self, WorkspaceError> {
         let Some(value) = value else {
-            return LoadedConfiguration::default();
+            return Ok(LoadedConfiguration::default());
         };
 
         let ConfigurationPayload {
+            external_resolution_base_path,
             configuration_file_path,
-            deserialized: partial_configuration,
-            ..
+            deserialized: mut partial_configuration,
         } = value;
 
-        LoadedConfiguration {
+        partial_configuration.apply_extends(
+            fs,
+            &configuration_file_path,
+            &external_resolution_base_path,
+        )?;
+
+        Ok(Self {
             configuration: partial_configuration,
             directory_path: configuration_file_path.parent().map(PathBuf::from),
             file_path: Some(configuration_file_path),
-        }
+        })
+    }
+
+    /// Return the path of the **directory** where the configuration is
+    pub fn directory_path(&self) -> Option<&Path> {
+        self.directory_path.as_deref()
+    }
+
+    /// Return the path of the **file** where the configuration is
+    pub fn file_path(&self) -> Option<&Path> {
+        self.file_path.as_deref()
     }
 }
 
@@ -65,7 +75,7 @@ pub fn load_configuration(
     config_path: ConfigurationPathHint,
 ) -> Result<LoadedConfiguration, WorkspaceError> {
     let config = load_config(fs, config_path)?;
-    Ok(LoadedConfiguration::from(config))
+    LoadedConfiguration::try_from_payload(config, fs)
 }
 
 /// - [Result]: if an error occurred while loading the configuration file.
@@ -120,7 +130,7 @@ fn load_config(
         ConfigurationPathHint::None => file_system.working_directory().unwrap_or_default(),
     };
 
-    // We first search for `postgrestools.jsonc`
+    // We first search for `postgrestools.jsonc` files
     if let Some(auto_search_result) = file_system.auto_search(
         &configuration_directory,
         ConfigName::file_names().as_slice(),
@@ -265,10 +275,276 @@ pub fn strip_jsonc_comments(jsonc_input: &str) -> String {
     json_output
 }
 
+pub trait PartialConfigurationExt {
+    fn apply_extends(
+        &mut self,
+        fs: &DynRef<'_, dyn FileSystem>,
+        file_path: &Path,
+        external_resolution_base_path: &Path,
+    ) -> Result<(), WorkspaceError>;
+
+    fn deserialize_extends(
+        &mut self,
+        fs: &DynRef<'_, dyn FileSystem>,
+        relative_resolution_base_path: &Path,
+        external_resolution_base_path: &Path,
+    ) -> Result<Vec<PartialConfiguration>, WorkspaceError>;
+
+    fn retrieve_gitignore_matches(
+        &self,
+        file_system: &DynRef<'_, dyn FileSystem>,
+        vcs_base_path: Option<&Path>,
+    ) -> Result<(Option<PathBuf>, Vec<String>), WorkspaceError>;
+}
+
+impl PartialConfigurationExt for PartialConfiguration {
+    /// Mutates the configuration so that any fields that have not been configured explicitly are
+    /// filled in with their values from configs listed in the `extends` field.
+    ///
+    /// The `extends` configs are applied from left to right.
+    ///
+    /// If a configuration can't be resolved from the file system, the operation will fail.
+    fn apply_extends(
+        &mut self,
+        fs: &DynRef<'_, dyn FileSystem>,
+        file_path: &Path,
+        external_resolution_base_path: &Path,
+    ) -> Result<(), WorkspaceError> {
+        let configurations = self.deserialize_extends(
+            fs,
+            file_path.parent().expect("file path should have a parent"),
+            external_resolution_base_path,
+        )?;
+
+        let extended_configuration = configurations.into_iter().reduce(
+            |mut previous_configuration, current_configuration| {
+                previous_configuration.merge_with(current_configuration);
+                previous_configuration
+            },
+        );
+        if let Some(mut extended_configuration) = extended_configuration {
+            // We swap them to avoid having to clone `self.configuration` to merge it.
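+            // Editorial note (not in the original patch): after the swap, `self`
+            // holds the merged `extends` chain while `extended_configuration`
+            // holds this file's own values, so merging it back in last lets the
+            // extending file override every field it sets explicitly.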
+            std::mem::swap(self, &mut extended_configuration);
+            self.merge_with(extended_configuration)
+        }
+
+        Ok(())
+    }
+
+    /// It attempts to deserialize all the configuration files that were specified in the `extends` property
+    fn deserialize_extends(
+        &mut self,
+        fs: &DynRef<'_, dyn FileSystem>,
+        relative_resolution_base_path: &Path,
+        external_resolution_base_path: &Path,
+    ) -> Result<Vec<PartialConfiguration>, WorkspaceError> {
+        let Some(extends) = &self.extends else {
+            return Ok(Vec::new());
+        };
+
+        let mut deserialized_configurations = vec![];
+        for extend_entry in extends.iter() {
+            let extend_entry_as_path = Path::new(extend_entry);
+
+            let extend_configuration_file_path = if extend_entry_as_path.starts_with(".")
+                || matches!(
+                    extend_entry_as_path
+                        .extension()
+                        .map(OsStr::as_encoded_bytes),
+                    Some(b"jsonc")
+                ) {
+                // Normalize the path to handle relative segments like "../"
+                normalize_path(&relative_resolution_base_path.join(extend_entry))
+            } else {
+                fs.resolve_configuration(extend_entry.as_str(), external_resolution_base_path)
+                    .map_err(|error| {
+                        ConfigurationDiagnostic::cant_resolve(
+                            external_resolution_base_path.display().to_string(),
+                            error,
+                        )
+                    })?
+                    .into_path_buf()
+            };
+
+            let mut file = fs
+                .open_with_options(
+                    extend_configuration_file_path.as_path(),
+                    OpenOptions::default().read(true),
+                )
+                .map_err(|err| {
+                    CantLoadExtendFile::new(
+                        extend_configuration_file_path.display().to_string(),
+                        err.to_string(),
+                    )
+                    .with_verbose_advice(markup! {
+                        "Postgres Tools tried to load the configuration file \""{
+                            extend_configuration_file_path.display().to_string()
+                        }"\" in \"extends\" using \""{
+                            external_resolution_base_path.display().to_string()
+                        }"\" as the base path."
+                    })
+                })?;
+
+            let mut content = String::new();
+            file.read_to_string(&mut content).map_err(|err| {
+                CantLoadExtendFile::new(extend_configuration_file_path.display().to_string(), err.to_string()).with_verbose_advice(
+                    markup!{
+                        "It's possible that the file was created with a different user/group. Make sure you have the rights to read the file."
+                    }
+                )
+            })?;
+
+            let deserialized = serde_json::from_str::<PartialConfiguration>(&content)
+                .map_err(ConfigurationDiagnostic::new_deserialization_error)?;
+            deserialized_configurations.push(deserialized)
+        }
+        Ok(deserialized_configurations)
+    }
+
+    /// This function checks if the VCS integration is enabled, and if so, it will attempt to resolve the
+    /// VCS root directory and the `.gitignore` file.
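+    /// For instance (illustrative), a repository rooted at `/repo` with a
+    /// `/repo/.gitignore` would yield `Some("/repo")` plus the ignore patterns,
+    /// one entry per line of the file.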
+    ///
+    /// ## Returns
+    ///
+    /// A tuple with VCS root folder and the contents of the `.gitignore` file
+    fn retrieve_gitignore_matches(
+        &self,
+        file_system: &DynRef<'_, dyn FileSystem>,
+        vcs_base_path: Option<&Path>,
+    ) -> Result<(Option<PathBuf>, Vec<String>), WorkspaceError> {
+        let Some(vcs) = &self.vcs else {
+            return Ok((None, vec![]));
+        };
+        if vcs.is_enabled() {
+            let vcs_base_path = match (vcs_base_path, &vcs.root) {
+                (Some(vcs_base_path), Some(root)) => vcs_base_path.join(root),
+                (None, Some(root)) => PathBuf::from(root),
+                (Some(vcs_base_path), None) => PathBuf::from(vcs_base_path),
+                (None, None) => return Err(WorkspaceError::vcs_disabled()),
+            };
+            if let Some(client_kind) = &vcs.client_kind {
+                if !vcs.ignore_file_disabled() {
+                    let result = file_system
+                        .auto_search(&vcs_base_path, &[client_kind.ignore_file()], false)
+                        .map_err(WorkspaceError::from)?;
+
+                    if let Some(result) = result {
+                        return Ok((
+                            result.file_path.parent().map(PathBuf::from),
+                            result
+                                .content
+                                .lines()
+                                .map(String::from)
+                                .collect::<Vec<String>>(),
+                        ));
+                    }
+                }
+            }
+        }
+        Ok((None, vec![]))
+    }
+}
+
+/// Normalizes a path, resolving '..' and '.' segments without requiring the path to exist
+fn normalize_path(path: &Path) -> PathBuf {
+    let mut components = Vec::new();
+    let mut prefix_component = None;
+    let mut is_absolute = false;
+
+    for component in path.components() {
+        match component {
+            std::path::Component::Prefix(_prefix) => {
+                prefix_component = Some(component);
+                components.clear();
+            }
+            std::path::Component::RootDir => {
+                is_absolute = true;
+                components.clear();
+            }
+            std::path::Component::ParentDir => {
+                if !components.is_empty() {
+                    components.pop();
+                } else if !is_absolute && prefix_component.is_none() {
+                    // Only keep parent dir if we're not absolute and have no prefix
+                    components.push(component.as_os_str());
+                }
+            }
+            std::path::Component::Normal(c) => {
+                components.push(c);
+            }
+            std::path::Component::CurDir => {
+                // Skip current directory components
+            }
+        }
+    }
+
+    let mut result = PathBuf::new();
+
+    // Add prefix component (like C: on Windows)
+    if let Some(prefix) = prefix_component {
+        result.push(prefix.as_os_str());
+    }
+
+    // Add root directory if path is absolute
+    if is_absolute {
+        result.push(std::path::Component::RootDir.as_os_str());
+    }
+
+    // Add normalized components
+    for component in components {
+        result.push(component);
+    }
+
+    // Handle edge cases
+    if result.as_os_str().is_empty() {
+        if prefix_component.is_some() || is_absolute {
+            // This shouldn't happen with proper input, but fallback to original path's root
+            return path
+                .ancestors()
+                .last()
+                .unwrap_or(Path::new(""))
+                .to_path_buf();
+        } else {
+            return PathBuf::from(".");
+        }
+    }
+
+    result
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
 
+    #[test]
+    fn test_normalize_path_windows_drive() {
+        if cfg!(windows) {
+            let path = Path::new(r"z:\workspace\test_one\..\postgrestools.jsonc");
+            let normalized = normalize_path(path);
+            assert_eq!(
+                normalized,
+                PathBuf::from(r"z:\workspace\postgrestools.jsonc")
+            );
+        }
+    }
+
+    #[test]
+    fn test_normalize_path_relative() {
+        let path = Path::new("workspace/test_one/../postgrestools.jsonc");
+        let normalized = normalize_path(path);
+        assert_eq!(normalized, PathBuf::from("workspace/postgrestools.jsonc"));
+    }
+
+    #[test]
+    fn test_normalize_path_multiple_parent_dirs() {
+        if cfg!(windows) {
+            let path = Path::new(r"c:\a\b\c\..\..\d");
+            let normalized = normalize_path(path);
+            assert_eq!(normalized, PathBuf::from(r"c:\a\d"));
+        }
+    }
+
     #[test]
     fn test_strip_jsonc_comments_line_comments() {
        let input = r#"{
diff --git a/crates/pgt_workspace/src/diagnostics.rs b/crates/pgt_workspace/src/diagnostics.rs
index 9ba02a1a..5020cc62 100644
--- a/crates/pgt_workspace/src/diagnostics.rs
+++ b/crates/pgt_workspace/src/diagnostics.rs
@@ -1,4 +1,5 @@
 use pgt_configuration::ConfigurationDiagnostic;
+use pgt_configuration::diagnostics::CantLoadExtendFile;
 use pgt_console::fmt::Bytes;
 use pgt_console::markup;
 use pgt_diagnostics::{
@@ -354,3 +355,9 @@ impl Diagnostic for FileTooLarge {
         )
     }
 }
+
+impl From<CantLoadExtendFile> for WorkspaceError {
+    fn from(value: CantLoadExtendFile) -> Self {
+        WorkspaceError::Configuration(ConfigurationDiagnostic::CantLoadExtendFile(value))
+    }
+}
diff --git a/crates/pgt_workspace/src/lib.rs b/crates/pgt_workspace/src/lib.rs
index 99fe063f..df8b0ba7 100644
--- a/crates/pgt_workspace/src/lib.rs
+++ b/crates/pgt_workspace/src/lib.rs
@@ -14,6 +14,7 @@ pub mod workspace;
 #[cfg(feature = "schema")]
 pub mod workspace_types;
 
+pub use crate::configuration::PartialConfigurationExt;
 pub use crate::diagnostics::{TransportError, WorkspaceError};
 pub use crate::workspace::Workspace;
 
diff --git a/crates/pgt_workspace/src/settings.rs b/crates/pgt_workspace/src/settings.rs
index f9275aa9..08854493 100644
--- a/crates/pgt_workspace/src/settings.rs
+++ b/crates/pgt_workspace/src/settings.rs
@@ -8,6 +8,7 @@ use std::{
     sync::{RwLock, RwLockReadGuard, RwLockWriteGuard},
     time::Duration,
 };
+use tracing::trace;
 
 use ignore::gitignore::{Gitignore, GitignoreBuilder};
 use pgt_configuration::{
@@ -17,9 +18,185 @@ use pgt_configuration::{
     files::FilesConfiguration,
     migrations::{MigrationsConfiguration, PartialMigrationsConfiguration},
 };
-use pgt_fs::FileSystem;
+use pgt_fs::PgTPath;
 
-use crate::{DynRef, WorkspaceError, matcher::Matcher};
+use crate::{
+    WorkspaceError,
+    matcher::Matcher,
+    workspace::{ProjectKey, WorkspaceData},
+};
+
+#[derive(Debug, Default)]
+/// The information tracked for each project
+pub struct ProjectData {
+    /// The root path of the project. This path should be **absolute**.
+    path: PgTPath,
+    /// The settings of the project, usually inferred from the configuration file, e.g. `postgrestools.jsonc`.
+    settings: Settings,
+}
+
+#[derive(Debug, Default)]
+/// Type that manages different projects inside the workspace.
+pub struct WorkspaceSettings {
+    /// The data of the projects
+    data: WorkspaceData<ProjectData>,
+    /// The ID of the current project.
+    current_project: ProjectKey,
+}
+
+impl WorkspaceSettings {
+    pub fn get_current_project_key(&self) -> ProjectKey {
+        self.current_project
+    }
+
+    pub fn get_current_project_path(&self) -> Option<&PgTPath> {
+        trace!("Current key {:?}", self.current_project);
+        self.data
+            .get(self.current_project)
+            .as_ref()
+            .map(|d| &d.path)
+    }
+
+    pub fn get_current_project_data_mut(&mut self) -> &mut ProjectData {
+        self.data
+            .get_mut(self.current_project)
+            .expect("Current project not configured")
+    }
+
+    /// Retrieves the settings of the current workspace folder
+    pub fn get_current_settings(&self) -> Option<&Settings> {
+        trace!("Current key {:?}", self.current_project);
+        let data = self.data.get(self.current_project);
+        if let Some(data) = data {
+            Some(&data.settings)
+        } else {
+            None
+        }
+    }
+
+    /// Retrieves a mutable reference of the settings of the current project
+    pub fn get_current_settings_mut(&mut self) -> &mut Settings {
+        &mut self
+            .data
+            .get_mut(self.current_project)
+            .expect("You must have at least one workspace.")
+            .settings
+    }
+
+    /// Register the current project using its unique key
+    pub fn register_current_project(&mut self, key: ProjectKey) {
+        self.current_project = key;
+    }
+
+    /// Insert a new project using its folder. Use [WorkspaceSettings::get_current_settings_mut] to retrieve
+    /// a mutable reference to its [Settings] and manipulate them.
+    pub fn insert_project(&mut self, workspace_path: impl Into<PathBuf>) -> ProjectKey {
+        let path = PgTPath::new(workspace_path.into());
+        trace!("Insert workspace folder: {:?}", path);
+        self.data.insert(ProjectData {
+            path,
+            settings: Settings::default(),
+        })
+    }
+
+    /// Remove a project using its folder.
+    pub fn remove_project(&mut self, workspace_path: &Path) {
+        let keys_to_remove = {
+            let mut data = vec![];
+            let iter = self.data.iter();
+
+            for (key, path_to_settings) in iter {
+                if path_to_settings.path.as_path() == workspace_path {
+                    data.push(key)
+                }
+            }
+
+            data
+        };
+
+        for key in keys_to_remove {
+            self.data.remove(key)
+        }
+    }
+
+    /// Checks if the current path belongs to a registered project.
+    ///
+    /// If there's a match, and the match **isn't** the current project, it returns the new key.
+    pub fn path_belongs_to_current_workspace(&self, path: &PgTPath) -> Option<ProjectKey> {
+        if self.data.is_empty() {
+            return None;
+        }
+        trace!("Current key: {:?}", self.current_project);
+        let iter = self.data.iter();
+        for (key, path_to_settings) in iter {
+            trace!(
+                "Workspace path {:?}, file path {:?}",
+                path_to_settings.path, path
+            );
+            trace!("Iter key: {:?}", key);
+            if key == self.current_project {
+                continue;
+            }
+            if path.strip_prefix(path_to_settings.path.as_path()).is_ok() {
+                trace!("Update workspace to {:?}", key);
+                return Some(key);
+            }
+        }
+        None
+    }
+
+    /// Checks if the current path belongs to a registered project.
+    ///
+    /// If there's a match, and the match **isn't** the current project, the function will mark the match as the current project.
+    pub fn set_current_project(&mut self, new_key: ProjectKey) {
+        self.current_project = new_key;
+    }
+}
+
+#[derive(Debug)]
+pub struct WorkspaceSettingsHandle<'a> {
+    inner: RwLockReadGuard<'a, WorkspaceSettings>,
+}
+
+impl<'a> WorkspaceSettingsHandle<'a> {
+    pub(crate) fn new(settings: &'a RwLock<WorkspaceSettings>) -> Self {
+        Self {
+            inner: settings.read().unwrap(),
+        }
+    }
+
+    pub(crate) fn settings(&self) -> Option<&Settings> {
+        self.inner.get_current_settings()
+    }
+
+    pub(crate) fn path(&self) -> Option<&PgTPath> {
+        self.inner.get_current_project_path()
+    }
+}
+
+impl AsRef<WorkspaceSettings> for WorkspaceSettingsHandle<'_> {
+    fn as_ref(&self) -> &WorkspaceSettings {
+        &self.inner
+    }
+}
+
+pub struct WorkspaceSettingsHandleMut<'a> {
+    inner: RwLockWriteGuard<'a, WorkspaceSettings>,
+}
+
+impl<'a> WorkspaceSettingsHandleMut<'a> {
+    pub(crate) fn new(settings: &'a RwLock<WorkspaceSettings>) -> Self {
+        Self {
+            inner: settings.write().unwrap(),
+        }
+    }
+}
+
+impl AsMut<WorkspaceSettings> for WorkspaceSettingsHandleMut<'_> {
+    fn as_mut(&mut self) -> &mut WorkspaceSettings {
+        &mut self.inner
+    }
+}
 
 /// Global settings for the entire workspace
 #[derive(Debug, Default)]
@@ -397,59 +574,6 @@ impl Default for FilesSettings {
     }
 }
 
-pub trait PartialConfigurationExt {
-    fn retrieve_gitignore_matches(
-        &self,
-        file_system: &DynRef<'_, dyn FileSystem>,
-        vcs_base_path: Option<&Path>,
-    ) -> Result<(Option<PathBuf>, Vec<String>), WorkspaceError>;
-}
-
-impl PartialConfigurationExt for PartialConfiguration {
-    /// This function checks if the VCS integration is enabled, and if so, it will attempts to resolve the
-    /// VCS root directory and the `.gitignore` file.
-    ///
-    /// ## Returns
-    ///
-    /// A tuple with VCS root folder and the contents of the `.gitignore` file
-    fn retrieve_gitignore_matches(
-        &self,
-        file_system: &DynRef<'_, dyn FileSystem>,
-        vcs_base_path: Option<&Path>,
-    ) -> Result<(Option<PathBuf>, Vec<String>), WorkspaceError> {
-        let Some(vcs) = &self.vcs else {
-            return Ok((None, vec![]));
-        };
-        if vcs.is_enabled() {
-            let vcs_base_path = match (vcs_base_path, &vcs.root) {
-                (Some(vcs_base_path), Some(root)) => vcs_base_path.join(root),
-                (None, Some(root)) => PathBuf::from(root),
-                (Some(vcs_base_path), None) => PathBuf::from(vcs_base_path),
-                (None, None) => return Err(WorkspaceError::vcs_disabled()),
-            };
-            if let Some(client_kind) = &vcs.client_kind {
-                if !vcs.ignore_file_disabled() {
-                    let result = file_system
-                        .auto_search(&vcs_base_path, &[client_kind.ignore_file()], false)
-                        .map_err(WorkspaceError::from)?;
-
-                    if let Some(result) = result {
-                        return Ok((
-                            result.file_path.parent().map(PathBuf::from),
-                            result
-                                .content
-                                .lines()
-                                .map(String::from)
-                                .collect::<Vec<String>>(),
-                        ));
-                    }
-                }
-            }
-        }
-        Ok((None, vec![]))
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use biome_deserialize::StringSet;
diff --git a/crates/pgt_workspace/src/workspace.rs b/crates/pgt_workspace/src/workspace.rs
index 873dd83e..61d60a49 100644
--- a/crates/pgt_workspace/src/workspace.rs
+++ b/crates/pgt_workspace/src/workspace.rs
@@ -5,7 +5,10 @@ use pgt_analyse::RuleCategories;
 use pgt_configuration::{PartialConfiguration, RuleSelector};
 use pgt_fs::PgTPath;
 use pgt_text_size::TextRange;
+#[cfg(feature = "schema")]
+use schemars::{JsonSchema, SchemaGenerator, schema::Schema};
 use serde::{Deserialize, Serialize};
+use slotmap::{DenseSlotMap, new_key_type};
 
 use crate::{
     WorkspaceError,
@@ -92,6 +95,21 @@ pub struct ServerInfo {
     pub version: Option<String>,
 }
 
+#[derive(Debug, serde::Serialize, serde::Deserialize)]
+#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
+#[serde(rename_all = "camelCase")]
+pub struct RegisterProjectFolderParams {
+#[derive(Debug, serde::Serialize, serde::Deserialize)]
+#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
+#[serde(rename_all = "camelCase")]
+pub struct RegisterProjectFolderParams {
+    pub path: Option<PathBuf>,
+    pub set_as_current_workspace: bool,
+}
+
+#[derive(Debug, serde::Serialize, serde::Deserialize)]
+#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
+#[serde(rename_all = "camelCase")]
+pub struct UnregisterProjectFolderParams {
+    pub path: PgTPath,
+}
+
 pub trait Workspace: Send + Sync + RefUnwindSafe {
     /// Retrieves the list of diagnostics associated with a file
     fn pull_diagnostics(
@@ -110,6 +128,18 @@ pub trait Workspace: Send + Sync + RefUnwindSafe {
         params: GetCompletionsParams,
     ) -> Result<CompletionsResult, WorkspaceError>;

+    /// Register a possible workspace project folder. Returns the key of said project.
+    /// Use this key when you want to switch between projects.
+    fn register_project_folder(
+        &self,
+        params: RegisterProjectFolderParams,
+    ) -> Result<ProjectKey, WorkspaceError>;
+
+    /// Unregister a workspace project folder. The settings that belong to that project are deleted.
+    fn unregister_project_folder(
+        &self,
+        params: UnregisterProjectFolderParams,
+    ) -> Result<(), WorkspaceError>;
+
     /// Update the global settings for this workspace
     fn update_settings(&self, params: UpdateSettingsParams) -> Result<(), WorkspaceError>;

@@ -222,3 +252,76 @@ impl<W: Workspace + ?Sized> Drop for FileGuard<'_, W> {
             .ok();
     }
 }
+
+new_key_type! {
+    pub struct ProjectKey;
+}
+
+#[cfg(feature = "schema")]
+impl JsonSchema for ProjectKey {
+    fn schema_name() -> String {
+        "ProjectKey".to_string()
+    }
+
+    fn json_schema(generator: &mut SchemaGenerator) -> Schema {
+        <String>::json_schema(generator)
+    }
+}
+
+#[derive(Debug, Default)]
+pub struct WorkspaceData<V> {
+    /// [DenseSlotMap] is the slowest type in insertion/removal, but the fastest in iteration.
+    ///
+    /// Users wouldn't change workspace folders very often, so fast iteration matters most here.
+    paths: DenseSlotMap<ProjectKey, V>,
+}
+
+impl<V> WorkspaceData<V> {
+    /// Inserts an item
+    pub fn insert(&mut self, item: V) -> ProjectKey {
+        self.paths.insert(item)
+    }
+
+    /// Removes an item
+    pub fn remove(&mut self, key: ProjectKey) {
+        self.paths.remove(key);
+    }
+
+    /// Get a reference to the value
+    pub fn get(&self, key: ProjectKey) -> Option<&V> {
+        self.paths.get(key)
+    }
+
+    /// Get a mutable reference to the value
+    pub fn get_mut(&mut self, key: ProjectKey) -> Option<&mut V> {
+        self.paths.get_mut(key)
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.paths.is_empty()
+    }
+
+    pub fn iter(&self) -> WorkspaceDataIterator<'_, V> {
+        WorkspaceDataIterator::new(self)
+    }
+}
+
+pub struct WorkspaceDataIterator<'a, V> {
+    iterator: slotmap::dense::Iter<'a, ProjectKey, V>,
+}
+
+impl<'a, V> WorkspaceDataIterator<'a, V> {
+    fn new(data: &'a WorkspaceData<V>) -> Self {
+        Self {
+            iterator: data.paths.iter(),
+        }
+    }
+}
+
+impl<'a, V> Iterator for WorkspaceDataIterator<'a, V> {
+    type Item = (ProjectKey, &'a V);

+    fn next(&mut self) -> Option<Self::Item> {
+        self.iterator.next()
+    }
+}
diff --git a/crates/pgt_workspace/src/workspace/client.rs b/crates/pgt_workspace/src/workspace/client.rs
index d727fff6..2bd21513 100644
--- a/crates/pgt_workspace/src/workspace/client.rs
+++ b/crates/pgt_workspace/src/workspace/client.rs
@@ -7,7 +7,10 @@ use std::{
     sync::atomic::{AtomicU64, Ordering},
 };

-use super::{CloseFileParams, GetFileContentParams, IsPathIgnoredParams, OpenFileParams};
+use super::{
+    CloseFileParams, GetFileContentParams, IsPathIgnoredParams, OpenFileParams, ProjectKey,
+    RegisterProjectFolderParams, UnregisterProjectFolderParams,
+};

 pub struct WorkspaceClient<T> {
     transport: T,
@@ -103,6 +106,20 @@ where
         self.request("pgt/execute_statement", params)
     }

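For readers unfamiliar with slot maps, here is a stand-alone sketch of the pattern `WorkspaceData` builds on; it requires the `slotmap` crate, and `DemoKey` plus the sample values are invented for the example:

```rust
use slotmap::{DenseSlotMap, new_key_type};

new_key_type! { struct DemoKey; }

fn main() {
    let mut projects: DenseSlotMap<DemoKey, String> = DenseSlotMap::with_key();
    let a = projects.insert("/repo/app".to_string());
    let b = projects.insert("/repo/lib".to_string());

    // Keys stay valid across unrelated removals, which is what makes them
    // safe to hand out to clients as opaque project identifiers.
    projects.remove(a);
    assert_eq!(projects.get(b).map(String::as_str), Some("/repo/lib"));

    // DenseSlotMap keeps values contiguous, so iteration is cheap.
    for (key, path) in &projects {
        println!("{key:?} -> {path}");
    }
}
```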
+    fn register_project_folder(
+        &self,
+        params: RegisterProjectFolderParams,
+    ) -> Result<ProjectKey, WorkspaceError> {
+        self.request("pgt/register_project_folder", params)
+    }
+
+    fn unregister_project_folder(
+        &self,
+        params: UnregisterProjectFolderParams,
+    ) -> Result<(), WorkspaceError> {
+        self.request("pgt/unregister_project_folder", params)
+    }
+
     fn open_file(&self, params: OpenFileParams) -> Result<(), WorkspaceError> {
         self.request("pgt/open_file", params)
     }
diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs
index 82e79e10..67a3713c 100644
--- a/crates/pgt_workspace/src/workspace/server.rs
+++ b/crates/pgt_workspace/src/workspace/server.rs
@@ -1,14 +1,14 @@
 use std::{
     fs,
     panic::RefUnwindSafe,
-    path::Path,
+    path::{Path, PathBuf},
     sync::{Arc, RwLock},
 };

 use analyser::AnalyserVisitorBuilder;
 use async_helper::run_async;
+use connection_manager::ConnectionManager;
 use dashmap::DashMap;
-use db_connection::DbConnection;
 use document::Document;
 use futures::{StreamExt, stream};
 use parsed_document::{
@@ -23,8 +23,8 @@ use pgt_diagnostics::{
 use pgt_fs::{ConfigName, PgTPath};
 use pgt_typecheck::{IdentifierType, TypecheckParams, TypedIdentifier};
 use schema_cache_manager::SchemaCacheManager;
-use sqlx::Executor;
-use tracing::info;
+use sqlx::{Executor, PgPool};
+use tracing::{debug, info};

 use crate::{
     WorkspaceError,
@@ -37,11 +37,12 @@ use crate::{
         completions::{CompletionsResult, GetCompletionsParams, get_statement_for_completions},
         diagnostics::{PullDiagnosticsParams, PullDiagnosticsResult},
     },
-    settings::{Settings, SettingsHandle, SettingsHandleMut},
+    settings::{WorkspaceSettings, WorkspaceSettingsHandle, WorkspaceSettingsHandleMut},
 };

 use super::{
-    GetFileContentParams, IsPathIgnoredParams, OpenFileParams, ServerInfo, UpdateSettingsParams,
+    GetFileContentParams, IsPathIgnoredParams, OpenFileParams, ProjectKey,
+    RegisterProjectFolderParams, ServerInfo, UnregisterProjectFolderParams, UpdateSettingsParams,
     Workspace,
 };

@@ -51,7 +52,8 @@ mod analyser;
 mod annotation;
 mod async_helper;
 mod change;
-mod db_connection;
+mod connection_key;
+mod connection_manager;
 pub(crate) mod document;
 mod migration;
 pub(crate) mod parsed_document;
@@ -63,14 +65,14 @@ mod tree_sitter;

 pub(super) struct WorkspaceServer {
     /// global settings object for this workspace
-    settings: RwLock<Settings>,
+    settings: RwLock<WorkspaceSettings>,

     /// Stores the schema cache for this workspace
     schema_cache: SchemaCacheManager,

     parsed_documents: DashMap<PgTPath, ParsedDocument>,

-    connection: RwLock<DbConnection>,
+    connection: ConnectionManager,
 }

 /// The `Workspace` object is long-lived, so we want it to be able to cross
@@ -91,23 +93,60 @@ impl WorkspaceServer {
         Self {
             settings: RwLock::default(),
             parsed_documents: DashMap::default(),
-            schema_cache: SchemaCacheManager::default(),
-            connection: RwLock::default(),
+            schema_cache: SchemaCacheManager::new(),
+            connection: ConnectionManager::new(),
         }
     }

     /// Provides a reference to the current settings
-    fn settings(&self) -> SettingsHandle {
-        SettingsHandle::new(&self.settings)
+    fn workspaces(&self) -> WorkspaceSettingsHandle {
+        WorkspaceSettingsHandle::new(&self.settings)
     }

-    fn settings_mut(&self) -> SettingsHandleMut {
-        SettingsHandleMut::new(&self.settings)
+    fn workspaces_mut(&self) -> WorkspaceSettingsHandleMut {
+        WorkspaceSettingsHandleMut::new(&self.settings)
+    }
+
+    fn get_current_connection(&self) -> Option<PgPool> {
+        let settings = self.workspaces();
+        let settings = settings.settings()?;
+        self.connection.get_pool(&settings.db)
+    }
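The `workspaces()`/`workspaces_mut()` pair keeps all `RwLock` plumbing private to the server. A compact, std-only sketch of the same guard-wrapping pattern; `Registry` and `Handle` are invented names:

```rust
use std::sync::{RwLock, RwLockReadGuard};

#[derive(Default)]
struct Registry {
    current: String,
}

// Read access goes through a handle that owns the read guard, so callers
// never see the lock itself.
struct Handle<'a> {
    inner: RwLockReadGuard<'a, Registry>,
}

impl<'a> Handle<'a> {
    fn new(lock: &'a RwLock<Registry>) -> Self {
        Self { inner: lock.read().unwrap() }
    }

    fn current(&self) -> &str {
        &self.inner.current
    }
}

fn main() {
    let lock = RwLock::new(Registry { current: "default".into() });
    {
        // Mutation happens through a short-lived write guard ...
        lock.write().unwrap().current = "main".into();
    }
    // ... while reads hand out a borrowing handle.
    let handle = Handle::new(&lock);
    assert_eq!(handle.current(), "main");
}
```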
+
+    /// Register a new project in the current workspace
+    fn register_project(&self, path: PathBuf) -> ProjectKey {
+        let mut workspace = self.workspaces_mut();
+        let workspace_mut = workspace.as_mut();
+        workspace_mut.insert_project(path.clone())
+    }
+
+    /// Retrieves the current project path
+    fn get_current_project_path(&self) -> Option<PgTPath> {
+        self.workspaces().path().cloned()
+    }
+
+    /// Sets the current project of the current workspace
+    fn set_current_project(&self, project_key: ProjectKey) {
+        let mut workspace = self.workspaces_mut();
+        let workspace_mut = workspace.as_mut();
+        workspace_mut.set_current_project(project_key);
+    }
+
+    /// Checks whether the given path belongs to a registered project.
+    ///
+    /// If there's a match, and the match **isn't** the current project, it returns the new key.
+    fn path_belongs_to_current_workspace(&self, path: &PgTPath) -> Option<ProjectKey> {
+        let workspaces = self.workspaces();
+        workspaces.as_ref().path_belongs_to_current_workspace(path)
     }

     fn is_ignored_by_migration_config(&self, path: &Path) -> bool {
-        let set = self.settings();
-        set.as_ref()
+        let settings = self.workspaces();
+        let settings = settings.settings();
+        let Some(settings) = settings else {
+            return false;
+        };
+        settings
             .migrations
             .as_ref()
             .and_then(|migration_settings| {
@@ -131,8 +170,12 @@ impl WorkspaceServer {

     /// Check whether a file is ignored in the top-level config `files.ignore`/`files.include`
     fn is_ignored_by_top_level_config(&self, path: &Path) -> bool {
-        let set = self.settings();
-        let settings = set.as_ref();
+        let settings = self.workspaces();
+        let settings = settings.settings();
+        let Some(settings) = settings else {
+            return false;
+        };
+
         let is_included = settings.files.included_files.is_empty()
             || is_dir(path)
             || settings.files.included_files.matches_path(path);
@@ -155,6 +198,48 @@ impl WorkspaceServer {
 }

 impl Workspace for WorkspaceServer {
+    fn register_project_folder(
+        &self,
+        params: RegisterProjectFolderParams,
+    ) -> Result<ProjectKey, WorkspaceError> {
+        let current_project_path = self.get_current_project_path();
+        debug!(
+            "Compare the current project with the new one {:?} {:?} {:?}",
+            current_project_path,
+            params.path.as_ref(),
+            current_project_path.as_deref() != params.path.as_ref()
+        );
+
+        let is_new_path = match (current_project_path.as_deref(), params.path.as_ref()) {
+            (Some(current_project_path), Some(params_path)) => current_project_path != params_path,
+            (Some(_), None) => {
+                // If the current project is set, but no path is provided, we assume it's a new project
+                true
+            }
+            _ => true,
+        };
+
+        if is_new_path {
+            let path = params.path.unwrap_or_default();
+            let key = self.register_project(path.clone());
+            if params.set_as_current_workspace {
+                self.set_current_project(key);
+            }
+            Ok(key)
+        } else {
+            Ok(self.workspaces().as_ref().get_current_project_key())
+        }
+    }
+
+    fn unregister_project_folder(
+        &self,
+        params: UnregisterProjectFolderParams,
+    ) -> Result<(), WorkspaceError> {
+        let mut workspace = self.workspaces_mut();
+        workspace.as_mut().remove_project(params.path.as_path());
+        Ok(())
+    }
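The `is_new_path` match above is the heart of `register_project_folder`: reuse the current key only when the requested folder equals the current one, otherwise register. A self-contained sketch of the same decision table, with invented names (`decide`, `Decision`):

```rust
use std::path::{Path, PathBuf};

#[derive(Debug, PartialEq)]
enum Decision {
    ReuseCurrent,
    RegisterNew(PathBuf),
}

fn decide(current: Option<&Path>, requested: Option<&Path>) -> Decision {
    match (current, requested) {
        // Same folder as the current project: hand back the existing key.
        (Some(cur), Some(req)) if cur == req => Decision::ReuseCurrent,
        // Different folder, no current project, or no path provided:
        // treat it as a new registration (defaulting the missing path).
        (_, req) => Decision::RegisterNew(req.map(Path::to_path_buf).unwrap_or_default()),
    }
}

fn main() {
    let cur = PathBuf::from("/repo");
    assert_eq!(decide(Some(&cur), Some(Path::new("/repo"))), Decision::ReuseCurrent);
    assert_eq!(
        decide(Some(&cur), Some(Path::new("/other"))),
        Decision::RegisterNew(PathBuf::from("/other"))
    );
    // No path provided still registers a (default, empty) project root.
    assert_eq!(decide(Some(&cur), None), Decision::RegisterNew(PathBuf::new()));
}
```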
tracing::info!("Updated settings in workspace"); - tracing::debug!("Updated settings are {:#?}", self.settings()); - - self.connection - .write() - .unwrap() - .set_conn_settings(&self.settings().as_ref().db); - - tracing::info!("Updated Db connection settings"); + let mut workspace = self.workspaces_mut(); + + workspace + .as_mut() + .get_current_settings_mut() + .merge_with_configuration( + params.configuration, + params.workspace_directory, + params.vcs_base_path, + params.gitignore_matches.as_slice(), + )?; Ok(()) } @@ -193,6 +271,10 @@ impl Workspace for WorkspaceServer { ParsedDocument::new(params.path.clone(), params.content, params.version) }); + if let Some(project_key) = self.path_belongs_to_current_workspace(¶ms.path) { + self.set_current_project(project_key); + } + Ok(()) } @@ -250,15 +332,13 @@ impl Workspace for WorkspaceServer { .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; - let settings = self - .settings - .read() - .expect("Unable to read settings for Code Actions"); + let settings = self.workspaces(); + let settings = settings.settings(); - let disabled_reason: Option = if settings.db.allow_statement_executions { - None - } else { - Some("Statement execution not allowed against database.".into()) + let disabled_reason = match settings { + Some(settings) if settings.db.allow_statement_executions => None, + Some(_) => Some("Statement execution is disabled in the settings.".into()), + None => Some("Statement execution not allowed against database.".into()), }; let actions = parser @@ -310,15 +390,13 @@ impl Workspace for WorkspaceServer { }); }; - let conn = self.connection.read().unwrap(); - let pool = match conn.get_pool() { - Some(p) => p, - None => { - return Ok(ExecuteStatementResult { - message: "Not connected to database.".into(), - }); - } - }; + let pool = self.get_current_connection(); + if pool.is_none() { + return Ok(ExecuteStatementResult { + message: "No database connection available.".into(), + }); + } + let pool = pool.unwrap(); let result = run_async(async move { pool.execute(sqlx::query(&content)).await })??; @@ -334,16 +412,29 @@ impl Workspace for WorkspaceServer { &self, params: PullDiagnosticsParams, ) -> Result { - let settings = self.settings(); + let settings = self.workspaces(); + + let settings = match settings.settings() { + Some(settings) => settings, + None => { + // return an empty result if no settings are available + // we might want to return an error here in the future + return Ok(PullDiagnosticsResult { + diagnostics: Vec::new(), + errors: 0, + skipped_diagnostics: 0, + }); + } + }; // create analyser for this run // first, collect enabled and disabled rules from the workspace settings - let (enabled_rules, disabled_rules) = AnalyserVisitorBuilder::new(settings.as_ref()) + let (enabled_rules, disabled_rules) = AnalyserVisitorBuilder::new(settings) .with_linter_rules(¶ms.only, ¶ms.skip) .finish(); // then, build a map that contains all options let options = AnalyserOptions { - rules: to_analyser_rules(settings.as_ref()), + rules: to_analyser_rules(settings), }; // next, build the analysis filter which will be used to match rules let filter = AnalysisFilter { @@ -364,15 +455,9 @@ impl Workspace for WorkspaceServer { let mut diagnostics: Vec = parser.document_diagnostics().to_vec(); - if let Some(pool) = self - .connection - .read() - .expect("DbConnection RwLock panicked") - .get_pool() - { + if let Some(pool) = self.get_current_connection() { let path_clone = params.path.clone(); let schema_cache = 
             let schema_cache = self.schema_cache.load(pool.clone())?;
-            let schema_cache_arc = schema_cache.get_arc();
             let input = parser.iter(AsyncDiagnosticsMapper).collect::<Vec<_>>();
             // sorry for the ugly code :(
             let async_results = run_async(async move {
@@ -380,7 +465,7 @@
                     .map(|(_id, range, content, ast, cst, sign)| {
                         let pool = pool.clone();
                         let path = path_clone.clone();
-                        let schema_cache = Arc::clone(&schema_cache_arc);
+                        let schema_cache = Arc::clone(&schema_cache);
                         async move {
                             if let Some(ast) = ast {
                                 pgt_typecheck::check_sql(TypecheckParams {
@@ -461,7 +546,6 @@
                     || d.severity(),
                     |category| {
                         settings
-                            .as_ref()
                             .get_severity_from_rule_code(category)
                             .unwrap_or(Severity::Warning)
                     },
@@ -503,13 +587,12 @@
             .get(&params.path)
             .ok_or(WorkspaceError::not_found())?;

-        let pool = match self.connection.read().unwrap().get_pool() {
-            Some(pool) => pool,
-            None => {
-                tracing::debug!("No connection to database. Skipping completions.");
-                return Ok(CompletionsResult::default());
-            }
-        };
+        let pool = self.get_current_connection();
+        if pool.is_none() {
+            tracing::debug!("No database connection available. Skipping completions.");
+            return Ok(CompletionsResult::default());
+        }
+        let pool = pool.unwrap();

         let schema_cache = self.schema_cache.load(pool)?;
diff --git a/crates/pgt_workspace/src/workspace/server/connection_key.rs b/crates/pgt_workspace/src/workspace/server/connection_key.rs
new file mode 100644
index 00000000..abdd8025
--- /dev/null
+++ b/crates/pgt_workspace/src/workspace/server/connection_key.rs
@@ -0,0 +1,44 @@
+use sqlx::PgPool;
+
+use crate::settings::DatabaseSettings;
+
+/// A unique identifier for database connection settings
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub(crate) struct ConnectionKey {
+    pub host: String,
+    pub port: u16,
+    pub username: String,
+    pub database: String,
+}
+
+impl From<&DatabaseSettings> for ConnectionKey {
+    fn from(settings: &DatabaseSettings) -> Self {
+        Self {
+            host: settings.host.clone(),
+            port: settings.port,
+            username: settings.username.clone(),
+            database: settings.database.clone(),
+        }
+    }
+}
+
+impl From<&PgPool> for ConnectionKey {
+    fn from(pool: &PgPool) -> Self {
+        let conn = pool.connect_options();
+
+        match conn.get_database() {
+            None => Self {
+                host: conn.get_host().to_string(),
+                port: conn.get_port(),
+                username: conn.get_username().to_string(),
+                database: String::new(),
+            },
+            Some(db) => Self {
+                host: conn.get_host().to_string(),
+                port: conn.get_port(),
+                username: conn.get_username().to_string(),
+                database: db.to_string(),
+            },
+        }
+    }
+}
diff --git a/crates/pgt_workspace/src/workspace/server/connection_manager.rs b/crates/pgt_workspace/src/workspace/server/connection_manager.rs
new file mode 100644
index 00000000..d21988f0
--- /dev/null
+++ b/crates/pgt_workspace/src/workspace/server/connection_manager.rs
@@ -0,0 +1,95 @@
+use std::time::{Duration, Instant};
+
+use dashmap::DashMap;
+use sqlx::{PgPool, Postgres, pool::PoolOptions, postgres::PgConnectOptions};
+
+use crate::settings::DatabaseSettings;
+
+use super::connection_key::ConnectionKey;
+
+/// Cached connection pool with last access time
+struct CachedPool {
+    pool: PgPool,
+    last_accessed: Instant,
+    idle_timeout: Duration,
+}
+
+#[derive(Default)]
+pub struct ConnectionManager {
+    pools: DashMap<ConnectionKey, CachedPool>,
+}
+
+impl ConnectionManager {
+    pub fn new() -> Self {
+        Self {
+            pools: DashMap::new(),
+        }
+    }
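`ConnectionKey` hashes only the coordinates that identify a connection. A std-only analog showing why two equal settings values reuse one cache slot; `DbSettings`, `Key`, and the literals are invented for the example:

```rust
use std::collections::HashMap;

// Stand-in for the crate's `DatabaseSettings`.
struct DbSettings { host: String, port: u16, username: String, database: String }

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
struct Key { host: String, port: u16, username: String, database: String }

impl From<&DbSettings> for Key {
    fn from(s: &DbSettings) -> Self {
        Key {
            host: s.host.clone(),
            port: s.port,
            username: s.username.clone(),
            database: s.database.clone(),
        }
    }
}

fn main() {
    let settings = DbSettings {
        host: "localhost".into(), port: 5432,
        username: "postgres".into(), database: "app".into(),
    };
    let mut pools: HashMap<Key, &'static str> = HashMap::new();
    pools.insert(Key::from(&settings), "pool-1");

    // A second, equal settings value hits the same cache slot.
    let again = DbSettings {
        host: "localhost".into(), port: 5432,
        username: "postgres".into(), database: "app".into(),
    };
    assert_eq!(pools.get(&Key::from(&again)), Some(&"pool-1"));
}
```

Note that the real key omits the password, so rotating only the password will not by itself invalidate a cached pool.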
+
+    /// Get a connection pool for the given database settings.
+    /// If a pool already exists for these settings, it will be returned.
+    /// If not, a new pool will be created if connections are enabled.
+    /// Will also clean up idle connections that haven't been accessed for a while.
+    pub(crate) fn get_pool(&self, settings: &DatabaseSettings) -> Option<PgPool> {
+        let key = ConnectionKey::from(settings);
+
+        // Cleanup idle connections first
+        self.cleanup_idle_pools(&key);
+
+        if !settings.enable_connection {
+            tracing::info!("Database connection disabled.");
+            return None;
+        }
+
+        // If we have a cached pool, update its last_accessed time and return it
+        if let Some(mut cached_pool) = self.pools.get_mut(&key) {
+            cached_pool.last_accessed = Instant::now();
+            return Some(cached_pool.pool.clone());
+        }
+
+        // Create a new pool
+        let config = PgConnectOptions::new()
+            .host(&settings.host)
+            .port(settings.port)
+            .username(&settings.username)
+            .password(&settings.password)
+            .database(&settings.database);
+
+        let timeout = settings.conn_timeout_secs;
+
+        let pool = PoolOptions::<Postgres>::new()
+            .acquire_timeout(timeout)
+            .acquire_slow_threshold(Duration::from_secs(2))
+            .connect_lazy_with(config);
+
+        let cached_pool = CachedPool {
+            pool: pool.clone(),
+            last_accessed: Instant::now(),
+            // TODO: add this to the db settings, for now default to five minutes
+            idle_timeout: Duration::from_secs(60 * 5),
+        };
+
+        self.pools.insert(key, cached_pool);
+
+        Some(pool)
+    }
+
+    /// Remove pools that haven't been accessed for longer than the idle timeout
+    fn cleanup_idle_pools(&self, ignore_key: &ConnectionKey) {
+        let now = Instant::now();
+
+        // Use retain to keep only non-idle connections
+        self.pools.retain(|key, cached_pool| {
+            let idle_duration = now.duration_since(cached_pool.last_accessed);
+            if idle_duration > cached_pool.idle_timeout && key != ignore_key {
+                tracing::debug!(
+                    "Removing idle database connection (idle for {:?})",
+                    idle_duration
+                );
+                false
+            } else {
+                true
+            }
+        });
+    }
+}
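The eviction strategy above is "clean up on access": every `get_pool` call first drops entries idle past their timeout, sparing the key being requested. A minimal, std-only sketch of the same `retain` idiom; the names are invented, and the backdated timestamp assumes the process has been up longer than ten minutes:

```rust
use std::collections::HashMap;
use std::time::{Duration, Instant};

struct Entry { last_accessed: Instant, idle_timeout: Duration }

fn evict_idle(cache: &mut HashMap<String, Entry>, keep: &str) {
    let now = Instant::now();
    // Keep the requested key plus anything accessed recently enough.
    cache.retain(|key, e| {
        key.as_str() == keep || now.duration_since(e.last_accessed) <= e.idle_timeout
    });
}

fn main() {
    let mut cache = HashMap::new();
    let stale = Instant::now() - Duration::from_secs(600);
    cache.insert("old".to_string(), Entry { last_accessed: stale, idle_timeout: Duration::from_secs(300) });
    cache.insert("hot".to_string(), Entry { last_accessed: Instant::now(), idle_timeout: Duration::from_secs(300) });

    evict_idle(&mut cache, "hot");
    assert!(!cache.contains_key("old"));
    assert!(cache.contains_key("hot"));
}
```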
diff --git a/crates/pgt_workspace/src/workspace/server/db_connection.rs b/crates/pgt_workspace/src/workspace/server/db_connection.rs
deleted file mode 100644
index d002c0a2..00000000
--- a/crates/pgt_workspace/src/workspace/server/db_connection.rs
+++ /dev/null
@@ -1,40 +0,0 @@
-use std::time::Duration;
-
-use sqlx::{PgPool, Postgres, pool::PoolOptions, postgres::PgConnectOptions};
-
-use crate::settings::DatabaseSettings;
-
-#[derive(Default)]
-pub struct DbConnection {
-    pool: Option<PgPool>,
-}
-
-impl DbConnection {
-    /// There might be no pool available if the user decides to skip db checks.
-    pub(crate) fn get_pool(&self) -> Option<PgPool> {
-        self.pool.clone()
-    }
-
-    pub(crate) fn set_conn_settings(&mut self, settings: &DatabaseSettings) {
-        if !settings.enable_connection {
-            tracing::info!("Database connection disabled.");
-            return;
-        }
-
-        let config = PgConnectOptions::new()
-            .host(&settings.host)
-            .port(settings.port)
-            .username(&settings.username)
-            .password(&settings.password)
-            .database(&settings.database);
-
-        let timeout = settings.conn_timeout_secs;
-
-        let pool = PoolOptions::<Postgres>::new()
-            .acquire_timeout(timeout)
-            .acquire_slow_threshold(Duration::from_secs(2))
-            .connect_lazy_with(config);
-
-        self.pool = Some(pool);
-    }
-}
diff --git a/crates/pgt_workspace/src/workspace/server/schema_cache_manager.rs b/crates/pgt_workspace/src/workspace/server/schema_cache_manager.rs
index 03cd6ded..b42dfc34 100644
--- a/crates/pgt_workspace/src/workspace/server/schema_cache_manager.rs
+++ b/crates/pgt_workspace/src/workspace/server/schema_cache_manager.rs
@@ -1,97 +1,47 @@
-use std::sync::{Arc, RwLock, RwLockReadGuard};
+use std::sync::Arc;

+use dashmap::DashMap;
 use pgt_schema_cache::SchemaCache;
 use sqlx::PgPool;

 use crate::WorkspaceError;

-use super::async_helper::run_async;
-
-pub(crate) struct SchemaCacheHandle<'a> {
-    inner: RwLockReadGuard<'a, SchemaCacheManagerInner>,
-}
-
-impl<'a> SchemaCacheHandle<'a> {
-    pub(crate) fn new(cache: &'a RwLock<SchemaCacheManagerInner>) -> Self {
-        Self {
-            inner: cache.read().unwrap(),
-        }
-    }
-
-    pub(crate) fn wrap(inner: RwLockReadGuard<'a, SchemaCacheManagerInner>) -> Self {
-        Self { inner }
-    }
-
-    pub fn get_arc(&self) -> Arc<SchemaCache> {
-        Arc::clone(&self.inner.cache)
-    }
-}
-
-impl AsRef<SchemaCache> for SchemaCacheHandle<'_> {
-    fn as_ref(&self) -> &SchemaCache {
-        &self.inner.cache
-    }
-}
-
-#[derive(Default)]
-pub(crate) struct SchemaCacheManagerInner {
-    cache: Arc<SchemaCache>,
-    conn_str: String,
-}
+use super::{async_helper::run_async, connection_key::ConnectionKey};

 #[derive(Default)]
 pub struct SchemaCacheManager {
-    inner: RwLock<SchemaCacheManagerInner>,
+    schemas: DashMap<ConnectionKey, Arc<SchemaCache>>,
 }

 impl SchemaCacheManager {
-    pub fn load(&self, pool: PgPool) -> Result<SchemaCacheHandle, WorkspaceError> {
-        let new_conn_str = pool_to_conn_str(&pool);
-
-        {
-            // return early if the connection string is the same
-            let inner = self.inner.read().unwrap();
-            if new_conn_str == inner.conn_str {
-                tracing::info!("Same connection string, no updates.");
-                return Ok(SchemaCacheHandle::wrap(inner));
-            }
+    pub fn new() -> Self {
+        Self {
+            schemas: DashMap::new(),
         }
+    }

-        let maybe_refreshed = run_async(async move { SchemaCache::load(&pool).await })?;
-        let refreshed = maybe_refreshed?;
-
-        {
-            // write lock must be dropped before we return the reference below, hence the block
-            let mut inner = self.inner.write().unwrap();
+    pub fn load(&self, pool: PgPool) -> Result<Arc<SchemaCache>, WorkspaceError> {
+        let key: ConnectionKey = (&pool).into();

-            // Double-check that we still need to refresh (another thread might have done it)
-            if new_conn_str != inner.conn_str {
-                inner.cache = Arc::new(refreshed);
-                inner.conn_str = new_conn_str;
-                tracing::info!("Refreshed connection.");
-            }
+        if let Some(cache) = self.schemas.get(&key) {
+            return Ok(Arc::clone(&*cache));
         }

-        Ok(SchemaCacheHandle::new(&self.inner))
-    }
-}
-
-fn pool_to_conn_str(pool: &PgPool) -> String {
-    let conn = pool.connect_options();
-
-    match conn.get_database() {
-        None => format!(
-            "postgres://{}:@{}:{}",
-            conn.get_username(),
-            conn.get_host(),
-            conn.get_port()
-        ),
-        Some(db) => format!(
-            "postgres://{}:@{}:{}/{}",
-            conn.get_username(),
-            conn.get_host(),
-            conn.get_port(),
-            db
-        ),
+        let schema_cache = self
+            .schemas
+            .entry(key)
+            .or_try_insert_with::<WorkspaceError>(|| {
+                // This closure will only be called once per key if multiple threads
+                // try to access the same key simultaneously
+                let pool_clone = pool.clone();
+                let schema_cache = Arc::new(run_async(
+                    async move { SchemaCache::load(&pool_clone).await },
+                )??);
+
+                Ok(schema_cache)
+            })?;
+
+        Ok(Arc::clone(&schema_cache))
     }
 }
diff --git a/crates/pgt_workspace/src/workspace_types.rs b/crates/pgt_workspace/src/workspace_types.rs
index 02215e79..b902fad6 100644
--- a/crates/pgt_workspace/src/workspace_types.rs
+++ b/crates/pgt_workspace/src/workspace_types.rs
@@ -457,9 +457,10 @@ macro_rules! workspace_method {
 }

 /// Returns a list of signatures for all the methods in the [Workspace] trait
-pub fn methods() -> [WorkspaceMethod; 8] {
+pub fn methods() -> [WorkspaceMethod; 9] {
     [
         workspace_method!(is_path_ignored),
+        workspace_method!(register_project_folder),
         workspace_method!(get_file_content),
         workspace_method!(pull_diagnostics),
         workspace_method!(get_completions),
diff --git a/docs/schemas/0.0.0/schema.json b/docs/schemas/0.0.0/schema.json
index faba3b5c..8c478d0a 100644
--- a/docs/schemas/0.0.0/schema.json
+++ b/docs/schemas/0.0.0/schema.json
@@ -22,6 +22,17 @@
         }
       ]
     },
+    "extends": {
+      "description": "A list of paths to other JSON files, used to extend the current configuration.",
+      "anyOf": [
+        {
+          "$ref": "#/definitions/StringSet"
+        },
+        {
+          "type": "null"
+        }
+      ]
+    },
     "files": {
       "description": "The configuration of the filesystem",
       "anyOf": [
diff --git a/docs/schemas/latest/schema.json b/docs/schemas/latest/schema.json
index faba3b5c..8c478d0a 100644
--- a/docs/schemas/latest/schema.json
+++ b/docs/schemas/latest/schema.json
@@ -22,6 +22,17 @@
         }
       ]
     },
+    "extends": {
+      "description": "A list of paths to other JSON files, used to extend the current configuration.",
+      "anyOf": [
+        {
+          "$ref": "#/definitions/StringSet"
+        },
+        {
+          "type": "null"
+        }
+      ]
+    },
     "files": {
       "description": "The configuration of the filesystem",
       "anyOf": [
diff --git a/justfile b/justfile
index a55207ae..c868a122 100644
--- a/justfile
+++ b/justfile
@@ -6,6 +6,7 @@ alias r := ready
 alias l := lint
 alias t := test
 alias rg := reset-git
+alias qm := quick-modify

 # Installs the tools needed to develop
 install-tools:
@@ -132,10 +133,22 @@ merge-main:
     git fetch origin main:main
     git merge main

+quick-create branch commit:
+    git checkout -b {{branch}}
+    git add -A
+    git commit -m "{{commit}}"
+    git push
+    gh pr create --fill
+
+quick-modify:
+    just format
+    git add -A
+    git commit -m "progress"
+    git push

 # Make sure to set your PGT_LOG_PATH in your shell profile.
 # You can use the PGT_LOG_LEVEL to set your log level.
# We recommend to install `bunyan` (npm i -g bunyan) and pipe the output through there for color-coding: # just show-logs | bunyan show-logs: - tail -f $(ls $PGT_LOG_PATH/server.log.* | sort -t- -k2,2 -k3,3 -k4,4 | tail -n 1) \ No newline at end of file + tail -f $(ls $PGT_LOG_PATH/server.log.* | sort -t- -k2,2 -k3,3 -k4,4 | tail -n 1) diff --git a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts index a35dad81..aaa5a42a 100644 --- a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts +++ b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts @@ -19,6 +19,11 @@ export type FileKind = FileKind2[]; * The priority of the file */ export type FileKind2 = "Config" | "Ignore" | "Inspectable" | "Handleable"; +export interface RegisterProjectFolderParams { + path?: string; + setAsCurrentWorkspace: boolean; +} +export type ProjectKey = string; export interface GetFileContentParams { path: PgTPath; } @@ -92,7 +97,7 @@ export type DiagnosticTags = DiagnosticTag[]; /** * Serializable representation of a [Diagnostic](super::Diagnostic) advice -See the [Visitor] trait for additional documentation on all the supported advice types. +See the [Visitor] trait for additional documentation on all the supported advice types. */ export type Advice = | { log: [LogCategory, MarkupBuf] } @@ -185,6 +190,7 @@ export interface CompletionsResult { export interface CompletionItem { completion_text?: CompletionText; description: string; + detail?: string; kind: CompletionItemKind; label: string; preselected: boolean; @@ -196,16 +202,22 @@ export interface CompletionItem { /** * The text that the editor should fill in. If `None`, the `label` should be used. Tables, for example, might have different completion_texts: -label: "users", description: "Schema: auth", completion_text: "auth.users". +label: "users", description: "Schema: auth", completion_text: "auth.users". */ export interface CompletionText { + is_snippet: boolean; /** * A `range` is required because some editors replace the current token, others naively insert the text. Having a range where start == end makes it an insertion. */ range: TextRange; text: string; } -export type CompletionItemKind = "table" | "function" | "column" | "schema"; +export type CompletionItemKind = + | "table" + | "function" + | "column" + | "schema" + | "policy"; export interface UpdateSettingsParams { configuration: PartialConfiguration; gitignore_matches: string[]; @@ -224,6 +236,10 @@ export interface PartialConfiguration { * The configuration of the database connection */ db?: PartialDatabaseConfiguration; + /** + * A list of paths to other JSON files, used to extends the current configuration. + */ + extends?: StringSet; /** * The configuration of the filesystem */ @@ -271,6 +287,7 @@ export interface PartialDatabaseConfiguration { */ username?: string; } +export type StringSet = string[]; /** * The configuration of the filesystem */ @@ -338,7 +355,7 @@ export interface PartialVcsConfiguration { /** * The folder where we should check for VCS files. By default, we will use the same folder where `postgrestools.jsonc` was found. -If we can't find the configuration, it will attempt to use the current working directory. If no current working directory can't be found, we won't use the VCS integration, and a diagnostic will be emitted +If we can't find the configuration, it will attempt to use the current working directory. 
If no current working directory can't be found, we won't use the VCS integration, and a diagnostic will be emitted */ root?: string; /** @@ -346,7 +363,6 @@ If we can't find the configuration, it will attempt to use the current working d */ useIgnoreFile?: boolean; } -export type StringSet = string[]; export interface Rules { /** * It enables ALL rules. The rules that belong to `nursery` won't be enabled. @@ -425,6 +441,9 @@ export interface CloseFileParams { export type Configuration = PartialConfiguration; export interface Workspace { isPathIgnored(params: IsPathIgnoredParams): Promise; + registerProjectFolder( + params: RegisterProjectFolderParams, + ): Promise; getFileContent(params: GetFileContentParams): Promise; pullDiagnostics( params: PullDiagnosticsParams, @@ -441,6 +460,9 @@ export function createWorkspace(transport: Transport): Workspace { isPathIgnored(params) { return transport.request("pgt/is_path_ignored", params); }, + registerProjectFolder(params) { + return transport.request("pgt/register_project_folder", params); + }, getFileContent(params) { return transport.request("pgt/get_file_content", params); }, diff --git a/packages/@postgrestools/backend-jsonrpc/tests/workspace.test.mjs b/packages/@postgrestools/backend-jsonrpc/tests/workspace.test.mjs index c83d5e44..c35904c4 100644 --- a/packages/@postgrestools/backend-jsonrpc/tests/workspace.test.mjs +++ b/packages/@postgrestools/backend-jsonrpc/tests/workspace.test.mjs @@ -2,7 +2,7 @@ import { resolve } from "node:path"; import { fileURLToPath } from "node:url"; import { describe, expect, it } from "vitest"; -import { createWorkspaceWithBinary } from "../dist"; +import { createWorkspaceWithBinary } from "../src"; describe("Workspace API", () => { it("should process remote requests", async () => { @@ -14,6 +14,9 @@ describe("Workspace API", () => { ); const workspace = await createWorkspaceWithBinary(command); + workspace.registerProjectFolder({ + setAsCurrentWorkspace: true, + }); await workspace.openFile({ path: { path: "test.sql", From d43d71606f4d2d555ae8985939b2363190737937 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Thu, 5 Jun 2025 09:23:32 +0200 Subject: [PATCH 065/114] feat: versioned docs (#414) (Finally) sets up versioned docs with a version selector. we deploy on every push to `main` on the `main` tag, and on every release on the tag of that release + update `latest` this finally fixes the issue we were having around missing schema files. tested it on a fork. 
:) Todo - [x] update schema url in default config - [x] Before merging: delete gh-pages branch closes #312 fixes #319 --- .github/workflows/deploy_docs.yml | 29 +- crates/pgt_workspace/src/configuration.rs | 4 +- docs/codegen/src/schema.rs | 21 +- docs/{schemas/0.0.0 => }/schema.json | 0 docs/schemas/latest/schema.json | 449 ---------------------- mkdocs.yml | 10 +- pyproject.toml | 1 + 7 files changed, 29 insertions(+), 485 deletions(-) rename docs/{schemas/0.0.0 => }/schema.json (100%) delete mode 100644 docs/schemas/latest/schema.json diff --git a/.github/workflows/deploy_docs.yml b/.github/workflows/deploy_docs.yml index aabd75cd..462c0e5d 100644 --- a/.github/workflows/deploy_docs.yml +++ b/.github/workflows/deploy_docs.yml @@ -1,9 +1,11 @@ name: Build and Deploy Documentation on: + push: + branches: + - main release: - types: [released] - workflow_dispatch: + types: [published] permissions: contents: write @@ -13,17 +15,15 @@ permissions: jobs: build: runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v4 + - uses: actions/checkout@v4 with: fetch-depth: 0 - name: Install uv uses: astral-sh/setup-uv@v5 with: - enable-cache: true + enable-cache: true - name: Set up Python run: uv python install @@ -31,7 +31,18 @@ jobs: - name: Install the project run: uv sync --all-extras --dev - - run: uv run mkdocs gh-deploy --force + - name: Configure Git + run: | + git config user.name 'github-actions[bot]' + git config user.email 'github-actions[bot]@users.noreply.github.com' + + - name: Build Docs Website + run: | + if [ "${{ github.event_name }}" = "release" ]; then + uv run mike deploy --push --update-aliases ${{ github.event.release.tag_name }} latest + else + uv run mike deploy --push main + fi deploy: needs: build @@ -44,12 +55,16 @@ jobs: uses: actions/checkout@v4 with: ref: gh-pages + - name: Setup Pages uses: actions/configure-pages@v5 + - name: Upload artifact uses: actions/upload-pages-artifact@v3 with: path: '.' + - name: Deploy to GitHub Pages id: deployment uses: actions/deploy-pages@v4 + diff --git a/crates/pgt_workspace/src/configuration.rs b/crates/pgt_workspace/src/configuration.rs index 1baebcc7..87e77b0c 100644 --- a/crates/pgt_workspace/src/configuration.rs +++ b/crates/pgt_workspace/src/configuration.rs @@ -186,9 +186,9 @@ pub fn create_config( configuration.schema = node_schema_path.to_str().map(String::from); } else if VERSION == "0.0.0" { // VERSION is 0.0.0 if it has not been explicitly set (e.g local dev, as fallback) - configuration.schema = Some("https://pgtools.dev/schemas/latest/schema.json".to_string()); + configuration.schema = Some("https://pgtools.dev/latest/schema.json".to_string()); } else { - configuration.schema = Some(format!("https://pgtools.dev/schemas/{VERSION}/schema.json")); + configuration.schema = Some(format!("https://pgtools.dev/{VERSION}/schema.json")); } let contents = serde_json::to_string_pretty(&configuration) diff --git a/docs/codegen/src/schema.rs b/docs/codegen/src/schema.rs index acfc42f1..3e430517 100644 --- a/docs/codegen/src/schema.rs +++ b/docs/codegen/src/schema.rs @@ -1,4 +1,4 @@ -use pgt_configuration::{PartialConfiguration, VERSION}; +use pgt_configuration::PartialConfiguration; use schemars::{ schema::{RootSchema, Schema, SchemaObject}, schema_for, @@ -10,25 +10,10 @@ use std::{fs, path::Path}; /// /// * `docs_dir`: Path to the docs directory. 
pub fn generate_schema(docs_dir: &Path) -> anyhow::Result<()> { - let schemas_dir = docs_dir.join("schemas"); - let latest_schema_dir = schemas_dir.join("latest"); - let latest_schema_path = latest_schema_dir.join("schema.json"); - - let version_schema_dir = schemas_dir.join(VERSION); - let version_schema_path = version_schema_dir.join("schema.json"); - - if !latest_schema_dir.exists() { - fs::create_dir_all(&latest_schema_dir)?; - } - - if !version_schema_dir.exists() { - fs::create_dir_all(&version_schema_dir)?; - } - + let schema_path = docs_dir.join("schema.json"); let schema_content = get_configuration_schema_content()?; - fs::write(latest_schema_path, &schema_content)?; - fs::write(version_schema_path, &schema_content)?; + fs::write(schema_path, &schema_content)?; Ok(()) } diff --git a/docs/schemas/0.0.0/schema.json b/docs/schema.json similarity index 100% rename from docs/schemas/0.0.0/schema.json rename to docs/schema.json diff --git a/docs/schemas/latest/schema.json b/docs/schemas/latest/schema.json deleted file mode 100644 index 8c478d0a..00000000 --- a/docs/schemas/latest/schema.json +++ /dev/null @@ -1,449 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "Configuration", - "description": "The configuration that is contained inside the configuration file.", - "type": "object", - "properties": { - "$schema": { - "description": "A field for the [JSON schema](https://json-schema.org/) specification", - "type": [ - "string", - "null" - ] - }, - "db": { - "description": "The configuration of the database connection", - "anyOf": [ - { - "$ref": "#/definitions/DatabaseConfiguration" - }, - { - "type": "null" - } - ] - }, - "extends": { - "description": "A list of paths to other JSON files, used to extends the current configuration.", - "anyOf": [ - { - "$ref": "#/definitions/StringSet" - }, - { - "type": "null" - } - ] - }, - "files": { - "description": "The configuration of the filesystem", - "anyOf": [ - { - "$ref": "#/definitions/FilesConfiguration" - }, - { - "type": "null" - } - ] - }, - "linter": { - "description": "The configuration for the linter", - "anyOf": [ - { - "$ref": "#/definitions/LinterConfiguration" - }, - { - "type": "null" - } - ] - }, - "migrations": { - "description": "Configure migrations", - "anyOf": [ - { - "$ref": "#/definitions/MigrationsConfiguration" - }, - { - "type": "null" - } - ] - }, - "vcs": { - "description": "The configuration of the VCS integration", - "anyOf": [ - { - "$ref": "#/definitions/VcsConfiguration" - }, - { - "type": "null" - } - ] - } - }, - "additionalProperties": false, - "definitions": { - "DatabaseConfiguration": { - "description": "The configuration of the database connection.", - "type": "object", - "properties": { - "allowStatementExecutionsAgainst": { - "anyOf": [ - { - "$ref": "#/definitions/StringSet" - }, - { - "type": "null" - } - ] - }, - "connTimeoutSecs": { - "description": "The connection timeout in seconds.", - "type": [ - "integer", - "null" - ], - "format": "uint16", - "minimum": 0.0 - }, - "database": { - "description": "The name of the database.", - "type": [ - "string", - "null" - ] - }, - "host": { - "description": "The host of the database. Required if you want database-related features. 
All else falls back to sensible defaults.", - "type": [ - "string", - "null" - ] - }, - "password": { - "description": "The password to connect to the database.", - "type": [ - "string", - "null" - ] - }, - "port": { - "description": "The port of the database.", - "type": [ - "integer", - "null" - ], - "format": "uint16", - "minimum": 0.0 - }, - "username": { - "description": "The username to connect to the database.", - "type": [ - "string", - "null" - ] - } - }, - "additionalProperties": false - }, - "FilesConfiguration": { - "description": "The configuration of the filesystem", - "type": "object", - "properties": { - "ignore": { - "description": "A list of Unix shell style patterns. Will ignore files/folders that will match these patterns.", - "anyOf": [ - { - "$ref": "#/definitions/StringSet" - }, - { - "type": "null" - } - ] - }, - "include": { - "description": "A list of Unix shell style patterns. Will handle only those files/folders that will match these patterns.", - "anyOf": [ - { - "$ref": "#/definitions/StringSet" - }, - { - "type": "null" - } - ] - }, - "maxSize": { - "description": "The maximum allowed size for source code files in bytes. Files above this limit will be ignored for performance reasons. Defaults to 1 MiB", - "type": [ - "integer", - "null" - ], - "format": "uint64", - "minimum": 1.0 - } - }, - "additionalProperties": false - }, - "LinterConfiguration": { - "type": "object", - "properties": { - "enabled": { - "description": "if `false`, it disables the feature and the linter won't be executed. `true` by default", - "type": [ - "boolean", - "null" - ] - }, - "ignore": { - "description": "A list of Unix shell style patterns. The formatter will ignore files/folders that will match these patterns.", - "anyOf": [ - { - "$ref": "#/definitions/StringSet" - }, - { - "type": "null" - } - ] - }, - "include": { - "description": "A list of Unix shell style patterns. The formatter will include files/folders that will match these patterns.", - "anyOf": [ - { - "$ref": "#/definitions/StringSet" - }, - { - "type": "null" - } - ] - }, - "rules": { - "description": "List of rules", - "anyOf": [ - { - "$ref": "#/definitions/Rules" - }, - { - "type": "null" - } - ] - } - }, - "additionalProperties": false - }, - "MigrationsConfiguration": { - "description": "The configuration of the filesystem", - "type": "object", - "properties": { - "after": { - "description": "Ignore any migrations before this timestamp", - "type": [ - "integer", - "null" - ], - "format": "uint64", - "minimum": 0.0 - }, - "migrationsDir": { - "description": "The directory where the migration files are stored", - "type": [ - "string", - "null" - ] - } - }, - "additionalProperties": false - }, - "RuleConfiguration": { - "anyOf": [ - { - "$ref": "#/definitions/RulePlainConfiguration" - }, - { - "$ref": "#/definitions/RuleWithNoOptions" - } - ] - }, - "RulePlainConfiguration": { - "type": "string", - "enum": [ - "warn", - "error", - "info", - "off" - ] - }, - "RuleWithNoOptions": { - "type": "object", - "required": [ - "level" - ], - "properties": { - "level": { - "description": "The severity of the emitted diagnostics by the rule", - "allOf": [ - { - "$ref": "#/definitions/RulePlainConfiguration" - } - ] - } - }, - "additionalProperties": false - }, - "Rules": { - "type": "object", - "properties": { - "all": { - "description": "It enables ALL rules. 
The rules that belong to `nursery` won't be enabled.", - "type": [ - "boolean", - "null" - ] - }, - "recommended": { - "description": "It enables the lint rules recommended by Postgres Tools. `true` by default.", - "type": [ - "boolean", - "null" - ] - }, - "safety": { - "anyOf": [ - { - "$ref": "#/definitions/Safety" - }, - { - "type": "null" - } - ] - } - }, - "additionalProperties": false - }, - "Safety": { - "description": "A list of rules that belong to this group", - "type": "object", - "properties": { - "addingRequiredField": { - "description": "Adding a new column that is NOT NULL and has no default value to an existing table effectively makes it required.", - "anyOf": [ - { - "$ref": "#/definitions/RuleConfiguration" - }, - { - "type": "null" - } - ] - }, - "all": { - "description": "It enables ALL rules for this group.", - "type": [ - "boolean", - "null" - ] - }, - "banDropColumn": { - "description": "Dropping a column may break existing clients.", - "anyOf": [ - { - "$ref": "#/definitions/RuleConfiguration" - }, - { - "type": "null" - } - ] - }, - "banDropNotNull": { - "description": "Dropping a NOT NULL constraint may break existing clients.", - "anyOf": [ - { - "$ref": "#/definitions/RuleConfiguration" - }, - { - "type": "null" - } - ] - }, - "banDropTable": { - "description": "Dropping a table may break existing clients.", - "anyOf": [ - { - "$ref": "#/definitions/RuleConfiguration" - }, - { - "type": "null" - } - ] - }, - "recommended": { - "description": "It enables the recommended rules for this group", - "type": [ - "boolean", - "null" - ] - } - }, - "additionalProperties": false - }, - "StringSet": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "VcsClientKind": { - "oneOf": [ - { - "description": "Integration with the git client as VCS", - "type": "string", - "enum": [ - "git" - ] - } - ] - }, - "VcsConfiguration": { - "description": "Set of properties to integrate with a VCS software.", - "type": "object", - "properties": { - "clientKind": { - "description": "The kind of client.", - "anyOf": [ - { - "$ref": "#/definitions/VcsClientKind" - }, - { - "type": "null" - } - ] - }, - "defaultBranch": { - "description": "The main branch of the project", - "type": [ - "string", - "null" - ] - }, - "enabled": { - "description": "Whether we should integrate itself with the VCS client", - "type": [ - "boolean", - "null" - ] - }, - "root": { - "description": "The folder where we should check for VCS files. By default, we will use the same folder where `postgrestools.jsonc` was found.\n\nIf we can't find the configuration, it will attempt to use the current working directory. If no current working directory can't be found, we won't use the VCS integration, and a diagnostic will be emitted", - "type": [ - "string", - "null" - ] - }, - "useIgnoreFile": { - "description": "Whether we should use the VCS ignore file. 
When [true], we will ignore the files specified in the ignore file.", - "type": [ - "boolean", - "null" - ] - } - }, - "additionalProperties": false - } - } -} \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 3597e08c..464aab00 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -28,12 +28,4 @@ plugins: markdown_extensions: - admonition - # - pymdownx.highlight: - # anchor_linenums: true - # line_spans: __span - # pygments_lang_class: true - # - pymdownx.inlinehilite - # - pymdownx.snippets - # - pymdownx.superfences - # - pymdownx.tabbed: - # alternate_style: true + - mike diff --git a/pyproject.toml b/pyproject.toml index 73ee0fa8..41317471 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,6 +5,7 @@ description = "A collection of language tools and a Language Server Protocol (LS readme = "README.md" requires-python = ">=3.13" dependencies = [ + "mike>=2.1.3", "mkdocs>=1.6.1", "mkdocs-github-admonitions-plugin>=0.0.3", ] From 822b626388a93c4dc9ab1f177b4c187a7f213695 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Thu, 5 Jun 2025 09:28:32 +0200 Subject: [PATCH 066/114] fix: mike is a plugin (#418) --- mkdocs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mkdocs.yml b/mkdocs.yml index 464aab00..b4520ce4 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -25,7 +25,7 @@ nav: plugins: - gh-admonitions + - mike markdown_extensions: - admonition - - mike From 3ce7496b9763ec6c0ca70c7419b2129e5a513c0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Thu, 5 Jun 2025 09:35:45 +0200 Subject: [PATCH 067/114] fix/mike 2 (#419) - **fix: mike is a plugin** - **fix: mike set-default** --- .github/workflows/deploy_docs.yml | 5 + uv.lock | 221 ++++++++++++++++++++---------- 2 files changed, 150 insertions(+), 76 deletions(-) diff --git a/.github/workflows/deploy_docs.yml b/.github/workflows/deploy_docs.yml index 462c0e5d..12edda0c 100644 --- a/.github/workflows/deploy_docs.yml +++ b/.github/workflows/deploy_docs.yml @@ -4,6 +4,9 @@ on: push: branches: - main + # paths: + # - "docs/**" + # - "mkdocs.yml" release: types: [published] @@ -40,8 +43,10 @@ jobs: run: | if [ "${{ github.event_name }}" = "release" ]; then uv run mike deploy --push --update-aliases ${{ github.event.release.tag_name }} latest + uv run mike set-default --push ${{ github.event.release.tag_name }} else uv run mike deploy --push main + uv run mike set-default --push main fi deploy: diff --git a/uv.lock b/uv.lock index 8ec36cbc..0f36c084 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 1 +revision = 2 requires-python = ">=3.13" [[package]] @@ -9,18 +9,18 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593 } +sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593, upload-time = "2024-12-21T18:38:44.339Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = 
"sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188 }, + { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188, upload-time = "2024-12-21T18:38:41.666Z" }, ] [[package]] name = "colorama" version = "0.4.6" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, ] [[package]] @@ -30,9 +30,30 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "python-dateutil" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d9/29/d40217cbe2f6b1359e00c6c307bb3fc876ba74068cbab3dde77f03ca0dc4/ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343", size = 10943 } +sdist = { url = "https://files.pythonhosted.org/packages/d9/29/d40217cbe2f6b1359e00c6c307bb3fc876ba74068cbab3dde77f03ca0dc4/ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343", size = 10943, upload-time = "2022-05-02T15:47:16.11Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f7/ec/67fbef5d497f86283db54c22eec6f6140243aae73265799baaaa19cd17fb/ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619", size = 11034 }, + { url = "https://files.pythonhosted.org/packages/f7/ec/67fbef5d497f86283db54c22eec6f6140243aae73265799baaaa19cd17fb/ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619", size = 11034, upload-time = "2022-05-02T15:47:14.552Z" }, +] + +[[package]] +name = "importlib-metadata" +version = "8.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = 
"2025-04-27T15:29:00.214Z" }, +] + +[[package]] +name = "importlib-resources" +version = "6.5.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cf/8c/f834fbf984f691b4f7ff60f50b514cc3de5cc08abfc3295564dd89c5e2e7/importlib_resources-6.5.2.tar.gz", hash = "sha256:185f87adef5bcc288449d98fb4fba07cea78bc036455dd44c5fc4a2fe78fed2c", size = 44693, upload-time = "2025-01-03T18:51:56.698Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/ed/1f1afb2e9e7f38a545d628f864d562a5ae64fe6f7a10e28ffb9b185b4e89/importlib_resources-6.5.2-py3-none-any.whl", hash = "sha256:789cfdc3ed28c78b67a06acb8126751ced69a3d5f79c095a98298cd8a760ccec", size = 37461, upload-time = "2025-01-03T18:51:54.306Z" }, ] [[package]] @@ -42,55 +63,74 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "markupsafe" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/af/92/b3130cbbf5591acf9ade8708c365f3238046ac7cb8ccba6e81abccb0ccff/jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb", size = 244674 } +sdist = { url = "https://files.pythonhosted.org/packages/af/92/b3130cbbf5591acf9ade8708c365f3238046ac7cb8ccba6e81abccb0ccff/jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb", size = 244674, upload-time = "2024-12-21T18:30:22.828Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bd/0f/2ba5fbcd631e3e88689309dbe978c5769e883e4b84ebfe7da30b43275c5a/jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb", size = 134596 }, + { url = "https://files.pythonhosted.org/packages/bd/0f/2ba5fbcd631e3e88689309dbe978c5769e883e4b84ebfe7da30b43275c5a/jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb", size = 134596, upload-time = "2024-12-21T18:30:19.133Z" }, ] [[package]] name = "markdown" version = "3.7" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/54/28/3af612670f82f4c056911fbbbb42760255801b3068c48de792d354ff4472/markdown-3.7.tar.gz", hash = "sha256:2ae2471477cfd02dbbf038d5d9bc226d40def84b4fe2986e49b59b6b472bbed2", size = 357086 } +sdist = { url = "https://files.pythonhosted.org/packages/54/28/3af612670f82f4c056911fbbbb42760255801b3068c48de792d354ff4472/markdown-3.7.tar.gz", hash = "sha256:2ae2471477cfd02dbbf038d5d9bc226d40def84b4fe2986e49b59b6b472bbed2", size = 357086, upload-time = "2024-08-16T15:55:17.812Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3f/08/83871f3c50fc983b88547c196d11cf8c3340e37c32d2e9d6152abe2c61f7/Markdown-3.7-py3-none-any.whl", hash = "sha256:7eb6df5690b81a1d7942992c97fad2938e956e79df20cbc6186e9c3a77b1c803", size = 106349 }, + { url = "https://files.pythonhosted.org/packages/3f/08/83871f3c50fc983b88547c196d11cf8c3340e37c32d2e9d6152abe2c61f7/Markdown-3.7-py3-none-any.whl", hash = "sha256:7eb6df5690b81a1d7942992c97fad2938e956e79df20cbc6186e9c3a77b1c803", size = 106349, upload-time = "2024-08-16T15:55:16.176Z" }, ] [[package]] name = "markupsafe" version = "3.0.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537 } +sdist = { url = 
"https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537, upload-time = "2024-10-18T15:21:54.129Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274 }, - { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352 }, - { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122 }, - { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085 }, - { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978 }, - { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208 }, - { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357 }, - { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344 }, - { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101 }, - { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603 }, - { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510 }, - { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = 
"sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486 }, - { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480 }, - { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914 }, - { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796 }, - { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473 }, - { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114 }, - { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098 }, - { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208 }, - { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 }, + { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274, upload-time = "2024-10-18T15:21:24.577Z" }, + { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352, upload-time = "2024-10-18T15:21:25.382Z" }, + { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122, upload-time = "2024-10-18T15:21:26.199Z" }, + { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085, upload-time = "2024-10-18T15:21:27.029Z" }, + { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978, upload-time = "2024-10-18T15:21:27.846Z" }, + { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208, upload-time = "2024-10-18T15:21:28.744Z" }, + { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357, upload-time = "2024-10-18T15:21:29.545Z" }, + { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344, upload-time = "2024-10-18T15:21:30.366Z" }, + { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101, upload-time = "2024-10-18T15:21:31.207Z" }, + { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603, upload-time = "2024-10-18T15:21:32.032Z" }, + { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510, upload-time = "2024-10-18T15:21:33.625Z" }, + { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486, upload-time = "2024-10-18T15:21:34.611Z" }, + { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480, upload-time = "2024-10-18T15:21:35.398Z" }, + { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914, upload-time = "2024-10-18T15:21:36.231Z" }, + { url = 
"https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796, upload-time = "2024-10-18T15:21:37.073Z" }, + { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473, upload-time = "2024-10-18T15:21:37.932Z" }, + { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114, upload-time = "2024-10-18T15:21:39.799Z" }, + { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098, upload-time = "2024-10-18T15:21:40.813Z" }, + { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208, upload-time = "2024-10-18T15:21:41.814Z" }, + { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739, upload-time = "2024-10-18T15:21:42.784Z" }, ] [[package]] name = "mergedeep" version = "1.3.4" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/3a/41/580bb4006e3ed0361b8151a01d324fb03f420815446c7def45d02f74c270/mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8", size = 4661 } +sdist = { url = "https://files.pythonhosted.org/packages/3a/41/580bb4006e3ed0361b8151a01d324fb03f420815446c7def45d02f74c270/mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8", size = 4661, upload-time = "2021-02-05T18:55:30.623Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/2c/19/04f9b178c2d8a15b076c8b5140708fa6ffc5601fb6f1e975537072df5b2a/mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307", size = 6354 }, + { url = "https://files.pythonhosted.org/packages/2c/19/04f9b178c2d8a15b076c8b5140708fa6ffc5601fb6f1e975537072df5b2a/mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307", size = 6354, upload-time = "2021-02-05T18:55:29.583Z" }, +] + +[[package]] +name = "mike" +version = "2.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata" }, + { name = "importlib-resources" }, + { name = "jinja2" }, + { name = "mkdocs" }, + { name = "pyparsing" }, + { name = "pyyaml" }, + { name = "pyyaml-env-tag" }, + { name = "verspec" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/ab/f7/2933f1a1fb0e0f077d5d6a92c6c7f8a54e6128241f116dff4df8b6050bbf/mike-2.1.3.tar.gz", hash = "sha256:abd79b8ea483fb0275b7972825d3082e5ae67a41820f8d8a0dc7a3f49944e810", size = 38119, upload-time = "2024-08-13T05:02:14.167Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/1a/31b7cd6e4e7a02df4e076162e9783620777592bea9e4bb036389389af99d/mike-2.1.3-py3-none-any.whl", hash = "sha256:d90c64077e84f06272437b464735130d380703a76a5738b152932884c60c062a", size = 33754, upload-time = "2024-08-13T05:02:12.515Z" }, ] [[package]] @@ -112,9 +152,9 @@ dependencies = [ { name = "pyyaml-env-tag" }, { name = "watchdog" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/bc/c6/bbd4f061bd16b378247f12953ffcb04786a618ce5e904b8c5a01a0309061/mkdocs-1.6.1.tar.gz", hash = "sha256:7b432f01d928c084353ab39c57282f29f92136665bdd6abf7c1ec8d822ef86f2", size = 3889159 } +sdist = { url = "https://files.pythonhosted.org/packages/bc/c6/bbd4f061bd16b378247f12953ffcb04786a618ce5e904b8c5a01a0309061/mkdocs-1.6.1.tar.gz", hash = "sha256:7b432f01d928c084353ab39c57282f29f92136665bdd6abf7c1ec8d822ef86f2", size = 3889159, upload-time = "2024-08-30T12:24:06.899Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/22/5b/dbc6a8cddc9cfa9c4971d59fb12bb8d42e161b7e7f8cc89e49137c5b279c/mkdocs-1.6.1-py3-none-any.whl", hash = "sha256:db91759624d1647f3f34aa0c3f327dd2601beae39a366d6e064c03468d35c20e", size = 3864451 }, + { url = "https://files.pythonhosted.org/packages/22/5b/dbc6a8cddc9cfa9c4971d59fb12bb8d42e161b7e7f8cc89e49137c5b279c/mkdocs-1.6.1-py3-none-any.whl", hash = "sha256:db91759624d1647f3f34aa0c3f327dd2601beae39a366d6e064c03468d35c20e", size = 3864451, upload-time = "2024-08-30T12:24:05.054Z" }, ] [[package]] @@ -126,9 +166,9 @@ dependencies = [ { name = "platformdirs" }, { name = "pyyaml" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/98/f5/ed29cd50067784976f25ed0ed6fcd3c2ce9eb90650aa3b2796ddf7b6870b/mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c", size = 10239 } +sdist = { url = "https://files.pythonhosted.org/packages/98/f5/ed29cd50067784976f25ed0ed6fcd3c2ce9eb90650aa3b2796ddf7b6870b/mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c", size = 10239, upload-time = "2023-11-20T17:51:09.981Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9f/d4/029f984e8d3f3b6b726bd33cafc473b75e9e44c0f7e80a5b29abc466bdea/mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134", size = 9521 }, + { url = "https://files.pythonhosted.org/packages/9f/d4/029f984e8d3f3b6b726bd33cafc473b75e9e44c0f7e80a5b29abc466bdea/mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134", size = 9521, upload-time = "2023-11-20T17:51:08.587Z" }, ] [[package]] @@ -138,36 +178,36 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mkdocs" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/65/13/a2b2b81604481569982fdaf51f0746f320df303efbd13d7b74fbf7b2c3a4/mkdocs_github_admonitions_plugin-0.0.3.tar.gz", hash = "sha256:4fd3ca88157c18c5f0cc4420c1a7f73ed1ed3f1886f41d6ce869932e90f38c48", size = 3998 } +sdist = { url = "https://files.pythonhosted.org/packages/65/13/a2b2b81604481569982fdaf51f0746f320df303efbd13d7b74fbf7b2c3a4/mkdocs_github_admonitions_plugin-0.0.3.tar.gz", hash = 
"sha256:4fd3ca88157c18c5f0cc4420c1a7f73ed1ed3f1886f41d6ce869932e90f38c48", size = 3998, upload-time = "2024-10-23T19:30:39.095Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d2/87/650f2cbd07f142034d84357ac651586748032546287ba70e90244180b92c/mkdocs_github_admonitions_plugin-0.0.3-py3-none-any.whl", hash = "sha256:cb06f56e5b51e5d7b22fcbb4ab632079e3082b7f37bdbeb20cc9fd8a7c5e1657", size = 5043 }, + { url = "https://files.pythonhosted.org/packages/d2/87/650f2cbd07f142034d84357ac651586748032546287ba70e90244180b92c/mkdocs_github_admonitions_plugin-0.0.3-py3-none-any.whl", hash = "sha256:cb06f56e5b51e5d7b22fcbb4ab632079e3082b7f37bdbeb20cc9fd8a7c5e1657", size = 5043, upload-time = "2024-10-23T19:30:38.195Z" }, ] [[package]] name = "packaging" version = "24.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 } +sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950, upload-time = "2024-11-08T09:47:47.202Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 }, + { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451, upload-time = "2024-11-08T09:47:44.722Z" }, ] [[package]] name = "pathspec" version = "0.12.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043 } +sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191 }, + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, ] [[package]] name = "platformdirs" version = "4.3.6" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/13/fc/128cc9cb8f03208bdbf93d3aa862e16d376844a14f9a0ce5cf4507372de4/platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907", size = 21302 } +sdist = { url = 
"https://files.pythonhosted.org/packages/13/fc/128cc9cb8f03208bdbf93d3aa862e16d376844a14f9a0ce5cf4507372de4/platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907", size = 21302, upload-time = "2024-09-17T19:06:50.688Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3c/a6/bc1012356d8ece4d66dd75c4b9fc6c1f6650ddd5991e421177d9f8f671be/platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb", size = 18439 }, + { url = "https://files.pythonhosted.org/packages/3c/a6/bc1012356d8ece4d66dd75c4b9fc6c1f6650ddd5991e421177d9f8f671be/platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb", size = 18439, upload-time = "2024-09-17T19:06:49.212Z" }, ] [[package]] @@ -175,16 +215,27 @@ name = "postgrestools" version = "0.1.0" source = { virtual = "." } dependencies = [ + { name = "mike" }, { name = "mkdocs" }, { name = "mkdocs-github-admonitions-plugin" }, ] [package.metadata] requires-dist = [ + { name = "mike", specifier = ">=2.1.3" }, { name = "mkdocs", specifier = ">=1.6.1" }, { name = "mkdocs-github-admonitions-plugin", specifier = ">=0.0.3" }, ] +[[package]] +name = "pyparsing" +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/22/f1129e69d94ffff626bdb5c835506b3a5b4f3d070f17ea295e12c2c6f60f/pyparsing-3.2.3.tar.gz", hash = "sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be", size = 1088608, upload-time = "2025-03-25T05:01:28.114Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/e7/df2285f3d08fee213f2d041540fa4fc9ca6c2d44cf36d3a035bf2a8d2bcc/pyparsing-3.2.3-py3-none-any.whl", hash = "sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf", size = 111120, upload-time = "2025-03-25T05:01:24.908Z" }, +] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -192,26 +243,26 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "six" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432 } +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892 }, + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, ] [[package]] name = "pyyaml" version = "6.0.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = 
"sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631, upload-time = "2024-08-06T20:33:50.674Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, - { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, - { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, - { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, - { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, - { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, - { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, - { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, - { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309, upload-time = "2024-08-06T20:32:43.4Z" }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679, upload-time = "2024-08-06T20:32:44.801Z" }, + { url = 
"https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428, upload-time = "2024-08-06T20:32:46.432Z" }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361, upload-time = "2024-08-06T20:32:51.188Z" }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523, upload-time = "2024-08-06T20:32:53.019Z" }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660, upload-time = "2024-08-06T20:32:54.708Z" }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597, upload-time = "2024-08-06T20:32:56.985Z" }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527, upload-time = "2024-08-06T20:33:03.001Z" }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" }, ] [[package]] @@ -221,37 +272,55 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pyyaml" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fb/8e/da1c6c58f751b70f8ceb1eb25bc25d524e8f14fe16edcce3f4e3ba08629c/pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb", size = 5631 } +sdist = { url = "https://files.pythonhosted.org/packages/fb/8e/da1c6c58f751b70f8ceb1eb25bc25d524e8f14fe16edcce3f4e3ba08629c/pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb", size = 5631, upload-time = "2020-11-12T02:38:26.239Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5a/66/bbb1dd374f5c870f59c5bb1db0e18cbe7fa739415a24cbd95b2d1f5ae0c4/pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069", size = 3911 }, + { url = "https://files.pythonhosted.org/packages/5a/66/bbb1dd374f5c870f59c5bb1db0e18cbe7fa739415a24cbd95b2d1f5ae0c4/pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069", size = 3911, upload-time = "2020-11-12T02:38:24.638Z" }, ] [[package]] name = "six" version = "1.17.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031 } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050 }, + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "verspec" +version = "0.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/44/8126f9f0c44319b2efc65feaad589cadef4d77ece200ae3c9133d58464d0/verspec-0.1.0.tar.gz", hash = "sha256:c4504ca697b2056cdb4bfa7121461f5a0e81809255b41c03dda4ba823637c01e", size = 27123, upload-time = "2020-11-30T02:24:09.646Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/ce/3b6fee91c85626eaf769d617f1be9d2e15c1cca027bbdeb2e0d751469355/verspec-0.1.0-py3-none-any.whl", hash = "sha256:741877d5633cc9464c45a469ae2a31e801e6dbbaa85b9675d481cda100f11c31", size = 19640, upload-time = "2020-11-30T02:24:08.387Z" }, ] [[package]] name = "watchdog" version = "6.0.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220 } +sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220, upload-time = "2024-11-01T14:07:13.037Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/98/b0345cabdce2041a01293ba483333582891a3bd5769b08eceb0d406056ef/watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c", size = 96480, upload-time = "2024-11-01T14:06:42.952Z" }, + { url = "https://files.pythonhosted.org/packages/85/83/cdf13902c626b28eedef7ec4f10745c52aad8a8fe7eb04ed7b1f111ca20e/watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134", size = 88451, upload-time = "2024-11-01T14:06:45.084Z" }, + { url = "https://files.pythonhosted.org/packages/fe/c4/225c87bae08c8b9ec99030cd48ae9c4eca050a59bf5c2255853e18c87b50/watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b", size = 89057, upload-time = "2024-11-01T14:06:47.324Z" }, + { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = 
"sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079, upload-time = "2024-11-01T14:06:59.472Z" }, + { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078, upload-time = "2024-11-01T14:07:01.431Z" }, + { url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076, upload-time = "2024-11-01T14:07:02.568Z" }, + { url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077, upload-time = "2024-11-01T14:07:03.893Z" }, + { url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078, upload-time = "2024-11-01T14:07:05.189Z" }, + { url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077, upload-time = "2024-11-01T14:07:06.376Z" }, + { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078, upload-time = "2024-11-01T14:07:07.547Z" }, + { url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065, upload-time = "2024-11-01T14:07:09.525Z" }, + { url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070, upload-time = "2024-11-01T14:07:10.686Z" }, + { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" }, +] + +[[package]] +name = "zipp" +version = "3.22.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/12/b6/7b3d16792fdf94f146bed92be90b4eb4563569eca91513c8609aebf0c167/zipp-3.22.0.tar.gz", hash = "sha256:dd2f28c3ce4bc67507bfd3781d21b7bb2be31103b51a4553ad7d90b84e57ace5", size = 25257, upload-time = "2025-05-26T14:46:32.217Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/68/98/b0345cabdce2041a01293ba483333582891a3bd5769b08eceb0d406056ef/watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c", size = 96480 }, - { url 
= "https://files.pythonhosted.org/packages/85/83/cdf13902c626b28eedef7ec4f10745c52aad8a8fe7eb04ed7b1f111ca20e/watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134", size = 88451 }, - { url = "https://files.pythonhosted.org/packages/fe/c4/225c87bae08c8b9ec99030cd48ae9c4eca050a59bf5c2255853e18c87b50/watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b", size = 89057 }, - { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079 }, - { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078 }, - { url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076 }, - { url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077 }, - { url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078 }, - { url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077 }, - { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078 }, - { url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065 }, - { url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070 }, - { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067 }, + { url = "https://files.pythonhosted.org/packages/ad/da/f64669af4cae46f17b90798a827519ce3737d31dbafad65d391e49643dc4/zipp-3.22.0-py3-none-any.whl", hash = "sha256:fe208f65f2aca48b81f9e6fd8cf7b8b32c26375266b009b413d45306b6148343", size = 9796, upload-time = "2025-05-26T14:46:30.775Z" }, ] From 5c0e255971f2c322bab848b057fc645cc9ffdb1f Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Thu, 5 Jun 2025 09:37:58 +0200 Subject: [PATCH 068/114] fix(docs): make latest default after next release (#420) --- .github/workflows/deploy_docs.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/deploy_docs.yml b/.github/workflows/deploy_docs.yml index 12edda0c..c32bc2af 100644 --- a/.github/workflows/deploy_docs.yml +++ b/.github/workflows/deploy_docs.yml @@ -43,10 +43,9 @@ jobs: run: | if [ "${{ github.event_name }}" = "release" ]; then uv run mike deploy --push --update-aliases ${{ github.event.release.tag_name }} latest - uv run mike set-default --push ${{ github.event.release.tag_name }} + uv run mike set-default --push latest else uv run mike deploy --push main - uv run mike set-default --push main fi deploy: From d7ad288e8d72ed7cc2e223c4ec002144eeb7d48a Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Thu, 5 Jun 2025 17:47:44 +0200 Subject: [PATCH 069/114] fix(completions): convert SQL to lowercase (#416) --- crates/pgt_completions/src/sanitization.rs | 39 +++++++++++++++++++--- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/crates/pgt_completions/src/sanitization.rs b/crates/pgt_completions/src/sanitization.rs index 40dea7e6..154998e7 100644 --- a/crates/pgt_completions/src/sanitization.rs +++ b/crates/pgt_completions/src/sanitization.rs @@ -48,7 +48,8 @@ impl<'larger, 'smaller> From> for SanitizedCompletionP where 'larger: 'smaller, { - fn from(params: CompletionParams<'larger>) -> Self { + fn from(mut params: CompletionParams<'larger>) -> Self { + params.text = params.text.to_ascii_lowercase(); if cursor_inbetween_nodes(¶ms.text, params.position) || cursor_prepared_to_write_token_after_last_node(¶ms.text, params.position) || cursor_before_semicolon(params.tree, params.position) @@ -263,13 +264,43 @@ fn cursor_between_parentheses(sql: &str, position: TextSize) -> bool { #[cfg(test)] mod tests { + use pgt_schema_cache::SchemaCache; use pgt_text_size::TextSize; - use crate::sanitization::{ - cursor_before_semicolon, cursor_between_parentheses, cursor_inbetween_nodes, - cursor_on_a_dot, cursor_prepared_to_write_token_after_last_node, + use crate::{ + CompletionParams, SanitizedCompletionParams, + sanitization::{ + cursor_before_semicolon, cursor_between_parentheses, cursor_inbetween_nodes, + cursor_on_a_dot, cursor_prepared_to_write_token_after_last_node, + }, }; + #[test] + fn should_lowercase_everything_except_replaced_token() { + let input = "SELECT FROM users WHERE ts = NOW();"; + + let position = TextSize::new(7); + let cache = SchemaCache::default(); + + let mut ts = tree_sitter::Parser::new(); + ts.set_language(tree_sitter_sql::language()).unwrap(); + let tree = ts.parse(input, None).unwrap(); + + let params = CompletionParams { + position, + schema: &cache, + text: input.into(), + tree: &tree, + }; + + let sanitized = SanitizedCompletionParams::from(params); + + assert_eq!( + sanitized.text, + "select REPLACED_TOKEN from users where ts = now();" + ); + } + #[test] fn test_cursor_inbetween_nodes() { // note: two spaces between select and from. 
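Why the lowercasing works: the completion machinery in this crate matches against lowercase keywords and tree-sitter node text, so an uppercase `SELECT ... FROM` would otherwise yield no suggestions. Below is a minimal standalone sketch of the behavior the new test pins down, using only the standard library; the splice index and placeholder are illustrative, not the crate's actual sanitizer internals:

    fn main() {
        // Two spaces after SELECT: the cursor sits between them, and the
        // sanitizer splices its placeholder in *after* lowercasing, which
        // is why REPLACED_TOKEN survives in uppercase in the test above.
        let input = "SELECT  FROM users WHERE ts = NOW();";
        let mut text = input.to_ascii_lowercase();
        text.insert_str(7, "REPLACED_TOKEN");
        assert_eq!(text, "select REPLACED_TOKEN from users where ts = now();");
    }

One caveat worth noting: `to_ascii_lowercase` also folds quoted identifiers, which are case-sensitive in Postgres; that is presumably acceptable here because the sanitized text is only matched against for completions, never executed.
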
From 513c73c5cf2eda365a50e2425fef1625fe0ec535 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Fri, 6 Jun 2025 08:47:21 +0200 Subject: [PATCH 070/114] feat(completions): alter/drop/rename column statements (#421) --- crates/pgt_completions/src/context/mod.rs | 83 ++++++++++++++----- .../pgt_completions/src/providers/columns.rs | 55 ++++++++++++ .../src/relevance/filtering.rs | 29 +++++-- .../src/queries/relations.rs | 56 +++++++++++++ 4 files changed, 191 insertions(+), 32 deletions(-) diff --git a/crates/pgt_completions/src/context/mod.rs b/crates/pgt_completions/src/context/mod.rs index 0bb190a9..7006c5bf 100644 --- a/crates/pgt_completions/src/context/mod.rs +++ b/crates/pgt_completions/src/context/mod.rs @@ -31,6 +31,9 @@ pub enum WrappingClause<'a> { Insert, AlterTable, DropTable, + DropColumn, + AlterColumn, + RenameColumn, PolicyName, ToRoleAssignment, } @@ -424,7 +427,7 @@ impl<'a> CompletionContext<'a> { } "where" | "update" | "select" | "delete" | "from" | "join" | "column_definitions" - | "drop_table" | "alter_table" => { + | "drop_table" | "alter_table" | "drop_column" | "alter_column" | "rename_column" => { self.wrapping_clause_type = self.get_wrapping_clause_from_current_node(current_node, &mut cursor); } @@ -515,6 +518,8 @@ impl<'a> CompletionContext<'a> { (WrappingClause::From, &["from"]), (WrappingClause::Join { on_node: None }, &["join"]), (WrappingClause::AlterTable, &["alter", "table"]), + (WrappingClause::AlterColumn, &["alter", "table", "alter"]), + (WrappingClause::RenameColumn, &["alter", "table", "rename"]), ( WrappingClause::AlterTable, &["alter", "table", "if", "exists"], @@ -575,10 +580,54 @@ impl<'a> CompletionContext<'a> { let mut first_sibling = self.get_first_sibling(node); if let Some(clause) = self.wrapping_clause_type.as_ref() { - if clause == &WrappingClause::Insert { - while let Some(sib) = first_sibling.next_sibling() { - match sib.kind() { - "object_reference" => { + match *clause { + WrappingClause::Insert => { + while let Some(sib) = first_sibling.next_sibling() { + match sib.kind() { + "object_reference" => { + if let Some(NodeText::Original(txt)) = + self.get_ts_node_content(&sib) + { + let mut iter = txt.split('.').rev(); + let table = iter.next().unwrap().to_string(); + let schema = iter.next().map(|s| s.to_string()); + self.mentioned_relations + .entry(schema) + .and_modify(|s| { + s.insert(table.clone()); + }) + .or_insert(HashSet::from([table])); + } + } + + "column" => { + if let Some(NodeText::Original(txt)) = + self.get_ts_node_content(&sib) + { + let entry = MentionedColumn { + column: txt, + alias: None, + }; + + self.mentioned_columns + .entry(Some(WrappingClause::Insert)) + .and_modify(|s| { + s.insert(entry.clone()); + }) + .or_insert(HashSet::from([entry])); + } + } + + _ => {} + } + + first_sibling = sib; + } + } + + WrappingClause::AlterColumn => { + while let Some(sib) = first_sibling.next_sibling() { + if sib.kind() == "object_reference" { if let Some(NodeText::Original(txt)) = self.get_ts_node_content(&sib) { let mut iter = txt.split('.').rev(); let table = iter.next().unwrap().to_string(); @@ -591,27 +640,12 @@ impl<'a> CompletionContext<'a> { .or_insert(HashSet::from([table])); } } - "column" => { - if let Some(NodeText::Original(txt)) = self.get_ts_node_content(&sib) { - let entry = MentionedColumn { - column: txt, - alias: None, - }; - self.mentioned_columns - .entry(Some(WrappingClause::Insert)) - .and_modify(|s| { - s.insert(entry.clone()); - }) - 
.or_insert(HashSet::from([entry])); - } - } - - _ => {} + first_sibling = sib; } - - first_sibling = sib; } + + _ => {} } } } @@ -628,6 +662,9 @@ impl<'a> CompletionContext<'a> { "delete" => Some(WrappingClause::Delete), "from" => Some(WrappingClause::From), "drop_table" => Some(WrappingClause::DropTable), + "drop_column" => Some(WrappingClause::DropColumn), + "alter_column" => Some(WrappingClause::AlterColumn), + "rename_column" => Some(WrappingClause::RenameColumn), "alter_table" => Some(WrappingClause::AlterTable), "column_definitions" => Some(WrappingClause::ColumnDefinitions), "insert" => Some(WrappingClause::Insert), diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs index b1dcbdf7..4299973b 100644 --- a/crates/pgt_completions/src/providers/columns.rs +++ b/crates/pgt_completions/src/providers/columns.rs @@ -762,4 +762,59 @@ mod tests { ) .await; } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn suggests_columns_in_alter_table_and_drop_table(pool: PgPool) { + let setup = r#" + create table instruments ( + id bigint primary key generated always as identity, + name text not null, + z text, + created_at timestamp with time zone default now() + ); + + create table others ( + a text, + b text, + c text + ); + "#; + + pool.execute(setup).await.unwrap(); + + let queries = vec![ + format!("alter table instruments drop column {}", CURSOR_POS), + format!( + "alter table instruments drop column if exists {}", + CURSOR_POS + ), + format!( + "alter table instruments alter column {} set default", + CURSOR_POS + ), + format!("alter table instruments alter {} set default", CURSOR_POS), + format!("alter table public.instruments alter column {}", CURSOR_POS), + format!("alter table instruments alter {}", CURSOR_POS), + format!("alter table instruments rename {} to new_col", CURSOR_POS), + format!( + "alter table public.instruments rename column {} to new_col", + CURSOR_POS + ), + ]; + + for query in queries { + assert_complete_results( + query.as_str(), + vec![ + CompletionAssertion::Label("created_at".into()), + CompletionAssertion::Label("id".into()), + CompletionAssertion::Label("name".into()), + CompletionAssertion::Label("z".into()), + ], + None, + &pool, + ) + .await; + } + } } diff --git a/crates/pgt_completions/src/relevance/filtering.rs b/crates/pgt_completions/src/relevance/filtering.rs index 0be9e48a..ea681bd7 100644 --- a/crates/pgt_completions/src/relevance/filtering.rs +++ b/crates/pgt_completions/src/relevance/filtering.rs @@ -74,9 +74,13 @@ impl CompletionFilter<'_> { .map(|clause| { match self.data { CompletionRelevanceData::Table(_) => match clause { - WrappingClause::Select - | WrappingClause::Where - | WrappingClause::ColumnDefinitions => false, + WrappingClause::From | WrappingClause::Update => true, + + WrappingClause::Join { on_node: None } => true, + WrappingClause::Join { on_node: Some(on) } => ctx + .node_under_cursor + .as_ref() + .is_some_and(|cn| cn.start_byte() < on.end_byte()), WrappingClause::Insert => { ctx.wrapping_node_kind @@ -94,15 +98,22 @@ impl CompletionFilter<'_> { "keyword_table", ]), - _ => true, + _ => false, }, CompletionRelevanceData::Column(_) => { match clause { - WrappingClause::From - | WrappingClause::ColumnDefinitions - | WrappingClause::AlterTable - | WrappingClause::DropTable => false, + WrappingClause::Select + | WrappingClause::Update + | WrappingClause::Delete + | WrappingClause::DropColumn => true, + + WrappingClause::RenameColumn => ctx + 
.before_cursor_matches_kind(&["keyword_rename", "keyword_column"]), + + WrappingClause::AlterColumn => { + ctx.before_cursor_matches_kind(&["keyword_alter", "keyword_column"]) + } // We can complete columns in JOIN cluases, but only if we are after the // ON node in the "ON u.id = posts.user_id" part. @@ -126,7 +137,7 @@ impl CompletionFilter<'_> { && ctx.parent_matches_one_of_kind(&["field"])) } - _ => true, + _ => false, } } diff --git a/crates/pgt_treesitter_queries/src/queries/relations.rs b/crates/pgt_treesitter_queries/src/queries/relations.rs index 38fd0513..2d7e4431 100644 --- a/crates/pgt_treesitter_queries/src/queries/relations.rs +++ b/crates/pgt_treesitter_queries/src/queries/relations.rs @@ -22,6 +22,16 @@ static TS_QUERY: LazyLock = LazyLock::new(|| { (identifier)? @table )+ ) + (alter_table + (keyword_alter) + (keyword_table) + (object_reference + . + (identifier) @schema_or_table + "."? + (identifier)? @table + )+ + ) "#; tree_sitter::Query::new(tree_sitter_sql::language(), QUERY_STR).expect("Invalid TS Query") }); @@ -196,4 +206,50 @@ mod tests { assert_eq!(results[0].get_schema(sql), None); assert_eq!(results[0].get_table(sql), "users"); } + + #[test] + fn finds_alter_table_with_schema() { + let sql = r#"alter table public.users alter some_col set default 15;"#; + + let mut parser = tree_sitter::Parser::new(); + parser.set_language(tree_sitter_sql::language()).unwrap(); + + let tree = parser.parse(sql, None).unwrap(); + + let mut executor = TreeSitterQueriesExecutor::new(tree.root_node(), sql); + + executor.add_query_results::(); + + let results: Vec<&RelationMatch> = executor + .get_iter(None) + .filter_map(|q| q.try_into().ok()) + .collect(); + + assert_eq!(results.len(), 1); + assert_eq!(results[0].get_schema(sql), Some("public".into())); + assert_eq!(results[0].get_table(sql), "users"); + } + + #[test] + fn finds_alter_table_without_schema() { + let sql = r#"alter table users alter some_col set default 15;"#; + + let mut parser = tree_sitter::Parser::new(); + parser.set_language(tree_sitter_sql::language()).unwrap(); + + let tree = parser.parse(sql, None).unwrap(); + + let mut executor = TreeSitterQueriesExecutor::new(tree.root_node(), sql); + + executor.add_query_results::(); + + let results: Vec<&RelationMatch> = executor + .get_iter(None) + .filter_map(|q| q.try_into().ok()) + .collect(); + + assert_eq!(results.len(), 1); + assert_eq!(results[0].get_schema(sql), None); + assert_eq!(results[0].get_table(sql), "users"); + } } From ad22ef59f25bffc52ae7724025e048e323943006 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Tue, 10 Jun 2025 16:37:35 +0200 Subject: [PATCH 071/114] feat(compl): complete in (simple) function bodies (#426) --- .../pgt_workspace/src/features/completions.rs | 34 +++++++++++++++++-- .../workspace/server/statement_identifier.rs | 11 ++++++ 2 files changed, 43 insertions(+), 2 deletions(-) diff --git a/crates/pgt_workspace/src/features/completions.rs b/crates/pgt_workspace/src/features/completions.rs index 85342183..53eb9eab 100644 --- a/crates/pgt_workspace/src/features/completions.rs +++ b/crates/pgt_workspace/src/features/completions.rs @@ -49,7 +49,7 @@ pub(crate) fn get_statement_for_completions( if count == 1 { eligible_statements.next() } else { - let mut prev_stmt = None; + let mut prev_stmt: Option<(StatementId, TextRange, String, Arc)> = None; for current_stmt in eligible_statements { /* @@ -57,10 +57,16 @@ pub(crate) fn get_statement_for_completions( * with the next one. 
* * select 1 |select 1; + * + * This is however ok if the current statement is a child of the previous one, + * such as in CREATE FUNCTION bodies. */ - if prev_stmt.is_some_and(|_| current_stmt.1.contains(position)) { + if prev_stmt.is_some_and(|prev| { + current_stmt.1.contains(position) && !current_stmt.0.is_child_of(&prev.0) + }) { return None; } + prev_stmt = Some(current_stmt) } @@ -162,6 +168,30 @@ mod tests { assert_eq!(text, "select * from") } + #[test] + fn identifies_nested_stmts() { + let sql = format!( + r#" + create or replace function one() + returns integer + language sql + as $$ + select {} from cool; + $$; + "#, + CURSOR_POSITION + ); + + let sql = sql.trim(); + + let (doc, position) = get_doc_and_pos(sql); + + let (_, _, text, _) = + get_statement_for_completions(&doc, position).expect("Expected Statement"); + + assert_eq!(text.trim(), "select from cool;") + } + #[test] fn does_not_consider_too_far_offset() { let sql = format!("select * from {}", CURSOR_POSITION); diff --git a/crates/pgt_workspace/src/workspace/server/statement_identifier.rs b/crates/pgt_workspace/src/workspace/server/statement_identifier.rs index 7c7d76f0..627ff261 100644 --- a/crates/pgt_workspace/src/workspace/server/statement_identifier.rs +++ b/crates/pgt_workspace/src/workspace/server/statement_identifier.rs @@ -66,6 +66,17 @@ impl StatementId { matches!(self, StatementId::Child(_)) } + pub fn is_child_of(&self, maybe_parent: &StatementId) -> bool { + match self { + StatementId::Root(_) => false, + StatementId::Child(child_root) => match maybe_parent { + StatementId::Root(parent_rood) => child_root == parent_rood, + // TODO: can we have multiple nested statements? + StatementId::Child(_) => false, + }, + } + } + pub fn parent(&self) -> Option { match self { StatementId::Root(_) => None, From 9144ea1dd77820fec0ad982d7087aeec3afd5e97 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Tue, 10 Jun 2025 16:49:12 +0200 Subject: [PATCH 072/114] fix(splitter): split grant stmts (#425) --- crates/pgt_statement_splitter/src/lib.rs | 25 +++++++++++++++---- .../src/parser/common.rs | 22 ++++++++++++++++ .../tests/data/grant_statements__4.sql | 11 ++++++++ 3 files changed, 53 insertions(+), 5 deletions(-) create mode 100644 crates/pgt_statement_splitter/tests/data/grant_statements__4.sql diff --git a/crates/pgt_statement_splitter/src/lib.rs b/crates/pgt_statement_splitter/src/lib.rs index e43a1095..c53ae78c 100644 --- a/crates/pgt_statement_splitter/src/lib.rs +++ b/crates/pgt_statement_splitter/src/lib.rs @@ -45,8 +45,9 @@ mod tests { assert_eq!( self.parse.ranges.len(), expected.len(), - "Expected {} statements, got {}: {:?}", + "Expected {} statements for input {}, got {}: {:?}", expected.len(), + self.input, self.parse.ranges.len(), self.parse .ranges @@ -114,10 +115,24 @@ mod tests { #[test] fn grant() { - Tester::from("GRANT SELECT ON TABLE \"public\".\"my_table\" TO \"my_role\";") - .expect_statements(vec![ - "GRANT SELECT ON TABLE \"public\".\"my_table\" TO \"my_role\";", - ]); + let stmts = vec![ + "GRANT SELECT ON TABLE \"public\".\"my_table\" TO \"my_role\";", + "GRANT UPDATE ON TABLE \"public\".\"my_table\" TO \"my_role\";", + "GRANT DELETE ON TABLE \"public\".\"my_table\" TO \"my_role\";", + "GRANT INSERT ON TABLE \"public\".\"my_table\" TO \"my_role\";", + "GRANT CREATE ON SCHEMA \"public\" TO \"my_role\";", + "GRANT ALL PRIVILEGES ON DATABASE \"my_database\" TO \"my_role\";", + "GRANT USAGE ON SCHEMA \"public\" TO \"my_role\";", + "GRANT EXECUTE 
ON FUNCTION \"public\".\"my_function\"() TO \"my_role\";", + "GRANT REFERENCES ON TABLE \"public\".\"my_table\" TO \"my_role\";", + "GRANT SELECT, UPDATE ON ALL TABLES IN SCHEMA \"public\" TO \"my_role\";", + "GRANT SELECT, INSERT ON public.users TO anon WITH GRANT OPION GRANTED BY owner;", + "GRANT owner, admin to anon WITH ADMIN;", + ]; + + for stmt in stmts { + Tester::from(stmt).expect_statements(vec![stmt]); + } } #[test] diff --git a/crates/pgt_statement_splitter/src/parser/common.rs b/crates/pgt_statement_splitter/src/parser/common.rs index a5d68df1..2498c04a 100644 --- a/crates/pgt_statement_splitter/src/parser/common.rs +++ b/crates/pgt_statement_splitter/src/parser/common.rs @@ -217,6 +217,7 @@ pub(crate) fn unknown(p: &mut Parser, exclude: &[SyntaxKind]) { SyntaxKind::Except, // for grant SyntaxKind::Grant, + SyntaxKind::Ascii44, ] .iter() .all(|x| Some(x) != prev.as_ref()) @@ -246,6 +247,7 @@ pub(crate) fn unknown(p: &mut Parser, exclude: &[SyntaxKind]) { SyntaxKind::Instead, // for grant SyntaxKind::Grant, + SyntaxKind::Ascii44, ] .iter() .all(|x| Some(x) != prev.as_ref()) @@ -263,6 +265,10 @@ pub(crate) fn unknown(p: &mut Parser, exclude: &[SyntaxKind]) { SyntaxKind::Check, // TIMESTAMP WITH TIME ZONE should not start a new statement SyntaxKind::Time, + SyntaxKind::Grant, + SyntaxKind::Admin, + SyntaxKind::Inherit, + SyntaxKind::Set, ] .iter() .all(|x| Some(x) != next.as_ref()) @@ -271,6 +277,22 @@ pub(crate) fn unknown(p: &mut Parser, exclude: &[SyntaxKind]) { } p.advance(); } + + Some(SyntaxKind::Create) => { + let prev = p.look_back().map(|t| t.kind); + if [ + // for grant + SyntaxKind::Grant, + SyntaxKind::Ascii44, + ] + .iter() + .all(|x| Some(x) != prev.as_ref()) + { + break; + } + + p.advance(); + } Some(_) => { break; } diff --git a/crates/pgt_statement_splitter/tests/data/grant_statements__4.sql b/crates/pgt_statement_splitter/tests/data/grant_statements__4.sql new file mode 100644 index 00000000..5811810e --- /dev/null +++ b/crates/pgt_statement_splitter/tests/data/grant_statements__4.sql @@ -0,0 +1,11 @@ +GRANT CREATE ON SCHEMA public TO anon; + +GRANT SELECT, INSERT ON public.users TO anon WITH GRANT OPTION GRANTED BY Owner; + +GRANT read_access, write_access TO user_role + WITH INHERIT TRUE + GRANTED BY security_admin; + +GRANT manager_role TO employee_role + WITH ADMIN OPTION + GRANTED BY admin_role; From 895e14c7c7b9c083464067f4eda34fd042e4d498 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Tue, 10 Jun 2025 18:21:46 +0200 Subject: [PATCH 073/114] feat(completions): complete roles (#410) --- crates/pgt_completions/src/complete.rs | 4 +- .../src/context/base_parser.rs | 206 +++++++++ .../src/context/grant_parser.rs | 415 ++++++++++++++++++ crates/pgt_completions/src/context/mod.rs | 79 +++- .../src/context/policy_parser.rs | 245 +++-------- .../src/context/revoke_parser.rs | 339 ++++++++++++++ crates/pgt_completions/src/item.rs | 2 + crates/pgt_completions/src/providers/mod.rs | 2 + crates/pgt_completions/src/providers/roles.rs | 291 ++++++++++++ crates/pgt_completions/src/relevance.rs | 1 + .../src/relevance/filtering.rs | 15 +- .../pgt_completions/src/relevance/scoring.rs | 60 ++- crates/pgt_completions/src/sanitization.rs | 1 + crates/pgt_lsp/src/handlers/completions.rs | 1 + crates/pgt_lsp/src/session.rs | 3 + crates/pgt_workspace/src/settings.rs | 39 -- 16 files changed, 1458 insertions(+), 245 deletions(-) create mode 100644 crates/pgt_completions/src/context/base_parser.rs create mode 100644 
crates/pgt_completions/src/context/grant_parser.rs
 create mode 100644 crates/pgt_completions/src/context/revoke_parser.rs
 create mode 100644 crates/pgt_completions/src/providers/roles.rs

diff --git a/crates/pgt_completions/src/complete.rs b/crates/pgt_completions/src/complete.rs
index 5bc5d41c..bd5efd19 100644
--- a/crates/pgt_completions/src/complete.rs
+++ b/crates/pgt_completions/src/complete.rs
@@ -5,7 +5,8 @@ use crate::{
     context::CompletionContext,
     item::CompletionItem,
     providers::{
-        complete_columns, complete_functions, complete_policies, complete_schemas, complete_tables,
+        complete_columns, complete_functions, complete_policies, complete_roles, complete_schemas,
+        complete_tables,
     },
     sanitization::SanitizedCompletionParams,
 };
@@ -36,6 +37,7 @@ pub fn complete(params: CompletionParams) -> Vec<CompletionItem> {
     complete_columns(&ctx, &mut builder);
     complete_schemas(&ctx, &mut builder);
     complete_policies(&ctx, &mut builder);
+    complete_roles(&ctx, &mut builder);
 
     builder.finish()
 }

diff --git a/crates/pgt_completions/src/context/base_parser.rs b/crates/pgt_completions/src/context/base_parser.rs
new file mode 100644
index 00000000..93333679
--- /dev/null
+++ b/crates/pgt_completions/src/context/base_parser.rs
@@ -0,0 +1,206 @@
+use std::iter::Peekable;
+
+use pgt_text_size::{TextRange, TextSize};
+
+pub(crate) struct TokenNavigator {
+    tokens: Peekable<std::vec::IntoIter<WordWithIndex>>,
+    pub previous_token: Option<WordWithIndex>,
+    pub current_token: Option<WordWithIndex>,
+}
+
+impl TokenNavigator {
+    pub(crate) fn next_matches(&mut self, options: &[&str]) -> bool {
+        self.tokens
+            .peek()
+            .is_some_and(|c| options.contains(&c.get_word_without_quotes().as_str()))
+    }
+
+    pub(crate) fn prev_matches(&self, options: &[&str]) -> bool {
+        self.previous_token
+            .as_ref()
+            .is_some_and(|t| options.contains(&t.get_word_without_quotes().as_str()))
+    }
+
+    pub(crate) fn advance(&mut self) -> Option<WordWithIndex> {
+        // we can't peek back in an iterator, so we'll have to keep track manually.
+        self.previous_token = self.current_token.take();
+        self.current_token = self.tokens.next();
+        self.current_token.clone()
+    }
+}
+
+impl From<Vec<WordWithIndex>> for TokenNavigator {
+    fn from(tokens: Vec<WordWithIndex>) -> Self {
+        TokenNavigator {
+            tokens: tokens.into_iter().peekable(),
+            previous_token: None,
+            current_token: None,
+        }
+    }
+}
+
+pub(crate) trait CompletionStatementParser: Sized {
+    type Context: Default;
+    const NAME: &'static str;
+
+    fn looks_like_matching_stmt(sql: &str) -> bool;
+    fn parse(self) -> Self::Context;
+    fn make_parser(tokens: Vec<WordWithIndex>, cursor_position: usize) -> Self;
+
+    fn get_context(sql: &str, cursor_position: usize) -> Self::Context {
+        assert!(
+            Self::looks_like_matching_stmt(sql),
+            "Using {} for a wrong statement! Developer Error!",
+            Self::NAME
+        );
+
+        match sql_to_words(sql) {
+            Ok(tokens) => {
+                let parser = Self::make_parser(tokens, cursor_position);
+                parser.parse()
+            }
+            Err(_) => Self::Context::default(),
+        }
+    }
+}
+
+pub(crate) fn schema_and_table_name(token: &WordWithIndex) -> (String, Option<String>) {
+    let word = token.get_word_without_quotes();
+    let mut parts = word.split('.');
+
+    (
+        parts.next().unwrap().into(),
+        parts.next().map(|tb| tb.into()),
+    )
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub(crate) struct WordWithIndex {
+    word: String,
+    start: usize,
+    end: usize,
+}
+
+impl WordWithIndex {
+    pub(crate) fn is_under_cursor(&self, cursor_pos: usize) -> bool {
+        self.start <= cursor_pos && self.end > cursor_pos
+    }
+
+    pub(crate) fn get_range(&self) -> TextRange {
+        let start: u32 = self.start.try_into().expect("Text too long");
+        let end: u32 = self.end.try_into().expect("Text too long");
+        TextRange::new(TextSize::from(start), TextSize::from(end))
+    }
+
+    pub(crate) fn get_word_without_quotes(&self) -> String {
+        self.word.replace('"', "")
+    }
+
+    pub(crate) fn get_word(&self) -> String {
+        self.word.clone()
+    }
+}
+
+/// Note: A policy name within quotation marks will be considered a single word.
+pub(crate) fn sql_to_words(sql: &str) -> Result<Vec<WordWithIndex>, String> {
+    let mut words = vec![];
+
+    let mut start_of_word: Option<usize> = None;
+    let mut current_word = String::new();
+    let mut in_quotation_marks = false;
+
+    for (current_position, current_char) in sql.char_indices() {
+        if (current_char.is_ascii_whitespace() || current_char == ';')
+            && !current_word.is_empty()
+            && start_of_word.is_some()
+            && !in_quotation_marks
+        {
+            words.push(WordWithIndex {
+                word: current_word,
+                start: start_of_word.unwrap(),
+                end: current_position,
+            });
+
+            current_word = String::new();
+            start_of_word = None;
+        } else if (current_char.is_ascii_whitespace() || current_char == ';')
+            && current_word.is_empty()
+        {
+            // do nothing
+        } else if current_char == '"' && start_of_word.is_none() {
+            in_quotation_marks = true;
+            current_word.push(current_char);
+            start_of_word = Some(current_position);
+        } else if current_char == '"' && start_of_word.is_some() {
+            current_word.push(current_char);
+            in_quotation_marks = false;
+        } else if start_of_word.is_some() {
+            current_word.push(current_char)
+        } else {
+            start_of_word = Some(current_position);
+            current_word.push(current_char);
+        }
+    }
+
+    if let Some(start_of_word) = start_of_word {
+        if !current_word.is_empty() {
+            words.push(WordWithIndex {
+                word: current_word,
+                start: start_of_word,
+                end: sql.len(),
+            });
+        }
+    }
+
+    if in_quotation_marks {
+        Err("String was not closed properly.".into())
+    } else {
+        Ok(words)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::context::base_parser::{WordWithIndex, sql_to_words};
+
+    #[test]
+    fn determines_positions_correctly() {
+        let query = "\ncreate policy \"my cool pol\"\n\ton auth.users\n\tas permissive\n\tfor select\n\t\tto public\n\t\tusing (true);".to_string();
+
+        let words = sql_to_words(query.as_str()).unwrap();
+
+        assert_eq!(words[0], to_word("create", 1, 7));
+        assert_eq!(words[1], to_word("policy", 8, 14));
+        assert_eq!(words[2], to_word("\"my cool pol\"", 15, 28));
+        assert_eq!(words[3], to_word("on", 30, 32));
+        assert_eq!(words[4], to_word("auth.users", 33, 43));
+        assert_eq!(words[5], to_word("as", 45, 47));
+        assert_eq!(words[6], to_word("permissive", 48, 58));
+        assert_eq!(words[7], to_word("for", 60, 63));
+        assert_eq!(words[8], to_word("select", 64, 70));
+        assert_eq!(words[9], to_word("to", 73, 75));
+        assert_eq!(words[10], to_word("public", 78, 84));
+        assert_eq!(words[11], to_word("using", 87, 92));
+        assert_eq!(words[12], to_word("(true)", 93, 99));
+    }
+
+    #[test]
+    fn handles_schemas_in_quotation_marks() {
+        let query = r#"grant select on "public"."users""#.to_string();
+
+        let words = sql_to_words(query.as_str()).unwrap();
+
+        assert_eq!(words[0], to_word("grant", 0, 5));
+        assert_eq!(words[1], to_word("select", 6, 12));
+        assert_eq!(words[2], to_word("on", 13, 15));
+        assert_eq!(words[3], to_word(r#""public"."users""#, 16, 32));
+    }
+
+    fn to_word(word: &str, start: usize, end: usize) -> WordWithIndex {
+        WordWithIndex {
+            word: word.into(),
+            start,
+            end,
+        }
+    }
+}

diff --git a/crates/pgt_completions/src/context/grant_parser.rs b/crates/pgt_completions/src/context/grant_parser.rs
new file mode 100644
index 00000000..14ba882a
--- /dev/null
+++ b/crates/pgt_completions/src/context/grant_parser.rs
@@ -0,0 +1,415 @@
+use pgt_text_size::{TextRange, TextSize};
+
+use crate::context::base_parser::{
+    CompletionStatementParser, TokenNavigator, WordWithIndex, schema_and_table_name,
+};
+
+#[derive(Default, Debug, PartialEq, Eq)]
+pub(crate) struct GrantContext {
+    pub table_name: Option<String>,
+    pub schema_name: Option<String>,
+    pub node_text: String,
+    pub node_range: TextRange,
+    pub node_kind: String,
+}
+
+/// Simple parser that'll turn a grant-related statement into a context object required for
+/// completions.
+/// The parser will only work if the (trimmed) sql starts with `grant`.
+/// It can only parse grant statements.
+pub(crate) struct GrantParser {
+    navigator: TokenNavigator,
+    context: GrantContext,
+    cursor_position: usize,
+    in_roles_list: bool,
+}
+
+impl CompletionStatementParser for GrantParser {
+    type Context = GrantContext;
+    const NAME: &'static str = "GrantParser";
+
+    fn looks_like_matching_stmt(sql: &str) -> bool {
+        let lowercased = sql.to_ascii_lowercase();
+        let trimmed = lowercased.trim();
+        trimmed.starts_with("grant")
+    }
+
+    fn parse(mut self) -> Self::Context {
+        while let Some(token) = self.navigator.advance() {
+            if token.is_under_cursor(self.cursor_position) {
+                self.handle_token_under_cursor(token);
+            } else {
+                self.handle_token(token);
+            }
+        }
+
+        self.context
+    }
+
+    fn make_parser(tokens: Vec<WordWithIndex>, cursor_position: usize) -> Self {
+        Self {
+            navigator: tokens.into(),
+            context: GrantContext::default(),
+            cursor_position,
+            in_roles_list: false,
+        }
+    }
+}
+
+impl GrantParser {
+    fn handle_token_under_cursor(&mut self, token: WordWithIndex) {
+        if self.navigator.previous_token.is_none() {
+            return;
+        }
+
+        let previous = self.navigator.previous_token.take().unwrap();
+        let current = self
+            .navigator
+            .current_token
+            .as_ref()
+            .map(|w| w.get_word_without_quotes());
+
+        match previous
+            .get_word_without_quotes()
+            .to_ascii_lowercase()
+            .as_str()
+        {
+            "grant" => {
+                self.context.node_range = token.get_range();
+                self.context.node_kind = "grant_role".into();
+                self.context.node_text = token.get_word();
+            }
+            "on" if !matches!(current.as_deref(), Some("table")) => self.handle_table(&token),
+
+            "table" => {
+                self.handle_table(&token);
+            }
+            "to" => {
+                self.context.node_range = token.get_range();
+                self.context.node_kind = "grant_role".into();
+                self.context.node_text = token.get_word();
+            }
+            t => {
+                if self.in_roles_list && t.ends_with(',') {
+                    self.context.node_kind = "grant_role".into();
+                }
+
+                self.context.node_range = token.get_range();
+                self.context.node_text = token.get_word();
+            }
+        }
+    }
+
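+    // A worked example of the dispatch above, assuming the completion
+    // sanitizer has already replaced the cursor with `REPLACED_TOKEN`: for
+    // `grant select on public.users to REPLACED_TOKEN`, the token under the
+    // cursor follows "to", so the resulting context carries
+    // `node_kind == "grant_role"`, `schema_name == Some("public")` and
+    // `table_name == Some("users")`; the `infers_role_name` test below pins
+    // down the exact ranges for this case.
+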
fn handle_table(&mut self, token: &WordWithIndex) { + if token.get_word_without_quotes().contains('.') { + let (schema_name, table_name) = schema_and_table_name(token); + + let schema_name_len = schema_name.len(); + self.context.schema_name = Some(schema_name); + + let offset: u32 = schema_name_len.try_into().expect("Text too long"); + let range_without_schema = token + .get_range() + .checked_expand_start( + TextSize::new(offset + 1), // kill the dot as well + ) + .expect("Text too long"); + + self.context.node_range = range_without_schema; + self.context.node_kind = "grant_table".into(); + + // In practice, we should always have a table name. + // The completion sanitization will add a word after a `.` if nothing follows it; + // the token_text will then look like `schema.REPLACED_TOKEN`. + self.context.node_text = table_name.unwrap_or_default(); + } else { + self.context.node_range = token.get_range(); + self.context.node_text = token.get_word(); + self.context.node_kind = "grant_table".into(); + } + } + + fn handle_token(&mut self, token: WordWithIndex) { + match token.get_word_without_quotes().as_str() { + "on" if !self.navigator.next_matches(&[ + "table", + "schema", + "foreign", + "domain", + "sequence", + "database", + "function", + "procedure", + "routine", + "language", + "large", + "parameter", + "schema", + "tablespace", + "type", + ]) => + { + self.table_with_schema() + } + "table" => self.table_with_schema(), + + "to" => { + self.in_roles_list = true; + } + + t => { + if self.in_roles_list && !t.ends_with(',') { + self.in_roles_list = false; + } + } + } + } + + fn table_with_schema(&mut self) { + if let Some(token) = self.navigator.advance() { + if token.is_under_cursor(self.cursor_position) { + self.handle_token_under_cursor(token); + } else if token.get_word_without_quotes().contains('.') { + let (schema, maybe_table) = schema_and_table_name(&token); + self.context.schema_name = Some(schema); + self.context.table_name = maybe_table; + } else { + self.context.table_name = Some(token.get_word()); + } + }; + } +} + +#[cfg(test)] +mod tests { + use pgt_text_size::{TextRange, TextSize}; + + use crate::{ + context::base_parser::CompletionStatementParser, + context::grant_parser::{GrantContext, GrantParser}, + test_helper::CURSOR_POS, + }; + + fn with_pos(query: String) -> (usize, String) { + let mut pos: Option = None; + + for (p, c) in query.char_indices() { + if c == CURSOR_POS { + pos = Some(p); + break; + } + } + + ( + pos.expect("Please add cursor position!"), + query.replace(CURSOR_POS, "REPLACED_TOKEN").to_string(), + ) + } + + #[test] + fn infers_grant_keyword() { + let (pos, query) = with_pos(format!( + r#" + grant {} + "#, + CURSOR_POS + )); + + let context = GrantParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + GrantContext { + table_name: None, + schema_name: None, + node_text: "REPLACED_TOKEN".into(), + node_range: TextRange::new(TextSize::new(19), TextSize::new(33)), + node_kind: "grant_role".into(), + } + ); + } + + #[test] + fn infers_table_name() { + let (pos, query) = with_pos(format!( + r#" + grant select on {} + "#, + CURSOR_POS + )); + + let context = GrantParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + GrantContext { + table_name: None, + schema_name: None, + node_text: "REPLACED_TOKEN".into(), + node_range: TextRange::new(TextSize::new(29), TextSize::new(43)), + node_kind: "grant_table".into(), + } + ); + } + + #[test] + fn infers_table_name_with_keyword() { + let (pos, query) = with_pos(format!( + r#" + grant 
select on table {} + "#, + CURSOR_POS + )); + + let context = GrantParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + GrantContext { + table_name: None, + schema_name: None, + node_text: "REPLACED_TOKEN".into(), + node_range: TextRange::new(TextSize::new(35), TextSize::new(49)), + node_kind: "grant_table".into(), + } + ); + } + + #[test] + fn infers_schema_and_table_name() { + let (pos, query) = with_pos(format!( + r#" + grant select on public.{} + "#, + CURSOR_POS + )); + + let context = GrantParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + GrantContext { + table_name: None, + schema_name: Some("public".into()), + node_text: "REPLACED_TOKEN".into(), + node_range: TextRange::new(TextSize::new(36), TextSize::new(50)), + node_kind: "grant_table".into(), + } + ); + } + + #[test] + fn infers_schema_and_table_name_with_keyword() { + let (pos, query) = with_pos(format!( + r#" + grant select on table public.{} + "#, + CURSOR_POS + )); + + let context = GrantParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + GrantContext { + table_name: None, + schema_name: Some("public".into()), + node_text: "REPLACED_TOKEN".into(), + node_range: TextRange::new(TextSize::new(42), TextSize::new(56)), + node_kind: "grant_table".into(), + } + ); + } + + #[test] + fn infers_role_name() { + let (pos, query) = with_pos(format!( + r#" + grant select on public.users to {} + "#, + CURSOR_POS + )); + + let context = GrantParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + GrantContext { + table_name: Some("users".into()), + schema_name: Some("public".into()), + node_text: "REPLACED_TOKEN".into(), + node_range: TextRange::new(TextSize::new(45), TextSize::new(59)), + node_kind: "grant_role".into(), + } + ); + } + + #[test] + fn determines_table_name_after_schema() { + let (pos, query) = with_pos(format!( + r#" + grant select on public.{} to test_role + "#, + CURSOR_POS + )); + + let context = GrantParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + GrantContext { + table_name: None, + schema_name: Some("public".into()), + node_text: "REPLACED_TOKEN".into(), + node_range: TextRange::new(TextSize::new(36), TextSize::new(50)), + node_kind: "grant_table".into(), + } + ); + } + + #[test] + fn infers_quoted_schema_and_table() { + let (pos, query) = with_pos(format!( + r#" + grant select on "MySchema"."MyTable" to {} + "#, + CURSOR_POS + )); + + let context = GrantParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + GrantContext { + table_name: Some("MyTable".into()), + schema_name: Some("MySchema".into()), + node_text: "REPLACED_TOKEN".into(), + node_range: TextRange::new(TextSize::new(53), TextSize::new(67)), + node_kind: "grant_role".into(), + } + ); + } + + #[test] + fn infers_multiple_roles() { + let (pos, query) = with_pos(format!( + r#" + grant select on public.users to alice, {} + "#, + CURSOR_POS + )); + + let context = GrantParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + GrantContext { + table_name: Some("users".into()), + schema_name: Some("public".into()), + node_text: "REPLACED_TOKEN".into(), + node_range: TextRange::new(TextSize::new(52), TextSize::new(66)), + node_kind: "grant_role".into(), + } + ); + } +} diff --git a/crates/pgt_completions/src/context/mod.rs b/crates/pgt_completions/src/context/mod.rs index 7006c5bf..996ec6be 100644 --- a/crates/pgt_completions/src/context/mod.rs +++ b/crates/pgt_completions/src/context/mod.rs @@ -2,7 +2,10 @@ use std::{ cmp, 
collections::{HashMap, HashSet}, }; +mod base_parser; +mod grant_parser; mod policy_parser; +mod revoke_parser; use pgt_schema_cache::SchemaCache; use pgt_text_size::TextRange; @@ -13,7 +16,12 @@ use pgt_treesitter_queries::{ use crate::{ NodeText, - context::policy_parser::{PolicyParser, PolicyStmtKind}, + context::{ + base_parser::CompletionStatementParser, + grant_parser::GrantParser, + policy_parser::{PolicyParser, PolicyStmtKind}, + revoke_parser::RevokeParser, + }, sanitization::SanitizedCompletionParams, }; @@ -36,6 +44,9 @@ pub enum WrappingClause<'a> { RenameColumn, PolicyName, ToRoleAssignment, + SetStatement, + AlterRole, + DropRole, } #[derive(PartialEq, Eq, Hash, Debug, Clone)] @@ -190,8 +201,12 @@ impl<'a> CompletionContext<'a> { // policy handling is important to Supabase, but they are a PostgreSQL specific extension, // so the tree_sitter_sql language does not support it. // We infer the context manually. - if PolicyParser::looks_like_policy_stmt(¶ms.text) { + if PolicyParser::looks_like_matching_stmt(¶ms.text) { ctx.gather_policy_context(); + } else if GrantParser::looks_like_matching_stmt(¶ms.text) { + ctx.gather_grant_context(); + } else if RevokeParser::looks_like_matching_stmt(¶ms.text) { + ctx.gather_revoke_context(); } else { ctx.gather_tree_context(); ctx.gather_info_from_ts_queries(); @@ -200,6 +215,60 @@ impl<'a> CompletionContext<'a> { ctx } + fn gather_revoke_context(&mut self) { + let revoke_context = RevokeParser::get_context(self.text, self.position); + + self.node_under_cursor = Some(NodeUnderCursor::CustomNode { + text: revoke_context.node_text.into(), + range: revoke_context.node_range, + kind: revoke_context.node_kind.clone(), + }); + + if revoke_context.node_kind == "revoke_table" { + self.schema_or_alias_name = revoke_context.schema_name.clone(); + } + + if revoke_context.table_name.is_some() { + let mut new = HashSet::new(); + new.insert(revoke_context.table_name.unwrap()); + self.mentioned_relations + .insert(revoke_context.schema_name, new); + } + + self.wrapping_clause_type = match revoke_context.node_kind.as_str() { + "revoke_role" => Some(WrappingClause::ToRoleAssignment), + "revoke_table" => Some(WrappingClause::From), + _ => None, + }; + } + + fn gather_grant_context(&mut self) { + let grant_context = GrantParser::get_context(self.text, self.position); + + self.node_under_cursor = Some(NodeUnderCursor::CustomNode { + text: grant_context.node_text.into(), + range: grant_context.node_range, + kind: grant_context.node_kind.clone(), + }); + + if grant_context.node_kind == "grant_table" { + self.schema_or_alias_name = grant_context.schema_name.clone(); + } + + if grant_context.table_name.is_some() { + let mut new = HashSet::new(); + new.insert(grant_context.table_name.unwrap()); + self.mentioned_relations + .insert(grant_context.schema_name, new); + } + + self.wrapping_clause_type = match grant_context.node_kind.as_str() { + "grant_role" => Some(WrappingClause::ToRoleAssignment), + "grant_table" => Some(WrappingClause::From), + _ => None, + }; + } + fn gather_policy_context(&mut self) { let policy_context = PolicyParser::get_context(self.text, self.position); @@ -427,7 +496,8 @@ impl<'a> CompletionContext<'a> { } "where" | "update" | "select" | "delete" | "from" | "join" | "column_definitions" - | "drop_table" | "alter_table" | "drop_column" | "alter_column" | "rename_column" => { + | "alter_role" | "drop_role" | "set_statement" | "drop_table" | "alter_table" + | "drop_column" | "alter_column" | "rename_column" => { self.wrapping_clause_type = 
self.get_wrapping_clause_from_current_node(current_node, &mut cursor); } @@ -662,10 +732,13 @@ impl<'a> CompletionContext<'a> { "delete" => Some(WrappingClause::Delete), "from" => Some(WrappingClause::From), "drop_table" => Some(WrappingClause::DropTable), + "alter_role" => Some(WrappingClause::AlterRole), + "drop_role" => Some(WrappingClause::DropRole), "drop_column" => Some(WrappingClause::DropColumn), "alter_column" => Some(WrappingClause::AlterColumn), "rename_column" => Some(WrappingClause::RenameColumn), "alter_table" => Some(WrappingClause::AlterTable), + "set_statement" => Some(WrappingClause::SetStatement), "column_definitions" => Some(WrappingClause::ColumnDefinitions), "insert" => Some(WrappingClause::Insert), "join" => { diff --git a/crates/pgt_completions/src/context/policy_parser.rs b/crates/pgt_completions/src/context/policy_parser.rs index db37a13f..58619502 100644 --- a/crates/pgt_completions/src/context/policy_parser.rs +++ b/crates/pgt_completions/src/context/policy_parser.rs @@ -1,7 +1,9 @@ -use std::iter::Peekable; - use pgt_text_size::{TextRange, TextSize}; +use crate::context::base_parser::{ + CompletionStatementParser, TokenNavigator, WordWithIndex, schema_and_table_name, +}; + #[derive(Default, Debug, PartialEq, Eq)] pub(crate) enum PolicyStmtKind { #[default] @@ -11,90 +13,6 @@ pub(crate) enum PolicyStmtKind { Drop, } -#[derive(Clone, Debug, PartialEq, Eq)] -struct WordWithIndex { - word: String, - start: usize, - end: usize, -} - -impl WordWithIndex { - fn is_under_cursor(&self, cursor_pos: usize) -> bool { - self.start <= cursor_pos && self.end > cursor_pos - } - - fn get_range(&self) -> TextRange { - let start: u32 = self.start.try_into().expect("Text too long"); - let end: u32 = self.end.try_into().expect("Text too long"); - TextRange::new(TextSize::from(start), TextSize::from(end)) - } -} - -/// Note: A policy name within quotation marks will be considered a single word. 
-fn sql_to_words(sql: &str) -> Result, String> { - let mut words = vec![]; - - let mut start_of_word: Option = None; - let mut current_word = String::new(); - let mut in_quotation_marks = false; - - for (current_position, current_char) in sql.char_indices() { - if (current_char.is_ascii_whitespace() || current_char == ';') - && !current_word.is_empty() - && start_of_word.is_some() - && !in_quotation_marks - { - words.push(WordWithIndex { - word: current_word, - start: start_of_word.unwrap(), - end: current_position, - }); - - current_word = String::new(); - start_of_word = None; - } else if (current_char.is_ascii_whitespace() || current_char == ';') - && current_word.is_empty() - { - // do nothing - } else if current_char == '"' && start_of_word.is_none() { - in_quotation_marks = true; - current_word.push(current_char); - start_of_word = Some(current_position); - } else if current_char == '"' && start_of_word.is_some() { - current_word.push(current_char); - words.push(WordWithIndex { - word: current_word, - start: start_of_word.unwrap(), - end: current_position + 1, - }); - in_quotation_marks = false; - start_of_word = None; - current_word = String::new() - } else if start_of_word.is_some() { - current_word.push(current_char) - } else { - start_of_word = Some(current_position); - current_word.push(current_char); - } - } - - if let Some(start_of_word) = start_of_word { - if !current_word.is_empty() { - words.push(WordWithIndex { - word: current_word, - start: start_of_word, - end: sql.len(), - }); - } - } - - if in_quotation_marks { - Err("String was not closed properly.".into()) - } else { - Ok(words) - } -} - #[derive(Default, Debug, PartialEq, Eq)] pub(crate) struct PolicyContext { pub policy_name: Option, @@ -111,15 +29,16 @@ pub(crate) struct PolicyContext { /// The parser will only work if the (trimmed) sql starts with `create policy`, `drop policy`, or `alter policy`. /// It can only parse policy statements. pub(crate) struct PolicyParser { - tokens: Peekable>, - previous_token: Option, - current_token: Option, + navigator: TokenNavigator, context: PolicyContext, cursor_position: usize, } -impl PolicyParser { - pub(crate) fn looks_like_policy_stmt(sql: &str) -> bool { +impl CompletionStatementParser for PolicyParser { + type Context = PolicyContext; + const NAME: &'static str = "PolicyParser"; + + fn looks_like_matching_stmt(sql: &str) -> bool { let lowercased = sql.to_ascii_lowercase(); let trimmed = lowercased.trim(); trimmed.starts_with("create policy") @@ -127,30 +46,8 @@ impl PolicyParser { || trimmed.starts_with("alter policy") } - pub(crate) fn get_context(sql: &str, cursor_position: usize) -> PolicyContext { - assert!( - Self::looks_like_policy_stmt(sql), - "PolicyParser should only be used for policy statements. Developer error!" 
- ); - - match sql_to_words(sql) { - Ok(tokens) => { - let parser = PolicyParser { - tokens: tokens.into_iter().peekable(), - context: PolicyContext::default(), - previous_token: None, - current_token: None, - cursor_position, - }; - - parser.parse() - } - Err(_) => PolicyContext::default(), - } - } - - fn parse(mut self) -> PolicyContext { - while let Some(token) = self.advance() { + fn parse(mut self) -> Self::Context { + while let Some(token) = self.navigator.advance() { if token.is_under_cursor(self.cursor_position) { self.handle_token_under_cursor(token); } else { @@ -161,22 +58,36 @@ impl PolicyParser { self.context } + fn make_parser(tokens: Vec, cursor_position: usize) -> Self { + Self { + navigator: tokens.into(), + context: PolicyContext::default(), + cursor_position, + } + } +} + +impl PolicyParser { fn handle_token_under_cursor(&mut self, token: WordWithIndex) { - if self.previous_token.is_none() { + if self.navigator.previous_token.is_none() { return; } - let previous = self.previous_token.take().unwrap(); + let previous = self.navigator.previous_token.take().unwrap(); - match previous.word.to_ascii_lowercase().as_str() { + match previous + .get_word_without_quotes() + .to_ascii_lowercase() + .as_str() + { "policy" => { self.context.node_range = token.get_range(); self.context.node_kind = "policy_name".into(); - self.context.node_text = token.word; + self.context.node_text = token.get_word(); } "on" => { - if token.word.contains('.') { - let (schema_name, table_name) = self.schema_and_table_name(&token); + if token.get_word_without_quotes().contains('.') { + let (schema_name, table_name) = schema_and_table_name(&token); let schema_name_len = schema_name.len(); self.context.schema_name = Some(schema_name); @@ -198,85 +109,65 @@ impl PolicyParser { self.context.node_text = table_name.unwrap_or_default(); } else { self.context.node_range = token.get_range(); - self.context.node_text = token.word; + self.context.node_text = token.get_word(); self.context.node_kind = "policy_table".into(); } } "to" => { self.context.node_range = token.get_range(); self.context.node_kind = "policy_role".into(); - self.context.node_text = token.word; + self.context.node_text = token.get_word(); } _ => { self.context.node_range = token.get_range(); - self.context.node_text = token.word; + self.context.node_text = token.get_word(); } } } fn handle_token(&mut self, token: WordWithIndex) { - match token.word.to_ascii_lowercase().as_str() { - "create" if self.next_matches("policy") => { + match token + .get_word_without_quotes() + .to_ascii_lowercase() + .as_str() + { + "create" if self.navigator.next_matches(&["policy"]) => { self.context.statement_kind = PolicyStmtKind::Create; } - "alter" if self.next_matches("policy") => { + "alter" if self.navigator.next_matches(&["policy"]) => { self.context.statement_kind = PolicyStmtKind::Alter; } - "drop" if self.next_matches("policy") => { + "drop" if self.navigator.next_matches(&["policy"]) => { self.context.statement_kind = PolicyStmtKind::Drop; } "on" => self.table_with_schema(), // skip the "to" so we don't parse it as the TO rolename when it's under the cursor - "rename" if self.next_matches("to") => { - self.advance(); + "rename" if self.navigator.next_matches(&["to"]) => { + self.navigator.advance(); } _ => { - if self.prev_matches("policy") { - self.context.policy_name = Some(token.word); + if self.navigator.prev_matches(&["policy"]) { + self.context.policy_name = Some(token.get_word()); } } } } - fn next_matches(&mut self, it: &str) -> bool { - 
self.tokens.peek().is_some_and(|c| c.word.as_str() == it)
-    }
-
-    fn prev_matches(&self, it: &str) -> bool {
-        self.previous_token.as_ref().is_some_and(|t| t.word == it)
-    }
-
-    fn advance(&mut self) -> Option<WordWithIndex> {
-        // we can't peek back in an iterator, so we'll have to keep track manually.
-        self.previous_token = self.current_token.take();
-        self.current_token = self.tokens.next();
-        self.current_token.clone()
-    }
-
     fn table_with_schema(&mut self) {
-        if let Some(token) = self.advance() {
+        if let Some(token) = self.navigator.advance() {
             if token.is_under_cursor(self.cursor_position) {
                 self.handle_token_under_cursor(token);
-            } else if token.word.contains('.') {
-                let (schema, maybe_table) = self.schema_and_table_name(&token);
+            } else if token.get_word_without_quotes().contains('.') {
+                let (schema, maybe_table) = schema_and_table_name(&token);
                 self.context.schema_name = Some(schema);
                 self.context.table_name = maybe_table;
             } else {
-                self.context.table_name = Some(token.word);
+                self.context.table_name = Some(token.get_word());
             }
         };
     }
-
-    fn schema_and_table_name(&self, token: &WordWithIndex) -> (String, Option<String>) {
-        let mut parts = token.word.split('.');
-
-        (
-            parts.next().unwrap().into(),
-            parts.next().map(|tb| tb.into()),
-        )
-    }
 }
 
 #[cfg(test)]
@@ -284,11 +175,12 @@ mod tests {
     use pgt_text_size::{TextRange, TextSize};
 
     use crate::{
-        context::policy_parser::{PolicyContext, PolicyStmtKind, WordWithIndex},
+        context::base_parser::CompletionStatementParser,
+        context::policy_parser::{PolicyContext, PolicyStmtKind},
         test_helper::CURSOR_POS,
     };
 
-    use super::{PolicyParser, sql_to_words};
+    use super::PolicyParser;
 
     fn with_pos(query: String) -> (usize, String) {
         let mut pos: Option<usize> = None;
@@ -585,33 +477,4 @@
 
         assert_eq!(context, PolicyContext::default());
     }
-
-    fn to_word(word: &str, start: usize, end: usize) -> WordWithIndex {
-        WordWithIndex {
-            word: word.into(),
-            start,
-            end,
-        }
-    }
-
-    #[test]
-    fn determines_positions_correctly() {
-        let query = "\ncreate policy \"my cool pol\"\n\ton auth.users\n\tas permissive\n\tfor select\n\t\tto public\n\t\tusing (true);".to_string();
-
-        let words = sql_to_words(query.as_str()).unwrap();
-
-        assert_eq!(words[0], to_word("create", 1, 7));
-        assert_eq!(words[1], to_word("policy", 8, 14));
-        assert_eq!(words[2], to_word("\"my cool pol\"", 15, 28));
-        assert_eq!(words[3], to_word("on", 30, 32));
-        assert_eq!(words[4], to_word("auth.users", 33, 43));
-        assert_eq!(words[5], to_word("as", 45, 47));
-        assert_eq!(words[6], to_word("permissive", 48, 58));
-        assert_eq!(words[7], to_word("for", 60, 63));
-        assert_eq!(words[8], to_word("select", 64, 70));
-        assert_eq!(words[9], to_word("to", 73, 75));
-        assert_eq!(words[10], to_word("public", 78, 84));
-        assert_eq!(words[11], to_word("using", 87, 92));
-        assert_eq!(words[12], to_word("(true)", 93, 99));
-    }
 }

diff --git a/crates/pgt_completions/src/context/revoke_parser.rs b/crates/pgt_completions/src/context/revoke_parser.rs
new file mode 100644
index 00000000..e0c43934
--- /dev/null
+++ b/crates/pgt_completions/src/context/revoke_parser.rs
@@ -0,0 +1,339 @@
+use pgt_text_size::{TextRange, TextSize};
+
+use crate::context::base_parser::{
+    CompletionStatementParser, TokenNavigator, WordWithIndex, schema_and_table_name,
+};
+
+#[derive(Default, Debug, PartialEq, Eq)]
+pub(crate) struct RevokeContext {
+    pub table_name: Option<String>,
+    pub schema_name: Option<String>,
+    pub node_text: String,
+    pub node_range: TextRange,
+    pub node_kind: String,
+}
+
+/// Simple parser that'll turn a revoke-related statement into a context object required for
+/// completions.
+/// The parser will only work if the (trimmed) sql starts with `revoke`.
+/// It can only parse revoke statements.
+pub(crate) struct RevokeParser {
+    navigator: TokenNavigator,
+    context: RevokeContext,
+    cursor_position: usize,
+    in_roles_list: bool,
+    is_revoking_role: bool,
+}
+
+impl CompletionStatementParser for RevokeParser {
+    type Context = RevokeContext;
+    const NAME: &'static str = "RevokeParser";
+
+    fn looks_like_matching_stmt(sql: &str) -> bool {
+        let lowercased = sql.to_ascii_lowercase();
+        let trimmed = lowercased.trim();
+        trimmed.starts_with("revoke")
+    }
+
+    fn parse(mut self) -> Self::Context {
+        while let Some(token) = self.navigator.advance() {
+            if token.is_under_cursor(self.cursor_position) {
+                self.handle_token_under_cursor(token);
+            } else {
+                self.handle_token(token);
+            }
+        }
+
+        self.context
+    }
+
+    fn make_parser(tokens: Vec<WordWithIndex>, cursor_position: usize) -> Self {
+        Self {
+            navigator: tokens.into(),
+            context: RevokeContext::default(),
+            cursor_position,
+            in_roles_list: false,
+            is_revoking_role: false,
+        }
+    }
+}
+
+impl RevokeParser {
+    fn handle_token_under_cursor(&mut self, token: WordWithIndex) {
+        if self.navigator.previous_token.is_none() {
+            return;
+        }
+
+        let previous = self.navigator.previous_token.take().unwrap();
+        let current = self
+            .navigator
+            .current_token
+            .as_ref()
+            .map(|w| w.get_word_without_quotes());
+
+        match previous
+            .get_word_without_quotes()
+            .to_ascii_lowercase()
+            .as_str()
+        {
+            "on" if !matches!(current.as_deref(), Some("table")) => self.handle_table(&token),
+
+            "table" => {
+                self.handle_table(&token);
+            }
+
+            "from" | "revoke" => {
+                self.context.node_range = token.get_range();
+                self.context.node_kind = "revoke_role".into();
+                self.context.node_text = token.get_word();
+            }
+
+            "for" if self.is_revoking_role => {
+                self.context.node_range = token.get_range();
+                self.context.node_kind = "revoke_role".into();
+                self.context.node_text = token.get_word();
+            }
+
+            t => {
+                if self.in_roles_list && t.ends_with(',') {
+                    self.context.node_kind = "revoke_role".into();
+                }
+
+                self.context.node_range = token.get_range();
+                self.context.node_text = token.get_word();
+            }
+        }
+    }
+
+    fn handle_table(&mut self, token: &WordWithIndex) {
+        if token.get_word_without_quotes().contains('.') {
+            let (schema_name, table_name) = schema_and_table_name(token);
+
+            let schema_name_len = schema_name.len();
+            self.context.schema_name = Some(schema_name);
+
+            let offset: u32 = schema_name_len.try_into().expect("Text too long");
+            let range_without_schema = token
+                .get_range()
+                .checked_expand_start(
+                    TextSize::new(offset + 1), // kill the dot as well
+                )
+                .expect("Text too long");
+
+            self.context.node_range = range_without_schema;
+            self.context.node_kind = "revoke_table".into();
+
+            // In practice, we should always have a table name.
+            // The completion sanitization will add a word after a `.` if nothing follows it;
+            // the token_text will then look like `schema.REPLACED_TOKEN`.
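+            // As an illustration (an assumed example, not extra behaviour):
+            // sanitizing `revoke select on public.` yields
+            // `revoke select on public.REPLACED_TOKEN`, so this branch records
+            // `schema_name == Some("public")` and `node_kind == "revoke_table"`,
+            // exactly as the `infers_schema_and_table_name` test below expects.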
+ self.context.node_text = table_name.unwrap_or_default(); + } else { + self.context.node_range = token.get_range(); + self.context.node_text = token.get_word(); + self.context.node_kind = "revoke_table".into(); + } + } + + fn handle_token(&mut self, token: WordWithIndex) { + match token.get_word_without_quotes().as_str() { + "on" if !self.navigator.next_matches(&["table"]) => self.table_with_schema(), + + // This is the only case where there is no "GRANT" before the option: + // REVOKE [ { ADMIN | INHERIT | SET } OPTION FOR ] role_name + "option" if !self.navigator.prev_matches(&["grant"]) => { + self.is_revoking_role = true; + } + + "table" => self.table_with_schema(), + + "from" => { + self.in_roles_list = true; + } + + t => { + if self.in_roles_list && !t.ends_with(',') { + self.in_roles_list = false; + } + } + } + } + + fn table_with_schema(&mut self) { + if let Some(token) = self.navigator.advance() { + if token.is_under_cursor(self.cursor_position) { + self.handle_token_under_cursor(token); + } else if token.get_word_without_quotes().contains('.') { + let (schema, maybe_table) = schema_and_table_name(&token); + self.context.schema_name = Some(schema); + self.context.table_name = maybe_table; + } else { + self.context.table_name = Some(token.get_word()); + } + }; + } +} + +#[cfg(test)] +mod tests { + use pgt_text_size::{TextRange, TextSize}; + + use crate::{ + context::base_parser::CompletionStatementParser, + context::revoke_parser::{RevokeContext, RevokeParser}, + test_helper::CURSOR_POS, + }; + + fn with_pos(query: String) -> (usize, String) { + let mut pos: Option = None; + + for (p, c) in query.char_indices() { + if c == CURSOR_POS { + pos = Some(p); + break; + } + } + + ( + pos.expect("Please add cursor position!"), + query.replace(CURSOR_POS, "REPLACED_TOKEN").to_string(), + ) + } + + #[test] + fn infers_revoke_keyword() { + let (pos, query) = with_pos(format!( + r#" + revoke {} + "#, + CURSOR_POS + )); + + let context = RevokeParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + RevokeContext { + table_name: None, + schema_name: None, + node_text: "REPLACED_TOKEN".into(), + node_range: TextRange::new(TextSize::new(20), TextSize::new(34)), + node_kind: "revoke_role".into(), + } + ); + } + + #[test] + fn infers_table_name() { + let (pos, query) = with_pos(format!( + r#" + revoke select on {} + "#, + CURSOR_POS + )); + + let context = RevokeParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + RevokeContext { + table_name: None, + schema_name: None, + node_text: "REPLACED_TOKEN".into(), + node_range: TextRange::new(TextSize::new(30), TextSize::new(44)), + node_kind: "revoke_table".into(), + } + ); + } + + #[test] + fn infers_schema_and_table_name() { + let (pos, query) = with_pos(format!( + r#" + revoke select on public.{} + "#, + CURSOR_POS + )); + + let context = RevokeParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + RevokeContext { + table_name: None, + schema_name: Some("public".into()), + node_text: "REPLACED_TOKEN".into(), + node_range: TextRange::new(TextSize::new(37), TextSize::new(51)), + node_kind: "revoke_table".into(), + } + ); + } + + #[test] + fn infers_role_name() { + let (pos, query) = with_pos(format!( + r#" + revoke select on public.users from {} + "#, + CURSOR_POS + )); + + let context = RevokeParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + RevokeContext { + table_name: Some("users".into()), + schema_name: Some("public".into()), + node_text: "REPLACED_TOKEN".into(), + node_range: 
TextRange::new(TextSize::new(48), TextSize::new(62)),
+                node_kind: "revoke_role".into(),
+            }
+        );
+    }
+
+    #[test]
+    fn infers_multiple_roles() {
+        let (pos, query) = with_pos(format!(
+            r#"
+            revoke select on public.users from alice, {}
+        "#,
+            CURSOR_POS
+        ));
+
+        let context = RevokeParser::get_context(query.as_str(), pos);
+
+        assert_eq!(
+            context,
+            RevokeContext {
+                table_name: Some("users".into()),
+                schema_name: Some("public".into()),
+                node_text: "REPLACED_TOKEN".into(),
+                node_range: TextRange::new(TextSize::new(55), TextSize::new(69)),
+                node_kind: "revoke_role".into(),
+            }
+        );
+    }
+
+    #[test]
+    fn infers_quoted_schema_and_table() {
+        let (pos, query) = with_pos(format!(
+            r#"
+            revoke select on "MySchema"."MyTable" from {}
+        "#,
+            CURSOR_POS
+        ));
+
+        let context = RevokeParser::get_context(query.as_str(), pos);
+
+        assert_eq!(
+            context,
+            RevokeContext {
+                table_name: Some("MyTable".into()),
+                schema_name: Some("MySchema".into()),
+                node_text: "REPLACED_TOKEN".into(),
+                node_range: TextRange::new(TextSize::new(56), TextSize::new(70)),
+                node_kind: "revoke_role".into(),
+            }
+        );
+    }
+}

diff --git a/crates/pgt_completions/src/item.rs b/crates/pgt_completions/src/item.rs
index 73e08cc0..766e436c 100644
--- a/crates/pgt_completions/src/item.rs
+++ b/crates/pgt_completions/src/item.rs
@@ -12,6 +12,7 @@ pub enum CompletionItemKind {
     Column,
     Schema,
     Policy,
+    Role,
 }
 
 impl Display for CompletionItemKind {
@@ -22,6 +23,7 @@ impl Display for CompletionItemKind {
             CompletionItemKind::Column => "Column",
             CompletionItemKind::Schema => "Schema",
             CompletionItemKind::Policy => "Policy",
+            CompletionItemKind::Role => "Role",
         };
 
         write!(f, "{txt}")

diff --git a/crates/pgt_completions/src/providers/mod.rs b/crates/pgt_completions/src/providers/mod.rs
index 7b07cee8..ddbdf252 100644
--- a/crates/pgt_completions/src/providers/mod.rs
+++ b/crates/pgt_completions/src/providers/mod.rs
@@ -2,11 +2,13 @@ mod columns;
 mod functions;
 mod helper;
 mod policies;
+mod roles;
 mod schemas;
 mod tables;
 
 pub use columns::*;
 pub use functions::*;
 pub use policies::*;
+pub use roles::*;
 pub use schemas::*;
 pub use tables::*;

diff --git a/crates/pgt_completions/src/providers/roles.rs b/crates/pgt_completions/src/providers/roles.rs
new file mode 100644
index 00000000..01641543
--- /dev/null
+++ b/crates/pgt_completions/src/providers/roles.rs
@@ -0,0 +1,291 @@
+use crate::{
+    CompletionItemKind,
+    builder::{CompletionBuilder, PossibleCompletionItem},
+    context::CompletionContext,
+    relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore},
+};
+
+pub fn complete_roles<'a>(ctx: &CompletionContext<'a>, builder: &mut CompletionBuilder<'a>) {
+    let available_roles = &ctx.schema_cache.roles;
+
+    for role in available_roles {
+        let relevance = CompletionRelevanceData::Role(role);
+
+        let item = PossibleCompletionItem {
+            label: role.name.chars().take(35).collect::<String>(),
+            score: CompletionScore::from(relevance.clone()),
+            filter: CompletionFilter::from(relevance),
+            description: role.name.clone(),
+            kind: CompletionItemKind::Role,
+            completion_text: None,
+            detail: None,
+        };
+
+        builder.add_item(item);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use sqlx::{Executor, PgPool};
+
+    use crate::test_helper::{CURSOR_POS, CompletionAssertion, assert_complete_results};
+
+    const SETUP: &str = r#"
+        create table users (
+            id serial primary key,
+            email varchar,
+            address text
+        );
+    "#;
+
+    #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")]
+    async fn works_in_drop_role(pool: PgPool) {
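+        // Fixture note (an assumption made explicit, not new behaviour): the
+        // assertions in this module rely on the shared
+        // `pgt_test_utils::MIGRATIONS` providing the roles `owner`,
+        // `test_login` and `test_nologin`; the expected completions are
+        // exactly those role names, in this order.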
assert_complete_results( + format!("drop role {}", CURSOR_POS).as_str(), + vec![ + CompletionAssertion::LabelAndKind("owner".into(), crate::CompletionItemKind::Role), + CompletionAssertion::LabelAndKind( + "test_login".into(), + crate::CompletionItemKind::Role, + ), + CompletionAssertion::LabelAndKind( + "test_nologin".into(), + crate::CompletionItemKind::Role, + ), + ], + Some(SETUP), + &pool, + ) + .await; + } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn works_in_alter_role(pool: PgPool) { + assert_complete_results( + format!("alter role {}", CURSOR_POS).as_str(), + vec![ + CompletionAssertion::LabelAndKind("owner".into(), crate::CompletionItemKind::Role), + CompletionAssertion::LabelAndKind( + "test_login".into(), + crate::CompletionItemKind::Role, + ), + CompletionAssertion::LabelAndKind( + "test_nologin".into(), + crate::CompletionItemKind::Role, + ), + ], + Some(SETUP), + &pool, + ) + .await; + } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn works_in_set_statement(pool: PgPool) { + pool.execute(SETUP).await.unwrap(); + + assert_complete_results( + format!("set role {}", CURSOR_POS).as_str(), + vec![ + CompletionAssertion::LabelAndKind("owner".into(), crate::CompletionItemKind::Role), + CompletionAssertion::LabelAndKind( + "test_login".into(), + crate::CompletionItemKind::Role, + ), + CompletionAssertion::LabelAndKind( + "test_nologin".into(), + crate::CompletionItemKind::Role, + ), + ], + None, + &pool, + ) + .await; + + assert_complete_results( + format!("set session authorization {}", CURSOR_POS).as_str(), + vec![ + CompletionAssertion::LabelAndKind("owner".into(), crate::CompletionItemKind::Role), + CompletionAssertion::LabelAndKind( + "test_login".into(), + crate::CompletionItemKind::Role, + ), + CompletionAssertion::LabelAndKind( + "test_nologin".into(), + crate::CompletionItemKind::Role, + ), + ], + None, + &pool, + ) + .await; + } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn works_in_policies(pool: PgPool) { + pool.execute(SETUP).await.unwrap(); + + assert_complete_results( + format!( + r#"create policy "my cool policy" on public.users + as restrictive + for all + to {} + using (true);"#, + CURSOR_POS + ) + .as_str(), + vec![ + CompletionAssertion::LabelAndKind("owner".into(), crate::CompletionItemKind::Role), + CompletionAssertion::LabelAndKind( + "test_login".into(), + crate::CompletionItemKind::Role, + ), + CompletionAssertion::LabelAndKind( + "test_nologin".into(), + crate::CompletionItemKind::Role, + ), + ], + None, + &pool, + ) + .await; + + assert_complete_results( + format!( + r#"create policy "my cool policy" on public.users + for select + to {}"#, + CURSOR_POS + ) + .as_str(), + vec![ + CompletionAssertion::LabelAndKind("owner".into(), crate::CompletionItemKind::Role), + CompletionAssertion::LabelAndKind( + "test_login".into(), + crate::CompletionItemKind::Role, + ), + CompletionAssertion::LabelAndKind( + "test_nologin".into(), + crate::CompletionItemKind::Role, + ), + ], + None, + &pool, + ) + .await; + } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn works_in_grant_statements(pool: PgPool) { + pool.execute(SETUP).await.unwrap(); + + assert_complete_results( + format!( + r#"grant select + on table public.users + to {}"#, + CURSOR_POS + ) + .as_str(), + vec![ + // recognizing already mentioned roles is not supported for now + CompletionAssertion::LabelAndKind("owner".into(), crate::CompletionItemKind::Role), + CompletionAssertion::LabelAndKind( + "test_login".into(), + 
crate::CompletionItemKind::Role,
+                ),
+                CompletionAssertion::LabelAndKind(
+                    "test_nologin".into(),
+                    crate::CompletionItemKind::Role,
+                ),
+            ],
+            None,
+            &pool,
+        )
+        .await;
+
+        assert_complete_results(
+            format!(
+                r#"grant select
+            on table public.users
+            to owner, {}"#,
+                CURSOR_POS
+            )
+            .as_str(),
+            vec![
+                // recognizing already mentioned roles is not supported for now
+                CompletionAssertion::LabelAndKind("owner".into(), crate::CompletionItemKind::Role),
+                CompletionAssertion::LabelAndKind(
+                    "test_login".into(),
+                    crate::CompletionItemKind::Role,
+                ),
+                CompletionAssertion::LabelAndKind(
+                    "test_nologin".into(),
+                    crate::CompletionItemKind::Role,
+                ),
+            ],
+            None,
+            &pool,
+        )
+        .await;
+
+        assert_complete_results(
+            format!(r#"grant {} to owner"#, CURSOR_POS).as_str(),
+            vec![
+                // recognizing already mentioned roles is not supported for now
+                CompletionAssertion::LabelAndKind("owner".into(), crate::CompletionItemKind::Role),
+                CompletionAssertion::LabelAndKind(
+                    "test_login".into(),
+                    crate::CompletionItemKind::Role,
+                ),
+                CompletionAssertion::LabelAndKind(
+                    "test_nologin".into(),
+                    crate::CompletionItemKind::Role,
+                ),
+            ],
+            None,
+            &pool,
+        )
+        .await;
+    }
+
+    #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")]
+    async fn works_in_revoke_statements(pool: PgPool) {
+        pool.execute(SETUP).await.unwrap();
+
+        let queries = vec![
+            format!("revoke {} from owner", CURSOR_POS),
+            format!("revoke admin option for {} from owner", CURSOR_POS),
+            format!("revoke owner from {}", CURSOR_POS),
+            format!("revoke all on schema public from {} granted by", CURSOR_POS),
+            format!("revoke all on schema public from owner, {}", CURSOR_POS),
+            format!("revoke all on table users from owner, {}", CURSOR_POS),
+        ];
+
+        for query in queries {
+            assert_complete_results(
+                query.as_str(),
+                vec![
+                    // recognizing already mentioned roles is not supported for now
+                    CompletionAssertion::LabelAndKind(
+                        "owner".into(),
+                        crate::CompletionItemKind::Role,
+                    ),
+                    CompletionAssertion::LabelAndKind(
+                        "test_login".into(),
+                        crate::CompletionItemKind::Role,
+                    ),
+                    CompletionAssertion::LabelAndKind(
+                        "test_nologin".into(),
+                        crate::CompletionItemKind::Role,
+                    ),
+                ],
+                None,
+                &pool,
+            )
+            .await;
+        }
+    }
+}

diff --git a/crates/pgt_completions/src/relevance.rs b/crates/pgt_completions/src/relevance.rs
index f51c3c52..1d39d9bb 100644
--- a/crates/pgt_completions/src/relevance.rs
+++ b/crates/pgt_completions/src/relevance.rs
@@ -8,4 +8,5 @@ pub(crate) enum CompletionRelevanceData<'a> {
     Column(&'a pgt_schema_cache::Column),
     Schema(&'a pgt_schema_cache::Schema),
     Policy(&'a pgt_schema_cache::Policy),
+    Role(&'a pgt_schema_cache::Role),
 }

diff --git a/crates/pgt_completions/src/relevance/filtering.rs b/crates/pgt_completions/src/relevance/filtering.rs
index ea681bd7..a020d2e8 100644
--- a/crates/pgt_completions/src/relevance/filtering.rs
+++ b/crates/pgt_completions/src/relevance/filtering.rs
@@ -180,6 +180,17 @@ impl CompletionFilter<'_> {
                     CompletionRelevanceData::Policy(_) => {
                         matches!(clause, WrappingClause::PolicyName)
                     }
+
+                    CompletionRelevanceData::Role(_) => match clause {
+                        WrappingClause::DropRole
+                        | WrappingClause::AlterRole
+                        | WrappingClause::ToRoleAssignment => true,
+
+                        WrappingClause::SetStatement => ctx
+                            .before_cursor_matches_kind(&["keyword_role", "keyword_authorization"]),
+
+                        _ => false,
+                    },
                 }
             })
            .and_then(|is_ok| if is_ok { Some(()) } else { None })
@@ -215,8 +226,8 @@ impl CompletionFilter<'_> {
                     // we should never allow schema suggestions if there already was one.
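+                    // A hedged example of what this check prevents: in
+                    // `public.<cursor>`, the user has already typed a schema,
+                    // so suggesting another schema (or, below, a policy or
+                    // role) after the dot could never form valid SQL.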
                    CompletionRelevanceData::Schema(_) => false,
-                    // no policy comletion if user typed a schema node first.
-                    CompletionRelevanceData::Policy(_) => false,
+                    // no policy or role completion if user typed a schema node first.
+                    CompletionRelevanceData::Policy(_) | CompletionRelevanceData::Role(_) => false,
                 };
 
                 if !matches {

diff --git a/crates/pgt_completions/src/relevance/scoring.rs b/crates/pgt_completions/src/relevance/scoring.rs
index a8c89f50..a0b5efa5 100644
--- a/crates/pgt_completions/src/relevance/scoring.rs
+++ b/crates/pgt_completions/src/relevance/scoring.rs
@@ -47,6 +47,7 @@ impl CompletionScore<'_> {
             CompletionRelevanceData::Column(c) => c.name.as_str().to_ascii_lowercase(),
             CompletionRelevanceData::Schema(s) => s.name.as_str().to_ascii_lowercase(),
             CompletionRelevanceData::Policy(p) => p.name.as_str().to_ascii_lowercase(),
+            CompletionRelevanceData::Role(r) => r.name.as_str().to_ascii_lowercase(),
         };
 
         let fz_matcher = SkimMatcherV2::default();
@@ -126,6 +127,11 @@ impl CompletionScore<'_> {
                 WrappingClause::PolicyName => 25,
                 _ => -50,
             },
+
+            CompletionRelevanceData::Role(_) => match clause_type {
+                WrappingClause::DropRole | WrappingClause::AlterRole => 25,
+                _ => -50,
+            },
         }
     }
 
@@ -160,6 +166,7 @@ impl CompletionScore<'_> {
                 _ => -50,
             },
             CompletionRelevanceData::Policy(_) => 0,
+            CompletionRelevanceData::Role(_) => 0,
         }
     }
 
@@ -178,7 +185,10 @@ impl CompletionScore<'_> {
             Some(n) => n,
         };
 
-        let data_schema = self.get_schema_name();
+        let data_schema = match self.get_schema_name() {
+            Some(s) => s,
+            None => return,
+        };
 
         if schema_name == data_schema {
             self.score += 25;
@@ -194,16 +204,18 @@ impl CompletionScore<'_> {
             CompletionRelevanceData::Column(c) => c.name.as_str(),
             CompletionRelevanceData::Schema(s) => s.name.as_str(),
             CompletionRelevanceData::Policy(p) => p.name.as_str(),
+            CompletionRelevanceData::Role(r) => r.name.as_str(),
         }
     }
 
-    fn get_schema_name(&self) -> &str {
+    fn get_schema_name(&self) -> Option<&str> {
         match self.data {
-            CompletionRelevanceData::Function(f) => f.schema.as_str(),
-            CompletionRelevanceData::Table(t) => t.schema.as_str(),
-            CompletionRelevanceData::Column(c) => c.schema_name.as_str(),
-            CompletionRelevanceData::Schema(s) => s.name.as_str(),
-            CompletionRelevanceData::Policy(p) => p.schema_name.as_str(),
+            CompletionRelevanceData::Function(f) => Some(f.schema.as_str()),
+            CompletionRelevanceData::Table(t) => Some(t.schema.as_str()),
+            CompletionRelevanceData::Column(c) => Some(c.schema_name.as_str()),
+            CompletionRelevanceData::Schema(s) => Some(s.name.as_str()),
+            CompletionRelevanceData::Policy(p) => Some(p.schema_name.as_str()),
+            CompletionRelevanceData::Role(_) => None,
         }
     }
 
@@ -222,7 +234,10 @@ impl CompletionScore<'_> {
             _ => {}
         }
 
-        let schema = self.get_schema_name().to_string();
+        let schema = match self.get_schema_name() {
+            Some(s) => s.to_string(),
+            None => return,
+        };
         let table_name = match self.get_table_name() {
             Some(t) => t,
             None => return,
@@ -244,7 +259,34 @@ impl CompletionScore<'_> {
     }
 
     fn check_is_user_defined(&mut self) {
-        let schema_name = self.get_schema_name().to_string();
+        if let CompletionRelevanceData::Role(r) = self.data {
+            match r.name.as_str() {
+                "pg_read_all_data"
+                | "pg_write_all_data"
+                | "pg_read_all_settings"
+                | "pg_read_all_stats"
+                | "pg_stat_scan_tables"
+                | "pg_monitor"
+                | "pg_database_owner"
+                | "pg_signal_backend"
+                | "pg_read_server_files"
+                | "pg_write_server_files"
+                | "pg_execute_server_program"
+                | "pg_checkpoint"
+                | "pg_maintain"
+                | "pg_use_reserved_connections"
+                | "pg_create_subscription"
+                | "postgres" => self.score -= 20,
"postgres" => self.score -= 20, + _ => {} + }; + + return; + } + + let schema_name = match self.get_schema_name() { + Some(s) => s.to_string(), + None => return, + }; let system_schemas = ["pg_catalog", "information_schema", "pg_toast"]; diff --git a/crates/pgt_completions/src/sanitization.rs b/crates/pgt_completions/src/sanitization.rs index 154998e7..ddc9563e 100644 --- a/crates/pgt_completions/src/sanitization.rs +++ b/crates/pgt_completions/src/sanitization.rs @@ -6,6 +6,7 @@ use crate::CompletionParams; static SANITIZED_TOKEN: &str = "REPLACED_TOKEN"; +#[derive(Debug)] pub(crate) struct SanitizedCompletionParams<'a> { pub position: TextSize, pub text: String, diff --git a/crates/pgt_lsp/src/handlers/completions.rs b/crates/pgt_lsp/src/handlers/completions.rs index 7e901c79..4a035fcf 100644 --- a/crates/pgt_lsp/src/handlers/completions.rs +++ b/crates/pgt_lsp/src/handlers/completions.rs @@ -76,5 +76,6 @@ fn to_lsp_types_completion_item_kind( pgt_completions::CompletionItemKind::Column => lsp_types::CompletionItemKind::FIELD, pgt_completions::CompletionItemKind::Schema => lsp_types::CompletionItemKind::CLASS, pgt_completions::CompletionItemKind::Policy => lsp_types::CompletionItemKind::CONSTANT, + pgt_completions::CompletionItemKind::Role => lsp_types::CompletionItemKind::CONSTANT, } } diff --git a/crates/pgt_lsp/src/session.rs b/crates/pgt_lsp/src/session.rs index fd5af2da..ede0469f 100644 --- a/crates/pgt_lsp/src/session.rs +++ b/crates/pgt_lsp/src/session.rs @@ -32,9 +32,11 @@ use tower_lsp::lsp_types::{Unregistration, WorkspaceFolder}; use tracing::{error, info}; pub(crate) struct ClientInformation { + #[allow(dead_code)] /// The name of the client pub(crate) name: String, + #[allow(dead_code)] /// The version of the client pub(crate) version: Option, } @@ -76,6 +78,7 @@ pub(crate) struct Session { struct InitializeParams { /// The capabilities provided by the client as part of [`lsp_types::InitializeParams`] client_capabilities: lsp_types::ClientCapabilities, + #[allow(dead_code)] client_information: Option, root_uri: Option, #[allow(unused)] diff --git a/crates/pgt_workspace/src/settings.rs b/crates/pgt_workspace/src/settings.rs index 08854493..ac55d8a1 100644 --- a/crates/pgt_workspace/src/settings.rs +++ b/crates/pgt_workspace/src/settings.rs @@ -214,45 +214,6 @@ pub struct Settings { pub migrations: Option, } -#[derive(Debug)] -pub struct SettingsHandleMut<'a> { - inner: RwLockWriteGuard<'a, Settings>, -} - -/// Handle object holding a temporary lock on the settings -#[derive(Debug)] -pub struct SettingsHandle<'a> { - inner: RwLockReadGuard<'a, Settings>, -} - -impl<'a> SettingsHandle<'a> { - pub(crate) fn new(settings: &'a RwLock) -> Self { - Self { - inner: settings.read().unwrap(), - } - } -} - -impl AsRef for SettingsHandle<'_> { - fn as_ref(&self) -> &Settings { - &self.inner - } -} - -impl<'a> SettingsHandleMut<'a> { - pub(crate) fn new(settings: &'a RwLock) -> Self { - Self { - inner: settings.write().unwrap(), - } - } -} - -impl AsMut for SettingsHandleMut<'_> { - fn as_mut(&mut self) -> &mut Settings { - &mut self.inner - } -} - impl Settings { /// The [PartialConfiguration] is merged into the workspace #[tracing::instrument(level = "trace", skip(self), err)] From b114b818fe3549472e5bea64784b978771a6a7a7 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Tue, 10 Jun 2025 19:55:44 +0200 Subject: [PATCH 074/114] feat(cache): include procedures & aggregates, fix query (#424) --- 
...117910b19f540f19393b76aa6434e9d1d8502.json | 110 +++++++++++++ ...e496f5337cadbad7a3fb03ccd3e3c21b71389.json | 104 ------------- ...053db65ea6a7529e2cb97b2d3432a18aff6ba.json | 20 --- crates/pgt_schema_cache/src/functions.rs | 144 +++++++++++++++++- .../src/queries/functions.sql | 25 +-- .../backend-jsonrpc/src/workspace.ts | 6 +- 6 files changed, 268 insertions(+), 141 deletions(-) create mode 100644 .sqlx/query-4ea19fee016f1daeafdc466647d117910b19f540f19393b76aa6434e9d1d8502.json delete mode 100644 .sqlx/query-64d9718b07516f3d2720cb7aa79e496f5337cadbad7a3fb03ccd3e3c21b71389.json delete mode 100644 .sqlx/query-b0504a4340264403ad43d05c60d053db65ea6a7529e2cb97b2d3432a18aff6ba.json diff --git a/.sqlx/query-4ea19fee016f1daeafdc466647d117910b19f540f19393b76aa6434e9d1d8502.json b/.sqlx/query-4ea19fee016f1daeafdc466647d117910b19f540f19393b76aa6434e9d1d8502.json new file mode 100644 index 00000000..4980f4f3 --- /dev/null +++ b/.sqlx/query-4ea19fee016f1daeafdc466647d117910b19f540f19393b76aa6434e9d1d8502.json @@ -0,0 +1,110 @@ +{ + "db_name": "PostgreSQL", + "query": "with functions as (\n select\n oid,\n proname,\n prosrc,\n prorettype,\n proretset,\n provolatile,\n prosecdef,\n prolang,\n pronamespace,\n proconfig,\n prokind,\n -- proargmodes is null when all arg modes are IN\n coalesce(\n p.proargmodes,\n array_fill(\n 'i' :: text,\n array [cardinality(coalesce(p.proallargtypes, p.proargtypes))]\n )\n ) as arg_modes,\n -- proargnames is null when all args are unnamed\n coalesce(\n p.proargnames,\n array_fill(\n '' :: text,\n array [cardinality(coalesce(p.proallargtypes, p.proargtypes))]\n )\n ) as arg_names,\n -- proallargtypes is null when all arg modes are IN\n coalesce(p.proallargtypes, string_to_array(proargtypes::text, ' ')::int[]) as arg_types,\n array_cat(\n array_fill(false, array [pronargs - pronargdefaults]),\n array_fill(true, array [pronargdefaults])\n ) as arg_has_defaults\n from\n pg_proc as p\n)\nselect\n f.oid :: int8 as \"id!\",\n n.nspname as \"schema!\",\n f.proname as \"name!\",\n l.lanname as \"language!\",\n f.prokind as \"kind!\",\n case\n when l.lanname = 'internal' then null\n else f.prosrc\n end as body,\n case\n when l.lanname = 'internal' then null\n else pg_get_functiondef(f.oid)\n end as definition,\n coalesce(f_args.args, '[]') as args,\n nullif(pg_get_function_arguments(f.oid), '') as argument_types,\n nullif(pg_get_function_identity_arguments(f.oid), '') as identity_argument_types,\n f.prorettype :: int8 as return_type_id,\n pg_get_function_result(f.oid) as return_type,\n nullif(rt.typrelid :: int8, 0) as return_type_relation_id,\n f.proretset as \"is_set_returning_function!\",\n case\n when f.provolatile = 'i' then 'IMMUTABLE'\n when f.provolatile = 's' then 'STABLE'\n when f.provolatile = 'v' then 'VOLATILE'\n end as behavior,\n f.prosecdef as \"security_definer!\"\nfrom\n functions f\n left join pg_namespace n on f.pronamespace = n.oid\n left join pg_language l on f.prolang = l.oid\n left join pg_type rt on rt.oid = f.prorettype\n left join (\n select\n oid,\n jsonb_object_agg(param, value) filter (\n where\n param is not null\n ) as config_params\n from\n (\n select\n oid,\n (string_to_array(unnest(proconfig), '=')) [1] as param,\n (string_to_array(unnest(proconfig), '=')) [2] as value\n from\n functions\n ) as t\n group by\n oid\n ) f_config on f_config.oid = f.oid\n left join (\n select\n oid,\n jsonb_agg(\n jsonb_build_object(\n 'mode',\n t2.mode,\n 'name',\n name,\n 'type_id',\n type_id,\n 'has_default',\n has_default\n )\n ) as args\n from\n (\n 
select\n oid,\n arg_modes[i] as mode,\n arg_names[i] as name,\n arg_types[i] :: int8 as type_id,\n arg_has_defaults[i] as has_default\n from\n functions,\n pg_catalog.generate_subscripts(arg_names, 1) as i\n ) as t1,\n lateral (\n select\n case\n when t1.mode = 'i' then 'in'\n when t1.mode = 'o' then 'out'\n when t1.mode = 'b' then 'inout'\n when t1.mode = 'v' then 'variadic'\n else 'table'\n end as mode\n ) as t2\n group by\n t1.oid\n ) f_args on f_args.oid = f.oid;", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "schema!", + "type_info": "Name" + }, + { + "ordinal": 2, + "name": "name!", + "type_info": "Name" + }, + { + "ordinal": 3, + "name": "language!", + "type_info": "Name" + }, + { + "ordinal": 4, + "name": "kind!", + "type_info": "Char" + }, + { + "ordinal": 5, + "name": "body", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "definition", + "type_info": "Text" + }, + { + "ordinal": 7, + "name": "args", + "type_info": "Jsonb" + }, + { + "ordinal": 8, + "name": "argument_types", + "type_info": "Text" + }, + { + "ordinal": 9, + "name": "identity_argument_types", + "type_info": "Text" + }, + { + "ordinal": 10, + "name": "return_type_id", + "type_info": "Int8" + }, + { + "ordinal": 11, + "name": "return_type", + "type_info": "Text" + }, + { + "ordinal": 12, + "name": "return_type_relation_id", + "type_info": "Int8" + }, + { + "ordinal": 13, + "name": "is_set_returning_function!", + "type_info": "Bool" + }, + { + "ordinal": 14, + "name": "behavior", + "type_info": "Text" + }, + { + "ordinal": 15, + "name": "security_definer!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null, + true, + true, + true, + true, + null, + null, + null, + null, + null, + null, + null, + null, + true, + null, + true + ] + }, + "hash": "4ea19fee016f1daeafdc466647d117910b19f540f19393b76aa6434e9d1d8502" +} diff --git a/.sqlx/query-64d9718b07516f3d2720cb7aa79e496f5337cadbad7a3fb03ccd3e3c21b71389.json b/.sqlx/query-64d9718b07516f3d2720cb7aa79e496f5337cadbad7a3fb03ccd3e3c21b71389.json deleted file mode 100644 index 43d63459..00000000 --- a/.sqlx/query-64d9718b07516f3d2720cb7aa79e496f5337cadbad7a3fb03ccd3e3c21b71389.json +++ /dev/null @@ -1,104 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "with functions as (\n select\n oid,\n proname,\n prosrc,\n prorettype,\n proretset,\n provolatile,\n prosecdef,\n prolang,\n pronamespace,\n proconfig,\n -- proargmodes is null when all arg modes are IN\n coalesce(\n p.proargmodes,\n array_fill(\n 'i' :: text,\n array [cardinality(coalesce(p.proallargtypes, p.proargtypes))]\n )\n ) as arg_modes,\n -- proargnames is null when all args are unnamed\n coalesce(\n p.proargnames,\n array_fill(\n '' :: text,\n array [cardinality(coalesce(p.proallargtypes, p.proargtypes))]\n )\n ) as arg_names,\n -- proallargtypes is null when all arg modes are IN\n coalesce(p.proallargtypes, p.proargtypes) as arg_types,\n array_cat(\n array_fill(false, array [pronargs - pronargdefaults]),\n array_fill(true, array [pronargdefaults])\n ) as arg_has_defaults\n from\n pg_proc as p\n where\n p.prokind = 'f'\n)\nselect\n f.oid :: int8 as \"id!\",\n n.nspname as \"schema!\",\n f.proname as \"name!\",\n l.lanname as \"language!\",\n case\n when l.lanname = 'internal' then null\n else f.prosrc\n end as body,\n case\n when l.lanname = 'internal' then null\n else pg_get_functiondef(f.oid)\n end as definition,\n coalesce(f_args.args, '[]') as args,\n 
nullif(pg_get_function_arguments(f.oid), '') as argument_types,\n nullif(pg_get_function_identity_arguments(f.oid), '') as identity_argument_types,\n f.prorettype :: int8 as \"return_type_id!\",\n pg_get_function_result(f.oid) as \"return_type!\",\n nullif(rt.typrelid :: int8, 0) as return_type_relation_id,\n f.proretset as is_set_returning_function,\n case\n when f.provolatile = 'i' then 'IMMUTABLE'\n when f.provolatile = 's' then 'STABLE'\n when f.provolatile = 'v' then 'VOLATILE'\n end as behavior,\n f.prosecdef as security_definer\nfrom\n functions f\n left join pg_namespace n on f.pronamespace = n.oid\n left join pg_language l on f.prolang = l.oid\n left join pg_type rt on rt.oid = f.prorettype\n left join (\n select\n oid,\n jsonb_object_agg(param, value) filter (\n where\n param is not null\n ) as config_params\n from\n (\n select\n oid,\n (string_to_array(unnest(proconfig), '=')) [1] as param,\n (string_to_array(unnest(proconfig), '=')) [2] as value\n from\n functions\n ) as t\n group by\n oid\n ) f_config on f_config.oid = f.oid\n left join (\n select\n oid,\n jsonb_agg(\n jsonb_build_object(\n 'mode',\n t2.mode,\n 'name',\n name,\n 'type_id',\n type_id,\n 'has_default',\n has_default\n )\n ) as args\n from\n (\n select\n oid,\n unnest(arg_modes) as mode,\n unnest(arg_names) as name,\n unnest(arg_types) :: int8 as type_id,\n unnest(arg_has_defaults) as has_default\n from\n functions\n ) as t1,\n lateral (\n select\n case\n when t1.mode = 'i' then 'in'\n when t1.mode = 'o' then 'out'\n when t1.mode = 'b' then 'inout'\n when t1.mode = 'v' then 'variadic'\n else 'table'\n end as mode\n ) as t2\n group by\n t1.oid\n ) f_args on f_args.oid = f.oid;", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id!", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "schema!", - "type_info": "Name" - }, - { - "ordinal": 2, - "name": "name!", - "type_info": "Name" - }, - { - "ordinal": 3, - "name": "language!", - "type_info": "Name" - }, - { - "ordinal": 4, - "name": "body", - "type_info": "Text" - }, - { - "ordinal": 5, - "name": "definition", - "type_info": "Text" - }, - { - "ordinal": 6, - "name": "args", - "type_info": "Jsonb" - }, - { - "ordinal": 7, - "name": "argument_types", - "type_info": "Text" - }, - { - "ordinal": 8, - "name": "identity_argument_types", - "type_info": "Text" - }, - { - "ordinal": 9, - "name": "return_type_id!", - "type_info": "Int8" - }, - { - "ordinal": 10, - "name": "return_type!", - "type_info": "Text" - }, - { - "ordinal": 11, - "name": "return_type_relation_id", - "type_info": "Int8" - }, - { - "ordinal": 12, - "name": "is_set_returning_function", - "type_info": "Bool" - }, - { - "ordinal": 13, - "name": "behavior", - "type_info": "Text" - }, - { - "ordinal": 14, - "name": "security_definer", - "type_info": "Bool" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - null, - true, - false, - true, - null, - null, - null, - null, - null, - null, - null, - null, - false, - null, - false - ] - }, - "hash": "64d9718b07516f3d2720cb7aa79e496f5337cadbad7a3fb03ccd3e3c21b71389" -} diff --git a/.sqlx/query-b0504a4340264403ad43d05c60d053db65ea6a7529e2cb97b2d3432a18aff6ba.json b/.sqlx/query-b0504a4340264403ad43d05c60d053db65ea6a7529e2cb97b2d3432a18aff6ba.json deleted file mode 100644 index dfc842b7..00000000 --- a/.sqlx/query-b0504a4340264403ad43d05c60d053db65ea6a7529e2cb97b2d3432a18aff6ba.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "select rolname from pg_catalog.pg_roles;", - "describe": { - "columns": [ - { - 
"ordinal": 0, - "name": "rolname", - "type_info": "Name" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - true - ] - }, - "hash": "b0504a4340264403ad43d05c60d053db65ea6a7529e2cb97b2d3432a18aff6ba" -} diff --git a/crates/pgt_schema_cache/src/functions.rs b/crates/pgt_schema_cache/src/functions.rs index 5e40709f..4afaa76d 100644 --- a/crates/pgt_schema_cache/src/functions.rs +++ b/crates/pgt_schema_cache/src/functions.rs @@ -4,6 +4,33 @@ use sqlx::types::JsonValue; use crate::schema_cache::SchemaCacheItem; +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)] +pub enum ProcKind { + #[default] + Function, + Aggregate, + Window, + Procedure, +} + +impl From for ProcKind { + fn from(value: char) -> Self { + match value { + 'f' => Self::Function, + 'p' => Self::Procedure, + 'w' => Self::Window, + 'a' => Self::Aggregate, + _ => unreachable!(), + } + } +} + +impl From for ProcKind { + fn from(value: i8) -> Self { + char::from(u8::try_from(value).unwrap()).into() + } +} + /// `Behavior` describes the characteristics of the function. Is it deterministic? Does it changed due to side effects, and if so, when? #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)] pub enum Behavior { @@ -72,6 +99,8 @@ pub struct Function { /// e.g. `plpgsql/sql` or `internal`. pub language: String, + pub kind: ProcKind, + /// The body of the function – the `declare [..] begin [..] end [..]` block.` Not set for internal functions. pub body: Option, @@ -88,10 +117,10 @@ pub struct Function { pub identity_argument_types: Option, /// An ID identifying the return type. For example, `2275` refers to `cstring`. 2278 refers to `void`. - pub return_type_id: i64, + pub return_type_id: Option, /// The return type, for example "text", "trigger", or "void". - pub return_type: String, + pub return_type: Option, /// If the return type is a composite type, this will point the matching entry's `oid` column in the `pg_class` table. `None` if the function does not return a composite type. 
pub return_type_relation_id: Option, @@ -115,3 +144,114 @@ impl SchemaCacheItem for Function { .await } } + +#[cfg(test)] +mod tests { + use sqlx::{Executor, PgPool}; + + use crate::{Behavior, SchemaCache, functions::ProcKind}; + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn loads(pool: PgPool) { + let setup = r#" + create table coos ( + id serial primary key, + name text + ); + + create or replace function my_cool_foo() + returns trigger + language plpgsql + security invoker + as $$ + begin + raise exception 'dont matter'; + end; + $$; + + create or replace procedure my_cool_proc() + language plpgsql + security invoker + as $$ + begin + raise exception 'dont matter'; + end; + $$; + + create or replace function string_concat_state( + state text, + value text, + separator text) + returns text + language plpgsql + as $$ + begin + if state is null then + return value; + else + return state || separator || value; + end if; + end; + $$; + + create aggregate string_concat(text, text) ( + sfunc = string_concat_state, + stype = text, + initcond = '' + ); + "#; + + pool.execute(setup).await.unwrap(); + + let cache = SchemaCache::load(&pool).await.unwrap(); + + // Find and check the function + let foo_fn = cache + .functions + .iter() + .find(|f| f.name == "my_cool_foo") + .unwrap(); + assert_eq!(foo_fn.schema, "public"); + assert_eq!(foo_fn.kind, ProcKind::Function); + assert_eq!(foo_fn.language, "plpgsql"); + assert_eq!(foo_fn.return_type.as_deref(), Some("trigger")); + assert!(!foo_fn.security_definer); + assert_eq!(foo_fn.behavior, Behavior::Volatile); + + // Find and check the procedure + let proc_fn = cache + .functions + .iter() + .find(|f| f.name == "my_cool_proc") + .unwrap(); + + assert_eq!(proc_fn.kind, ProcKind::Procedure); + assert_eq!(proc_fn.language, "plpgsql"); + assert!(!proc_fn.security_definer); + + // Find and check the aggregate + let agg_fn = cache + .functions + .iter() + .find(|f| f.name == "string_concat") + .unwrap(); + assert_eq!(agg_fn.kind, ProcKind::Aggregate); + assert_eq!(agg_fn.language, "internal"); // Aggregates are often "internal" + // The return type should be text + assert_eq!(agg_fn.return_type.as_deref(), Some("text")); + + // Find and check the state function for the aggregate + let state_fn = cache + .functions + .iter() + .find(|f| f.name == "string_concat_state") + .unwrap(); + + assert_eq!(state_fn.kind, ProcKind::Function); + assert_eq!(state_fn.language, "plpgsql"); + assert_eq!(state_fn.return_type.as_deref(), Some("text")); + assert_eq!(state_fn.args.args.len(), 3); + let arg_names: Vec<_> = state_fn.args.args.iter().map(|a| a.name.as_str()).collect(); + assert_eq!(arg_names, &["state", "value", "separator"]); + } +} diff --git a/crates/pgt_schema_cache/src/queries/functions.sql b/crates/pgt_schema_cache/src/queries/functions.sql index f78ba91e..9be1992d 100644 --- a/crates/pgt_schema_cache/src/queries/functions.sql +++ b/crates/pgt_schema_cache/src/queries/functions.sql @@ -10,6 +10,7 @@ with functions as ( prolang, pronamespace, proconfig, + prokind, -- proargmodes is null when all arg modes are IN coalesce( p.proargmodes, @@ -27,21 +28,20 @@ with functions as ( ) ) as arg_names, -- proallargtypes is null when all arg modes are IN - coalesce(p.proallargtypes, p.proargtypes) as arg_types, + coalesce(p.proallargtypes, string_to_array(proargtypes::text, ' ')::int[]) as arg_types, array_cat( array_fill(false, array [pronargs - pronargdefaults]), array_fill(true, array [pronargdefaults]) ) as arg_has_defaults from pg_proc as p - 
where - p.prokind = 'f' ) select f.oid :: int8 as "id!", n.nspname as "schema!", f.proname as "name!", l.lanname as "language!", + f.prokind as "kind!", case when l.lanname = 'internal' then null else f.prosrc @@ -53,16 +53,16 @@ select coalesce(f_args.args, '[]') as args, nullif(pg_get_function_arguments(f.oid), '') as argument_types, nullif(pg_get_function_identity_arguments(f.oid), '') as identity_argument_types, - f.prorettype :: int8 as "return_type_id!", - pg_get_function_result(f.oid) as "return_type!", + f.prorettype :: int8 as return_type_id, + pg_get_function_result(f.oid) as return_type, nullif(rt.typrelid :: int8, 0) as return_type_relation_id, - f.proretset as is_set_returning_function, + f.proretset as "is_set_returning_function!", case when f.provolatile = 'i' then 'IMMUTABLE' when f.provolatile = 's' then 'STABLE' when f.provolatile = 'v' then 'VOLATILE' end as behavior, - f.prosecdef as security_definer + f.prosecdef as "security_definer!" from functions f left join pg_namespace n on f.pronamespace = n.oid @@ -106,12 +106,13 @@ from ( select oid, - unnest(arg_modes) as mode, - unnest(arg_names) as name, - unnest(arg_types) :: int8 as type_id, - unnest(arg_has_defaults) as has_default + arg_modes[i] as mode, + arg_names[i] as name, + arg_types[i] :: int8 as type_id, + arg_has_defaults[i] as has_default from - functions + functions, + pg_catalog.generate_subscripts(arg_names, 1) as i ) as t1, lateral ( select diff --git a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts index aaa5a42a..a81a1ca9 100644 --- a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts +++ b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts @@ -97,7 +97,7 @@ export type DiagnosticTags = DiagnosticTag[]; /** * Serializable representation of a [Diagnostic](super::Diagnostic) advice -See the [Visitor] trait for additional documentation on all the supported advice types. +See the [Visitor] trait for additional documentation on all the supported advice types. */ export type Advice = | { log: [LogCategory, MarkupBuf] } @@ -202,7 +202,7 @@ export interface CompletionItem { /** * The text that the editor should fill in. If `None`, the `label` should be used. Tables, for example, might have different completion_texts: -label: "users", description: "Schema: auth", completion_text: "auth.users". +label: "users", description: "Schema: auth", completion_text: "auth.users". */ export interface CompletionText { is_snippet: boolean; @@ -355,7 +355,7 @@ export interface PartialVcsConfiguration { /** * The folder where we should check for VCS files. By default, we will use the same folder where `postgrestools.jsonc` was found. -If we can't find the configuration, it will attempt to use the current working directory. If no current working directory can't be found, we won't use the VCS integration, and a diagnostic will be emitted +If we can't find the configuration, it will attempt to use the current working directory. 
If no current working directory can be found, we won't use the VCS integration, and a diagnostic will be emitted */ root?: string; /** From 4cb12dfc448e65930775afcd9a0d1a11b0e59c5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Tue, 10 Jun 2025 20:22:02 +0200 Subject: [PATCH 075/114] fix: broken text after receiving batched changes (#413) --- .../src/workspace/server/change.rs | 80 ++++++++++++++----- 1 file changed, 62 insertions(+), 18 deletions(-) diff --git a/crates/pgt_workspace/src/workspace/server/change.rs b/crates/pgt_workspace/src/workspace/server/change.rs index c8799922..cdd5a569 100644 --- a/crates/pgt_workspace/src/workspace/server/change.rs +++ b/crates/pgt_workspace/src/workspace/server/change.rs @@ -65,6 +65,8 @@ impl Document { // when we receive more than one change, we need to push back the changes based on the // total range of the previous ones. This is because the ranges are always related to the original state. + // BUT: only for the statement range changes, not for the text changes + // this is why we pass both variants to apply_change let mut changes = Vec::new(); let mut offset: i64 = 0; @@ -86,9 +88,9 @@ impl Document { change }; - changes.extend(self.apply_change(adjusted_change)); + changes.extend(self.apply_change(adjusted_change, change)); - offset += change.change_size(); + offset += adjusted_change.change_size(); } self.version = change.version; @@ -240,9 +242,17 @@ impl Document { } /// Applies a single change to the document and returns the affected statements - fn apply_change(&mut self, change: &ChangeParams) -> Vec { + /// + /// * `change`: The range-adjusted change to use for statement changes + /// * `original_change`: The original change to use for text changes (yes, this is a bit confusing, and we might want to refactor this entire thing at some point.) + fn apply_change( + &mut self, + change: &ChangeParams, + original_change: &ChangeParams, + ) -> Vec { // if range is none, we have a full change if change.range.is_none() { + // doesn't matter which change we use, since the range is null return self.apply_full_change(change); } @@ -255,7 +265,7 @@ impl Document { let change_range = change.range.unwrap(); let previous_content = self.content.clone(); - let new_content = change.apply_to_text(&self.content); + let new_content = original_change.apply_to_text(&self.content); // we first need to determine the affected range and all affected statements, as well as // the index of the prev and the next statement, if any. 
The full affected range is the @@ -1560,28 +1570,29 @@ mod tests { fn multiple_deletions_at_once() { let path = PgTPath::new("test.sql"); - let mut doc = Document::new("\n\n\n\nALTER TABLE ONLY \"public\".\"sendout\"\n ADD CONSTRAINT \"sendout_organisation_id_fkey\" FOREIGN -KEY (\"organisation_id\") REFERENCES \"public\".\"organisation\"(\"id\") ON UPDATE RESTRICT ON DELETE CASCADE;\n".to_string(), 0); + let mut doc = Document::new("ALTER TABLE ONLY public.omni_channel_message ADD CONSTRAINT omni_channel_message_organisation_id_fkey FOREIGN KEY (organisation_id) REFERENCES public.organisation(id) ON UPDATE RESTRICT ON DELETE CASCADE;".to_string(), 0); let change = ChangeFileParams { path: path.clone(), version: 1, changes: vec![ ChangeParams { - range: Some(TextRange::new(31.into(), 38.into())), - text: "te".to_string(), + range: Some(TextRange::new(60.into(), 80.into())), + text: "sendout".to_string(), }, ChangeParams { - range: Some(TextRange::new(60.into(), 67.into())), - text: "te".to_string(), + range: Some(TextRange::new(24.into(), 44.into())), + text: "sendout".to_string(), }, ], }; let changed = doc.apply_file_change(&change); - assert_eq!(doc.content, "\n\n\n\nALTER TABLE ONLY \"public\".\"te\"\n ADD CONSTRAINT \"te_organisation_id_fkey\" FOREIGN -KEY (\"organisation_id\") REFERENCES \"public\".\"organisation\"(\"id\") ON UPDATE RESTRICT ON DELETE CASCADE;\n"); + assert_eq!( + doc.content, + "ALTER TABLE ONLY public.sendout ADD CONSTRAINT sendout_organisation_id_fkey FOREIGN KEY (organisation_id) REFERENCES public.organisation(id) ON UPDATE RESTRICT ON DELETE CASCADE;" + ); assert_eq!(changed.len(), 2); @@ -1592,19 +1603,18 @@ KEY (\"organisation_id\") REFERENCES \"public\".\"organisation\"(\"id\") ON UPDA fn multiple_additions_at_once() { let path = PgTPath::new("test.sql"); - let mut doc = Document::new("\n\n\n\nALTER TABLE ONLY \"public\".\"sendout\"\n ADD CONSTRAINT \"sendout_organisation_id_fkey\" FOREIGN -KEY (\"organisation_id\") REFERENCES \"public\".\"organisation\"(\"id\") ON UPDATE RESTRICT ON DELETE CASCADE;\n".to_string(), 0); + let mut doc = Document::new("ALTER TABLE ONLY public.sendout ADD CONSTRAINT sendout_organisation_id_fkey FOREIGN KEY (organisation_id) REFERENCES public.organisation(id) ON UPDATE RESTRICT ON DELETE CASCADE;".to_string(), 0); let change = ChangeFileParams { path: path.clone(), version: 1, changes: vec![ ChangeParams { - range: Some(TextRange::new(31.into(), 38.into())), + range: Some(TextRange::new(47.into(), 54.into())), text: "omni_channel_message".to_string(), }, ChangeParams { - range: Some(TextRange::new(60.into(), 67.into())), + range: Some(TextRange::new(24.into(), 31.into())), text: "omni_channel_message".to_string(), }, ], @@ -1612,8 +1622,10 @@ KEY (\"organisation_id\") REFERENCES \"public\".\"organisation\"(\"id\") ON UPDA let changed = doc.apply_file_change(&change); - assert_eq!(doc.content, "\n\n\n\nALTER TABLE ONLY \"public\".\"omni_channel_message\"\n ADD CONSTRAINT \"omni_channel_message_organisation_id_fkey\" FOREIGN -KEY (\"organisation_id\") REFERENCES \"public\".\"organisation\"(\"id\") ON UPDATE RESTRICT ON DELETE CASCADE;\n"); + assert_eq!( + doc.content, + "ALTER TABLE ONLY public.omni_channel_message ADD CONSTRAINT omni_channel_message_organisation_id_fkey FOREIGN KEY (organisation_id) REFERENCES public.organisation(id) ON UPDATE RESTRICT ON DELETE CASCADE;" + ); assert_eq!(changed.len(), 2); @@ -1663,6 +1675,38 @@ KEY (\"organisation_id\") REFERENCES \"public\".\"organisation\"(\"id\") ON UPDA 
assert_document_integrity(&doc); } + #[test] + fn test_content_out_of_sync() { + let path = PgTPath::new("test.sql"); + let initial_content = "select 1, 2, 2232231313393319 from unknown_users;\n"; + + let mut doc = Document::new(initial_content.to_string(), 0); + + let change1 = ChangeFileParams { + path: path.clone(), + version: 1, + changes: vec![ + ChangeParams { + range: Some(TextRange::new(29.into(), 29.into())), + text: "3".to_string(), + }, + ChangeParams { + range: Some(TextRange::new(30.into(), 30.into())), + text: "1".to_string(), + }, + ], + }; + + let _changes = doc.apply_file_change(&change1); + + assert_eq!( + doc.content, + "select 1, 2, 223223131339331931 from unknown_users;\n" + ); + + assert_document_integrity(&doc); + } + #[test] fn test_comments_only() { let path = PgTPath::new("test.sql"); From 224d7fd1d51993fe5152a229c0f3ad2ef755a5a8 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Thu, 12 Jun 2025 09:05:06 +0200 Subject: [PATCH 076/114] feat(completions): complete in WITH CHECK and USING clauses (#422) --- .../src/context/base_parser.rs | 175 +++++++++++----- crates/pgt_completions/src/context/mod.rs | 27 ++- .../src/context/policy_parser.rs | 187 +++++++++++++++++- .../pgt_completions/src/providers/columns.rs | 48 +++++ .../src/providers/functions.rs | 87 +++++++- .../src/relevance/filtering.rs | 24 ++- crates/pgt_completions/src/sanitization.rs | 28 ++- crates/pgt_schema_cache/src/lib.rs | 2 +- 8 files changed, 504 insertions(+), 74 deletions(-) diff --git a/crates/pgt_completions/src/context/base_parser.rs b/crates/pgt_completions/src/context/base_parser.rs index 93333679..83b31582 100644 --- a/crates/pgt_completions/src/context/base_parser.rs +++ b/crates/pgt_completions/src/context/base_parser.rs @@ -1,6 +1,5 @@ -use std::iter::Peekable; - use pgt_text_size::{TextRange, TextSize}; +use std::iter::Peekable; pub(crate) struct TokenNavigator { tokens: Peekable>, @@ -101,73 +100,139 @@ impl WordWithIndex { } } -/// Note: A policy name within quotation marks will be considered a single word. 
-pub(crate) fn sql_to_words(sql: &str) -> Result<Vec<WordWithIndex>, String> { - let mut words = vec![]; - - let mut start_of_word: Option<usize> = None; - let mut current_word = String::new(); - let mut in_quotation_marks = false; - - for (current_position, current_char) in sql.char_indices() { - if (current_char.is_ascii_whitespace() || current_char == ';') - && !current_word.is_empty() - && start_of_word.is_some() - && !in_quotation_marks - { - words.push(WordWithIndex { - word: current_word, - start: start_of_word.unwrap(), - end: current_position, - }); - - current_word = String::new(); - start_of_word = None; - } else if (current_char.is_ascii_whitespace() || current_char == ';') - && current_word.is_empty() - { - // do nothing - } else if current_char == '"' && start_of_word.is_none() { - in_quotation_marks = true; - current_word.push(current_char); - start_of_word = Some(current_position); - } else if current_char == '"' && start_of_word.is_some() { - current_word.push(current_char); - in_quotation_marks = false; - } else if start_of_word.is_some() { - current_word.push(current_char) +pub(crate) struct SubStatementParser { + start_of_word: Option<usize>, + current_word: String, + in_quotation_marks: bool, + is_fn_call: bool, + words: Vec<WordWithIndex>, +} + +impl SubStatementParser { + pub(crate) fn parse(sql: &str) -> Result<Vec<WordWithIndex>, String> { + let mut parser = SubStatementParser { + start_of_word: None, + current_word: String::new(), + in_quotation_marks: false, + is_fn_call: false, + words: vec![], + }; + + parser.collect_words(sql); + + if parser.in_quotation_marks { + Err("String was not closed properly.".into()) } else { - start_of_word = Some(current_position); - current_word.push(current_char); + Ok(parser.words) } } - if let Some(start_of_word) = start_of_word { - if !current_word.is_empty() { - words.push(WordWithIndex { - word: current_word, - start: start_of_word, - end: sql.len(), - }); + pub fn collect_words(&mut self, sql: &str) { + for (pos, c) in sql.char_indices() { + match c { + '"' => { + if !self.has_started_word() { + self.in_quotation_marks = true; + self.add_char(c); + self.start_word(pos); + } else { + self.in_quotation_marks = false; + self.add_char(c); + } + } + + '(' => { + if !self.has_started_word() { + self.push_char_as_word(c, pos); + } else { + self.add_char(c); + self.is_fn_call = true; + } + } + + ')' => { + if self.is_fn_call { + self.add_char(c); + self.is_fn_call = false; + } else { + if self.has_started_word() { + self.push_word(pos); + } + self.push_char_as_word(c, pos); + } + } + + _ => { + if c.is_ascii_whitespace() || c == ';' { + if self.in_quotation_marks { + self.add_char(c); + } else if !self.is_empty() && self.has_started_word() { + self.push_word(pos); + } + } else if self.has_started_word() { + self.add_char(c); + } else { + self.start_word(pos); + self.add_char(c) + } + } + } + } + + if self.has_started_word() && !self.is_empty() { + self.push_word(sql.len()) } } - if in_quotation_marks { - Err("String was not closed properly.".into()) - } else { - Ok(words) + fn is_empty(&self) -> bool { + self.current_word.is_empty() + } + + fn add_char(&mut self, c: char) { + self.current_word.push(c) + } + + fn start_word(&mut self, pos: usize) { + self.start_of_word = Some(pos); + } + + fn has_started_word(&self) -> bool { + self.start_of_word.is_some() + } + + fn push_char_as_word(&mut self, c: char, pos: usize) { + self.words.push(WordWithIndex { + word: String::from(c), + start: pos, + end: pos + 1, + }); + } + + fn push_word(&mut self, current_position: usize) {
self.words.push(WordWithIndex { + word: self.current_word.clone(), + start: self.start_of_word.unwrap(), + end: current_position, + }); + self.current_word = String::new(); + self.start_of_word = None; } } +/// Note: A policy name within quotation marks will be considered a single word. +pub(crate) fn sql_to_words(sql: &str) -> Result, String> { + SubStatementParser::parse(sql) +} + #[cfg(test)] mod tests { - use crate::context::base_parser::{WordWithIndex, sql_to_words}; + use crate::context::base_parser::{SubStatementParser, WordWithIndex, sql_to_words}; #[test] fn determines_positions_correctly() { - let query = "\ncreate policy \"my cool pol\"\n\ton auth.users\n\tas permissive\n\tfor select\n\t\tto public\n\t\tusing (true);".to_string(); + let query = "\ncreate policy \"my cool pol\"\n\ton auth.users\n\tas permissive\n\tfor select\n\t\tto public\n\t\tusing (auth.uid());".to_string(); - let words = sql_to_words(query.as_str()).unwrap(); + let words = SubStatementParser::parse(query.as_str()).unwrap(); assert_eq!(words[0], to_word("create", 1, 7)); assert_eq!(words[1], to_word("policy", 8, 14)); @@ -181,7 +246,9 @@ mod tests { assert_eq!(words[9], to_word("to", 73, 75)); assert_eq!(words[10], to_word("public", 78, 84)); assert_eq!(words[11], to_word("using", 87, 92)); - assert_eq!(words[12], to_word("(true)", 93, 99)); + assert_eq!(words[12], to_word("(", 93, 94)); + assert_eq!(words[13], to_word("auth.uid()", 94, 104)); + assert_eq!(words[14], to_word(")", 104, 105)); } #[test] diff --git a/crates/pgt_completions/src/context/mod.rs b/crates/pgt_completions/src/context/mod.rs index 996ec6be..01e563b0 100644 --- a/crates/pgt_completions/src/context/mod.rs +++ b/crates/pgt_completions/src/context/mod.rs @@ -47,6 +47,15 @@ pub enum WrappingClause<'a> { SetStatement, AlterRole, DropRole, + + /// `PolicyCheck` refers to either the `WITH CHECK` or the `USING` clause + /// in a policy statement. + /// ```sql + /// CREATE POLICY "my pol" ON PUBLIC.USERS + /// FOR SELECT + /// USING (...) -- this one! + /// ``` + PolicyCheck, } #[derive(PartialEq, Eq, Hash, Debug, Clone)] @@ -78,6 +87,7 @@ pub(crate) enum NodeUnderCursor<'a> { text: NodeText, range: TextRange, kind: String, + previous_node_kind: Option, }, } @@ -222,6 +232,7 @@ impl<'a> CompletionContext<'a> { text: revoke_context.node_text.into(), range: revoke_context.node_range, kind: revoke_context.node_kind.clone(), + previous_node_kind: None, }); if revoke_context.node_kind == "revoke_table" { @@ -249,6 +260,7 @@ impl<'a> CompletionContext<'a> { text: grant_context.node_text.into(), range: grant_context.node_range, kind: grant_context.node_kind.clone(), + previous_node_kind: None, }); if grant_context.node_kind == "grant_table" { @@ -276,6 +288,7 @@ impl<'a> CompletionContext<'a> { text: policy_context.node_text.into(), range: policy_context.node_range, kind: policy_context.node_kind.clone(), + previous_node_kind: Some(policy_context.previous_node_kind), }); if policy_context.node_kind == "policy_table" { @@ -295,7 +308,13 @@ impl<'a> CompletionContext<'a> { } "policy_role" => Some(WrappingClause::ToRoleAssignment), "policy_table" => Some(WrappingClause::From), - _ => None, + _ => { + if policy_context.in_check_or_using_clause { + Some(WrappingClause::PolicyCheck) + } else { + None + } + } }; } @@ -785,7 +804,11 @@ impl<'a> CompletionContext<'a> { .is_some_and(|sib| kinds.contains(&sib.kind())) } - NodeUnderCursor::CustomNode { .. } => false, + NodeUnderCursor::CustomNode { + previous_node_kind, .. 
+ } => previous_node_kind + .as_ref() + .is_some_and(|k| kinds.contains(&k.as_str())), } }) } diff --git a/crates/pgt_completions/src/context/policy_parser.rs b/crates/pgt_completions/src/context/policy_parser.rs index 58619502..bcc60499 100644 --- a/crates/pgt_completions/src/context/policy_parser.rs +++ b/crates/pgt_completions/src/context/policy_parser.rs @@ -22,6 +22,10 @@ pub(crate) struct PolicyContext { pub node_text: String, pub node_range: TextRange, pub node_kind: String, + pub previous_node_text: String, + pub previous_node_range: TextRange, + pub previous_node_kind: String, + pub in_check_or_using_clause: bool, } /// Simple parser that'll turn a policy-related statement into a context object required for @@ -32,6 +36,7 @@ pub(crate) struct PolicyParser { navigator: TokenNavigator, context: PolicyContext, cursor_position: usize, + in_check_or_using_clause: bool, } impl CompletionStatementParser for PolicyParser { @@ -63,6 +68,7 @@ impl CompletionStatementParser for PolicyParser { navigator: tokens.into(), context: PolicyContext::default(), cursor_position, + in_check_or_using_clause: false, } } } @@ -73,6 +79,8 @@ impl PolicyParser { return; } + self.context.in_check_or_using_clause = self.in_check_or_using_clause; + let previous = self.navigator.previous_token.take().unwrap(); match previous @@ -84,6 +92,8 @@ impl PolicyParser { self.context.node_range = token.get_range(); self.context.node_kind = "policy_name".into(); self.context.node_text = token.get_word(); + + self.context.previous_node_kind = "keyword_policy".into(); } "on" => { if token.get_word_without_quotes().contains('.') { @@ -112,17 +122,35 @@ impl PolicyParser { self.context.node_text = token.get_word(); self.context.node_kind = "policy_table".into(); } + + self.context.previous_node_kind = "keyword_on".into(); } "to" => { self.context.node_range = token.get_range(); self.context.node_kind = "policy_role".into(); self.context.node_text = token.get_word(); + + self.context.previous_node_kind = "keyword_to".into(); } - _ => { + + other => { self.context.node_range = token.get_range(); self.context.node_text = token.get_word(); + + self.context.previous_node_range = previous.get_range(); + self.context.previous_node_text = previous.get_word(); + + match other { + "(" | "=" => self.context.previous_node_kind = other.into(), + "and" => self.context.previous_node_kind = "keyword_and".into(), + + _ => self.context.previous_node_kind = "".into(), + } } } + + self.context.previous_node_range = previous.get_range(); + self.context.previous_node_text = previous.get_word(); } fn handle_token(&mut self, token: WordWithIndex) { @@ -142,6 +170,13 @@ impl PolicyParser { } "on" => self.table_with_schema(), + "(" if self.navigator.prev_matches(&["using", "check"]) => { + self.in_check_or_using_clause = true; + } + ")" => { + self.in_check_or_using_clause = false; + } + // skip the "to" so we don't parse it as the TO rolename when it's under the cursor "rename" if self.navigator.next_matches(&["to"]) => { self.navigator.advance(); @@ -218,7 +253,11 @@ mod tests { statement_kind: PolicyStmtKind::Create, node_text: "REPLACED_TOKEN".into(), node_range: TextRange::new(TextSize::new(25), TextSize::new(39)), - node_kind: "policy_name".into() + node_kind: "policy_name".into(), + in_check_or_using_clause: false, + previous_node_kind: "keyword_policy".into(), + previous_node_range: TextRange::new(18.into(), 24.into()), + previous_node_text: "policy".into(), } ); @@ -241,6 +280,10 @@ mod tests { node_text: "REPLACED_TOKEN".into(), node_kind: 
"".into(), node_range: TextRange::new(TextSize::new(42), TextSize::new(56)), + in_check_or_using_clause: false, + previous_node_kind: "".into(), + previous_node_range: TextRange::new(25.into(), 41.into()), + previous_node_text: "\"my cool policy\"".into(), } ); @@ -263,6 +306,10 @@ mod tests { node_text: "REPLACED_TOKEN".into(), node_kind: "policy_table".into(), node_range: TextRange::new(TextSize::new(45), TextSize::new(59)), + in_check_or_using_clause: false, + previous_node_kind: "keyword_on".into(), + previous_node_range: TextRange::new(42.into(), 44.into()), + previous_node_text: "on".into(), } ); @@ -285,6 +332,10 @@ mod tests { node_text: "REPLACED_TOKEN".into(), node_kind: "policy_table".into(), node_range: TextRange::new(TextSize::new(50), TextSize::new(64)), + in_check_or_using_clause: false, + previous_node_kind: "keyword_on".into(), + previous_node_range: TextRange::new(42.into(), 44.into()), + previous_node_text: "on".into(), } ); @@ -308,6 +359,10 @@ mod tests { node_text: "REPLACED_TOKEN".into(), node_kind: "".into(), node_range: TextRange::new(TextSize::new(72), TextSize::new(86)), + in_check_or_using_clause: false, + previous_node_kind: "".into(), + previous_node_range: TextRange::new(69.into(), 71.into()), + previous_node_text: "as".into(), } ); @@ -332,6 +387,10 @@ mod tests { node_text: "REPLACED_TOKEN".into(), node_kind: "".into(), node_range: TextRange::new(TextSize::new(95), TextSize::new(109)), + in_check_or_using_clause: false, + previous_node_kind: "".into(), + previous_node_range: TextRange::new(72.into(), 82.into()), + previous_node_text: "permissive".into(), } ); @@ -356,6 +415,10 @@ mod tests { node_text: "REPLACED_TOKEN".into(), node_kind: "policy_role".into(), node_range: TextRange::new(TextSize::new(98), TextSize::new(112)), + in_check_or_using_clause: false, + previous_node_kind: "keyword_to".into(), + previous_node_range: TextRange::new(95.into(), 97.into()), + previous_node_text: "to".into(), } ); } @@ -383,7 +446,11 @@ mod tests { statement_kind: PolicyStmtKind::Create, node_text: "REPLACED_TOKEN".into(), node_range: TextRange::new(TextSize::new(57), TextSize::new(71)), - node_kind: "policy_table".into() + node_kind: "policy_table".into(), + in_check_or_using_clause: false, + previous_node_kind: "keyword_on".into(), + previous_node_range: TextRange::new(54.into(), 56.into()), + previous_node_text: "on".into(), } ) } @@ -411,7 +478,11 @@ mod tests { statement_kind: PolicyStmtKind::Create, node_text: "REPLACED_TOKEN".into(), node_range: TextRange::new(TextSize::new(62), TextSize::new(76)), - node_kind: "policy_table".into() + node_kind: "policy_table".into(), + in_check_or_using_clause: false, + previous_node_kind: "keyword_on".into(), + previous_node_range: TextRange::new(54.into(), 56.into()), + previous_node_text: "on".into(), } ) } @@ -436,7 +507,11 @@ mod tests { statement_kind: PolicyStmtKind::Drop, node_text: "REPLACED_TOKEN".into(), node_range: TextRange::new(TextSize::new(23), TextSize::new(37)), - node_kind: "policy_name".into() + node_kind: "policy_name".into(), + in_check_or_using_clause: false, + previous_node_kind: "keyword_policy".into(), + previous_node_range: TextRange::new(16.into(), 22.into()), + previous_node_text: "policy".into(), } ); @@ -459,7 +534,11 @@ mod tests { statement_kind: PolicyStmtKind::Drop, node_text: "\"REPLACED_TOKEN\"".into(), node_range: TextRange::new(TextSize::new(23), TextSize::new(39)), - node_kind: "policy_name".into() + node_kind: "policy_name".into(), + in_check_or_using_clause: false, + previous_node_kind: 
"keyword_policy".into(), + previous_node_range: TextRange::new(16.into(), 22.into()), + previous_node_text: "policy".into(), } ); } @@ -477,4 +556,100 @@ mod tests { assert_eq!(context, PolicyContext::default()); } + + #[test] + fn correctly_determines_we_are_inside_checks() { + { + let (pos, query) = with_pos(format!( + r#" + create policy "my cool policy" + on auth.users + to all + using (id = {}) + "#, + CURSOR_POS + )); + + let context = PolicyParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + PolicyContext { + policy_name: Some(r#""my cool policy""#.into()), + table_name: Some("users".into()), + schema_name: Some("auth".into()), + statement_kind: PolicyStmtKind::Create, + node_text: "REPLACED_TOKEN".into(), + node_range: TextRange::new(TextSize::new(112), TextSize::new(126)), + node_kind: "".into(), + in_check_or_using_clause: true, + previous_node_kind: "=".into(), + previous_node_range: TextRange::new(110.into(), 111.into()), + previous_node_text: "=".into(), + } + ); + } + + { + let (pos, query) = with_pos(format!( + r#" + create policy "my cool policy" + on auth.users + to all + using ({} + "#, + CURSOR_POS + )); + + let context = PolicyParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + PolicyContext { + policy_name: Some(r#""my cool policy""#.into()), + table_name: Some("users".into()), + schema_name: Some("auth".into()), + statement_kind: PolicyStmtKind::Create, + node_text: "REPLACED_TOKEN".into(), + node_range: TextRange::new(TextSize::new(106), TextSize::new(120)), + node_kind: "".into(), + in_check_or_using_clause: true, + previous_node_kind: "(".into(), + previous_node_range: TextRange::new(105.into(), 106.into()), + previous_node_text: "(".into(), + } + ) + } + + { + let (pos, query) = with_pos(format!( + r#" + create policy "my cool policy" + on auth.users + to all + with check ({} + "#, + CURSOR_POS + )); + + let context = PolicyParser::get_context(query.as_str(), pos); + + assert_eq!( + context, + PolicyContext { + policy_name: Some(r#""my cool policy""#.into()), + table_name: Some("users".into()), + schema_name: Some("auth".into()), + statement_kind: PolicyStmtKind::Create, + node_text: "REPLACED_TOKEN".into(), + node_range: TextRange::new(TextSize::new(111), TextSize::new(125)), + node_kind: "".into(), + in_check_or_using_clause: true, + previous_node_kind: "(".into(), + previous_node_range: TextRange::new(110.into(), 111.into()), + previous_node_text: "(".into(), + } + ) + } + } } diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs index 4299973b..04d0af65 100644 --- a/crates/pgt_completions/src/providers/columns.rs +++ b/crates/pgt_completions/src/providers/columns.rs @@ -817,4 +817,52 @@ mod tests { .await; } } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn suggests_columns_policy_using_clause(pool: PgPool) { + let setup = r#" + create table instruments ( + id bigint primary key generated always as identity, + name text not null, + z text, + created_at timestamp with time zone default now() + ); + "#; + + pool.execute(setup).await.unwrap(); + + let col_queries = vec![ + format!( + r#"create policy "my_pol" on public.instruments for select using ({})"#, + CURSOR_POS + ), + format!( + r#"create policy "my_pol" on public.instruments for insert with check ({})"#, + CURSOR_POS + ), + format!( + r#"create policy "my_pol" on public.instruments for update using (id = 1 and {})"#, + CURSOR_POS + ), + format!( + r#"create policy "my_pol" on 
public.instruments for insert with check (id = 1 and {})"#, + CURSOR_POS + ), + ]; + + for query in col_queries { + assert_complete_results( + query.as_str(), + vec![ + CompletionAssertion::Label("created_at".into()), + CompletionAssertion::Label("id".into()), + CompletionAssertion::Label("name".into()), + CompletionAssertion::Label("z".into()), + ], + None, + &pool, + ) + .await; + } + } } diff --git a/crates/pgt_completions/src/providers/functions.rs b/crates/pgt_completions/src/providers/functions.rs index 2bc4f331..615e4f95 100644 --- a/crates/pgt_completions/src/providers/functions.rs +++ b/crates/pgt_completions/src/providers/functions.rs @@ -65,11 +65,14 @@ fn get_completion_text(ctx: &CompletionContext, func: &Function) -> CompletionTe #[cfg(test)] mod tests { - use sqlx::PgPool; + use sqlx::{Executor, PgPool}; use crate::{ CompletionItem, CompletionItemKind, complete, - test_helper::{CURSOR_POS, get_test_deps, get_test_params}, + test_helper::{ + CURSOR_POS, CompletionAssertion, assert_complete_results, get_test_deps, + get_test_params, + }, }; #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] @@ -201,4 +204,84 @@ mod tests { assert_eq!(label, "cool"); assert_eq!(kind, CompletionItemKind::Function); } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn only_allows_functions_and_procedures_in_policy_checks(pool: PgPool) { + let setup = r#" + create table coos ( + id serial primary key, + name text + ); + + create or replace function my_cool_foo() + returns trigger + language plpgsql + security invoker + as $$ + begin + raise exception 'dont matter'; + end; + $$; + + create or replace procedure my_cool_proc() + language plpgsql + security invoker + as $$ + begin + raise exception 'dont matter'; + end; + $$; + + create or replace function string_concat_state( + state text, + value text, + separator text) + returns text + language plpgsql + as $$ + begin + if state is null then + return value; + else + return state || separator || value; + end if; + end; + $$; + + create aggregate string_concat(text, text) ( + sfunc = string_concat_state, + stype = text, + initcond = '' + ); + "#; + + pool.execute(setup).await.unwrap(); + + let query = format!( + r#"create policy "my_pol" on public.instruments for insert with check (id = {})"#, + CURSOR_POS + ); + + assert_complete_results( + query.as_str(), + vec![ + CompletionAssertion::LabelNotExists("string_concat".into()), + CompletionAssertion::LabelAndKind( + "my_cool_foo".into(), + CompletionItemKind::Function, + ), + CompletionAssertion::LabelAndKind( + "my_cool_proc".into(), + CompletionItemKind::Function, + ), + CompletionAssertion::LabelAndKind( + "string_concat_state".into(), + CompletionItemKind::Function, + ), + ], + None, + &pool, + ) + .await; + } } diff --git a/crates/pgt_completions/src/relevance/filtering.rs b/crates/pgt_completions/src/relevance/filtering.rs index a020d2e8..beea6ddb 100644 --- a/crates/pgt_completions/src/relevance/filtering.rs +++ b/crates/pgt_completions/src/relevance/filtering.rs @@ -1,3 +1,5 @@ +use pgt_schema_cache::ProcKind; + use crate::context::{CompletionContext, NodeUnderCursor, WrappingClause, WrappingNode}; use super::CompletionRelevanceData; @@ -137,17 +139,27 @@ impl CompletionFilter<'_> { && ctx.parent_matches_one_of_kind(&["field"])) } + WrappingClause::PolicyCheck => { + ctx.before_cursor_matches_kind(&["keyword_and", "("]) + } + _ => false, } } - CompletionRelevanceData::Function(_) => matches!( - clause, + CompletionRelevanceData::Function(f) => match clause { 
WrappingClause::From - | WrappingClause::Select - | WrappingClause::Where - | WrappingClause::Join { .. } - ), + | WrappingClause::Select + | WrappingClause::Where + | WrappingClause::Join { .. } => true, + + WrappingClause::PolicyCheck => { + ctx.before_cursor_matches_kind(&["="]) + && matches!(f.kind, ProcKind::Function | ProcKind::Procedure) + } + + _ => false, + }, CompletionRelevanceData::Schema(_) => match clause { WrappingClause::Select diff --git a/crates/pgt_completions/src/sanitization.rs b/crates/pgt_completions/src/sanitization.rs index ddc9563e..bf4d9816 100644 --- a/crates/pgt_completions/src/sanitization.rs +++ b/crates/pgt_completions/src/sanitization.rs @@ -257,10 +257,15 @@ fn cursor_between_parentheses(sql: &str, position: TextSize) -> bool { .find(|c| !c.is_whitespace()) .unwrap_or_default(); - let before_matches = before == ',' || before == '('; - let after_matches = after == ',' || after == ')'; + // (.. and |) + let after_and_keyword = &sql[position.saturating_sub(4)..position] == "and " && after == ')'; + let after_eq_sign = before == '=' && after == ')'; - before_matches && after_matches + let head_of_list = before == '(' && after == ','; + let end_of_list = before == ',' && after == ')'; + let between_list_items = before == ',' && after == ','; + + head_of_list || end_of_list || between_list_items || after_and_keyword || after_eq_sign } #[cfg(test)] @@ -444,5 +449,22 @@ mod tests { "insert into instruments (name) values (a_function(name, ))", TextSize::new(56) )); + + // will sanitize after = + assert!(cursor_between_parentheses( + // create policy my_pol on users using (id = |), + "create policy my_pol on users using (id = )", + TextSize::new(42) + )); + + // will sanitize after and + assert!(cursor_between_parentheses( + // create policy my_pol on users using (id = 1 and |), + "create policy my_pol on users using (id = 1 and )", + TextSize::new(48) + )); + + // does not break if sql is really short + assert!(!cursor_between_parentheses("(a)", TextSize::new(2))); } } diff --git a/crates/pgt_schema_cache/src/lib.rs b/crates/pgt_schema_cache/src/lib.rs index 9beb2f8a..b67f9412 100644 --- a/crates/pgt_schema_cache/src/lib.rs +++ b/crates/pgt_schema_cache/src/lib.rs @@ -14,7 +14,7 @@ mod types; mod versions; pub use columns::*; -pub use functions::{Behavior, Function, FunctionArg, FunctionArgs}; +pub use functions::{Behavior, Function, FunctionArg, FunctionArgs, ProcKind}; pub use policies::{Policy, PolicyCommand}; pub use roles::*; pub use schema_cache::SchemaCache; From a621abcc66bfa300cbfb6d115b28d4027d629df0 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Sat, 14 Jun 2025 12:07:54 +0200 Subject: [PATCH 077/114] fix: release pipeline points at correct schema.json (#427) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 934edba9..96760298 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -152,7 +152,7 @@ jobs: tag_name: ${{ steps.create_changelog.outputs.version }} files: | postgrestools_* - docs/schemas/latest/schema.json + docs/schema.json fail_on_unmatched_files: true draft: true From 31e6ddff9a59a195a27601da5b7840d57deca788 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Mon, 16 Jun 2025 17:48:36 +0200 Subject: [PATCH 078/114] fix: batched changes (#428) --- crates/pgt_lsp/tests/server.rs | 136 ++++++++++++++++++ 
.../src/workspace/server/change.rs | 74 +++++----- 2 files changed, 172 insertions(+), 38 deletions(-) diff --git a/crates/pgt_lsp/tests/server.rs b/crates/pgt_lsp/tests/server.rs index 438c7298..96ff566c 100644 --- a/crates/pgt_lsp/tests/server.rs +++ b/crates/pgt_lsp/tests/server.rs @@ -1542,3 +1542,139 @@ async fn extends_config(test_db: PgPool) -> Result<()> { Ok(()) } + +#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] +async fn test_multiple_content_changes_single_request(test_db: PgPool) -> Result<()> { + let factory = ServerFactory::default(); + let mut fs = MemoryFileSystem::default(); + + let setup = r#" + create table public.campaign_contact_list ( + id serial primary key, + contact_list_id integer + ); + + create table public.contact_list ( + id serial primary key, + name varchar(255) + ); + + create table public.journey_node_contact_list ( + id serial primary key, + contact_list_id integer + ); + "#; + + test_db + .execute(setup) + .await + .expect("Failed to setup test database"); + + let mut conf = PartialConfiguration::init(); + conf.merge_with(PartialConfiguration { + db: Some(PartialDatabaseConfiguration { + database: Some( + test_db + .connect_options() + .get_database() + .unwrap() + .to_string(), + ), + ..Default::default() + }), + ..Default::default() + }); + fs.insert( + url!("postgrestools.jsonc").to_file_path().unwrap(), + serde_json::to_string_pretty(&conf).unwrap(), + ); + + let (service, client) = factory + .create_with_fs(None, DynRef::Owned(Box::new(fs))) + .into_inner(); + + let (stream, sink) = client.split(); + let mut server = Server::new(service); + + let (sender, mut receiver) = channel(CHANNEL_BUFFER_SIZE); + let reader = tokio::spawn(client_handler(stream, sink, sender)); + + server.initialize().await?; + server.initialized().await?; + + server.load_configuration().await?; + + // Open document with initial content that matches the log trace + let initial_content = r#" + + + +ALTER TABLE ONLY "public"."campaign_contact_list" + ADD CONSTRAINT "campaign_contact_list_contact_list_id_fkey" FOREIGN KEY ("contact_list_id") REFERENCES "public"."contact_list"("id") ON UPDATE RESTRICT ON DELETE CASCADE; +"#; + + server.open_document(initial_content).await?; + + // Apply multiple content changes in a single request, similar to the log trace + // This simulates changing "campaign" to "journey_node" in two places simultaneously + server + .change_document( + 4, + vec![ + // First change: line 4, character 27-35 (changing "campaign" to "journey_node") + TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 27, + }, + end: Position { + line: 4, + character: 35, + }, + }), + range_length: Some(8), + text: "journey_node".to_string(), + }, + // Second change: line 5, character 20-28 (changing "campaign" to "journey_node") + TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 5, + character: 20, + }, + end: Position { + line: 5, + character: 28, + }, + }), + range_length: Some(8), + text: "journey_node".to_string(), + }, + ], + ) + .await?; + + // make sure there is no diagnostics + let notification = tokio::time::timeout(Duration::from_secs(2), async { + loop { + match receiver.next().await { + Some(ServerNotification::PublishDiagnostics(msg)) => { + if !msg.diagnostics.is_empty() { + return true; + } + } + _ => continue, + } + } + }) + .await + .is_ok(); + + assert!(!notification, "did not expect diagnostics"); + + server.shutdown().await?; + reader.abort(); + + Ok(()) +} diff --git 
a/crates/pgt_workspace/src/workspace/server/change.rs b/crates/pgt_workspace/src/workspace/server/change.rs index cdd5a569..62e3da03 100644 --- a/crates/pgt_workspace/src/workspace/server/change.rs +++ b/crates/pgt_workspace/src/workspace/server/change.rs @@ -69,28 +69,19 @@ impl Document { // this is why we pass both variants to apply_change let mut changes = Vec::new(); - let mut offset: i64 = 0; - - for change in &change.changes { - let adjusted_change = if offset != 0 && change.range.is_some() { - &ChangeParams { - text: change.text.clone(), - range: change.range.map(|range| { - let start = u32::from(range.start()); - let end = u32::from(range.end()); - TextRange::new( - TextSize::from((start as i64 + offset).try_into().unwrap_or(0)), - TextSize::from((end as i64 + offset).try_into().unwrap_or(0)), - ) - }), - } - } else { - change - }; - - changes.extend(self.apply_change(adjusted_change, change)); - - offset += adjusted_change.change_size(); + let mut change_indices: Vec<usize> = (0..change.changes.len()).collect(); + change_indices.sort_by(|&a, &b| { + match (change.changes[a].range, change.changes[b].range) { + (Some(range_a), Some(range_b)) => range_b.start().cmp(&range_a.start()), + (Some(_), None) => std::cmp::Ordering::Greater, // full changes will never be sent in a batch so this does not matter + (None, Some(_)) => std::cmp::Ordering::Less, + (None, None) => std::cmp::Ordering::Equal, + } + }); + + // Changes are sorted by start position above and processed from last to first to avoid position invalidation + for &idx in &change_indices { + changes.extend(self.apply_change(&change.changes[idx])); } self.version = change.version; @@ -245,11 +236,7 @@ impl Document { /// /// * `change`: The range-adjusted change to use for statement changes /// * `original_change`: The original change to use for text changes (yes, this is a bit confusing, and we might want to refactor this entire thing at some point.) - fn apply_change( - &mut self, - change: &ChangeParams, - original_change: &ChangeParams, - ) -> Vec { + fn apply_change(&mut self, change: &ChangeParams) -> Vec { // if range is none, we have a full change if change.range.is_none() { // doesn't matter which change we use, since the range is null @@ -265,7 +252,7 @@ impl Document { let change_range = change.range.unwrap(); let previous_content = self.content.clone(); - let new_content = original_change.apply_to_text(&self.content); + let new_content = change.apply_to_text(&self.content); // we first need to determine the affected range and all affected statements, as well as // the index of the prev and the next statement, if any. 
The full affected range is the @@ -1676,9 +1663,15 @@ mod tests { } #[test] - fn test_content_out_of_sync() { + fn test_another_issue() { let path = PgTPath::new("test.sql"); - let initial_content = "select 1, 2, 2232231313393319 from unknown_users;\n"; + let initial_content = r#" + + + +ALTER TABLE ONLY "public"."campaign_contact_list" + ADD CONSTRAINT "campaign_contact_list_contact_list_id_fkey" FOREIGN KEY ("contact_list_id") REFERENCES "public"."contact_list"("id") ON UPDATE RESTRICT ON DELETE CASCADE; +"#; let mut doc = Document::new(initial_content.to_string(), 0); @@ -1687,22 +1680,27 @@ mod tests { version: 1, changes: vec![ ChangeParams { - range: Some(TextRange::new(29.into(), 29.into())), - text: "3".to_string(), + range: Some(TextRange::new(31.into(), 39.into())), + text: "journey_node".to_string(), }, ChangeParams { - range: Some(TextRange::new(30.into(), 30.into())), - text: "1".to_string(), + range: Some(TextRange::new(74.into(), 82.into())), + text: "journey_node".to_string(), }, ], }; let _changes = doc.apply_file_change(&change1); - assert_eq!( - doc.content, - "select 1, 2, 223223131339331931 from unknown_users;\n" - ); + let expected_content = r#" + + + +ALTER TABLE ONLY "public"."journey_node_contact_list" + ADD CONSTRAINT "journey_node_contact_list_contact_list_id_fkey" FOREIGN KEY ("contact_list_id") REFERENCES "public"."contact_list"("id") ON UPDATE RESTRICT ON DELETE CASCADE; +"#; + + assert_eq!(doc.content, expected_content); assert_document_integrity(&doc); } From e24bacd7b0ad74ad3b54f82de2e996d9b10e6f8c Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Thu, 19 Jun 2025 11:34:03 +0200 Subject: [PATCH 079/114] feat(linting): customize severity per rule (#430) --- Cargo.lock | 1 + crates/pgt_analyse/src/macros.rs | 5 +- crates/pgt_analyse/src/rule.rs | 12 ++- crates/pgt_analyser/CONTRIBUTING.md | 9 +- crates/pgt_analyser/Cargo.toml | 9 +- crates/pgt_analyser/src/lint/safety.rs | 4 +- .../src/lint/safety/adding_required_field.rs | 2 + .../src/lint/safety/ban_drop_column.rs | 2 + .../src/lint/safety/ban_drop_database.rs | 39 +++++++++ .../src/lint/safety/ban_drop_not_null.rs | 2 + .../src/lint/safety/ban_drop_table.rs | 2 + .../src/lint/safety/ban_truncate_cascade.rs | 51 ++++++++++++ crates/pgt_analyser/src/options.rs | 4 + .../specs/safety/banDropDatabase/basic.sql | 2 + .../safety/banDropDatabase/basic.sql.snap | 16 ++++ .../specs/safety/banTruncateCascade/basic.sql | 2 + .../safety/banTruncateCascade/basic.sql.snap | 16 ++++ .../src/analyser/linter/rules.rs | 83 +++++++++++++------ crates/pgt_diagnostics/src/diagnostic.rs | 3 +- .../src/categories.rs | 2 + crates/pgt_workspace/src/settings.rs | 10 +-- crates/pgt_workspace/src/workspace/server.rs | 51 +++++++----- .../src/workspace/server/annotation.rs | 6 +- docs/rule_sources.md | 2 + docs/rules.md | 2 + docs/rules/ban-drop-column.md | 2 +- docs/rules/ban-drop-database.md | 28 +++++++ docs/rules/ban-drop-not-null.md | 2 +- docs/rules/ban-drop-table.md | 2 +- docs/rules/ban-truncate-cascade.md | 40 +++++++++ docs/schema.json | 22 +++++ justfile | 4 +- .../backend-jsonrpc/src/workspace.ts | 13 ++- xtask/codegen/Cargo.toml | 1 + xtask/codegen/src/generate_configuration.rs | 53 ++++++------ .../codegen/src/generate_new_analyser_rule.rs | 21 ++++- xtask/codegen/src/lib.rs | 5 ++ xtask/codegen/src/main.rs | 3 +- 38 files changed, 435 insertions(+), 98 deletions(-) create mode 100644 crates/pgt_analyser/src/lint/safety/ban_drop_database.rs create mode 100644 
crates/pgt_analyser/src/lint/safety/ban_truncate_cascade.rs create mode 100644 crates/pgt_analyser/tests/specs/safety/banDropDatabase/basic.sql create mode 100644 crates/pgt_analyser/tests/specs/safety/banDropDatabase/basic.sql.snap create mode 100644 crates/pgt_analyser/tests/specs/safety/banTruncateCascade/basic.sql create mode 100644 crates/pgt_analyser/tests/specs/safety/banTruncateCascade/basic.sql.snap create mode 100644 docs/rules/ban-drop-database.md create mode 100644 docs/rules/ban-truncate-cascade.md diff --git a/Cargo.lock b/Cargo.lock index 875d4d9f..41f807d1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5128,6 +5128,7 @@ dependencies = [ "bpaf", "pgt_analyse", "pgt_analyser", + "pgt_diagnostics", "pgt_workspace", "proc-macro2", "pulldown-cmark", diff --git a/crates/pgt_analyse/src/macros.rs b/crates/pgt_analyse/src/macros.rs index d9f70ed3..aa7b25c5 100644 --- a/crates/pgt_analyse/src/macros.rs +++ b/crates/pgt_analyse/src/macros.rs @@ -24,6 +24,7 @@ macro_rules! declare_lint_rule { ( $( #[doc = $doc:literal] )+ $vis:vis $id:ident { version: $version:literal, name: $name:tt, + severity: $severity:expr_2021, $( $key:ident: $value:expr_2021, )* } ) => { @@ -32,6 +33,7 @@ macro_rules! declare_lint_rule { $vis $id { version: $version, name: $name, + severity: $severity, $( $key: $value, )* } ); @@ -53,6 +55,7 @@ macro_rules! declare_rule { ( $( #[doc = $doc:literal] )+ $vis:vis $id:ident { version: $version:literal, name: $name:tt, + severity: $severity:expr_2021, $( $key:ident: $value:expr_2021, )* } ) => { $( #[doc = $doc] )* @@ -61,7 +64,7 @@ macro_rules! declare_rule { impl $crate::RuleMeta for $id { type Group = super::Group; const METADATA: $crate::RuleMetadata = - $crate::RuleMetadata::new($version, $name, concat!( $( $doc, "\n", )* )) $( .$key($value) )*; + $crate::RuleMetadata::new($version, $name, concat!( $( $doc, "\n", )* ), $severity) $( .$key($value) )*; } } } diff --git a/crates/pgt_analyse/src/rule.rs b/crates/pgt_analyse/src/rule.rs index f135705e..fae3cda3 100644 --- a/crates/pgt_analyse/src/rule.rs +++ b/crates/pgt_analyse/src/rule.rs @@ -3,7 +3,7 @@ use pgt_console::{MarkupBuf, markup}; use pgt_diagnostics::advice::CodeSuggestionAdvice; use pgt_diagnostics::{ Advices, Category, Diagnostic, DiagnosticTags, Location, LogCategory, MessageAndDescription, - Visit, + Severity, Visit, }; use pgt_text_size::TextRange; use std::cmp::Ordering; @@ -31,10 +31,17 @@ pub struct RuleMetadata { pub recommended: bool, /// The source URL of the rule pub sources: &'static [RuleSource], + /// The default severity of the rule + pub severity: Severity, } impl RuleMetadata { - pub const fn new(version: &'static str, name: &'static str, docs: &'static str) -> Self { + pub const fn new( + version: &'static str, + name: &'static str, + docs: &'static str, + severity: Severity, + ) -> Self { Self { deprecated: None, version, @@ -42,6 +49,7 @@ impl RuleMetadata { docs, sources: &[], recommended: false, + severity, } } diff --git a/crates/pgt_analyser/CONTRIBUTING.md b/crates/pgt_analyser/CONTRIBUTING.md index 50327d5e..b0929eda 100644 --- a/crates/pgt_analyser/CONTRIBUTING.md +++ b/crates/pgt_analyser/CONTRIBUTING.md @@ -54,9 +54,11 @@ Let's say we want to create a new **lint** rule called `useMyRuleName`, follow t 1. Run the command ```shell - just new-lintrule safety useMyRuleName + just new-lintrule safety useMyRuleName () ``` + Where severity is optional but can be "info", "warn", or "error" (default). + The script will generate a bunch of files inside the `pgt_analyser` crate. 
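Note that the empty parentheses in the command above appear to be a rendering artifact — the optional severity argument goes there. A minimal usage sketch, assuming the `justfile` recipe added later in this patch:

```shell
# scaffold a new safety rule whose default severity is "warn"
# (omit the last argument to fall back to the recipe's default, "error")
just new-lintrule safety useMyRuleName warn
```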
Among the other files, you'll find a file called `use_my_new_rule_name.rs` inside the `pgt_analyser/lib/src/lint/safety` folder. You'll implement your rule in this file. @@ -187,6 +189,7 @@ declare_lint_rule! { pub(crate) ExampleRule { version: "next", name: "myRuleName", + severity: Severity::Error, recommended: false, } } @@ -206,6 +209,7 @@ declare_lint_rule! { pub(crate) ExampleRule { version: "next", name: "myRuleName", + severity: Severity::Error, recommended: false, sources: &[RuleSource::Squawk("ban-drop-column")], } @@ -228,6 +232,7 @@ declare_lint_rule! { pub(crate) ExampleRule { version: "next", name: "myRuleName", + severity: Severity::Error, recommended: false, } } @@ -280,6 +285,7 @@ declare_lint_rule! { version: "next", name: "banDropColumn", recommended: true, + severity: Severity::Error, sources: &[RuleSource::Squawk("ban-drop-column")], } } @@ -351,6 +357,7 @@ declare_lint_rule! { version: "next", name: "banDropColumn", recommended: true, + severity: Severity::Error, deprecated: true, sources: &[RuleSource::Squawk("ban-drop-column")], } diff --git a/crates/pgt_analyser/Cargo.toml b/crates/pgt_analyser/Cargo.toml index bd51c36a..e77aaa4f 100644 --- a/crates/pgt_analyser/Cargo.toml +++ b/crates/pgt_analyser/Cargo.toml @@ -12,10 +12,11 @@ repository.workspace = true version = "0.0.0" [dependencies] -pgt_analyse = { workspace = true } -pgt_console = { workspace = true } -pgt_query_ext = { workspace = true } -serde = { workspace = true } +pgt_analyse = { workspace = true } +pgt_console = { workspace = true } +pgt_diagnostics = { workspace = true } +pgt_query_ext = { workspace = true } +serde = { workspace = true } [dev-dependencies] insta = { version = "1.42.1" } diff --git a/crates/pgt_analyser/src/lint/safety.rs b/crates/pgt_analyser/src/lint/safety.rs index 920326c2..a2b72fce 100644 --- a/crates/pgt_analyser/src/lint/safety.rs +++ b/crates/pgt_analyser/src/lint/safety.rs @@ -3,6 +3,8 @@ use pgt_analyse::declare_lint_group; pub mod adding_required_field; pub mod ban_drop_column; +pub mod ban_drop_database; pub mod ban_drop_not_null; pub mod ban_drop_table; -declare_lint_group! { pub Safety { name : "safety" , rules : [self :: adding_required_field :: AddingRequiredField , self :: ban_drop_column :: BanDropColumn , self :: ban_drop_not_null :: BanDropNotNull , self :: ban_drop_table :: BanDropTable ,] } } +pub mod ban_truncate_cascade; +declare_lint_group! { pub Safety { name : "safety" , rules : [self :: adding_required_field :: AddingRequiredField , self :: ban_drop_column :: BanDropColumn , self :: ban_drop_database :: BanDropDatabase , self :: ban_drop_not_null :: BanDropNotNull , self :: ban_drop_table :: BanDropTable , self :: ban_truncate_cascade :: BanTruncateCascade ,] } } diff --git a/crates/pgt_analyser/src/lint/safety/adding_required_field.rs b/crates/pgt_analyser/src/lint/safety/adding_required_field.rs index d4f72a7f..06901952 100644 --- a/crates/pgt_analyser/src/lint/safety/adding_required_field.rs +++ b/crates/pgt_analyser/src/lint/safety/adding_required_field.rs @@ -1,5 +1,6 @@ use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; use pgt_console::markup; +use pgt_diagnostics::Severity; declare_lint_rule! { /// Adding a new column that is NOT NULL and has no default value to an existing table effectively makes it required. @@ -17,6 +18,7 @@ declare_lint_rule! 
{ pub AddingRequiredField { version: "next", name: "addingRequiredField", + severity: Severity::Error, recommended: false, sources: &[RuleSource::Squawk("adding-required-field")], } diff --git a/crates/pgt_analyser/src/lint/safety/ban_drop_column.rs b/crates/pgt_analyser/src/lint/safety/ban_drop_column.rs index aab5d515..165d4230 100644 --- a/crates/pgt_analyser/src/lint/safety/ban_drop_column.rs +++ b/crates/pgt_analyser/src/lint/safety/ban_drop_column.rs @@ -1,5 +1,6 @@ use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; use pgt_console::markup; +use pgt_diagnostics::Severity; declare_lint_rule! { /// Dropping a column may break existing clients. @@ -19,6 +20,7 @@ declare_lint_rule! { pub BanDropColumn { version: "next", name: "banDropColumn", + severity: Severity::Warning, recommended: true, sources: &[RuleSource::Squawk("ban-drop-column")], } diff --git a/crates/pgt_analyser/src/lint/safety/ban_drop_database.rs b/crates/pgt_analyser/src/lint/safety/ban_drop_database.rs new file mode 100644 index 00000000..11d07da9 --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/ban_drop_database.rs @@ -0,0 +1,39 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; + +declare_lint_rule! { + /// Dropping a database may break existing clients (and everything else, really). + /// + /// Make sure that you really want to drop it. + pub BanDropDatabase { + version: "next", + name: "banDropDatabase", + severity: Severity::Warning, + recommended: false, + sources: &[RuleSource::Squawk("ban-drop-database")], + } +} + +impl Rule for BanDropDatabase { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = vec![]; + + if let pgt_query_ext::NodeEnum::DropdbStmt(_) = &ctx.stmt() { + diagnostics.push( + RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "Dropping a database may break existing clients." + }, + ) + .detail(None, "You probably don't want to drop your database."), + ); + } + + diagnostics + } +} diff --git a/crates/pgt_analyser/src/lint/safety/ban_drop_not_null.rs b/crates/pgt_analyser/src/lint/safety/ban_drop_not_null.rs index eb17f694..fa4c9011 100644 --- a/crates/pgt_analyser/src/lint/safety/ban_drop_not_null.rs +++ b/crates/pgt_analyser/src/lint/safety/ban_drop_not_null.rs @@ -1,5 +1,6 @@ use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; use pgt_console::markup; +use pgt_diagnostics::Severity; declare_lint_rule! { /// Dropping a NOT NULL constraint may break existing clients. @@ -18,6 +19,7 @@ declare_lint_rule! { pub BanDropNotNull { version: "next", name: "banDropNotNull", + severity: Severity::Warning, recommended: true, sources: &[RuleSource::Squawk("ban-drop-not-null")], diff --git a/crates/pgt_analyser/src/lint/safety/ban_drop_table.rs b/crates/pgt_analyser/src/lint/safety/ban_drop_table.rs index 4ce00a60..90c08514 100644 --- a/crates/pgt_analyser/src/lint/safety/ban_drop_table.rs +++ b/crates/pgt_analyser/src/lint/safety/ban_drop_table.rs @@ -1,5 +1,6 @@ use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; use pgt_console::markup; +use pgt_diagnostics::Severity; declare_lint_rule! { /// Dropping a table may break existing clients. @@ -18,6 +19,7 @@ declare_lint_rule! 
{ pub BanDropTable { version: "next", name: "banDropTable", + severity: Severity::Warning, recommended: true, sources: &[RuleSource::Squawk("ban-drop-table")], } diff --git a/crates/pgt_analyser/src/lint/safety/ban_truncate_cascade.rs b/crates/pgt_analyser/src/lint/safety/ban_truncate_cascade.rs new file mode 100644 index 00000000..cef5cd47 --- /dev/null +++ b/crates/pgt_analyser/src/lint/safety/ban_truncate_cascade.rs @@ -0,0 +1,51 @@ +use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; +use pgt_console::markup; +use pgt_diagnostics::Severity; +use pgt_query_ext::protobuf::DropBehavior; + +declare_lint_rule! { + /// Using `TRUNCATE`'s `CASCADE` option will truncate any tables that are also foreign-keyed to the specified tables. + /// + /// So if you had tables with foreign-keys like: + /// + /// `a <- b <- c` + /// + /// and ran: + /// + /// `truncate a cascade;` + /// + /// You'd end up with a, b, & c all being truncated! + /// + /// Instead, you can manually specify the tables you want. + /// + /// `truncate a, b;` + pub BanTruncateCascade { + version: "next", + name: "banTruncateCascade", + severity: Severity::Error, + recommended: false, + sources: &[RuleSource::Squawk("ban-truncate-cascade")], + } +} + +impl Rule for BanTruncateCascade { + type Options = (); + + fn run(ctx: &RuleContext) -> Vec { + let mut diagnostics = Vec::new(); + + if let pgt_query_ext::NodeEnum::TruncateStmt(stmt) = &ctx.stmt() { + if stmt.behavior() == DropBehavior::DropCascade { + diagnostics.push(RuleDiagnostic::new( + rule_category!(), + None, + markup! { + "The `CASCADE` option will also truncate any tables that are foreign-keyed to the specified tables." + }, + ).detail(None, "Do not use the `CASCADE` option. Instead, specify manually what you want: `TRUNCATE a, b;`.")); + } + } + + diagnostics + } +} diff --git a/crates/pgt_analyser/src/options.rs b/crates/pgt_analyser/src/options.rs index d78020f8..d893b84f 100644 --- a/crates/pgt_analyser/src/options.rs +++ b/crates/pgt_analyser/src/options.rs @@ -5,6 +5,10 @@ pub type AddingRequiredField = ::Options; pub type BanDropColumn = ::Options; +pub type BanDropDatabase = + ::Options; pub type BanDropNotNull = ::Options; pub type BanDropTable = ::Options; +pub type BanTruncateCascade = + ::Options; diff --git a/crates/pgt_analyser/tests/specs/safety/banDropDatabase/basic.sql b/crates/pgt_analyser/tests/specs/safety/banDropDatabase/basic.sql new file mode 100644 index 00000000..0dc01652 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/banDropDatabase/basic.sql @@ -0,0 +1,2 @@ +-- expect_only_lint/safety/banDropDatabase +drop database all_users; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/banDropDatabase/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/banDropDatabase/basic.sql.snap new file mode 100644 index 00000000..90e35820 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/banDropDatabase/basic.sql.snap @@ -0,0 +1,16 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +--- +# Input +``` +-- expect_only_lint/safety/banDropDatabase +drop database all_users; +``` + +# Diagnostics +lint/safety/banDropDatabase ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × Dropping a database may break existing clients. + + i You probably don't want to drop your database. 
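To illustrate how the new per-rule default interacts with user configuration (a sketch; the config shape follows the `docs/schema.json` changes further down in this patch): with no entry in the linter config, `get_severity_from_code` now falls back to the severity declared on the rule itself — so `banDropDatabase` surfaces as a warning by default — while an explicitly configured level still wins:

```json
{
  "linter": {
    "rules": {
      "safety": {
        "banDropDatabase": "error"
      }
    }
  }
}
```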
diff --git a/crates/pgt_analyser/tests/specs/safety/banTruncateCascade/basic.sql b/crates/pgt_analyser/tests/specs/safety/banTruncateCascade/basic.sql new file mode 100644 index 00000000..d17fed13 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/banTruncateCascade/basic.sql @@ -0,0 +1,2 @@ +-- expect_only_lint/safety/banTruncateCascade +truncate a cascade; \ No newline at end of file diff --git a/crates/pgt_analyser/tests/specs/safety/banTruncateCascade/basic.sql.snap b/crates/pgt_analyser/tests/specs/safety/banTruncateCascade/basic.sql.snap new file mode 100644 index 00000000..d214b978 --- /dev/null +++ b/crates/pgt_analyser/tests/specs/safety/banTruncateCascade/basic.sql.snap @@ -0,0 +1,16 @@ +--- +source: crates/pgt_analyser/tests/rules_tests.rs +expression: snapshot +--- +# Input +``` +-- expect_only_lint/safety/banTruncateCascade +truncate a cascade; +``` + +# Diagnostics +lint/safety/banTruncateCascade ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + × The `CASCADE` option will also truncate any tables that are foreign-keyed to the specified tables. + + i Do not use the `CASCADE` option. Instead, specify manually what you want: `TRUNCATE a, b;`. diff --git a/crates/pgt_configuration/src/analyser/linter/rules.rs b/crates/pgt_configuration/src/analyser/linter/rules.rs index 14d796bf..8db5a0ab 100644 --- a/crates/pgt_configuration/src/analyser/linter/rules.rs +++ b/crates/pgt_configuration/src/analyser/linter/rules.rs @@ -65,10 +65,9 @@ impl Rules { } #[doc = r" Given a category coming from [Diagnostic](pgt_diagnostics::Diagnostic), this function returns"] #[doc = r" the [Severity](pgt_diagnostics::Severity) associated to the rule, if the configuration changed it."] - #[doc = r" If the severity is off or not set, then the function returns the default severity of the rule:"] - #[doc = r" [Severity::Error] for recommended rules and [Severity::Warning] for other rules."] - #[doc = r""] - #[doc = r" If not, the function returns [None]."] + #[doc = r" If the severity is off or not set, then the function returns the default severity of the rule,"] + #[doc = r" which is configured at the rule definition."] + #[doc = r" The function can return `None` if the rule is not properly configured."] pub fn get_severity_from_code(&self, category: &Category) -> Option { let mut split_code = category.name().split('/'); let _lint = split_code.next(); @@ -82,16 +81,7 @@ impl Rules { .as_ref() .and_then(|group| group.get_rule_configuration(rule_name)) .filter(|(level, _)| !matches!(level, RulePlainConfiguration::Off)) - .map_or_else( - || { - if Safety::is_recommended_rule(rule_name) { - Severity::Error - } else { - Severity::Warning - } - }, - |(level, _)| level.into(), - ), + .map_or_else(|| Safety::severity(rule_name), |(level, _)| level.into()), }; Some(severity) } @@ -150,33 +140,41 @@ pub struct Safety { #[doc = "Dropping a column may break existing clients."] #[serde(skip_serializing_if = "Option::is_none")] pub ban_drop_column: Option>, + #[doc = "Dropping a database may break existing clients (and everything else, really)."] + #[serde(skip_serializing_if = "Option::is_none")] + pub ban_drop_database: Option>, #[doc = "Dropping a NOT NULL constraint may break existing clients."] #[serde(skip_serializing_if = "Option::is_none")] pub ban_drop_not_null: Option>, #[doc = "Dropping a table may break existing clients."] #[serde(skip_serializing_if = "Option::is_none")] pub ban_drop_table: Option>, + #[doc = "Using TRUNCATE's CASCADE option will truncate any tables that 
are also foreign-keyed to the specified tables."] + #[serde(skip_serializing_if = "Option::is_none")] + pub ban_truncate_cascade: Option>, } impl Safety { const GROUP_NAME: &'static str = "safety"; pub(crate) const GROUP_RULES: &'static [&'static str] = &[ "addingRequiredField", "banDropColumn", + "banDropDatabase", "banDropNotNull", "banDropTable", + "banTruncateCascade", ]; - const RECOMMENDED_RULES: &'static [&'static str] = - &["banDropColumn", "banDropNotNull", "banDropTable"]; const RECOMMENDED_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), - RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2]), RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4]), ]; const ALL_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2]), RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[5]), ]; #[doc = r" Retrieves the recommended rules"] pub(crate) fn is_recommended_true(&self) -> bool { @@ -203,16 +201,26 @@ impl Safety { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1])); } } - if let Some(rule) = self.ban_drop_not_null.as_ref() { + if let Some(rule) = self.ban_drop_database.as_ref() { if rule.is_enabled() { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2])); } } - if let Some(rule) = self.ban_drop_table.as_ref() { + if let Some(rule) = self.ban_drop_not_null.as_ref() { if rule.is_enabled() { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3])); } } + if let Some(rule) = self.ban_drop_table.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4])); + } + } + if let Some(rule) = self.ban_truncate_cascade.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[5])); + } + } index_set } pub(crate) fn get_disabled_rules(&self) -> FxHashSet> { @@ -227,26 +235,32 @@ impl Safety { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1])); } } - if let Some(rule) = self.ban_drop_not_null.as_ref() { + if let Some(rule) = self.ban_drop_database.as_ref() { if rule.is_disabled() { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2])); } } - if let Some(rule) = self.ban_drop_table.as_ref() { + if let Some(rule) = self.ban_drop_not_null.as_ref() { if rule.is_disabled() { index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3])); } } + if let Some(rule) = self.ban_drop_table.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4])); + } + } + if let Some(rule) = self.ban_truncate_cascade.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[5])); + } + } index_set } #[doc = r" Checks if, given a rule name, matches one of the rules contained in this category"] pub(crate) fn has_rule(rule_name: &str) -> Option<&'static str> { Some(Self::GROUP_RULES[Self::GROUP_RULES.binary_search(&rule_name).ok()?]) } - #[doc = r" Checks if, given a rule name, it is marked as recommended"] - pub(crate) fn is_recommended_rule(rule_name: &str) -> bool { - 
Self::RECOMMENDED_RULES.contains(&rule_name) - } pub(crate) fn recommended_rules_as_filters() -> &'static [RuleFilter<'static>] { Self::RECOMMENDED_RULES_AS_FILTERS } @@ -268,6 +282,17 @@ impl Safety { enabled_rules.extend(Self::recommended_rules_as_filters()); } } + pub(crate) fn severity(rule_name: &str) -> Severity { + match rule_name { + "addingRequiredField" => Severity::Error, + "banDropColumn" => Severity::Warning, + "banDropDatabase" => Severity::Warning, + "banDropNotNull" => Severity::Warning, + "banDropTable" => Severity::Warning, + "banTruncateCascade" => Severity::Error, + _ => unreachable!(), + } + } pub(crate) fn get_rule_configuration( &self, rule_name: &str, @@ -281,6 +306,10 @@ impl Safety { .ban_drop_column .as_ref() .map(|conf| (conf.level(), conf.get_options())), + "banDropDatabase" => self + .ban_drop_database + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), "banDropNotNull" => self .ban_drop_not_null .as_ref() @@ -289,6 +318,10 @@ impl Safety { .ban_drop_table .as_ref() .map(|conf| (conf.level(), conf.get_options())), + "banTruncateCascade" => self + .ban_truncate_cascade + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), _ => None, } } diff --git a/crates/pgt_diagnostics/src/diagnostic.rs b/crates/pgt_diagnostics/src/diagnostic.rs index 9a62ede1..3f365aed 100644 --- a/crates/pgt_diagnostics/src/diagnostic.rs +++ b/crates/pgt_diagnostics/src/diagnostic.rs @@ -6,6 +6,7 @@ use std::{ str::FromStr, }; +use bpaf::Bpaf; use enumflags2::{BitFlags, bitflags, make_bitflags}; use serde::{Deserialize, Serialize}; @@ -115,7 +116,7 @@ pub trait Diagnostic: Debug { } #[derive( - Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Default, + Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Default, Bpaf, )] #[serde(rename_all = "camelCase")] #[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] diff --git a/crates/pgt_diagnostics_categories/src/categories.rs b/crates/pgt_diagnostics_categories/src/categories.rs index 8a91cfb5..d5ce48d7 100644 --- a/crates/pgt_diagnostics_categories/src/categories.rs +++ b/crates/pgt_diagnostics_categories/src/categories.rs @@ -15,8 +15,10 @@ define_categories! 
{ "lint/safety/addingRequiredField": "https://pglt.dev/linter/rules/adding-required-field", "lint/safety/banDropColumn": "https://pglt.dev/linter/rules/ban-drop-column", + "lint/safety/banDropDatabase": "https://pgtools.dev/linter/rules/ban-drop-database", "lint/safety/banDropNotNull": "https://pglt.dev/linter/rules/ban-drop-not-null", "lint/safety/banDropTable": "https://pglt.dev/linter/rules/ban-drop-table", + "lint/safety/banTruncateCascade": "https://pgtools.dev/linter/rules/ban-truncate-cascade", // end lint rules ; // General categories diff --git a/crates/pgt_workspace/src/settings.rs b/crates/pgt_workspace/src/settings.rs index ac55d8a1..40db2c1e 100644 --- a/crates/pgt_workspace/src/settings.rs +++ b/crates/pgt_workspace/src/settings.rs @@ -275,12 +275,10 @@ impl Settings { &self, code: &Category, ) -> Option { - let rules = self.linter.rules.as_ref(); - if let Some(rules) = rules { - rules.get_severity_from_code(code) - } else { - None - } + self.linter + .rules + .as_ref() + .and_then(|r| r.get_severity_from_code(code)) } } diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs index 67a3713c..d0c8d13a 100644 --- a/crates/pgt_workspace/src/workspace/server.rs +++ b/crates/pgt_workspace/src/workspace/server.rs @@ -427,34 +427,20 @@ impl Workspace for WorkspaceServer { } }; - // create analyser for this run - // first, collect enabled and disabled rules from the workspace settings - let (enabled_rules, disabled_rules) = AnalyserVisitorBuilder::new(settings) - .with_linter_rules(¶ms.only, ¶ms.skip) - .finish(); - // then, build a map that contains all options - let options = AnalyserOptions { - rules: to_analyser_rules(settings), - }; - // next, build the analysis filter which will be used to match rules - let filter = AnalysisFilter { - categories: params.categories, - enabled_rules: Some(enabled_rules.as_slice()), - disabled_rules: &disabled_rules, - }; - // finally, create the analyser that will be used during this run - let analyser = Analyser::new(AnalyserConfig { - options: &options, - filter, - }); - let parser = self .parsed_documents .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; + /* + * The statements in the document might already have associated diagnostics, + * e.g. 
if they contain syntax errors that surfaced while parsing/splitting the statements + */ let mut diagnostics: Vec = parser.document_diagnostics().to_vec(); + /* + * Type-checking against database connection + */ if let Some(pool) = self.get_current_connection() { let path_clone = params.path.clone(); let schema_cache = self.schema_cache.load(pool.clone())?; @@ -518,6 +504,29 @@ impl Workspace for WorkspaceServer { } } + /* + * Below, we'll apply our static linting rules against the statements, + * considering the user's settings + */ + let (enabled_rules, disabled_rules) = AnalyserVisitorBuilder::new(settings) + .with_linter_rules(¶ms.only, ¶ms.skip) + .finish(); + + let options = AnalyserOptions { + rules: to_analyser_rules(settings), + }; + + let filter = AnalysisFilter { + categories: params.categories, + enabled_rules: Some(enabled_rules.as_slice()), + disabled_rules: &disabled_rules, + }; + + let analyser = Analyser::new(AnalyserConfig { + options: &options, + filter, + }); + diagnostics.extend(parser.iter(SyncDiagnosticsMapper).flat_map( |(_id, range, ast, diag)| { let mut errors: Vec = vec![]; diff --git a/crates/pgt_workspace/src/workspace/server/annotation.rs b/crates/pgt_workspace/src/workspace/server/annotation.rs index 321dd3ac..2fdf32eb 100644 --- a/crates/pgt_workspace/src/workspace/server/annotation.rs +++ b/crates/pgt_workspace/src/workspace/server/annotation.rs @@ -22,10 +22,10 @@ impl AnnotationStore { #[allow(unused)] pub fn get_annotations( &self, - statement: &StatementId, + statement_id: &StatementId, content: &str, ) -> Option> { - if let Some(existing) = self.db.get(statement).map(|x| x.clone()) { + if let Some(existing) = self.db.get(statement_id).map(|x| x.clone()) { return existing; } @@ -43,7 +43,7 @@ impl AnnotationStore { }) }); - self.db.insert(statement.clone(), None); + self.db.insert(statement_id.clone(), None); annotations } diff --git a/docs/rule_sources.md b/docs/rule_sources.md index b5c1f49f..8c0f085d 100644 --- a/docs/rule_sources.md +++ b/docs/rule_sources.md @@ -5,5 +5,7 @@ | ---- | ---- | | [adding-required-field](https://squawkhq.com/docs/adding-required-field) |[addingRequiredField](./rules/adding-required-field) | | [ban-drop-column](https://squawkhq.com/docs/ban-drop-column) |[banDropColumn](./rules/ban-drop-column) | +| [ban-drop-database](https://squawkhq.com/docs/ban-drop-database) |[banDropDatabase](./rules/ban-drop-database) | | [ban-drop-not-null](https://squawkhq.com/docs/ban-drop-not-null) |[banDropNotNull](./rules/ban-drop-not-null) | | [ban-drop-table](https://squawkhq.com/docs/ban-drop-table) |[banDropTable](./rules/ban-drop-table) | +| [ban-truncate-cascade](https://squawkhq.com/docs/ban-truncate-cascade) |[banTruncateCascade](./rules/ban-truncate-cascade) | diff --git a/docs/rules.md b/docs/rules.md index 1f674af6..19e110c6 100644 --- a/docs/rules.md +++ b/docs/rules.md @@ -14,8 +14,10 @@ Rules that detect potential safety issues in your code. | --- | --- | --- | | [addingRequiredField](/rules/adding-required-field) | Adding a new column that is NOT NULL and has no default value to an existing table effectively makes it required. | | | [banDropColumn](/rules/ban-drop-column) | Dropping a column may break existing clients. | ✅ | +| [banDropDatabase](/rules/ban-drop-database) | Dropping a database may break existing clients (and everything else, really). | | | [banDropNotNull](/rules/ban-drop-not-null) | Dropping a NOT NULL constraint may break existing clients. 
| ✅ | | [banDropTable](/rules/ban-drop-table) | Dropping a table may break existing clients. | ✅ | +| [banTruncateCascade](/rules/ban-truncate-cascade) | Using `TRUNCATE`'s `CASCADE` option will truncate any tables that are also foreign-keyed to the specified tables. | | [//]: # (END RULES_INDEX) diff --git a/docs/rules/ban-drop-column.md b/docs/rules/ban-drop-column.md index 49a0d054..0c46d40a 100644 --- a/docs/rules/ban-drop-column.md +++ b/docs/rules/ban-drop-column.md @@ -27,7 +27,7 @@ alter table test drop column id; ```sh code-block.sql lint/safety/banDropColumn ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - × Dropping a column may break existing clients. + ! Dropping a column may break existing clients. i You can leave the column as nullable or delete the column once queries no longer select or modify the column. diff --git a/docs/rules/ban-drop-database.md b/docs/rules/ban-drop-database.md new file mode 100644 index 00000000..8bbcd396 --- /dev/null +++ b/docs/rules/ban-drop-database.md @@ -0,0 +1,28 @@ +# banDropDatabase +**Diagnostic Category: `lint/safety/banDropDatabase`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/ban-drop-database + +## Description +Dropping a database may break existing clients (and everything else, really). + +Make sure that you really want to drop it. + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "banDropDatabase": "error" + } + } + } +} + +``` diff --git a/docs/rules/ban-drop-not-null.md b/docs/rules/ban-drop-not-null.md index ccf49f95..b860c45c 100644 --- a/docs/rules/ban-drop-not-null.md +++ b/docs/rules/ban-drop-not-null.md @@ -27,7 +27,7 @@ alter table users alter column email drop not null; ```sh code-block.sql lint/safety/banDropNotNull ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - × Dropping a NOT NULL constraint may break existing clients. + ! Dropping a NOT NULL constraint may break existing clients. i Consider using a marker value that represents NULL. Alternatively, create a new table allowing NULL values, copy the data from the old table, and create a view that filters NULL values. diff --git a/docs/rules/ban-drop-table.md b/docs/rules/ban-drop-table.md index f2f34156..4b81755f 100644 --- a/docs/rules/ban-drop-table.md +++ b/docs/rules/ban-drop-table.md @@ -28,7 +28,7 @@ drop table some_table; ```sh code-block.sql lint/safety/banDropTable ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - × Dropping a table may break existing clients. + ! Dropping a table may break existing clients. i Update your application code to no longer read or write the table, and only then delete the table. Be sure to create a backup. diff --git a/docs/rules/ban-truncate-cascade.md b/docs/rules/ban-truncate-cascade.md new file mode 100644 index 00000000..1a4502d2 --- /dev/null +++ b/docs/rules/ban-truncate-cascade.md @@ -0,0 +1,40 @@ +# banTruncateCascade +**Diagnostic Category: `lint/safety/banTruncateCascade`** + +**Since**: `vnext` + + +**Sources**: +- Inspired from: squawk/ban-truncate-cascade + +## Description +Using `TRUNCATE`'s `CASCADE` option will truncate any tables that are also foreign-keyed to the specified tables. + +So if you had tables with foreign-keys like: + +`a <- b <- c` + +and ran: + +`truncate a cascade;` + +You'd end up with a, b, & c all being truncated! + +Instead, you can manually specify the tables you want. 
+ +`truncate a, b;` + +## How to configure +```json + +{ + "linter": { + "rules": { + "safety": { + "banTruncateCascade": "error" + } + } + } +} + +``` diff --git a/docs/schema.json b/docs/schema.json index 8c478d0a..1c56618e 100644 --- a/docs/schema.json +++ b/docs/schema.json @@ -349,6 +349,17 @@ } ] }, + "banDropDatabase": { + "description": "Dropping a database may break existing clients (and everything else, really).", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, "banDropNotNull": { "description": "Dropping a NOT NULL constraint may break existing clients.", "anyOf": [ @@ -371,6 +382,17 @@ } ] }, + "banTruncateCascade": { + "description": "Using TRUNCATE's CASCADE option will truncate any tables that are also foreign-keyed to the specified tables.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, "recommended": { "description": "It enables the recommended rules for this group", "type": [ diff --git a/justfile b/justfile index c868a122..7e53a8a6 100644 --- a/justfile +++ b/justfile @@ -31,8 +31,8 @@ gen-lint: just format # Creates a new lint rule in the given path, with the given name. Name has to be camel case. Group should be lowercase. -new-lintrule group rulename: - cargo run -p xtask_codegen -- new-lintrule --category=lint --name={{rulename}} --group={{group}} +new-lintrule group rulename severity="error": + cargo run -p xtask_codegen -- new-lintrule --category=lint --name={{rulename}} --group={{group}} --severity={{severity}} just gen-lint # Format Rust, JS and TOML files diff --git a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts index a81a1ca9..72b77bc1 100644 --- a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts +++ b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts @@ -65,8 +65,10 @@ export interface Advices { export type Category = | "lint/safety/addingRequiredField" | "lint/safety/banDropColumn" + | "lint/safety/banDropDatabase" | "lint/safety/banDropNotNull" | "lint/safety/banDropTable" + | "lint/safety/banTruncateCascade" | "stdin" | "check" | "configuration" @@ -217,7 +219,8 @@ export type CompletionItemKind = | "function" | "column" | "schema" - | "policy"; + | "policy" + | "role"; export interface UpdateSettingsParams { configuration: PartialConfiguration; gitignore_matches: string[]; @@ -391,6 +394,10 @@ export interface Safety { * Dropping a column may break existing clients. */ banDropColumn?: RuleConfiguration_for_Null; + /** + * Dropping a database may break existing clients (and everything else, really). + */ + banDropDatabase?: RuleConfiguration_for_Null; /** * Dropping a NOT NULL constraint may break existing clients. */ @@ -399,6 +406,10 @@ export interface Safety { * Dropping a table may break existing clients. */ banDropTable?: RuleConfiguration_for_Null; + /** + * Using TRUNCATE's CASCADE option will truncate any tables that are also foreign-keyed to the specified tables. 
+ */ + banTruncateCascade?: RuleConfiguration_for_Null; /** * It enables the recommended rules for this group */ diff --git a/xtask/codegen/Cargo.toml b/xtask/codegen/Cargo.toml index b5497b2c..758a3212 100644 --- a/xtask/codegen/Cargo.toml +++ b/xtask/codegen/Cargo.toml @@ -14,6 +14,7 @@ biome_string_case = { workspace = true } bpaf = { workspace = true, features = ["derive"] } pgt_analyse = { workspace = true } pgt_analyser = { workspace = true } +pgt_diagnostics = { workspace = true } pgt_workspace = { workspace = true, features = ["schema"] } proc-macro2 = { workspace = true, features = ["span-locations"] } pulldown-cmark = { version = "0.12.2" } diff --git a/xtask/codegen/src/generate_configuration.rs b/xtask/codegen/src/generate_configuration.rs index 91ae304c..3799f19b 100644 --- a/xtask/codegen/src/generate_configuration.rs +++ b/xtask/codegen/src/generate_configuration.rs @@ -1,6 +1,7 @@ use crate::{to_capitalized, update}; use biome_string_case::Case; use pgt_analyse::{GroupCategory, RegistryVisitor, Rule, RuleCategory, RuleGroup, RuleMetadata}; +use pgt_diagnostics::Severity; use proc_macro2::{Ident, Literal, Span, TokenStream}; use pulldown_cmark::{Event, Parser, Tag, TagEnd}; use quote::quote; @@ -135,10 +136,9 @@ fn generate_for_groups( /// Given a category coming from [Diagnostic](pgt_diagnostics::Diagnostic), this function returns /// the [Severity](pgt_diagnostics::Severity) associated to the rule, if the configuration changed it. - /// If the severity is off or not set, then the function returns the default severity of the rule: - /// [Severity::Error] for recommended rules and [Severity::Warning] for other rules. - /// - /// If not, the function returns [None]. + /// If the severity is off or not set, then the function returns the default severity of the rule, + /// which is configured at the rule definition. + /// The function can return `None` if the rule is not properly configured. pub fn get_severity_from_code(&self, category: &Category) -> Option { let mut split_code = category.name().split('/'); @@ -155,13 +155,10 @@ fn generate_for_groups( .as_ref() .and_then(|group| group.get_rule_configuration(rule_name)) .filter(|(level, _)| !matches!(level, RulePlainConfiguration::Off)) - .map_or_else(|| { - if #group_pascal_idents::is_recommended_rule(rule_name) { - Severity::Error - } else { - Severity::Warning - } - }, |(level, _)| level.into()), + .map_or_else( + || #group_pascal_idents::severity(rule_name), + |(level, _)| level.into() + ), )* }; Some(severity) @@ -453,7 +450,6 @@ fn generate_group_struct( rules: &BTreeMap<&'static str, RuleMetadata>, kind: RuleCategory, ) -> TokenStream { - let mut lines_recommended_rule = Vec::new(); let mut lines_recommended_rule_as_filter = Vec::new(); let mut lines_all_rule_as_filter = Vec::new(); let mut lines_rule = Vec::new(); @@ -461,6 +457,7 @@ fn generate_group_struct( let mut rule_enabled_check_line = Vec::new(); let mut rule_disabled_check_line = Vec::new(); let mut get_rule_configuration_line = Vec::new(); + let mut get_severity_lines = Vec::new(); for (index, (rule, metadata)) in rules.iter().enumerate() { let summary = { @@ -522,10 +519,6 @@ fn generate_group_struct( lines_recommended_rule_as_filter.push(quote! { RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[#rule_position]) }); - - lines_recommended_rule.push(quote! { - #rule - }); } lines_all_rule_as_filter.push(quote! 
{ RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[#rule_position]) @@ -567,6 +560,18 @@ fn generate_group_struct( get_rule_configuration_line.push(quote! { #rule => self.#rule_identifier.as_ref().map(|conf| (conf.level(), conf.get_options())) }); + + let severity = match metadata.severity { + Severity::Hint => quote! { Severity::Hint }, + Severity::Information => quote! { Severity::Information }, + Severity::Warning => quote! { Severity::Warning }, + Severity::Error => quote! { Severity::Error }, + Severity::Fatal => quote! { Severity::Fatal }, + }; + + get_severity_lines.push(quote! { + #rule => #severity + }) } let group_pascal_ident = Ident::new(&to_capitalized(group), Span::call_site()); @@ -648,10 +653,6 @@ fn generate_group_struct( #( #lines_rule ),* ]; - const RECOMMENDED_RULES: &'static [&'static str] = &[ - #( #lines_recommended_rule ),* - ]; - const RECOMMENDED_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ #( #lines_recommended_rule_as_filter ),* ]; @@ -695,11 +696,6 @@ fn generate_group_struct( Some(Self::GROUP_RULES[Self::GROUP_RULES.binary_search(&rule_name).ok()?]) } - /// Checks if, given a rule name, it is marked as recommended - pub(crate) fn is_recommended_rule(rule_name: &str) -> bool { - Self::RECOMMENDED_RULES.contains(&rule_name) - } - pub(crate) fn recommended_rules_as_filters() -> &'static [RuleFilter<'static>] { Self::RECOMMENDED_RULES_AS_FILTERS } @@ -725,6 +721,13 @@ fn generate_group_struct( } } + pub(crate) fn severity(rule_name: &str) -> Severity { + match rule_name { + #( #get_severity_lines ),*, + _ => unreachable!() + } + } + #get_configuration_function } } diff --git a/xtask/codegen/src/generate_new_analyser_rule.rs b/xtask/codegen/src/generate_new_analyser_rule.rs index 6fecdff7..fc225712 100644 --- a/xtask/codegen/src/generate_new_analyser_rule.rs +++ b/xtask/codegen/src/generate_new_analyser_rule.rs @@ -1,5 +1,6 @@ use biome_string_case::Case; use bpaf::Bpaf; +use pgt_diagnostics::Severity; use std::str::FromStr; use xtask::project_root; @@ -24,15 +25,26 @@ fn generate_rule_template( category: &Category, rule_name_upper_camel: &str, rule_name_lower_camel: &str, + severity: Severity, ) -> String { let macro_name = match category { Category::Lint => "declare_lint_rule", }; + + let severity_code = match severity { + Severity::Hint => "Severity::Hint", + Severity::Information => "Severity::Information", + Severity::Warning => "Severity::Warning", + Severity::Error => "Severity::Error", + Severity::Fatal => "Severity::Fatal", + }; + format!( r#"use pgt_analyse::{{ context::RuleContext, {macro_name}, Rule, RuleDiagnostic }}; use pgt_console::markup; +use pgt_diagnostics::Severity; {macro_name}! {{ /// Succinct description of the rule. 
@@ -58,6 +70,7 @@ use pgt_console::markup; pub {rule_name_upper_camel} {{ version: "next", name: "{rule_name_lower_camel}", + severity: {severity_code}, recommended: false, }} }} @@ -77,7 +90,12 @@ fn gen_sql(category_name: &str) -> String { format!("-- expect_only_{category_name}\n-- select 1;") } -pub fn generate_new_analyser_rule(category: Category, rule_name: &str, group: &str) { +pub fn generate_new_analyser_rule( + category: Category, + rule_name: &str, + group: &str, + severity: Severity, +) { let rule_name_camel = Case::Camel.convert(rule_name); let crate_folder = project_root().join("crates/pgt_analyser"); let rule_folder = match &category { @@ -92,6 +110,7 @@ pub fn generate_new_analyser_rule(category: Category, rule_name: &str, group: &s &category, Case::Pascal.convert(rule_name).as_str(), rule_name_camel.as_str(), + severity, ); let file_name = format!( "{}/{}.rs", diff --git a/xtask/codegen/src/lib.rs b/xtask/codegen/src/lib.rs index 61ae5e4f..dc6f81a0 100644 --- a/xtask/codegen/src/lib.rs +++ b/xtask/codegen/src/lib.rs @@ -13,6 +13,7 @@ pub use self::generate_crate::generate_crate; pub use self::generate_new_analyser_rule::generate_new_analyser_rule; use bpaf::Bpaf; use generate_new_analyser_rule::Category; +use pgt_diagnostics::Severity; use std::path::Path; use xtask::{glue::fs2, Mode, Result}; @@ -84,5 +85,9 @@ pub enum TaskCommand { /// Group of the rule #[bpaf(long("group"))] group: String, + + /// Severity of the rule + #[bpaf(long("severity"), fallback(Severity::Error))] + severity: Severity, }, } diff --git a/xtask/codegen/src/main.rs b/xtask/codegen/src/main.rs index 8e0e6cd8..4ff33c21 100644 --- a/xtask/codegen/src/main.rs +++ b/xtask/codegen/src/main.rs @@ -21,8 +21,9 @@ fn main() -> Result<()> { name, category, group, + severity, } => { - generate_new_analyser_rule(category, &name, &group); + generate_new_analyser_rule(category, &name, &group, severity); } TaskCommand::Configuration => { generate_rules_configuration(Overwrite)?; From 1535b76d655f267323cc0486267deeea5d2a5c37 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Mon, 23 Jun 2025 11:31:26 +0200 Subject: [PATCH 080/114] fix(splitter): do update in INSERT INTO (#436) --- crates/pgt_statement_splitter/src/parser/common.rs | 2 ++ .../tests/data/on_conflict_do_update__1.sql | 7 +++++++ 2 files changed, 9 insertions(+) create mode 100644 crates/pgt_statement_splitter/tests/data/on_conflict_do_update__1.sql diff --git a/crates/pgt_statement_splitter/src/parser/common.rs b/crates/pgt_statement_splitter/src/parser/common.rs index 2498c04a..4c4ab986 100644 --- a/crates/pgt_statement_splitter/src/parser/common.rs +++ b/crates/pgt_statement_splitter/src/parser/common.rs @@ -248,6 +248,8 @@ pub(crate) fn unknown(p: &mut Parser, exclude: &[SyntaxKind]) { // for grant SyntaxKind::Grant, SyntaxKind::Ascii44, + // Do update in INSERT stmt + SyntaxKind::Do, ] .iter() .all(|x| Some(x) != prev.as_ref()) diff --git a/crates/pgt_statement_splitter/tests/data/on_conflict_do_update__1.sql b/crates/pgt_statement_splitter/tests/data/on_conflict_do_update__1.sql new file mode 100644 index 00000000..0b62366b --- /dev/null +++ b/crates/pgt_statement_splitter/tests/data/on_conflict_do_update__1.sql @@ -0,0 +1,7 @@ +INSERT INTO foo.bar ( + pk +) VALUES ( + $1 +) ON CONFLICT (pk) DO UPDATE SET + date_deleted = DEFAULT, + date_created = DEFAULT; \ No newline at end of file From 2e21f2e987222b66f891463339614c1b597f6240 Mon Sep 17 00:00:00 2001 From: Julian Domke 
<68325451+juleswritescode@users.noreply.github.com> Date: Fri, 4 Jul 2025 16:26:56 +0200 Subject: [PATCH 081/114] fix(analyser): recognize disabled rules (#438) --- .../src/analyser/linter/rules.rs | 8 ++++ .../src/workspace/server/analyser.rs | 48 +++++++++++++++++++ xtask/codegen/src/generate_configuration.rs | 22 +++++++++ 3 files changed, 78 insertions(+) diff --git a/crates/pgt_configuration/src/analyser/linter/rules.rs b/crates/pgt_configuration/src/analyser/linter/rules.rs index 8db5a0ab..d45199b0 100644 --- a/crates/pgt_configuration/src/analyser/linter/rules.rs +++ b/crates/pgt_configuration/src/analyser/linter/rules.rs @@ -121,6 +121,14 @@ impl Rules { } enabled_rules.difference(&disabled_rules).copied().collect() } + #[doc = r" It returns the disabled rules by configuration."] + pub fn as_disabled_rules(&self) -> FxHashSet> { + let mut disabled_rules = FxHashSet::default(); + if let Some(group) = self.safety.as_ref() { + disabled_rules.extend(&group.get_disabled_rules()); + } + disabled_rules + } } #[derive(Clone, Debug, Default, Deserialize, Eq, Merge, PartialEq, Serialize)] #[cfg_attr(feature = "schema", derive(JsonSchema))] diff --git a/crates/pgt_workspace/src/workspace/server/analyser.rs b/crates/pgt_workspace/src/workspace/server/analyser.rs index d4b08ba1..4defc79e 100644 --- a/crates/pgt_workspace/src/workspace/server/analyser.rs +++ b/crates/pgt_workspace/src/workspace/server/analyser.rs @@ -68,6 +68,7 @@ impl<'a, 'b> LintVisitor<'a, 'b> { fn finish(mut self) -> (FxHashSet>, FxHashSet>) { let has_only_filter = !self.only.is_empty(); + if !has_only_filter { let enabled_rules = self .settings @@ -75,7 +76,15 @@ impl<'a, 'b> LintVisitor<'a, 'b> { .map(|rules| rules.as_enabled_rules()) .unwrap_or_default(); self.enabled_rules.extend(enabled_rules); + + let disabled_rules = self + .settings + .as_linter_rules() + .map(|rules| rules.as_disabled_rules()) + .unwrap_or_default(); + self.disabled_rules.extend(disabled_rules); } + (self.enabled_rules, self.disabled_rules) } @@ -127,3 +136,42 @@ impl RegistryVisitor for LintVisitor<'_, '_> { self.push_rule::() } } + +#[cfg(test)] +mod tests { + use pgt_analyse::RuleFilter; + use pgt_configuration::{RuleConfiguration, Rules, analyser::Safety}; + + use crate::{ + settings::{LinterSettings, Settings}, + workspace::server::analyser::AnalyserVisitorBuilder, + }; + + #[test] + fn recognizes_disabled_rules() { + let settings = Settings { + linter: LinterSettings { + rules: Some(Rules { + safety: Some(Safety { + ban_drop_column: Some(RuleConfiguration::Plain( + pgt_configuration::RulePlainConfiguration::Off, + )), + ..Default::default() + }), + ..Default::default() + }), + ..Default::default() + }, + ..Default::default() + }; + + let (_, disabled_rules) = AnalyserVisitorBuilder::new(&settings) + .with_linter_rules(&[], &[]) + .finish(); + + assert_eq!( + disabled_rules, + vec![RuleFilter::Rule("safety", "banDropColumn")] + ) + } +} diff --git a/xtask/codegen/src/generate_configuration.rs b/xtask/codegen/src/generate_configuration.rs index 3799f19b..661f44b5 100644 --- a/xtask/codegen/src/generate_configuration.rs +++ b/xtask/codegen/src/generate_configuration.rs @@ -61,6 +61,8 @@ fn generate_for_groups( let mut group_idents = Vec::with_capacity(groups.len()); let mut group_strings = Vec::with_capacity(groups.len()); let mut group_as_default_rules = Vec::with_capacity(groups.len()); + let mut group_as_disabled_rules = Vec::with_capacity(groups.len()); + for (group, rules) in groups { let group_pascal_ident = quote::format_ident!("{}", 
&Case::Pascal.convert(group)); let group_ident = quote::format_ident!("{}", group); @@ -95,6 +97,12 @@ fn generate_for_groups( } }); + group_as_disabled_rules.push(quote! { + if let Some(group) = self.#group_ident.as_ref() { + disabled_rules.extend(&group.get_disabled_rules()); + } + }); + group_pascal_idents.push(group_pascal_ident); group_idents.push(group_ident); group_strings.push(Literal::string(group)); @@ -246,6 +254,13 @@ fn generate_for_groups( #( #group_as_default_rules )* enabled_rules } + + /// It returns the disabled rules by configuration. + pub fn as_disabled_rules(&self) -> FxHashSet> { + let mut disabled_rules = FxHashSet::default(); + #( #group_as_disabled_rules )* + disabled_rules + } } #( #struct_groups )* @@ -358,6 +373,13 @@ fn generate_for_groups( enabled_rules.difference(&disabled_rules).copied().collect() } + + /// It returns the disabled rules by configuration. + pub fn as_disabled_rules(&self) -> FxHashSet> { + let mut disabled_rules = FxHashSet::default(); + #( #group_as_disabled_rules )* + disabled_rules + } } #( #struct_groups )* From adb7a9e7a5201272b8bce1d38663f25f53b89abd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Sat, 5 Jul 2025 09:50:46 +0200 Subject: [PATCH 082/114] feat: add Nix development environment (#441) --- README.md | 15 +++++++++ flake.lock | 82 ++++++++++++++++++++++++++++++++++++++++++++++++ flake.nix | 92 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 189 insertions(+) create mode 100644 flake.lock create mode 100644 flake.nix diff --git a/README.md b/README.md index 162bb9c0..fa18d0fe 100644 --- a/README.md +++ b/README.md @@ -33,6 +33,21 @@ Our current focus is on refining and enhancing these core features while buildin - [psteinroe](https://github.com/psteinroe) - [juleswritescode](https://github.com/juleswritescode) +## Development + +### Using Nix + +```bash +nix develop +docker-compose up -d +``` + +### Using Docker + +```bash +docker-compose up -d +``` + ## Acknowledgements A big thanks to the following projects, without which this project wouldn't have been possible: diff --git a/flake.lock b/flake.lock new file mode 100644 index 00000000..e8bb4576 --- /dev/null +++ b/flake.lock @@ -0,0 +1,82 @@ +{ + "nodes": { + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1731533236, + "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1751271578, + "narHash": "sha256-P/SQmKDu06x8yv7i0s8bvnnuJYkxVGBWLWHaU+tt4YY=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "3016b4b15d13f3089db8a41ef937b13a9e33a8df", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "flake-utils": "flake-utils", + "nixpkgs": "nixpkgs", + "rust-overlay": "rust-overlay" + } + }, + "rust-overlay": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1751510438, + "narHash": "sha256-m8PjOoyyCR4nhqtHEBP1tB/jF+gJYYguSZmUmVTEAQE=", + "owner": "oxalica", + "repo": "rust-overlay", + "rev": "7f415261f298656f8164bd636c0dc05af4e95b6b", + "type": "github" + }, + "original": { + "owner": "oxalica", + "repo": "rust-overlay", + "type": "github" + } + }, + "systems": { + 
"locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 00000000..0dac4b19 --- /dev/null +++ b/flake.nix @@ -0,0 +1,92 @@ +{ + description = "PostgreSQL Language Server Development Environment"; + + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; + flake-utils.url = "github:numtide/flake-utils"; + rust-overlay = { + url = "github:oxalica/rust-overlay"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + }; + + outputs = { self, nixpkgs, flake-utils, rust-overlay }: + flake-utils.lib.eachDefaultSystem (system: + let + overlays = [ (import rust-overlay) ]; + pkgs = import nixpkgs { + inherit system overlays; + }; + + # Read rust-toolchain.toml to get the exact Rust version + rustToolchain = pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain.toml; + + # Development dependencies + buildInputs = with pkgs; [ + # Rust toolchain + rustToolchain + + # Node.js ecosystem + bun + nodejs_20 + + # Python for additional tooling + python3 + python3Packages.pip + + # System dependencies + pkg-config + openssl + + # Build tools + just + git + + # LSP and development tools + rust-analyzer + + # Additional tools that might be needed + cmake + gcc + libiconv + ]; + + # Environment variables + env = { + RUST_SRC_PATH = "${rustToolchain}/lib/rustlib/src/rust/library"; + PKG_CONFIG_PATH = "${pkgs.openssl.dev}/lib/pkgconfig"; + }; + + in + { + devShells.default = pkgs.mkShell { + inherit buildInputs; + + shellHook = '' + echo "PostgreSQL Language Server Development Environment" + echo "Available tools:" + echo " • Rust $(rustc --version)" + echo " • Node.js $(node --version)" + echo " • Bun $(bun --version)" + echo " • Just $(just --version)" + echo "" + echo "Development Commands:" + echo " • just --list # Show tasks" + echo " • cargo check # Check Rust" + echo " • bun install # Install deps" + echo "" + echo "Use Docker for database:" + echo " • docker-compose up -d" + echo "" + + # Set environment variables + ${pkgs.lib.concatStringsSep "\n" + (pkgs.lib.mapAttrsToList (name: value: "export ${name}=\"${value}\"") env)} + ''; + }; + + # Formatter for nix files + formatter = pkgs.nixfmt-rfc-style; + } + ); +} \ No newline at end of file From 21b05d2eb66888e6c55b2f228174775e6fcce0a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Sun, 6 Jul 2025 13:53:18 +0200 Subject: [PATCH 083/114] refactor: custom lexer (#437) - adds a new `tokenizer` crate that turns a string into simple tokens - adds a new `lexer` + `lexer_codegen` that uses the tokeniser to lex into a new `SyntaxKind` enum the new implementation is - much more performant (no extra string allocations, no call to C library) - works with broken strings (!!!!) - custom-made to our use-case (eg the `LineEnding` variant comes with a count) in a follow-up, we will be able to: - parse custom parameters that popular tools use - pre-process to remove unsupported stuff - parse non-sql content (e.g. 
commands) via a simple custom parser todos: - [x] use new lexer in splitter - [ ] make sure we support all the different parameter formats popular tools use -> will do it in a follow-up - [x] tests --- .claude/settings.local.json | 13 + Cargo.lock | 242 +++- Cargo.toml | 5 +- crates/pgt_diagnostics/src/display/message.rs | 9 + crates/pgt_lexer/Cargo.toml | 8 +- crates/pgt_lexer/README.md | 9 +- crates/pgt_lexer/src/codegen.rs | 3 - crates/pgt_lexer/src/codegen/mod.rs | 1 + crates/pgt_lexer/src/codegen/syntax_kind.rs | 1 + crates/pgt_lexer/src/diagnostics.rs | 67 -- crates/pgt_lexer/src/lexed.rs | 107 ++ crates/pgt_lexer/src/lexer.rs | 208 ++++ crates/pgt_lexer/src/lib.rs | 360 ++---- crates/pgt_lexer_codegen/Cargo.toml | 14 +- crates/pgt_lexer_codegen/README.md | 8 +- crates/pgt_lexer_codegen/build.rs | 49 + .../postgres/17-6.1.0/kwlist.h | 518 +++++++++ crates/pgt_lexer_codegen/src/keywords.rs | 43 + crates/pgt_lexer_codegen/src/lib.rs | 28 +- crates/pgt_lexer_codegen/src/syntax_kind.rs | 204 ++-- crates/pgt_query_ext/Cargo.toml | 8 +- crates/pgt_query_ext/src/codegen.rs | 1 - crates/pgt_query_ext/src/lib.rs | 5 - .../pgt_query_ext_codegen/src/get_location.rs | 122 -- .../src/get_node_properties.rs | 1006 ----------------- crates/pgt_query_ext_codegen/src/get_nodes.rs | 141 --- crates/pgt_query_ext_codegen/src/lib.rs | 48 - .../src/node_iterator.rs | 123 -- .../pgt_statement_splitter/src/diagnostics.rs | 22 + crates/pgt_statement_splitter/src/lib.rs | 68 +- crates/pgt_statement_splitter/src/parser.rs | 237 ---- .../src/parser/common.rs | 307 ----- .../pgt_statement_splitter/src/parser/ddl.rs | 15 - .../pgt_statement_splitter/src/parser/dml.rs | 59 - crates/pgt_statement_splitter/src/splitter.rs | 168 +++ .../src/splitter/common.rs | 275 +++++ .../src/{parser => splitter}/data.rs | 16 +- .../src/splitter/ddl.rs | 15 + .../src/splitter/dml.rs | 59 + .../tests/statement_splitter_tests.rs | 2 +- .../Cargo.toml | 9 +- crates/pgt_tokenizer/README.md | 1 + crates/pgt_tokenizer/src/cursor.rs | 73 ++ crates/pgt_tokenizer/src/lib.rs | 830 ++++++++++++++ .../pgt_tokenizer__tests__bitstring.snap | 16 + .../pgt_tokenizer__tests__block_comment.snap | 9 + ...er__tests__block_comment_unterminated.snap | 9 + ...s__dollar_quote_mismatch_tags_complex.snap | 11 + ...ts__dollar_quote_mismatch_tags_simple.snap | 11 + .../pgt_tokenizer__tests__dollar_quoting.snap | 15 + ...okenizer__tests__dollar_strings_part2.snap | 12 + .../pgt_tokenizer__tests__lex_statement.snap | 11 + .../pgt_tokenizer__tests__line_comment.snap | 10 + ...nizer__tests__line_comment_whitespace.snap | 16 + .../pgt_tokenizer__tests__numeric.snap | 30 + ...tokenizer__tests__numeric_non_decimal.snap | 20 + ...nizer__tests__numeric_with_seperators.snap | 18 + .../pgt_tokenizer__tests__params.snap | 27 + .../pgt_tokenizer__tests__quoted_ident.snap | 12 + ...tests__quoted_ident_with_escape_quote.snap | 10 + ..._tokenizer__tests__select_with_period.snap | 15 + .../pgt_tokenizer__tests__string.snap | 26 + ...kenizer__tests__string_unicode_escape.snap | 20 + ...tokenizer__tests__string_with_escapes.snap | 20 + crates/pgt_tokenizer/src/token.rs | 170 +++ .../src/workspace/server/annotation.rs | 44 +- .../src/workspace/server/change.rs | 159 +-- .../src/workspace/server/document.rs | 41 +- docs/codegen/src/rules_docs.rs | 2 +- xtask/rules_check/src/lib.rs | 75 +- 70 files changed, 3462 insertions(+), 2854 deletions(-) create mode 100644 .claude/settings.local.json delete mode 100644 crates/pgt_lexer/src/codegen.rs create mode 100644 
crates/pgt_lexer/src/codegen/mod.rs create mode 100644 crates/pgt_lexer/src/codegen/syntax_kind.rs delete mode 100644 crates/pgt_lexer/src/diagnostics.rs create mode 100644 crates/pgt_lexer/src/lexed.rs create mode 100644 crates/pgt_lexer/src/lexer.rs create mode 100644 crates/pgt_lexer_codegen/build.rs create mode 100644 crates/pgt_lexer_codegen/postgres/17-6.1.0/kwlist.h create mode 100644 crates/pgt_lexer_codegen/src/keywords.rs delete mode 100644 crates/pgt_query_ext/src/codegen.rs delete mode 100644 crates/pgt_query_ext_codegen/src/get_location.rs delete mode 100644 crates/pgt_query_ext_codegen/src/get_node_properties.rs delete mode 100644 crates/pgt_query_ext_codegen/src/get_nodes.rs delete mode 100644 crates/pgt_query_ext_codegen/src/lib.rs delete mode 100644 crates/pgt_query_ext_codegen/src/node_iterator.rs delete mode 100644 crates/pgt_statement_splitter/src/parser.rs delete mode 100644 crates/pgt_statement_splitter/src/parser/common.rs delete mode 100644 crates/pgt_statement_splitter/src/parser/ddl.rs delete mode 100644 crates/pgt_statement_splitter/src/parser/dml.rs create mode 100644 crates/pgt_statement_splitter/src/splitter.rs create mode 100644 crates/pgt_statement_splitter/src/splitter/common.rs rename crates/pgt_statement_splitter/src/{parser => splitter}/data.rs (62%) create mode 100644 crates/pgt_statement_splitter/src/splitter/ddl.rs create mode 100644 crates/pgt_statement_splitter/src/splitter/dml.rs rename crates/{pgt_query_ext_codegen => pgt_tokenizer}/Cargo.toml (62%) create mode 100644 crates/pgt_tokenizer/README.md create mode 100644 crates/pgt_tokenizer/src/cursor.rs create mode 100644 crates/pgt_tokenizer/src/lib.rs create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__bitstring.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__block_comment.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__block_comment_unterminated.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__dollar_quote_mismatch_tags_complex.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__dollar_quote_mismatch_tags_simple.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__dollar_quoting.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__dollar_strings_part2.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__lex_statement.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__line_comment.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__line_comment_whitespace.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__numeric.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__numeric_non_decimal.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__numeric_with_seperators.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__params.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__quoted_ident.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__quoted_ident_with_escape_quote.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__select_with_period.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__string.snap create mode 100644 
crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__string_unicode_escape.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__string_with_escapes.snap create mode 100644 crates/pgt_tokenizer/src/token.rs diff --git a/.claude/settings.local.json b/.claude/settings.local.json new file mode 100644 index 00000000..85429d0c --- /dev/null +++ b/.claude/settings.local.json @@ -0,0 +1,13 @@ +{ + "permissions": { + "allow": [ + "Bash(grep:*)", + "Bash(rg:*)", + "Bash(cargo test:*)", + "Bash(cargo run:*)", + "Bash(cargo check:*)", + "Bash(cargo fmt:*)" + ], + "deny": [] + } +} \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 41f807d1..074ed19b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -334,6 +334,12 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "beef" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" + [[package]] name = "bindgen" version = "0.66.1" @@ -747,9 +753,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.3" +version = "1.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d" +checksum = "d487aa071b5f64da6f19a3e848e3578944b726ee5a4854b82172f02aa876bfdc" dependencies = [ "shlex", ] @@ -1363,6 +1369,12 @@ dependencies = [ "spin", ] +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -2113,6 +2125,40 @@ dependencies = [ "value-bag", ] +[[package]] +name = "logos" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab6f536c1af4c7cc81edf73da1f8029896e7e1e16a219ef09b184e76a296f3db" +dependencies = [ + "logos-derive", +] + +[[package]] +name = "logos-codegen" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "189bbfd0b61330abea797e5e9276408f2edbe4f822d7ad08685d67419aafb34e" +dependencies = [ + "beef", + "fnv", + "lazy_static", + "proc-macro2", + "quote", + "regex-syntax 0.8.5", + "rustc_version", + "syn 2.0.90", +] + +[[package]] +name = "logos-derive" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebfe8e1a19049ddbfccbd14ac834b215e11b85b90bab0c2dba7c7b92fb5d5cba" +dependencies = [ + "logos-codegen", +] + [[package]] name = "lsp-types" version = "0.94.1" @@ -2160,6 +2206,28 @@ dependencies = [ "autocfg", ] +[[package]] +name = "miette" +version = "7.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f98efec8807c63c752b5bd61f862c165c115b0a35685bdcfd9238c7aeb592b7" +dependencies = [ + "cfg-if", + "miette-derive", + "unicode-width", +] + +[[package]] +name = "miette-derive" +version = "7.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "mimalloc" version = "0.1.43" @@ -2694,20 +2762,23 @@ name = "pgt_lexer" version = "0.0.0" dependencies = [ "insta", - "pg_query", 
"pgt_diagnostics", "pgt_lexer_codegen", "pgt_text_size", - "regex", + "pgt_tokenizer", ] [[package]] name = "pgt_lexer_codegen" version = "0.0.0" dependencies = [ - "pgt_query_proto_parser", + "anyhow", + "convert_case", "proc-macro2", + "prost-reflect", + "protox", "quote", + "ureq", ] [[package]] @@ -2755,20 +2826,9 @@ dependencies = [ "petgraph", "pg_query", "pgt_diagnostics", - "pgt_lexer", - "pgt_query_ext_codegen", "pgt_text_size", ] -[[package]] -name = "pgt_query_ext_codegen" -version = "0.0.0" -dependencies = [ - "pgt_query_proto_parser", - "proc-macro2", - "quote", -] - [[package]] name = "pgt_query_proto_parser" version = "0.0.0" @@ -2851,6 +2911,13 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "pgt_tokenizer" +version = "0.0.0" +dependencies = [ + "insta", +] + [[package]] name = "pgt_treesitter_queries" version = "0.0.0" @@ -3194,6 +3261,18 @@ dependencies = [ "syn 2.0.90", ] +[[package]] +name = "prost-reflect" +version = "0.15.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37587d5a8a1b3dc9863403d084fc2254b91ab75a702207098837950767e2260b" +dependencies = [ + "logos", + "miette", + "prost", + "prost-types", +] + [[package]] name = "prost-types" version = "0.13.5" @@ -3239,6 +3318,33 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "protox" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "424c2bd294b69c49b949f3619362bc3c5d28298cd1163b6d1a62df37c16461aa" +dependencies = [ + "bytes", + "miette", + "prost", + "prost-reflect", + "prost-types", + "protox-parse", + "thiserror 2.0.6", +] + +[[package]] +name = "protox-parse" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57927f9dbeeffcce7192404deee6157a640cbb3fe8ac11eabbe571565949ab75" +dependencies = [ + "logos", + "miette", + "prost-types", + "thiserror 2.0.6", +] + [[package]] name = "pulldown-cmark" version = "0.12.2" @@ -3405,6 +3511,20 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + [[package]] name = "rsa" version = "0.9.7" @@ -3458,6 +3578,15 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + [[package]] name = "rustix" version = "0.37.28" @@ -3485,6 +3614,41 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "rustls" +version = "0.23.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7160e3e10bf4535308537f3c4e1641468cd0e485175d6163087c0393c7d46643" +dependencies = [ + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pki-types" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +dependencies = [ + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + [[package]] name = "rustversion" version = "1.0.20" @@ -3539,6 +3703,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "semver" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" + [[package]] name = "serde" version = "1.0.215" @@ -4647,6 +4817,28 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "ureq" +version = "2.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02d1a66277ed75f640d608235660df48c8e3c19f3b4edb6a263315626cc3c01d" +dependencies = [ + "base64", + "flate2", + "log", + "once_cell", + "rustls", + "rustls-pki-types", + "url", + "webpki-roots 0.26.11", +] + [[package]] name = "url" version = "2.5.4" @@ -4844,6 +5036,24 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-roots" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.1", +] + +[[package]] +name = "webpki-roots" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8782dd5a41a24eed3a4f40b606249b3e236ca61adf1f25ea4d45c73de122b502" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "which" version = "4.4.2" diff --git a/Cargo.toml b/Cargo.toml index fe00d7ca..b5d6dd01 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,6 +43,9 @@ slotmap = "1.0.7" smallvec = { version = "1.13.2", features = ["union", "const_new", "serde"] } strum = { version = "0.27.1", features = ["derive"] } # this will use tokio if available, otherwise async-std +convert_case = "0.6.0" +prost-reflect = "0.15.3" +protox = "0.8.0" sqlx = { version = "0.8.2", features = ["runtime-tokio", "runtime-async-std", "postgres", "json"] } syn = "1.0.109" termcolor = "1.4.1" @@ -72,12 +75,12 @@ pgt_lexer_codegen = { path = "./crates/pgt_lexer_codegen", version = "0 pgt_lsp = { path = "./crates/pgt_lsp", version = "0.0.0" } pgt_markup = { path = "./crates/pgt_markup", version = "0.0.0" } pgt_query_ext = { path = "./crates/pgt_query_ext", version = "0.0.0" } -pgt_query_ext_codegen = { path = "./crates/pgt_query_ext_codegen", version = "0.0.0" } pgt_query_proto_parser = { path = "./crates/pgt_query_proto_parser", version = "0.0.0" } pgt_schema_cache = { path = "./crates/pgt_schema_cache", version = "0.0.0" } pgt_statement_splitter = { path = "./crates/pgt_statement_splitter", version = "0.0.0" } pgt_text_edit = { path = "./crates/pgt_text_edit", version = "0.0.0" } pgt_text_size = { path = "./crates/pgt_text_size", version = "0.0.0" } +pgt_tokenizer = { path = 
"./crates/pgt_tokenizer", version = "0.0.0" } pgt_treesitter_queries = { path = "./crates/pgt_treesitter_queries", version = "0.0.0" } pgt_typecheck = { path = "./crates/pgt_typecheck", version = "0.0.0" } pgt_workspace = { path = "./crates/pgt_workspace", version = "0.0.0" } diff --git a/crates/pgt_diagnostics/src/display/message.rs b/crates/pgt_diagnostics/src/display/message.rs index 3cf9be3f..20c039a9 100644 --- a/crates/pgt_diagnostics/src/display/message.rs +++ b/crates/pgt_diagnostics/src/display/message.rs @@ -47,6 +47,15 @@ impl From for MessageAndDescription { } } +impl From<&str> for MessageAndDescription { + fn from(description: &str) -> Self { + Self { + message: markup! { {description} }.to_owned(), + description: description.into(), + } + } +} + impl From for MessageAndDescription { fn from(message: MarkupBuf) -> Self { let description = markup_to_string(&message); diff --git a/crates/pgt_lexer/Cargo.toml b/crates/pgt_lexer/Cargo.toml index 4b218588..7f4ada43 100644 --- a/crates/pgt_lexer/Cargo.toml +++ b/crates/pgt_lexer/Cargo.toml @@ -12,16 +12,12 @@ version = "0.0.0" [dependencies] -regex = "1.9.1" - -pg_query.workspace = true pgt_diagnostics.workspace = true pgt_lexer_codegen.workspace = true - -pgt_text_size.workspace = true +pgt_text_size.workspace = true +pgt_tokenizer.workspace = true [dev-dependencies] insta.workspace = true [lib] -doctest = false diff --git a/crates/pgt_lexer/README.md b/crates/pgt_lexer/README.md index ec61c7b2..57bdaa34 100644 --- a/crates/pgt_lexer/README.md +++ b/crates/pgt_lexer/README.md @@ -1,8 +1 @@ -# pgt_lexer - -The `pgt_lexer` crate exposes the `lex` method, which turns an SQL query text into a `Vec>`: the base for the `pg_parser` and most of pgtools's operations. - -A token is always of a certain `SyntaxKind` kind. That `SyntaxKind` enum is derived from `libpg_query`'s protobuf file. - -The SQL query text is mostly lexed using the `pg_query::scan` method (`pg_query` is just a Rust wrapper around `libpg_query`). -However, that method does not parse required whitespace tokens, so the `lex` method takes care of parsing those and merging them into the result. +Heavily inspired by and copied from [squawk_parser](https://github.com/sbdchd/squawk/tree/9acfecbbb7f3c7eedcbaf060e7b25f9afa136db3/crates/squawk_parser). Thanks for making all the hard work MIT-licensed! diff --git a/crates/pgt_lexer/src/codegen.rs b/crates/pgt_lexer/src/codegen.rs deleted file mode 100644 index 6c750590..00000000 --- a/crates/pgt_lexer/src/codegen.rs +++ /dev/null @@ -1,3 +0,0 @@ -use pgt_lexer_codegen::lexer_codegen; - -lexer_codegen!(); diff --git a/crates/pgt_lexer/src/codegen/mod.rs b/crates/pgt_lexer/src/codegen/mod.rs new file mode 100644 index 00000000..c4e67bc5 --- /dev/null +++ b/crates/pgt_lexer/src/codegen/mod.rs @@ -0,0 +1 @@ +pub mod syntax_kind; diff --git a/crates/pgt_lexer/src/codegen/syntax_kind.rs b/crates/pgt_lexer/src/codegen/syntax_kind.rs new file mode 100644 index 00000000..f50398ec --- /dev/null +++ b/crates/pgt_lexer/src/codegen/syntax_kind.rs @@ -0,0 +1 @@ +pgt_lexer_codegen::syntax_kind_codegen!(); diff --git a/crates/pgt_lexer/src/diagnostics.rs b/crates/pgt_lexer/src/diagnostics.rs deleted file mode 100644 index 9516387a..00000000 --- a/crates/pgt_lexer/src/diagnostics.rs +++ /dev/null @@ -1,67 +0,0 @@ -use pgt_diagnostics::{Diagnostic, MessageAndDescription}; -use pgt_text_size::TextRange; - -/// A specialized diagnostic for scan errors. -/// -/// Scan diagnostics are always **fatal errors**. 
-#[derive(Clone, Debug, Diagnostic, PartialEq)] -#[diagnostic(category = "syntax", severity = Fatal)] -pub struct ScanError { - /// The location where the error is occurred - #[location(span)] - span: Option, - #[message] - #[description] - pub message: MessageAndDescription, -} - -impl ScanError { - pub fn from_pg_query_err(err: pg_query::Error, input: &str) -> Vec { - let err_msg = err.to_string(); - let re = regex::Regex::new(r#"at or near "(.*?)""#).unwrap(); - let mut diagnostics = Vec::new(); - - for captures in re.captures_iter(&err_msg) { - if let Some(matched) = captures.get(1) { - let search_term = matched.as_str(); - for (idx, _) in input.match_indices(search_term) { - let from = idx; - let to = from + search_term.len(); - diagnostics.push(ScanError { - span: Some(TextRange::new( - from.try_into().unwrap(), - to.try_into().unwrap(), - )), - message: MessageAndDescription::from(err_msg.clone()), - }); - } - } - } - - if diagnostics.is_empty() { - diagnostics.push(ScanError { - span: None, - message: MessageAndDescription::from(err_msg), - }); - } - - diagnostics - } -} - -#[cfg(test)] -mod tests { - use crate::lex; - - #[test] - fn finds_all_occurrences() { - let input = - "select 1443ddwwd33djwdkjw13331333333333; select 1443ddwwd33djwdkjw13331333333333;"; - let diagnostics = lex(input).unwrap_err(); - assert_eq!(diagnostics.len(), 2); - assert_eq!(diagnostics[0].span.unwrap().start(), 7.into()); - assert_eq!(diagnostics[0].span.unwrap().end(), 39.into()); - assert_eq!(diagnostics[1].span.unwrap().start(), 48.into()); - assert_eq!(diagnostics[1].span.unwrap().end(), 80.into()); - } -} diff --git a/crates/pgt_lexer/src/lexed.rs b/crates/pgt_lexer/src/lexed.rs new file mode 100644 index 00000000..6f0a273f --- /dev/null +++ b/crates/pgt_lexer/src/lexed.rs @@ -0,0 +1,107 @@ +use pgt_diagnostics::{Diagnostic, MessageAndDescription}; +use pgt_text_size::TextRange; + +use crate::SyntaxKind; + +/// Internal error type used during lexing +#[derive(Debug, Clone)] +pub struct LexError { + pub msg: String, + pub token: u32, +} + +/// A specialized diagnostic for lex errors. 
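+///
+/// A minimal sketch of how these diagnostics surface through [`crate::lex`]
+/// (the unterminated string literal below is reported as a `LexDiagnostic`):
+///
+/// ```
+/// let lexed = pgt_lexer::lex("SELECT 'unterminated");
+/// assert!(!lexed.errors().is_empty());
+/// ```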
+#[derive(Clone, Debug, Diagnostic, PartialEq)]
+#[diagnostic(category = "syntax", severity = Error)]
+pub struct LexDiagnostic {
+    /// The location where the error occurred
+    #[location(span)]
+    pub span: TextRange,
+    #[message]
+    #[description]
+    pub message: MessageAndDescription,
+}
+
+/// Result of lexing a string, providing access to tokens and diagnostics
+pub struct Lexed<'a> {
+    pub(crate) text: &'a str,
+    pub(crate) kind: Vec<SyntaxKind>,
+    pub(crate) start: Vec<u32>,
+    pub(crate) error: Vec<LexError>,
+    pub(crate) line_ending_counts: Vec<usize>,
+}
+
+impl Lexed<'_> {
+    /// Returns the number of tokens
+    pub fn len(&self) -> usize {
+        self.kind.len()
+    }
+
+    /// Returns true if there are no tokens
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Returns an iterator over token kinds
+    pub fn tokens(&self) -> impl Iterator<Item = SyntaxKind> + '_ {
+        self.kind.iter().copied()
+    }
+
+    /// Returns the kind of token at the given index
+    pub fn kind(&self, idx: usize) -> SyntaxKind {
+        assert!(
+            idx < self.len(),
+            "expected index < {}, got {}",
+            self.len(),
+            idx
+        );
+        self.kind[idx]
+    }
+
+    /// Returns the number of line endings in the token at the given index
+    pub fn line_ending_count(&self, idx: usize) -> usize {
+        assert!(
+            idx < self.len(),
+            "expected index < {}, got {}",
+            self.len(),
+            idx
+        );
+        assert!(self.kind(idx) == SyntaxKind::LINE_ENDING);
+        self.line_ending_counts[idx]
+    }
+
+    /// Returns the text range of the token at the given index
+    pub fn range(&self, idx: usize) -> TextRange {
+        self.text_range(idx)
+    }
+
+    /// Returns the text of the token at the given index
+    pub fn text(&self, idx: usize) -> &str {
+        self.range_text(idx..idx + 1)
+    }
+
+    /// Returns all lexing errors with their text ranges
+    pub fn errors(&self) -> Vec<LexDiagnostic> {
+        self.error
+            .iter()
+            .map(|it| LexDiagnostic {
+                message: it.msg.as_str().into(),
+                span: self.text_range(it.token as usize),
+            })
+            .collect()
+    }
+
+    pub(crate) fn text_range(&self, i: usize) -> TextRange {
+        assert!(i < self.len());
+        let lo = self.start[i];
+        let hi = self.start[i + 1];
+        TextRange::new(lo.into(), hi.into())
+    }
+
+    fn range_text(&self, r: std::ops::Range<usize>) -> &str {
+        assert!(r.start < r.end && r.end <= self.len());
+        let lo = self.start[r.start] as usize;
+        let hi = self.start[r.end] as usize;
+        &self.text[lo..hi]
+    }
+}
diff --git a/crates/pgt_lexer/src/lexer.rs b/crates/pgt_lexer/src/lexer.rs
new file mode 100644
index 00000000..db4b4ae2
--- /dev/null
+++ b/crates/pgt_lexer/src/lexer.rs
@@ -0,0 +1,208 @@
+use pgt_tokenizer::tokenize;
+
+use crate::SyntaxKind;
+use crate::lexed::{LexError, Lexed};
+
+/// Lexer that processes input text into tokens and diagnostics
+pub struct Lexer<'a> {
+    text: &'a str,
+    kind: Vec<SyntaxKind>,
+    start: Vec<u32>,
+    error: Vec<LexError>,
+    offset: usize,
+    /// we store line ending counts outside of SyntaxKind because of the u16 representation of SyntaxKind
+    line_ending_counts: Vec<usize>,
+}
+
+impl<'a> Lexer<'a> {
+    /// Create a new lexer for the given text
+    pub fn new(text: &'a str) -> Self {
+        Self {
+            text,
+            kind: Vec::new(),
+            start: Vec::new(),
+            error: Vec::new(),
+            offset: 0,
+            line_ending_counts: Vec::new(),
+        }
+    }
+
+    /// Lex the input text and return the result
+    pub fn lex(mut self) -> Lexed<'a> {
+        for token in tokenize(&self.text[self.offset..]) {
+            let token_text = &self.text[self.offset..][..token.len as usize];
+            self.extend_token(&token.kind, token_text);
+        }
+
+        // Add EOF token
+        self.push(SyntaxKind::EOF, 0, None, None);
+
+        Lexed {
+            text: self.text,
+            kind: self.kind,
+            start: self.start,
+            error: self.error,
+            line_ending_counts: self.line_ending_counts,
+        }
+    }
+
+    fn push(
+        &mut self,
+        kind: SyntaxKind,
+        len: usize,
+        err: Option<&str>,
+        line_ending_count: Option<usize>,
+    ) {
+        self.kind.push(kind);
+        self.start.push(self.offset as u32);
+        self.offset += len;
+
+        assert!(
+            kind != SyntaxKind::LINE_ENDING || line_ending_count.is_some(),
+            "Line ending token must have a line ending count"
+        );
+
+        self.line_ending_counts.push(line_ending_count.unwrap_or(0));
+
+        if let Some(err) = err {
+            let token = (self.kind.len() - 1) as u32;
+            let msg = err.to_owned();
+            self.error.push(LexError { msg, token });
+        }
+    }
+
+    fn extend_token(&mut self, kind: &pgt_tokenizer::TokenKind, token_text: &str) {
+        let mut err = "";
+        let mut line_ending_count = None;
+
+        let syntax_kind = {
+            match kind {
+                pgt_tokenizer::TokenKind::LineComment => SyntaxKind::COMMENT,
+                pgt_tokenizer::TokenKind::BlockComment { terminated } => {
+                    if !terminated {
+                        err = "Missing trailing `*/` symbols to terminate the block comment";
+                    }
+                    SyntaxKind::COMMENT
+                }
+                pgt_tokenizer::TokenKind::Space => SyntaxKind::SPACE,
+                pgt_tokenizer::TokenKind::Tab => SyntaxKind::TAB,
+                pgt_tokenizer::TokenKind::LineEnding { count } => {
+                    line_ending_count = Some(*count);
+                    SyntaxKind::LINE_ENDING
+                }
+                pgt_tokenizer::TokenKind::VerticalTab => SyntaxKind::VERTICAL_TAB,
+                pgt_tokenizer::TokenKind::FormFeed => SyntaxKind::FORM_FEED,
+                pgt_tokenizer::TokenKind::Ident => {
+                    SyntaxKind::from_keyword(token_text).unwrap_or(SyntaxKind::IDENT)
+                }
+                pgt_tokenizer::TokenKind::Literal { kind, .. } => {
+                    self.extend_literal(token_text.len(), kind);
+                    return;
+                }
+                pgt_tokenizer::TokenKind::Semi => SyntaxKind::SEMICOLON,
+                pgt_tokenizer::TokenKind::Comma => SyntaxKind::COMMA,
+                pgt_tokenizer::TokenKind::Dot => SyntaxKind::DOT,
+                pgt_tokenizer::TokenKind::OpenParen => SyntaxKind::L_PAREN,
+                pgt_tokenizer::TokenKind::CloseParen => SyntaxKind::R_PAREN,
+                pgt_tokenizer::TokenKind::OpenBracket => SyntaxKind::L_BRACK,
+                pgt_tokenizer::TokenKind::CloseBracket => SyntaxKind::R_BRACK,
+                pgt_tokenizer::TokenKind::At => SyntaxKind::AT,
+                pgt_tokenizer::TokenKind::Pound => SyntaxKind::POUND,
+                pgt_tokenizer::TokenKind::Tilde => SyntaxKind::TILDE,
+                pgt_tokenizer::TokenKind::Question => SyntaxKind::QUESTION,
+                pgt_tokenizer::TokenKind::Colon => SyntaxKind::COLON,
+                pgt_tokenizer::TokenKind::Eq => SyntaxKind::EQ,
+                pgt_tokenizer::TokenKind::Bang => SyntaxKind::BANG,
+                pgt_tokenizer::TokenKind::Lt => SyntaxKind::L_ANGLE,
+                pgt_tokenizer::TokenKind::Gt => SyntaxKind::R_ANGLE,
+                pgt_tokenizer::TokenKind::Minus => SyntaxKind::MINUS,
+                pgt_tokenizer::TokenKind::And => SyntaxKind::AMP,
+                pgt_tokenizer::TokenKind::Or => SyntaxKind::PIPE,
+                pgt_tokenizer::TokenKind::Plus => SyntaxKind::PLUS,
+                pgt_tokenizer::TokenKind::Star => SyntaxKind::STAR,
+                pgt_tokenizer::TokenKind::Slash => SyntaxKind::SLASH,
+                pgt_tokenizer::TokenKind::Caret => SyntaxKind::CARET,
+                pgt_tokenizer::TokenKind::Percent => SyntaxKind::PERCENT,
+                pgt_tokenizer::TokenKind::Unknown => SyntaxKind::ERROR,
+                pgt_tokenizer::TokenKind::Backslash => SyntaxKind::BACKSLASH,
+                pgt_tokenizer::TokenKind::UnknownPrefix => {
+                    err = "unknown literal prefix";
+                    SyntaxKind::IDENT
+                }
+                pgt_tokenizer::TokenKind::Eof => SyntaxKind::EOF,
+                pgt_tokenizer::TokenKind::Backtick => SyntaxKind::BACKTICK,
+                pgt_tokenizer::TokenKind::PositionalParam => SyntaxKind::POSITIONAL_PARAM,
+                pgt_tokenizer::TokenKind::QuotedIdent { terminated } => {
+                    if !terminated {
+                        err = "Missing trailing \" to terminate the quoted identifier"
+                    }
+                    
SyntaxKind::IDENT + } + } + }; + + let err = if err.is_empty() { None } else { Some(err) }; + self.push(syntax_kind, token_text.len(), err, line_ending_count); + } + + fn extend_literal(&mut self, len: usize, kind: &pgt_tokenizer::LiteralKind) { + let mut err = ""; + + let syntax_kind = match *kind { + pgt_tokenizer::LiteralKind::Int { empty_int, base: _ } => { + if empty_int { + err = "Missing digits after the integer base prefix"; + } + SyntaxKind::INT_NUMBER + } + pgt_tokenizer::LiteralKind::Float { + empty_exponent, + base: _, + } => { + if empty_exponent { + err = "Missing digits after the exponent symbol"; + } + SyntaxKind::FLOAT_NUMBER + } + pgt_tokenizer::LiteralKind::Str { terminated } => { + if !terminated { + err = "Missing trailing `'` symbol to terminate the string literal"; + } + SyntaxKind::STRING + } + pgt_tokenizer::LiteralKind::ByteStr { terminated } => { + if !terminated { + err = "Missing trailing `'` symbol to terminate the hex bit string literal"; + } + SyntaxKind::BYTE_STRING + } + pgt_tokenizer::LiteralKind::BitStr { terminated } => { + if !terminated { + err = "Missing trailing `'` symbol to terminate the bit string literal"; + } + SyntaxKind::BIT_STRING + } + pgt_tokenizer::LiteralKind::DollarQuotedString { terminated } => { + if !terminated { + err = "Unterminated dollar quoted string literal"; + } + SyntaxKind::DOLLAR_QUOTED_STRING + } + pgt_tokenizer::LiteralKind::UnicodeEscStr { terminated } => { + if !terminated { + err = "Missing trailing `'` symbol to terminate the unicode escape string literal"; + } + SyntaxKind::BYTE_STRING + } + pgt_tokenizer::LiteralKind::EscStr { terminated } => { + if !terminated { + err = "Missing trailing `'` symbol to terminate the escape string literal"; + } + SyntaxKind::ESC_STRING + } + }; + + let err = if err.is_empty() { None } else { Some(err) }; + self.push(syntax_kind, len, err, None); + } +} diff --git a/crates/pgt_lexer/src/lib.rs b/crates/pgt_lexer/src/lib.rs index 32bbdd42..2d8779a7 100644 --- a/crates/pgt_lexer/src/lib.rs +++ b/crates/pgt_lexer/src/lib.rs @@ -1,191 +1,14 @@ mod codegen; -pub mod diagnostics; +mod lexed; +mod lexer; -use diagnostics::ScanError; -use pg_query::protobuf::{KeywordKind, ScanToken}; -use pgt_text_size::{TextLen, TextRange, TextSize}; -use regex::Regex; -use std::{collections::VecDeque, sync::LazyLock}; +pub use crate::codegen::syntax_kind::SyntaxKind; +pub use crate::lexed::{LexDiagnostic, Lexed}; +pub use crate::lexer::Lexer; -pub use crate::codegen::SyntaxKind; - -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum TokenType { - Whitespace, - NoKeyword, - UnreservedKeyword, - ColNameKeyword, - TypeFuncNameKeyword, - ReservedKeyword, -} - -impl From<&ScanToken> for TokenType { - fn from(token: &ScanToken) -> TokenType { - match token.token { - // SqlComment | CComment - 275 | 276 => TokenType::Whitespace, - _ => match token.keyword_kind() { - KeywordKind::NoKeyword => TokenType::NoKeyword, - KeywordKind::UnreservedKeyword => TokenType::UnreservedKeyword, - KeywordKind::ColNameKeyword => TokenType::ColNameKeyword, - KeywordKind::TypeFuncNameKeyword => TokenType::TypeFuncNameKeyword, - KeywordKind::ReservedKeyword => TokenType::ReservedKeyword, - }, - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Token { - pub kind: SyntaxKind, - pub text: String, - pub span: TextRange, - pub token_type: TokenType, -} - -impl Token { - pub fn eof(pos: usize) -> Token { - Token { - kind: SyntaxKind::Eof, - text: "".to_string(), - span: TextRange::at(TextSize::try_from(pos).unwrap(), 
TextSize::from(0)), - token_type: TokenType::Whitespace, - } - } -} - -pub static WHITESPACE_TOKENS: &[SyntaxKind] = &[ - SyntaxKind::Whitespace, - SyntaxKind::Tab, - SyntaxKind::Newline, - SyntaxKind::SqlComment, - SyntaxKind::CComment, -]; - -static PATTERN_LEXER: LazyLock = LazyLock::new(|| { - #[cfg(windows)] - { - // On Windows, treat \r\n as a single newline token - Regex::new(r"(?P +)|(?P(\r\n|\n)+)|(?P\t+)").unwrap() - } - #[cfg(not(windows))] - { - // On other platforms, just check for \n - Regex::new(r"(?P +)|(?P\n+)|(?P\t+)").unwrap() - } -}); - -fn whitespace_tokens(input: &str) -> VecDeque { - let mut tokens = VecDeque::new(); - - for cap in PATTERN_LEXER.captures_iter(input) { - if let Some(whitespace) = cap.name("whitespace") { - tokens.push_back(Token { - token_type: TokenType::Whitespace, - kind: SyntaxKind::Whitespace, - text: whitespace.as_str().to_string(), - span: TextRange::new( - TextSize::from(u32::try_from(whitespace.start()).unwrap()), - TextSize::from(u32::try_from(whitespace.end()).unwrap()), - ), - }); - } else if let Some(newline) = cap.name("newline") { - tokens.push_back(Token { - token_type: TokenType::Whitespace, - kind: SyntaxKind::Newline, - text: newline.as_str().to_string(), - span: TextRange::new( - TextSize::from(u32::try_from(newline.start()).unwrap()), - TextSize::from(u32::try_from(newline.end()).unwrap()), - ), - }); - } else if let Some(tab) = cap.name("tab") { - tokens.push_back(Token { - token_type: TokenType::Whitespace, - kind: SyntaxKind::Tab, - text: tab.as_str().to_string(), - span: TextRange::new( - TextSize::from(u32::try_from(tab.start()).unwrap()), - TextSize::from(u32::try_from(tab.end()).unwrap()), - ), - }); - } else { - panic!("No match"); - }; - } - - tokens -} - -/// Turn a string of potentially valid sql code into a list of tokens, including their range in the source text. -/// -/// The implementation is primarily using libpg_querys `scan` method, and fills in the gaps with tokens that are not parsed by the library, e.g. whitespace. 
-pub fn lex(text: &str) -> Result, Vec> { - let mut whitespace_tokens = whitespace_tokens(text); - - // tokens from pg_query.rs - let mut pgt_query_tokens = match pg_query::scan(text) { - Ok(r) => r.tokens.into_iter().collect::>(), - Err(err) => return Err(ScanError::from_pg_query_err(err, text)), - }; - - // merge the two token lists - let mut tokens: Vec = Vec::new(); - let mut pos = TextSize::from(0); - - while pos < text.text_len() { - if !pgt_query_tokens.is_empty() - && TextSize::from(u32::try_from(pgt_query_tokens[0].start).unwrap()) == pos - { - let pgt_query_token = pgt_query_tokens.pop_front().unwrap(); - - // the lexer returns byte indices, so we need to slice - let token_text = &text[usize::try_from(pgt_query_token.start).unwrap() - ..usize::try_from(pgt_query_token.end).unwrap()]; - - let len = token_text.text_len(); - let has_whitespace = token_text.contains(" ") || token_text.contains("\n"); - tokens.push(Token { - token_type: TokenType::from(&pgt_query_token), - kind: SyntaxKind::from(&pgt_query_token), - text: token_text.to_string(), - span: TextRange::new( - TextSize::from(u32::try_from(pgt_query_token.start).unwrap()), - TextSize::from(u32::try_from(pgt_query_token.end).unwrap()), - ), - }); - pos += len; - - if has_whitespace { - while !whitespace_tokens.is_empty() - && whitespace_tokens[0].span.start() < TextSize::from(u32::from(pos)) - { - whitespace_tokens.pop_front(); - } - } - - continue; - } - - if !whitespace_tokens.is_empty() - && whitespace_tokens[0].span.start() == TextSize::from(u32::from(pos)) - { - let whitespace_token = whitespace_tokens.pop_front().unwrap(); - let len = whitespace_token.text.text_len(); - tokens.push(whitespace_token); - pos += len; - continue; - } - - let usize_pos = usize::from(pos); - panic!( - "No token found at position {:?}: '{:?}'", - pos, - text.get(usize_pos..usize_pos + 1) - ); - } - - Ok(tokens) +/// Lex the input string into tokens and diagnostics +pub fn lex(input: &str) -> Lexed { + Lexer::new(input).lex() } #[cfg(test)] @@ -193,101 +16,106 @@ mod tests { use super::*; #[test] - fn test_special_chars() { - let input = "insert into c (name, full_name) values ('Å', 1);"; - let tokens = lex(input).unwrap(); - assert!(!tokens.is_empty()); - } + fn test_basic_lexing() { + let input = "SELECT * FROM users WHERE id = 1;"; + let lexed = lex(input); + + // Check we have tokens + assert!(!lexed.is_empty()); + + // Iterate over tokens and collect identifiers + let mut identifiers = Vec::new(); + for (idx, kind) in lexed.tokens().enumerate() { + if kind == SyntaxKind::IDENT { + identifiers.push((lexed.text(idx), lexed.range(idx))); + } + } - #[test] - fn test_tab_tokens() { - let input = "select\t1"; - let tokens = lex(input).unwrap(); - assert_eq!(tokens[1].kind, SyntaxKind::Tab); + // Should find at least "users" and "id" as identifiers + assert!(identifiers.len() >= 2); } #[test] - fn test_newline_tokens() { - let input = "select\n1"; - let tokens = lex(input).unwrap(); - assert_eq!(tokens[1].kind, SyntaxKind::Newline); + fn test_lexing_with_errors() { + let input = "SELECT 'unterminated string"; + let lexed = lex(input); + + // Should have tokens + assert!(!lexed.is_empty()); + + // Should have an error for unterminated string + let errors = lexed.errors(); + assert!(!errors.is_empty()); + // Check the error message exists + assert!(!errors[0].message.to_string().is_empty()); } #[test] - fn test_consecutive_newlines() { - // Test with multiple consecutive newlines - #[cfg(windows)] - let input = "select\r\n\r\n1"; - 
#[cfg(not(windows))] - let input = "select\n\n1"; - - let tokens = lex(input).unwrap(); - - // Check that we have exactly one newline token between "select" and "1" - assert_eq!(tokens[0].kind, SyntaxKind::Select); - assert_eq!(tokens[1].kind, SyntaxKind::Newline); - assert_eq!(tokens[2].kind, SyntaxKind::Iconst); + fn test_token_ranges() { + let input = "SELECT id"; + let lexed = lex(input); + + // First token should be a keyword (SELECT gets parsed as a keyword) + let _first_kind = lexed.kind(0); + assert_eq!(u32::from(lexed.range(0).start()), 0); + assert_eq!(u32::from(lexed.range(0).end()), 6); + assert_eq!(lexed.text(0), "SELECT"); + + // Find the id token + for (idx, kind) in lexed.tokens().enumerate() { + if kind == SyntaxKind::IDENT && lexed.text(idx) == "id" { + assert_eq!(u32::from(lexed.range(idx).start()), 7); + assert_eq!(u32::from(lexed.range(idx).end()), 9); + } + } } #[test] - fn test_whitespace_tokens() { - let input = "select 1"; - let tokens = lex(input).unwrap(); - assert_eq!(tokens[1].kind, SyntaxKind::Whitespace); + fn test_empty_input() { + let input = ""; + let lexed = lex(input); + assert_eq!(lexed.len(), 1); + assert_eq!(lexed.kind(0), SyntaxKind::EOF); } #[test] - fn test_lexer() { - let input = "select 1; \n -- some comment \n select 2\t"; - - let tokens = lex(input).unwrap(); - let mut tokens_iter = tokens.iter(); - - let token = tokens_iter.next().unwrap(); - assert_eq!(token.kind, SyntaxKind::Select); - assert_eq!(token.text, "select"); - - let token = tokens_iter.next().unwrap(); - assert_eq!(token.kind, SyntaxKind::Whitespace); - - let token = tokens_iter.next().unwrap(); - assert_eq!(token.kind, SyntaxKind::Iconst); - assert_eq!(token.text, "1"); - - let token = tokens_iter.next().unwrap(); - assert_eq!(token.kind, SyntaxKind::Ascii59); - - let token = tokens_iter.next().unwrap(); - assert_eq!(token.kind, SyntaxKind::Whitespace); - - let token = tokens_iter.next().unwrap(); - assert_eq!(token.kind, SyntaxKind::Newline); - - let token = tokens_iter.next().unwrap(); - assert_eq!(token.kind, SyntaxKind::Whitespace); - - let token = tokens_iter.next().unwrap(); - assert_eq!(token.kind, SyntaxKind::SqlComment); - assert_eq!(token.text, "-- some comment "); - - let token = tokens_iter.next().unwrap(); - assert_eq!(token.kind, SyntaxKind::Newline); - - let token = tokens_iter.next().unwrap(); - assert_eq!(token.kind, SyntaxKind::Whitespace); - - let token = tokens_iter.next().unwrap(); - assert_eq!(token.kind, SyntaxKind::Select); - assert_eq!(token.text, "select"); - - let token = tokens_iter.next().unwrap(); - assert_eq!(token.kind, SyntaxKind::Whitespace); + fn test_whitespace_handling() { + let input = " SELECT \n id "; + let lexed = lex(input); + + // Collect non-whitespace tokens + let mut non_whitespace = Vec::new(); + for (idx, kind) in lexed.tokens().enumerate() { + if !matches!( + kind, + SyntaxKind::SPACE | SyntaxKind::TAB | SyntaxKind::LINE_ENDING | SyntaxKind::EOF + ) { + non_whitespace.push(lexed.text(idx)); + } + } - let token = tokens_iter.next().unwrap(); - assert_eq!(token.kind, SyntaxKind::Iconst); - assert_eq!(token.text, "2"); + assert_eq!(non_whitespace.len(), 2); // SELECT and id + } - let token = tokens_iter.next().unwrap(); - assert_eq!(token.kind, SyntaxKind::Tab); + #[test] + fn finds_lex_errors() { + // Test with unterminated block comment + let input = "/* unterminated comment"; + let lexed = lex(input); + let errors = lexed.errors(); + + // Should have error for unterminated block comment + assert!(!errors.is_empty()); + 
assert!(errors[0].message.to_string().contains("Missing trailing")); + assert!(errors[0].span.start() < errors[0].span.end()); + + // Test with unterminated string + let input2 = "SELECT 'unterminated string"; + let lexed2 = lex(input2); + let errors2 = lexed2.errors(); + + // Should have error for unterminated string + assert!(!errors2.is_empty()); + assert!(errors2[0].message.to_string().contains("Missing trailing")); } } diff --git a/crates/pgt_lexer_codegen/Cargo.toml b/crates/pgt_lexer_codegen/Cargo.toml index c5878646..b50465b0 100644 --- a/crates/pgt_lexer_codegen/Cargo.toml +++ b/crates/pgt_lexer_codegen/Cargo.toml @@ -10,12 +10,16 @@ name = "pgt_lexer_codegen" repository.workspace = true version = "0.0.0" - [dependencies] -pgt_query_proto_parser.workspace = true -proc-macro2.workspace = true -quote = "1.0.33" +anyhow = { workspace = true } +convert_case = { workspace = true } +proc-macro2.workspace = true +prost-reflect = { workspace = true } +protox = { workspace = true } +quote.workspace = true + +[build-dependencies] +ureq = "2.9" [lib] -doctest = false proc-macro = true diff --git a/crates/pgt_lexer_codegen/README.md b/crates/pgt_lexer_codegen/README.md index 843ac2f8..57bdaa34 100644 --- a/crates/pgt_lexer_codegen/README.md +++ b/crates/pgt_lexer_codegen/README.md @@ -1,7 +1 @@ -# pgt_lexer_codegen - -This crate is responsible for reading `libpg_query`'s protobuf file and turning it into the Rust enum `SyntaxKind`. - -It does so by reading the file from the installed git submodule, parsing it with a protobuf parser, and using a procedural macro to generate the enum. - -Rust requires procedural macros to be defined in a different crate than where they're used, hence this \_codegen crate. +Heavily inspired by and copied from [squawk_parser](https://github.com/sbdchd/squawk/tree/9acfecbbb7f3c7eedcbaf060e7b25f9afa136db3/crates/squawk_parser). Thanks for making all the hard work MIT-licensed! 
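+
+A minimal usage sketch (this mirrors how `pgt_lexer` consumes the macro in its
+`codegen` module):
+
+```rust
+// Expands to the `SyntaxKind` enum, generated at build time from the
+// `kwlist.h` keyword list vendored under `postgres/17-6.1.0/`.
+pgt_lexer_codegen::syntax_kind_codegen!();
+```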
diff --git a/crates/pgt_lexer_codegen/build.rs b/crates/pgt_lexer_codegen/build.rs
new file mode 100644
index 00000000..70c9635d
--- /dev/null
+++ b/crates/pgt_lexer_codegen/build.rs
@@ -0,0 +1,49 @@
+use std::env;
+use std::fs;
+use std::io::Write;
+use std::path::PathBuf;
+
+// TODO make this selectable via feature flags
+static LIBPG_QUERY_TAG: &str = "17-6.1.0";
+
+/// Downloads the `kwlist.h` file from the specified version of `libpg_query`
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let version = LIBPG_QUERY_TAG.to_string();
+
+    // Check for the postgres header file in the source tree first
+    let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR")?);
+    let headers_dir = manifest_dir.join("postgres").join(&version);
+    let kwlist_path = headers_dir.join("kwlist.h");
+
+    // Only download if the file doesn't exist
+    if !kwlist_path.exists() {
+        println!(
+            "cargo:warning=Downloading kwlist.h for libpg_query {}",
+            version
+        );
+
+        fs::create_dir_all(&headers_dir)?;
+
+        let kwlist_url = format!(
+            "https://raw.githubusercontent.com/pganalyze/libpg_query/{}/src/postgres/include/parser/kwlist.h",
+            version
+        );
+
+        let response = ureq::get(&kwlist_url).call()?;
+        let content = response.into_string()?;
+
+        let mut file = fs::File::create(&kwlist_path)?;
+        file.write_all(content.as_bytes())?;
+
+        println!("cargo:warning=Successfully downloaded kwlist.h");
+    }
+
+    println!(
+        "cargo:rustc-env=PG_QUERY_KWLIST_PATH={}",
+        kwlist_path.display()
+    );
+
+    println!("cargo:rerun-if-changed={}", kwlist_path.display());
+
+    Ok(())
+}
diff --git a/crates/pgt_lexer_codegen/postgres/17-6.1.0/kwlist.h b/crates/pgt_lexer_codegen/postgres/17-6.1.0/kwlist.h
new file mode 100644
index 00000000..658d7ff6
--- /dev/null
+++ b/crates/pgt_lexer_codegen/postgres/17-6.1.0/kwlist.h
@@ -0,0 +1,518 @@
+/*-------------------------------------------------------------------------
+ *
+ * kwlist.h
+ *
+ * The keyword lists are kept in their own source files for use by
+ * automatic tools. The exact representation of a keyword is determined
+ * by the PG_KEYWORD macro, which is not defined in this file; it can
+ * be defined by the caller for special purposes.
+ *
+ * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *   src/include/parser/kwlist.h
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* there is deliberately not an #ifndef KWLIST_H here */
+
+/*
+ * List of keyword (name, token-value, category, bare-label-status) entries.
+ *
+ * Note: gen_keywordlist.pl requires the entries to appear in ASCII order.
+ */ + +/* name, value, category, is-bare-label */ +PG_KEYWORD("abort", ABORT_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("absent", ABSENT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("absolute", ABSOLUTE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("access", ACCESS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("action", ACTION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("add", ADD_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("admin", ADMIN, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("after", AFTER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("aggregate", AGGREGATE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("all", ALL, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("also", ALSO, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("alter", ALTER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("always", ALWAYS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("analyse", ANALYSE, RESERVED_KEYWORD, BARE_LABEL) /* British spelling */ +PG_KEYWORD("analyze", ANALYZE, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("and", AND, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("any", ANY, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("array", ARRAY, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("as", AS, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("asc", ASC, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("asensitive", ASENSITIVE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("assertion", ASSERTION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("assignment", ASSIGNMENT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("asymmetric", ASYMMETRIC, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("at", AT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("atomic", ATOMIC, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("attach", ATTACH, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("attribute", ATTRIBUTE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("authorization", AUTHORIZATION, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("backward", BACKWARD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("before", BEFORE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("begin", BEGIN_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("between", BETWEEN, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("bigint", BIGINT, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("binary", BINARY, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("bit", BIT, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("boolean", BOOLEAN_P, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("both", BOTH, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("breadth", BREADTH, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("by", BY, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cache", CACHE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("call", CALL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("called", CALLED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cascade", CASCADE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cascaded", CASCADED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("case", CASE, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cast", CAST, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("catalog", CATALOG_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("chain", CHAIN, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("char", CHAR_P, COL_NAME_KEYWORD, AS_LABEL) +PG_KEYWORD("character", CHARACTER, COL_NAME_KEYWORD, AS_LABEL) +PG_KEYWORD("characteristics", CHARACTERISTICS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("check", CHECK, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("checkpoint", CHECKPOINT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("class", CLASS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("close", CLOSE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cluster", 
CLUSTER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("coalesce", COALESCE, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("collate", COLLATE, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("collation", COLLATION, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("column", COLUMN, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("columns", COLUMNS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("comment", COMMENT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("comments", COMMENTS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("commit", COMMIT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("committed", COMMITTED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("compression", COMPRESSION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("concurrently", CONCURRENTLY, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("conditional", CONDITIONAL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("configuration", CONFIGURATION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("conflict", CONFLICT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("connection", CONNECTION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("constraint", CONSTRAINT, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("constraints", CONSTRAINTS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("content", CONTENT_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("continue", CONTINUE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("conversion", CONVERSION_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("copy", COPY, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cost", COST, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("create", CREATE, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("cross", CROSS, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("csv", CSV, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cube", CUBE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("current", CURRENT_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("current_catalog", CURRENT_CATALOG, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("current_date", CURRENT_DATE, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("current_role", CURRENT_ROLE, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("current_schema", CURRENT_SCHEMA, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("current_time", CURRENT_TIME, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("current_timestamp", CURRENT_TIMESTAMP, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("current_user", CURRENT_USER, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cursor", CURSOR, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cycle", CYCLE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("data", DATA_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("database", DATABASE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("day", DAY_P, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("deallocate", DEALLOCATE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("dec", DEC, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("decimal", DECIMAL_P, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("declare", DECLARE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("default", DEFAULT, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("defaults", DEFAULTS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("deferrable", DEFERRABLE, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("deferred", DEFERRED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("definer", DEFINER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("delete", DELETE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("delimiter", DELIMITER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("delimiters", DELIMITERS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("depends", DEPENDS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("depth", DEPTH, UNRESERVED_KEYWORD, 
BARE_LABEL) +PG_KEYWORD("desc", DESC, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("detach", DETACH, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("dictionary", DICTIONARY, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("disable", DISABLE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("discard", DISCARD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("distinct", DISTINCT, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("do", DO, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("document", DOCUMENT_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("domain", DOMAIN_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("double", DOUBLE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("drop", DROP, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("each", EACH, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("else", ELSE, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("empty", EMPTY_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("enable", ENABLE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("encoding", ENCODING, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("encrypted", ENCRYPTED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("end", END_P, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("enum", ENUM_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("error", ERROR_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("escape", ESCAPE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("event", EVENT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("except", EXCEPT, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("exclude", EXCLUDE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("excluding", EXCLUDING, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("exclusive", EXCLUSIVE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("execute", EXECUTE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("exists", EXISTS, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("explain", EXPLAIN, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("expression", EXPRESSION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("extension", EXTENSION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("external", EXTERNAL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("extract", EXTRACT, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("false", FALSE_P, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("family", FAMILY, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("fetch", FETCH, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("filter", FILTER, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("finalize", FINALIZE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("first", FIRST_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("float", FLOAT_P, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("following", FOLLOWING, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("for", FOR, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("force", FORCE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("foreign", FOREIGN, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("format", FORMAT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("forward", FORWARD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("freeze", FREEZE, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("from", FROM, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("full", FULL, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("function", FUNCTION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("functions", FUNCTIONS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("generated", GENERATED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("global", GLOBAL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("grant", GRANT, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("granted", GRANTED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("greatest", GREATEST, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("group", GROUP_P, 
RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("grouping", GROUPING, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("groups", GROUPS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("handler", HANDLER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("having", HAVING, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("header", HEADER_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("hold", HOLD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("hour", HOUR_P, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("identity", IDENTITY_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("if", IF_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("ilike", ILIKE, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("immediate", IMMEDIATE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("immutable", IMMUTABLE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("implicit", IMPLICIT_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("import", IMPORT_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("in", IN_P, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("include", INCLUDE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("including", INCLUDING, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("increment", INCREMENT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("indent", INDENT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("index", INDEX, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("indexes", INDEXES, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("inherit", INHERIT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("inherits", INHERITS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("initially", INITIALLY, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("inline", INLINE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("inner", INNER_P, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("inout", INOUT, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("input", INPUT_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("insensitive", INSENSITIVE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("insert", INSERT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("instead", INSTEAD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("int", INT_P, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("integer", INTEGER, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("intersect", INTERSECT, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("interval", INTERVAL, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("into", INTO, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("invoker", INVOKER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("is", IS, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("isnull", ISNULL, TYPE_FUNC_NAME_KEYWORD, AS_LABEL) +PG_KEYWORD("isolation", ISOLATION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("join", JOIN, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("json", JSON, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("json_array", JSON_ARRAY, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("json_arrayagg", JSON_ARRAYAGG, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("json_exists", JSON_EXISTS, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("json_object", JSON_OBJECT, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("json_objectagg", JSON_OBJECTAGG, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("json_query", JSON_QUERY, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("json_scalar", JSON_SCALAR, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("json_serialize", JSON_SERIALIZE, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("json_table", JSON_TABLE, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("json_value", JSON_VALUE, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("keep", KEEP, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("key", KEY, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("keys", KEYS, UNRESERVED_KEYWORD, BARE_LABEL) 
+PG_KEYWORD("label", LABEL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("language", LANGUAGE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("large", LARGE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("last", LAST_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("lateral", LATERAL_P, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("leading", LEADING, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("leakproof", LEAKPROOF, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("least", LEAST, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("left", LEFT, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("level", LEVEL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("like", LIKE, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("limit", LIMIT, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("listen", LISTEN, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("load", LOAD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("local", LOCAL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("localtime", LOCALTIME, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("localtimestamp", LOCALTIMESTAMP, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("location", LOCATION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("lock", LOCK_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("locked", LOCKED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("logged", LOGGED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("mapping", MAPPING, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("match", MATCH, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("matched", MATCHED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("materialized", MATERIALIZED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("maxvalue", MAXVALUE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("merge", MERGE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("merge_action", MERGE_ACTION, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("method", METHOD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("minute", MINUTE_P, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("minvalue", MINVALUE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("mode", MODE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("month", MONTH_P, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("move", MOVE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("name", NAME_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("names", NAMES, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("national", NATIONAL, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("natural", NATURAL, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("nchar", NCHAR, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("nested", NESTED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("new", NEW, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("next", NEXT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("nfc", NFC, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("nfd", NFD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("nfkc", NFKC, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("nfkd", NFKD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("no", NO, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("none", NONE, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("normalize", NORMALIZE, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("normalized", NORMALIZED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("not", NOT, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("nothing", NOTHING, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("notify", NOTIFY, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("notnull", NOTNULL, TYPE_FUNC_NAME_KEYWORD, AS_LABEL) +PG_KEYWORD("nowait", NOWAIT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("null", NULL_P, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("nullif", NULLIF, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("nulls", NULLS_P, 
+PG_KEYWORD("numeric", NUMERIC, COL_NAME_KEYWORD, BARE_LABEL)
+PG_KEYWORD("object", OBJECT_P, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("of", OF, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("off", OFF, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("offset", OFFSET, RESERVED_KEYWORD, AS_LABEL)
+PG_KEYWORD("oids", OIDS, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("old", OLD, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("omit", OMIT, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("on", ON, RESERVED_KEYWORD, AS_LABEL)
+PG_KEYWORD("only", ONLY, RESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("operator", OPERATOR, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("option", OPTION, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("options", OPTIONS, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("or", OR, RESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("order", ORDER, RESERVED_KEYWORD, AS_LABEL)
+PG_KEYWORD("ordinality", ORDINALITY, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("others", OTHERS, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("out", OUT_P, COL_NAME_KEYWORD, BARE_LABEL)
+PG_KEYWORD("outer", OUTER_P, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL)
+PG_KEYWORD("over", OVER, UNRESERVED_KEYWORD, AS_LABEL)
+PG_KEYWORD("overlaps", OVERLAPS, TYPE_FUNC_NAME_KEYWORD, AS_LABEL)
+PG_KEYWORD("overlay", OVERLAY, COL_NAME_KEYWORD, BARE_LABEL)
+PG_KEYWORD("overriding", OVERRIDING, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("owned", OWNED, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("owner", OWNER, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("parallel", PARALLEL, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("parameter", PARAMETER, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("parser", PARSER, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("partial", PARTIAL, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("partition", PARTITION, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("passing", PASSING, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("password", PASSWORD, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("path", PATH, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("placing", PLACING, RESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("plan", PLAN, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("plans", PLANS, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("policy", POLICY, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("position", POSITION, COL_NAME_KEYWORD, BARE_LABEL)
+PG_KEYWORD("preceding", PRECEDING, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("precision", PRECISION, COL_NAME_KEYWORD, AS_LABEL)
+PG_KEYWORD("prepare", PREPARE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("prepared", PREPARED, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("preserve", PRESERVE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("primary", PRIMARY, RESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("prior", PRIOR, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("privileges", PRIVILEGES, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("procedural", PROCEDURAL, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("procedure", PROCEDURE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("procedures", PROCEDURES, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("program", PROGRAM, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("publication", PUBLICATION, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("quote", QUOTE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("quotes", QUOTES, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("range", RANGE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("read", READ, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("real", REAL, COL_NAME_KEYWORD, BARE_LABEL)
+PG_KEYWORD("reassign", REASSIGN, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("recheck", RECHECK, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("recursive", RECURSIVE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("ref", REF_P, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("references", REFERENCES, RESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("referencing", REFERENCING, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("refresh", REFRESH, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("reindex", REINDEX, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("relative", RELATIVE_P, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("release", RELEASE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("rename", RENAME, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("repeatable", REPEATABLE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("replace", REPLACE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("replica", REPLICA, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("reset", RESET, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("restart", RESTART, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("restrict", RESTRICT, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("return", RETURN, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("returning", RETURNING, RESERVED_KEYWORD, AS_LABEL)
+PG_KEYWORD("returns", RETURNS, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("revoke", REVOKE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("right", RIGHT, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL)
+PG_KEYWORD("role", ROLE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("rollback", ROLLBACK, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("rollup", ROLLUP, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("routine", ROUTINE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("routines", ROUTINES, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("row", ROW, COL_NAME_KEYWORD, BARE_LABEL)
+PG_KEYWORD("rows", ROWS, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("rule", RULE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("savepoint", SAVEPOINT, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("scalar", SCALAR, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("schema", SCHEMA, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("schemas", SCHEMAS, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("scroll", SCROLL, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("search", SEARCH, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("second", SECOND_P, UNRESERVED_KEYWORD, AS_LABEL)
+PG_KEYWORD("security", SECURITY, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("select", SELECT, RESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("sequence", SEQUENCE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("sequences", SEQUENCES, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("serializable", SERIALIZABLE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("server", SERVER, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("session", SESSION, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("session_user", SESSION_USER, RESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("set", SET, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("setof", SETOF, COL_NAME_KEYWORD, BARE_LABEL)
+PG_KEYWORD("sets", SETS, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("share", SHARE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("show", SHOW, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("similar", SIMILAR, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL)
+PG_KEYWORD("simple", SIMPLE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("skip", SKIP, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("smallint", SMALLINT, COL_NAME_KEYWORD, BARE_LABEL)
+PG_KEYWORD("snapshot", SNAPSHOT, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("some", SOME, RESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("source", SOURCE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("sql", SQL_P, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("stable", STABLE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("standalone", STANDALONE_P, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("start", START, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("statement", STATEMENT, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("statistics", STATISTICS, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("stdin", STDIN, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("stdout", STDOUT, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("storage", STORAGE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("stored", STORED, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("strict", STRICT_P, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("string", STRING_P, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("strip", STRIP_P, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("subscription", SUBSCRIPTION, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("substring", SUBSTRING, COL_NAME_KEYWORD, BARE_LABEL)
+PG_KEYWORD("support", SUPPORT, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("symmetric", SYMMETRIC, RESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("sysid", SYSID, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("system", SYSTEM_P, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("system_user", SYSTEM_USER, RESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("table", TABLE, RESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("tables", TABLES, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("tablesample", TABLESAMPLE, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL)
+PG_KEYWORD("tablespace", TABLESPACE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("target", TARGET, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("temp", TEMP, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("template", TEMPLATE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("temporary", TEMPORARY, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("text", TEXT_P, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("then", THEN, RESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("ties", TIES, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("time", TIME, COL_NAME_KEYWORD, BARE_LABEL)
+PG_KEYWORD("timestamp", TIMESTAMP, COL_NAME_KEYWORD, BARE_LABEL)
+PG_KEYWORD("to", TO, RESERVED_KEYWORD, AS_LABEL)
+PG_KEYWORD("trailing", TRAILING, RESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("transaction", TRANSACTION, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("transform", TRANSFORM, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("treat", TREAT, COL_NAME_KEYWORD, BARE_LABEL)
+PG_KEYWORD("trigger", TRIGGER, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("trim", TRIM, COL_NAME_KEYWORD, BARE_LABEL)
+PG_KEYWORD("true", TRUE_P, RESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("truncate", TRUNCATE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("trusted", TRUSTED, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("type", TYPE_P, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("types", TYPES_P, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("uescape", UESCAPE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("unbounded", UNBOUNDED, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("uncommitted", UNCOMMITTED, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("unconditional", UNCONDITIONAL, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("unencrypted", UNENCRYPTED, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("union", UNION, RESERVED_KEYWORD, AS_LABEL)
+PG_KEYWORD("unique", UNIQUE, RESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("unknown", UNKNOWN, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("unlisten", UNLISTEN, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("unlogged", UNLOGGED, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("until", UNTIL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("update", UPDATE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("user", USER, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("using", USING, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("vacuum", VACUUM, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("valid", VALID, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("validate", VALIDATE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("validator", VALIDATOR, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("value", VALUE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("values", VALUES, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("varchar", VARCHAR, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("variadic", VARIADIC, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("varying", VARYING, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("verbose", VERBOSE, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("version", VERSION_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("view", VIEW, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("views", VIEWS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("volatile", VOLATILE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("when", WHEN, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("where", WHERE, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("whitespace", WHITESPACE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("window", WINDOW, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("with", WITH, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("within", WITHIN, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("without", WITHOUT, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("work", WORK, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("wrapper", WRAPPER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("write", WRITE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("xml", XML_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlattributes", XMLATTRIBUTES, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlconcat", XMLCONCAT, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlelement", XMLELEMENT, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlexists", XMLEXISTS, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlforest", XMLFOREST, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlnamespaces", XMLNAMESPACES, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlparse", XMLPARSE, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlpi", XMLPI, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlroot", XMLROOT, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlserialize", XMLSERIALIZE, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmltable", XMLTABLE, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("year", YEAR_P, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("yes", YES_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("zone", ZONE, UNRESERVED_KEYWORD, BARE_LABEL) diff --git a/crates/pgt_lexer_codegen/src/keywords.rs b/crates/pgt_lexer_codegen/src/keywords.rs new file mode 100644 index 00000000..f0104c8d --- /dev/null +++ b/crates/pgt_lexer_codegen/src/keywords.rs @@ -0,0 +1,43 @@ +// from https://github.com/sbdchd/squawk/blob/ac9f90c3b2be8d2c46fd5454eb48975afd268dbe/crates/xtask/src/keywords.rs +use anyhow::{Context, Ok, Result}; +use std::path; + +fn parse_header() -> Result> { + // use the environment variable set by the build script to locate the kwlist.h file + let kwlist_file = path::PathBuf::from(env!("PG_QUERY_KWLIST_PATH")); + let data = std::fs::read_to_string(kwlist_file).context("Failed to read kwlist.h")?; + + let mut keywords = Vec::new(); + + for line in data.lines() { + if line.starts_with("PG_KEYWORD") { + let line = line + .split(&['(', ')']) + .nth(1) + .context("Invalid kwlist.h structure")?; + + 
+            let row_items: Vec<&str> = line.split(',').collect();
+
+            match row_items[..] {
+                [name, _value, _category, _is_bare_label] => {
+                    let name = name.trim().replace('\"', "");
+                    keywords.push(name);
+                }
+                _ => anyhow::bail!("Problem reading kwlist.h row"),
+            }
+        }
+    }
+
+    Ok(keywords)
+}
+
+pub(crate) struct KeywordKinds {
+    pub(crate) all_keywords: Vec<String>,
+}
+
+pub(crate) fn keyword_kinds() -> Result<KeywordKinds> {
+    let mut all_keywords = parse_header()?;
+    all_keywords.sort();
+
+    Ok(KeywordKinds { all_keywords })
+}
diff --git a/crates/pgt_lexer_codegen/src/lib.rs b/crates/pgt_lexer_codegen/src/lib.rs
index 8f492e4b..b620b6a6 100644
--- a/crates/pgt_lexer_codegen/src/lib.rs
+++ b/crates/pgt_lexer_codegen/src/lib.rs
@@ -1,29 +1,9 @@
+mod keywords;
 mod syntax_kind;
 
-use pgt_query_proto_parser::ProtoParser;
-use quote::quote;
-use std::{env, path, path::Path};
+use syntax_kind::syntax_kind_mod;
 
 #[proc_macro]
-pub fn lexer_codegen(_item: proc_macro::TokenStream) -> proc_macro::TokenStream {
-    let parser = ProtoParser::new(&proto_file_path());
-    let proto_file = parser.parse();
-
-    let syntax_kind = syntax_kind::syntax_kind_mod(&proto_file);
-
-    quote! {
-        use pg_query::{protobuf, protobuf::ScanToken, protobuf::Token, NodeEnum, NodeRef};
-
-        #syntax_kind
-    }
-    .into()
-}
-
-fn proto_file_path() -> path::PathBuf {
-    Path::new(env!("CARGO_MANIFEST_DIR"))
-        .ancestors()
-        .nth(2)
-        .unwrap()
-        .join("libpg_query/protobuf/pg_query.proto")
-        .to_path_buf()
+pub fn syntax_kind_codegen(_input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+    syntax_kind_mod().into()
 }
diff --git a/crates/pgt_lexer_codegen/src/syntax_kind.rs b/crates/pgt_lexer_codegen/src/syntax_kind.rs
index 091b1e02..07b7a419 100644
--- a/crates/pgt_lexer_codegen/src/syntax_kind.rs
+++ b/crates/pgt_lexer_codegen/src/syntax_kind.rs
@@ -1,111 +1,123 @@
-use std::collections::HashSet;
-
-use pgt_query_proto_parser::{Node, ProtoFile, Token};
-use proc_macro2::{Ident, Literal};
+use convert_case::{Case, Casing};
+use proc_macro2::TokenStream;
 use quote::{format_ident, quote};
 
-pub fn syntax_kind_mod(proto_file: &ProtoFile) -> proc_macro2::TokenStream {
-    let custom_node_names = custom_node_names();
-    let custom_node_identifiers = custom_node_identifiers(&custom_node_names);
-
-    let node_identifiers = node_identifiers(&proto_file.nodes);
+use crate::keywords::{KeywordKinds, keyword_kinds};
+
+const WHITESPACE: &[&str] = &[
+    "SPACE",        // " "
+    "TAB",          // "\t"
+    "VERTICAL_TAB", // "\x0B"
+    "FORM_FEED",    // "\x0C"
+    "LINE_ENDING",  // "\n" or "\r" in any combination
+];
+
+const PUNCT: &[(&str, &str)] = &[
+    ("$", "DOLLAR"),
+    (";", "SEMICOLON"),
+    (",", "COMMA"),
+    ("(", "L_PAREN"),
+    (")", "R_PAREN"),
+    ("[", "L_BRACK"),
+    ("]", "R_BRACK"),
+    ("<", "L_ANGLE"),
+    (">", "R_ANGLE"),
+    ("@", "AT"),
+    ("#", "POUND"),
+    ("~", "TILDE"),
+    ("?", "QUESTION"),
+    ("&", "AMP"),
+    ("|", "PIPE"),
+    ("+", "PLUS"),
+    ("*", "STAR"),
+    ("/", "SLASH"),
+    ("\\", "BACKSLASH"),
+    ("^", "CARET"),
+    ("%", "PERCENT"),
+    ("_", "UNDERSCORE"),
+    (".", "DOT"),
+    (":", "COLON"),
+    ("=", "EQ"),
+    ("!", "BANG"),
+    ("-", "MINUS"),
+    ("`", "BACKTICK"),
+];
+
+const EXTRA: &[&str] = &["POSITIONAL_PARAM", "ERROR", "COMMENT", "EOF"];
+
+const LITERALS: &[&str] = &[
+    "BIT_STRING",
+    "BYTE_STRING",
+    "DOLLAR_QUOTED_STRING",
+    "ESC_STRING",
+    "FLOAT_NUMBER",
+    "INT_NUMBER",
+    "NULL",
+    "STRING",
+    "IDENT",
+];
+
+pub fn syntax_kind_mod() -> proc_macro2::TokenStream {
+    let keywords = keyword_kinds().expect("Failed to get keyword kinds");
+
+    let KeywordKinds { all_keywords, .. } = keywords;
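+
+    // one enum variant and one `from_keyword` match arm is generated per
+    // keyword, e.g. "select" becomes SyntaxKind::SELECT_KW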
+    let mut enum_variants: Vec<TokenStream> = Vec::new();
+    let mut from_kw_match_arms: Vec<TokenStream> = Vec::new();
+
+    // collect keywords
+    for kw in &all_keywords {
+        if kw.to_uppercase().contains("WHITESPACE") {
+            continue; // Skip whitespace as it is handled separately
+        }
 
-    let token_identifiers = token_identifiers(&proto_file.tokens);
-    let token_value_literals = token_value_literals(&proto_file.tokens);
+        let kind_ident = format_ident!("{}_KW", kw.to_case(Case::UpperSnake));
 
-    let syntax_kind_from_impl =
-        syntax_kind_from_impl(&node_identifiers, &token_identifiers, &token_value_literals);
+        enum_variants.push(quote! { #kind_ident });
+        from_kw_match_arms.push(quote! {
+            #kw => Some(SyntaxKind::#kind_ident)
+        });
+    }
 
-    let mut enum_variants = HashSet::new();
-    enum_variants.extend(&custom_node_identifiers);
-    enum_variants.extend(&node_identifiers);
-    enum_variants.extend(&token_identifiers);
-    let unique_enum_variants = enum_variants.into_iter().collect::<Vec<_>>();
+    // collect extra keywords
+    EXTRA.iter().for_each(|&name| {
+        let variant_name = format_ident!("{}", name);
+        enum_variants.push(quote! { #variant_name });
+    });
+
+    // collect whitespace variants
+    WHITESPACE.iter().for_each(|&name| {
+        let variant_name = format_ident!("{}", name);
+        enum_variants.push(quote! { #variant_name });
+    });
+
+    // collect punctuations
+    PUNCT.iter().for_each(|&(_ascii_name, variant)| {
+        let variant_name = format_ident!("{}", variant);
+        enum_variants.push(quote! { #variant_name });
+    });
+
+    // collect literals
+    LITERALS.iter().for_each(|&name| {
+        let variant_name = format_ident!("{}", name);
+        enum_variants.push(quote! { #variant_name });
+    });
 
     quote! {
-        /// An u32 enum of all valid syntax elements (nodes and tokens) of the postgres
-        /// sql dialect, and a few custom ones that are not parsed by pg_query.rs, such
-        /// as `Whitespace`.
         #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
-        #[repr(u32)]
+        #[repr(u16)]
         pub enum SyntaxKind {
-            #(#unique_enum_variants),*,
-        }
-
-        #syntax_kind_from_impl
-    }
-}
-
-fn custom_node_names() -> Vec<&'static str> {
-    vec![
-        "SourceFile",
-        "Comment",
-        "Whitespace",
-        "Newline",
-        "Tab",
-        "Stmt",
-        "Eof",
-    ]
-}
-
-fn custom_node_identifiers(custom_node_names: &[&str]) -> Vec<Ident> {
-    custom_node_names
-        .iter()
-        .map(|&node_name| format_ident!("{}", node_name))
-        .collect()
-}
-
-fn node_identifiers(nodes: &[Node]) -> Vec<Ident> {
-    nodes
-        .iter()
-        .map(|node| format_ident!("{}", &node.name))
-        .collect()
-}
-
-fn token_identifiers(tokens: &[Token]) -> Vec<Ident> {
-    tokens
-        .iter()
-        .map(|token| format_ident!("{}", &token.name))
-        .collect()
-}
-
-fn token_value_literals(tokens: &[Token]) -> Vec<Literal> {
-    tokens
-        .iter()
-        .map(|token| Literal::i32_unsuffixed(token.value))
-        .collect()
-}
-
-fn syntax_kind_from_impl(
-    node_identifiers: &[Ident],
-    token_identifiers: &[Ident],
-    token_value_literals: &[Literal],
-) -> proc_macro2::TokenStream {
-    quote! {
-        /// Converts a `pg_query` node to a `SyntaxKind`
-        impl From<&NodeEnum> for SyntaxKind {
-            fn from(node: &NodeEnum) -> SyntaxKind {
-                match node {
-                    #(NodeEnum::#node_identifiers(_) => SyntaxKind::#node_identifiers),*
-                }
-            }
-
-        }
-
-        impl From<Token> for SyntaxKind {
-            fn from(token: Token) -> SyntaxKind {
-                match i32::from(token) {
-                    #(#token_value_literals => SyntaxKind::#token_identifiers),*,
-                    _ => panic!("Unknown token: {:?}", token),
-                }
-            }
+            #(#enum_variants),*,
         }
 
-        impl From<&ScanToken> for SyntaxKind {
-            fn from(token: &ScanToken) -> SyntaxKind {
-                match token.token {
-                    #(#token_value_literals => SyntaxKind::#token_identifiers),*,
-                    _ => panic!("Unknown token: {:?}", token.token),
+        impl SyntaxKind {
+            pub(crate) fn from_keyword(ident: &str) -> Option<SyntaxKind> {
+                let lower_ident = ident.to_ascii_lowercase();
+                match lower_ident.as_str() {
+                    #(#from_kw_match_arms),*,
+                    _ => None
                 }
            }
         }
     }
 }
diff --git a/crates/pgt_query_ext/Cargo.toml b/crates/pgt_query_ext/Cargo.toml
index c6754b67..3e6b57c1 100644
--- a/crates/pgt_query_ext/Cargo.toml
+++ b/crates/pgt_query_ext/Cargo.toml
@@ -14,11 +14,9 @@ version = "0.0.0"
 
 [dependencies]
 petgraph = "0.6.4"
-pg_query.workspace              = true
-pgt_diagnostics.workspace       = true
-pgt_lexer.workspace             = true
-pgt_query_ext_codegen.workspace = true
-pgt_text_size.workspace         = true
+pg_query.workspace        = true
+pgt_diagnostics.workspace = true
+pgt_text_size.workspace   = true
 
 [lib]
 doctest = false
diff --git a/crates/pgt_query_ext/src/codegen.rs b/crates/pgt_query_ext/src/codegen.rs
deleted file mode 100644
index 8278383b..00000000
--- a/crates/pgt_query_ext/src/codegen.rs
+++ /dev/null
@@ -1 +0,0 @@
-pgt_query_ext_codegen::codegen!();
diff --git a/crates/pgt_query_ext/src/lib.rs b/crates/pgt_query_ext/src/lib.rs
index c1f5fb49..a087ec60 100644
--- a/crates/pgt_query_ext/src/lib.rs
+++ b/crates/pgt_query_ext/src/lib.rs
@@ -9,16 +9,11 @@
 //! - `get_node_properties` to get the properties of a node
 //! - `get_nodes` to get all the nodes in the AST as a petgraph tree
 //! - `ChildrenIterator` to iterate over the children of a node
-mod codegen;
 pub mod diagnostics;
 
 pub use pg_query::protobuf;
 pub use pg_query::{Error, NodeEnum, Result};
 
-pub use codegen::{
-    ChildrenIterator, Node, TokenProperty, get_location, get_node_properties, get_nodes,
-};
-
 pub fn parse(sql: &str) -> Result<NodeEnum> {
     pg_query::parse(sql).map(|parsed| {
         parsed
diff --git a/crates/pgt_query_ext_codegen/src/get_location.rs b/crates/pgt_query_ext_codegen/src/get_location.rs
deleted file mode 100644
index fa6fa8b2..00000000
--- a/crates/pgt_query_ext_codegen/src/get_location.rs
+++ /dev/null
@@ -1,122 +0,0 @@
-use pgt_query_proto_parser::{FieldType, Node, ProtoFile};
-use proc_macro2::{Ident, TokenStream};
-use quote::{format_ident, quote};
-
-pub fn get_location_mod(proto_file: &ProtoFile) -> proc_macro2::TokenStream {
-    let manual_node_names = manual_node_names();
-
-    let node_identifiers = node_identifiers(&proto_file.nodes, &manual_node_names);
-    let location_idents = location_idents(&proto_file.nodes, &manual_node_names);
-
-    quote! {
-        /// Returns the location of a node
-        pub fn get_location(node: &NodeEnum) -> Option<usize> {
-            let loc = get_location_internal(node);
-            if loc.is_some() {
-                usize::try_from(loc.unwrap()).ok()
-            } else {
-                None
-            }
-        }
-
-        fn get_location_internal(node: &NodeEnum) -> Option<i32> {
-            let location = match node {
-                // for some nodes, the location of the node itself is after their children location.
-                // we implement the logic for those nodes manually.
-                // if you add one, make sure to add its name to `manual_node_names()`.
-                NodeEnum::BoolExpr(n) => {
-                    let a = n.args.iter().min_by(|a, b| {
-                        let loc_a = get_location_internal(&a.node.as_ref().unwrap());
-                        let loc_b = get_location_internal(&b.node.as_ref().unwrap());
-                        loc_a.cmp(&loc_b)
-                    });
-                    get_location_internal(&a.unwrap().node.as_ref().unwrap())
-                },
-                NodeEnum::AExpr(n) => get_location_internal(&n.lexpr.as_ref().unwrap().node.as_ref().unwrap()),
-                NodeEnum::WindowDef(n) => {
-                    if n.partition_clause.len() > 0 || n.order_clause.len() > 0 {
-                        // the location is not correct if its the definition clause, e.g. for
-                        // window w as (partition by a order by b)
-                        // the location is the start of the `partition` token
-                        None
-                    } else {
-                        Some(n.location)
-                    }
-                },
-                NodeEnum::CollateClause(n) => get_location_internal(&n.arg.as_ref().unwrap().node.as_ref().unwrap()),
-                NodeEnum::TypeCast(n) => get_location_internal(&n.arg.as_ref().unwrap().node.as_ref().unwrap()),
-                NodeEnum::ColumnDef(n) => if n.colname.len() > 0 {
-                    Some(n.location)
-                } else {
-                    None
-                },
-                NodeEnum::NullTest(n) => if n.arg.is_some() {
-                    get_location_internal(&n.arg.as_ref().unwrap().node.as_ref().unwrap())
-                } else {
-                    Some(n.location)
-                },
-                NodeEnum::PublicationObjSpec(n) => {
-                    match &n.pubtable {
-                        Some(pubtable) => match &pubtable.relation {
-                            Some(range_var) => Some(range_var.location),
-                            None => Some(n.location),
-                        },
-                        None => Some(n.location),
-                    }
-                },
-                NodeEnum::BooleanTest(n) => {
-                    if n.arg.is_some() {
-                        get_location_internal(&n.arg.as_ref().unwrap().node.as_ref().unwrap())
-                    } else {
-                        Some(n.location)
-                    }
-                },
-                #(NodeEnum::#node_identifiers(n) => #location_idents),*
-            };
-            if location.is_some() && location.unwrap() < 0 {
-                None
-            } else {
-                location
-            }
-        }
-    }
-}
-
-fn manual_node_names() -> Vec<&'static str> {
-    vec![
-        "BoolExpr",
-        "AExpr",
-        "WindowDef",
-        "CollateClause",
-        "TypeCast",
-        "ColumnDef",
-        "NullTest",
-        "PublicationObjSpec",
-    ]
-}
-
-fn location_idents(nodes: &[Node], exclude_nodes: &[&str]) -> Vec<TokenStream> {
-    nodes
-        .iter()
-        .filter(|n| !exclude_nodes.contains(&n.name.as_str()))
-        .map(|node| {
-            if node
-                .fields
-                .iter()
-                .any(|n| n.name == "location" && n.field_type == FieldType::Int32)
-            {
-                quote! { Some(n.location) }
-            } else {
-                quote! { None }
-            }
-        })
-        .collect()
-}
-
-fn node_identifiers(nodes: &[Node], exclude_nodes: &[&str]) -> Vec<Ident> {
-    nodes
-        .iter()
-        .filter(|n| !exclude_nodes.contains(&n.name.as_str()))
-        .map(|node| format_ident!("{}", &node.name))
-        .collect()
-}
diff --git a/crates/pgt_query_ext_codegen/src/get_node_properties.rs b/crates/pgt_query_ext_codegen/src/get_node_properties.rs
deleted file mode 100644
index 9581304b..00000000
--- a/crates/pgt_query_ext_codegen/src/get_node_properties.rs
+++ /dev/null
@@ -1,1006 +0,0 @@
-use pgt_query_proto_parser::{FieldType, Node, ProtoFile};
-use proc_macro2::{Ident, TokenStream};
-use quote::{format_ident, quote};
-
-pub fn get_node_properties_mod(proto_file: &ProtoFile) -> proc_macro2::TokenStream {
-    let node_identifiers = node_identifiers(&proto_file.nodes);
-    let node_handlers = node_handlers(&proto_file.nodes);
-
-    quote! {
-        #[derive(Debug, Clone, PartialEq)]
-        pub struct TokenProperty {
-            pub value: Option<String>,
-            pub kind: Option<SyntaxKind>,
-        }
-
-        impl TokenProperty {
-            pub fn new(value: Option<String>, kind: Option<SyntaxKind>) -> TokenProperty {
-                if value.is_none() && kind.is_none() {
-                    panic!("TokenProperty must have either value or kind");
-                }
-                TokenProperty { value, kind }
-            }
-        }
-
-        impl From<i32> for TokenProperty {
-            fn from(value: i32) -> TokenProperty {
-                TokenProperty {
-                    value: Some(value.to_string()),
-                    kind: None,
-                }
-            }
-        }
-
-        impl From<u32> for TokenProperty {
-            fn from(value: u32) -> TokenProperty {
-                TokenProperty {
-                    value: Some(value.to_string()),
-                    kind: None,
-                }
-            }
-        }
-
-
-        impl From<i64> for TokenProperty {
-            fn from(value: i64) -> TokenProperty {
-                TokenProperty {
-                    value: Some(value.to_string()),
-                    kind: None,
-                }
-            }
-        }
-
-        impl From<u64> for TokenProperty {
-            fn from(value: u64) -> TokenProperty {
-                TokenProperty {
-                    value: Some(value.to_string()),
-                    kind: None,
-                }
-            }
-        }
-
-        impl From<f64> for TokenProperty {
-            fn from(value: f64) -> TokenProperty {
-                TokenProperty {
-                    value: Some(value.to_string()),
-                    kind: None,
-                }
-            }
-        }
-
-        impl From<bool> for TokenProperty {
-            fn from(value: bool) -> TokenProperty {
-                TokenProperty {
-                    value: Some(value.to_string()),
-                    kind: None,
-                }
-            }
-        }
-
-        impl From<String> for TokenProperty {
-            fn from(value: String) -> TokenProperty {
-                assert!(value.len() > 0, "String property value has length 0");
-                TokenProperty {
-                    value: Some(value.to_lowercase()),
-                    kind: None,
-                }
-            }
-        }
-
-
-        impl From<&pg_query::protobuf::Integer> for TokenProperty {
-            fn from(node: &pg_query::protobuf::Integer) -> TokenProperty {
-                TokenProperty {
-                    value: Some(node.ival.to_string()),
-                    kind: Some(SyntaxKind::Iconst)
-                }
-            }
-        }
-
-        impl From<&pg_query::protobuf::Boolean> for TokenProperty {
-            fn from(node: &pg_query::protobuf::Boolean) -> TokenProperty {
-                TokenProperty {
-                    value: Some(node.boolval.to_string()),
-                    kind: match node.boolval {
-                        true => Some(SyntaxKind::TrueP),
-                        false => Some(SyntaxKind::FalseP),
-                    }
-                }
-            }
-        }
-
-        impl From<SyntaxKind> for TokenProperty {
-            fn from(kind: SyntaxKind) -> TokenProperty {
-                TokenProperty {
-                    value: None,
-                    kind: Some(kind),
-                }
-            }
-        }
-
-        impl From<Token> for TokenProperty {
-            fn from(token: Token) -> TokenProperty {
-                TokenProperty {
-                    value: None,
-                    kind: Some(SyntaxKind::from(token)),
-                }
-            }
-        }
-
-        pub fn get_node_properties(node: &NodeEnum, parent: Option<&NodeEnum>) -> Vec<TokenProperty> {
-            let mut tokens: Vec<TokenProperty> = Vec::new();
-
-            match node {
-                #(NodeEnum::#node_identifiers(n) => {#node_handlers}),*,
-            };
-
-            tokens
-        }
-
-    }
-}
-
-fn node_identifiers(nodes: &[Node]) -> Vec<Ident> {
-    nodes
-        .iter()
-        .map(|node| format_ident!("{}", &node.name))
-        .collect()
-}
-
-fn node_handlers(nodes: &[Node]) -> Vec<TokenStream> {
-    nodes
-        .iter()
-        .map(|node| {
-            let string_property_handlers = string_property_handlers(node);
-            let custom_handlers = custom_handlers(node);
-            quote! {
-                #custom_handlers
-                #(#string_property_handlers)*
-            }
-        })
-        .collect()
-}
-
-fn custom_handlers(node: &Node) -> TokenStream {
-    match node.name.as_str() {
-        "SelectStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Select));
-            if n.distinct_clause.len() > 0 {
-                tokens.push(TokenProperty::from(Token::Distinct));
-            }
-            if n.values_lists.len() > 0 {
-                tokens.push(TokenProperty::from(Token::Values));
-            }
-            if n.from_clause.len() > 0 {
-                tokens.push(TokenProperty::from(Token::From));
-            }
-            if n.where_clause.is_some() {
-                tokens.push(TokenProperty::from(Token::Where));
-            }
-            if n.group_clause.len() > 0 {
-                tokens.push(TokenProperty::from(Token::GroupP));
-                tokens.push(TokenProperty::from(Token::By));
-            }
-            match n.op() {
-                protobuf::SetOperation::Undefined => {},
-                protobuf::SetOperation::SetopNone => {},
-                protobuf::SetOperation::SetopUnion => tokens.push(TokenProperty::from(Token::Union)),
-                protobuf::SetOperation::SetopIntersect => tokens.push(TokenProperty::from(Token::Intersect)),
-                protobuf::SetOperation::SetopExcept => tokens.push(TokenProperty::from(Token::Except)),
-                _ => panic!("Unknown SelectStmt op {:#?}", n.op()),
-            }
-            if n.all {
-                tokens.push(TokenProperty::from(Token::All));
-            }
-        },
-        "BoolExpr" => quote! {
-            match n.boolop() {
-                protobuf::BoolExprType::AndExpr => tokens.push(TokenProperty::from(Token::And)),
-                protobuf::BoolExprType::OrExpr => tokens.push(TokenProperty::from(Token::Or)),
-                protobuf::BoolExprType::NotExpr => tokens.push(TokenProperty::from(Token::Not)),
-                _ => panic!("Unknown BoolExpr {:#?}", n.boolop()),
-            }
-        },
-        "JoinExpr" => quote! {
-            tokens.push(TokenProperty::from(Token::Join));
-            tokens.push(TokenProperty::from(Token::On));
-            match n.jointype() {
-                protobuf::JoinType::JoinInner => tokens.push(TokenProperty::from(Token::InnerP)),
-                protobuf::JoinType::JoinLeft => tokens.push(TokenProperty::from(Token::Left)),
-                protobuf::JoinType::JoinFull => tokens.push(TokenProperty::from(Token::Full)),
-                protobuf::JoinType::JoinRight => tokens.push(TokenProperty::from(Token::Right)),
-                _ => panic!("Unknown JoinExpr jointype {:#?}", n.jointype()),
-            }
-
-        },
-        "ResTarget" => quote! {
-            if n.name.len() > 0 {
-                tokens.push(TokenProperty::from(Token::As));
-            }
-        },
-        "Integer" => quote! {
-            tokens.push(TokenProperty::from(n));
-        },
-        "DefElem" => quote! {
-            match n.defname.as_str() {
-                "location" => {
-                    tokens.push(TokenProperty::from(Token::Default));
-                },
-                "connection_limit" => {
-                    tokens.push(TokenProperty::from(Token::Limit));
-                    tokens.push(TokenProperty::from(Token::Iconst));
-                },
-                "owner" => {
-                    tokens.push(TokenProperty::from(Token::Owner));
-                }
-                _ => {}
-            }
-            match n.defaction() {
-                protobuf::DefElemAction::DefelemUnspec => tokens.push(TokenProperty::from(Token::Ascii61)),
-                _ => panic!("Unknown DefElem {:#?}", n.defaction()),
-            }
-        },
-        "Alias" => quote! {
-            tokens.push(TokenProperty::from(Token::As));
-        },
-        "CollateClause" => quote! {
-            tokens.push(TokenProperty::from(Token::Collate));
-        },
-        "AExpr" => quote! {
-            match n.kind() {
-                protobuf::AExprKind::AexprOp => {}, // do nothing
-                protobuf::AExprKind::AexprOpAny => tokens.push(TokenProperty::from(Token::Any)),
-                protobuf::AExprKind::AexprIn => tokens.push(TokenProperty::from(Token::InP)),
-                _ => panic!("Unknown AExpr kind {:#?}", n.kind()),
-            }
-        },
-        "WindowDef" => quote! {
-            if n.partition_clause.len() > 0 || n.order_clause.len() > 0 {
-                tokens.push(TokenProperty::from(Token::Window));
-                tokens.push(TokenProperty::from(Token::As));
-            }
-            if n.partition_clause.len() > 0 {
-                tokens.push(TokenProperty::from(Token::Partition));
-                tokens.push(TokenProperty::from(Token::By));
-            }
-        },
-        "Boolean" => quote! {
-            tokens.push(TokenProperty::from(n));
-        },
-        "AStar" => quote! {
-            tokens.push(TokenProperty::from(Token::Ascii42));
-        },
-        "FuncCall" => quote! {
-            if n.funcname.len() == 1 && n.args.len() == 0 {
-                // check if count(*)
-                if let Some(node) = &n.funcname[0].node {
-                    if let NodeEnum::String(n) = node {
-                        if n.sval == "count" {
-                            tokens.push(TokenProperty::from(Token::Ascii42));
-                        }
-                    }
-                }
-            }
-            if n.agg_filter.is_some() {
-                tokens.push(TokenProperty::from(Token::Filter));
-                tokens.push(TokenProperty::from(Token::Where));
-            }
-            if n.over.is_some() {
-                tokens.push(TokenProperty::from(Token::Over));
-            }
-        },
-        "SqlvalueFunction" => quote! {
-            match n.op() {
-                protobuf::SqlValueFunctionOp::SvfopCurrentRole => tokens.push(TokenProperty::from(Token::CurrentRole)),
-                protobuf::SqlValueFunctionOp::SvfopCurrentUser => tokens.push(TokenProperty::from(Token::CurrentUser)),
-                _ => panic!("Unknown SqlvalueFunction {:#?}", n.op()),
-            }
-        },
-        "SortBy" => quote! {
-            tokens.push(TokenProperty::from(Token::Order));
-            tokens.push(TokenProperty::from(Token::By));
-            match n.sortby_dir() {
-                protobuf::SortByDir::SortbyAsc => tokens.push(TokenProperty::from(Token::Asc)),
-                protobuf::SortByDir::SortbyDesc => tokens.push(TokenProperty::from(Token::Desc)),
-                _ => {}
-            }
-        },
-        "AConst" => quote! {
-            if n.isnull {
-                tokens.push(TokenProperty::from(Token::NullP));
-            }
-        },
-        "AlterTableStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Alter));
-            tokens.push(TokenProperty::from(Token::Table));
-        },
-        "AlterTableCmd" => quote! {
-            match n.subtype() {
-                protobuf::AlterTableType::AtColumnDefault => {
-                    tokens.push(TokenProperty::from(Token::Alter));
-                    tokens.push(TokenProperty::from(Token::Column));
-                    tokens.push(TokenProperty::from(Token::Set));
-                    tokens.push(TokenProperty::from(Token::Default));
-                },
-                protobuf::AlterTableType::AtAddConstraint => tokens.push(TokenProperty::from(Token::AddP)),
-                protobuf::AlterTableType::AtAlterColumnType => {
-                    tokens.push(TokenProperty::from(Token::Alter));
-                    tokens.push(TokenProperty::from(Token::Column));
-                    tokens.push(TokenProperty::from(Token::TypeP));
-                },
-                protobuf::AlterTableType::AtDropColumn => {
-                    tokens.push(TokenProperty::from(Token::Drop));
-                    tokens.push(TokenProperty::from(Token::Column));
-                },
-                _ => panic!("Unknown AlterTableCmd {:#?}", n.subtype()),
-            }
-        },
-        "VariableSetStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Set));
-            match n.kind() {
-                protobuf::VariableSetKind::VarSetValue => tokens.push(TokenProperty::from(Token::To)),
-                _ => panic!("Unknown VariableSetStmt {:#?}", n.kind()),
-            }
-        },
-        "CreatePolicyStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Create));
-            tokens.push(TokenProperty::from(Token::Policy));
-            tokens.push(TokenProperty::from(Token::On));
-            if n.roles.len() > 0 {
-                tokens.push(TokenProperty::from(Token::To));
-            }
-            if n.qual.is_some() {
-                tokens.push(TokenProperty::from(Token::Using));
-            }
-            if n.with_check.is_some() {
-                tokens.push(TokenProperty::from(Token::With));
-                tokens.push(TokenProperty::from(Token::Check));
-            }
-        },
-        "CopyStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Copy));
-            tokens.push(TokenProperty::from(Token::From));
-        },
-        "RenameStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Alter));
-            tokens.push(TokenProperty::from(Token::Table));
-            tokens.push(TokenProperty::from(Token::Rename));
-            tokens.push(TokenProperty::from(Token::To));
-        },
-        "Constraint" => quote! {
-            match n.contype() {
-                protobuf::ConstrType::ConstrNotnull => {
-                    tokens.push(TokenProperty::from(Token::Not));
-                    tokens.push(TokenProperty::from(Token::NullP));
-                },
-                protobuf::ConstrType::ConstrDefault => tokens.push(TokenProperty::from(Token::Default)),
-                protobuf::ConstrType::ConstrCheck => tokens.push(TokenProperty::from(Token::Check)),
-                protobuf::ConstrType::ConstrPrimary => {
-                    tokens.push(TokenProperty::from(Token::Primary));
-                    tokens.push(TokenProperty::from(Token::Key));
-                },
-                protobuf::ConstrType::ConstrForeign => tokens.push(TokenProperty::from(Token::References)),
-                protobuf::ConstrType::ConstrUnique => tokens.push(TokenProperty::from(Token::Unique)),
-                _ => panic!("Unknown Constraint {:#?}", n.contype()),
-            };
-            if n.options.len() > 0 {
-                tokens.push(TokenProperty::from(Token::With));
-            }
-        },
-        "PartitionSpec" => quote! {
-            tokens.push(TokenProperty::from(Token::Partition));
-            tokens.push(TokenProperty::from(Token::By));
-        },
-        "InsertStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Insert));
-            tokens.push(TokenProperty::from(Token::Into));
-        },
-        "DeleteStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::DeleteP));
-            tokens.push(TokenProperty::from(Token::From));
-            if n.where_clause.is_some() {
-                tokens.push(TokenProperty::from(Token::Where));
-            }
-            if n.using_clause.len() > 0 {
-                tokens.push(TokenProperty::from(Token::Using));
-            }
-        },
-        "ViewStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Create));
-            tokens.push(TokenProperty::from(Token::View));
-            if n.query.is_some() {
-                tokens.push(TokenProperty::from(Token::As));
-                // check if SelectStmt with WithClause with recursive set to true
-                if let Some(NodeEnum::SelectStmt(select_stmt)) = n.query.as_ref().and_then(|query| query.node.as_ref()) {
-                    if select_stmt.with_clause.is_some() && select_stmt.with_clause.as_ref().unwrap().recursive {
-                        tokens.push(TokenProperty::from(Token::Recursive));
-                    }
-                }
-            }
-            if n.replace {
-                tokens.push(TokenProperty::from(Token::Or));
-                tokens.push(TokenProperty::from(Token::Replace));
-            }
-            if let Some(n) = &n.view {
-                match n.relpersistence.as_str() {
-                    // Temporary
-                    "t" => tokens.push(TokenProperty::from(Token::Temporary)),
-                    _ => {},
-                }
-            }
-            match n.with_check_option() {
-                protobuf::ViewCheckOption::LocalCheckOption => {
-                    tokens.push(TokenProperty::from(Token::With));
-                    tokens.push(TokenProperty::from(Token::Local));
-                    tokens.push(TokenProperty::from(Token::Check));
-                    tokens.push(TokenProperty::from(Token::Option));
-                },
-                protobuf::ViewCheckOption::CascadedCheckOption => {
-                    tokens.push(TokenProperty::from(Token::With));
-                    tokens.push(TokenProperty::from(Token::Cascaded));
-                    tokens.push(TokenProperty::from(Token::Check));
-                    tokens.push(TokenProperty::from(Token::Option));
-                },
-                _ => {}
-            }
-        },
-        "CreateStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Create));
-            tokens.push(TokenProperty::from(Token::Table));
-            if n.tablespacename.len() > 0 {
-                tokens.push(TokenProperty::from(Token::Tablespace));
-            }
-            if n.options.len() > 0 {
-                tokens.push(TokenProperty::from(Token::With));
-            }
-            if n.if_not_exists {
-                tokens.push(TokenProperty::from(Token::IfP));
-                tokens.push(TokenProperty::from(Token::Not));
-                tokens.push(TokenProperty::from(Token::Exists));
-            }
-            if n.partbound.is_some() {
-                tokens.push(TokenProperty::from(Token::Partition));
-                tokens.push(TokenProperty::from(Token::Of));
-                tokens.push(TokenProperty::from(Token::For));
-                tokens.push(TokenProperty::from(Token::Values));
-            }
-            if let Some(n) = &n.relation {
-                match n.relpersistence.as_str() {
-                    // Unlogged
-                    "u" => tokens.push(TokenProperty::from(Token::Unlogged)),
-                    // Temporary
-                    "t" => tokens.push(TokenProperty::from(Token::Temporary)),
-                    _ => {},
-                }
-                if n.inh {
-                    tokens.push(TokenProperty::from(Token::Inherits));
-                }
-            }
-        },
-        "TableLikeClause" => quote! {
-            tokens.push(TokenProperty::from(Token::Like));
-            // CREATE_TABLE_LIKE_ALL
-            if n.options == 0x7FFFFFFF {
-                tokens.push(TokenProperty::from(Token::Including));
-                tokens.push(TokenProperty::from(Token::All));
-            } else {
-                tokens.push(TokenProperty::from(Token::Excluding));
-                tokens.push(TokenProperty::from(Token::All));
-            }
-        },
-        "TransactionStmt" => quote! {
-            match n.kind() {
-                protobuf::TransactionStmtKind::TransStmtBegin => tokens.push(TokenProperty::from(Token::BeginP)),
-                protobuf::TransactionStmtKind::TransStmtCommit => tokens.push(TokenProperty::from(Token::Commit)),
-                _ => panic!("Unknown TransactionStmt {:#?}", n.kind())
-            }
-        },
-        "PartitionBoundSpec" => quote! {
-            tokens.push(TokenProperty::from(Token::From));
-            tokens.push(TokenProperty::from(Token::To));
-        },
-        "CaseExpr" => quote! {
-            tokens.push(TokenProperty::from(Token::Case));
-            tokens.push(TokenProperty::from(Token::EndP));
-            if n.defresult.is_some() {
-                tokens.push(TokenProperty::from(Token::Else));
-            }
-        },
-        "NullTest" => quote! {
-            match n.nulltesttype() {
-                protobuf::NullTestType::IsNull => tokens.push(TokenProperty::from(Token::Is)),
-                protobuf::NullTestType::IsNotNull => {
-                    tokens.push(TokenProperty::from(Token::Is));
-                    tokens.push(TokenProperty::from(Token::Not));
-                },
-                _ => panic!("Unknown NullTest {:#?}", n.nulltesttype()),
-            }
-            tokens.push(TokenProperty::from(Token::NullP));
-        },
-        "CreateFunctionStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Create));
-            if n.is_procedure {
-                tokens.push(TokenProperty::from(Token::Procedure));
-            } else {
-                tokens.push(TokenProperty::from(Token::Function));
-            }
-            if n.replace {
-                tokens.push(TokenProperty::from(Token::Or));
-                tokens.push(TokenProperty::from(Token::Replace));
-            }
-            if let Some(return_type) = &n.return_type {
-                tokens.push(TokenProperty::from(Token::Returns));
-                if return_type.setof {
-                    tokens.push(TokenProperty::from(Token::Setof));
-                }
-            }
-            for option in &n.options {
-                if let Some(NodeEnum::DefElem(node)) = &option.node {
-                    if node.defname == "strict" {
-                        if let Some(NodeEnum::Boolean(node)) =
-                            node.arg.as_ref().and_then(|arg| arg.node.as_ref())
-                        {
-                            if node.boolval {
-                                tokens.push(TokenProperty::from(Token::NullP));
-                                tokens.push(TokenProperty::from(Token::On));
-                                tokens.push(TokenProperty::from(Token::NullP));
-                                tokens.push(TokenProperty::from(Token::InputP));
-                            } else {
-                                tokens.push(TokenProperty::from(Token::On));
-                                tokens.push(TokenProperty::from(Token::NullP));
-                                tokens.push(TokenProperty::from(Token::InputP));
-                            }
-                        }
-                    }
-                }
-            }
-        },
-        "FunctionParameter" => quote! {
-            match n.mode() {
-                protobuf::FunctionParameterMode::FuncParamIn => tokens.push(TokenProperty::from(Token::InP)),
-                protobuf::FunctionParameterMode::FuncParamOut => tokens.push(TokenProperty::from(Token::OutP)),
-                protobuf::FunctionParameterMode::FuncParamInout => tokens.push(TokenProperty::from(Token::Inout)),
-                protobuf::FunctionParameterMode::FuncParamVariadic => tokens.push(TokenProperty::from(Token::Variadic)),
-                // protobuf::FunctionParameterMode::FuncParamTable => tokens.push(TokenProperty::from(Token::Table)),
-                protobuf::FunctionParameterMode::FuncParamDefault => {}, // do nothing
-                _ => panic!("Unknown FunctionParameter {:#?}", n.mode()),
-            };
-            if n.defexpr.is_some() {
-                tokens.push(TokenProperty::from(Token::Default));
-            }
-        },
-        "NamedArgExpr" => quote! {
-            // =>
-            tokens.push(TokenProperty::from(Token::EqualsGreater));
-        },
-        "CaseWhen" => quote! {
-            tokens.push(TokenProperty::from(Token::When));
-            tokens.push(TokenProperty::from(Token::Then));
-        },
-        "TypeCast" => quote! {
-            tokens.push(TokenProperty::from(Token::Typecast));
-        },
-        "CreateDomainStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Create));
-            tokens.push(TokenProperty::from(Token::DomainP));
-            if n.type_name.is_some() {
-                tokens.push(TokenProperty::from(Token::As));
-            }
-        },
-        "List" => quote! {
-            if parent.is_some() {
-                // if parent is `DefineStmt`, we need to check whether an ORDER BY needs to be added
-                if let NodeEnum::DefineStmt(define_stmt) = parent.unwrap() {
-                    // there *seems* to be an integer node in the last position of the DefineStmt args that
-                    // defines whether the list contains an order by statement
-                    let integer = define_stmt.args.last()
-                        .and_then(|node| node.node.as_ref())
-                        .and_then(|node| if let NodeEnum::Integer(n) = node { Some(n.ival) } else { None });
-                    if integer.is_none() {
-                        panic!("DefineStmt of type ObjectAggregate has no integer node in last position of args");
-                    }
-                    // if the integer is 1, then there is an order by statement
-                    // we add it to the `List` node because that seems to make most sense based off the grammar definition
-                    // ref: https://github.com/postgres/postgres/blob/REL_15_STABLE/src/backend/parser/gram.y#L8355
-                    // ```
-                    //  aggr_args:
-                    //      | '(' aggr_args_list ORDER BY aggr_args_list ')'
-                    // ```
-                    if integer.unwrap() == 1 {
-                        tokens.push(TokenProperty::from(Token::Order));
-                        tokens.push(TokenProperty::from(Token::By));
-                    }
-                }
-            }
-        },
-        "DefineStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Create));
-            if n.replace {
-                tokens.push(TokenProperty::from(Token::Or));
-                tokens.push(TokenProperty::from(Token::Replace));
-            }
-            match n.kind() {
-                protobuf::ObjectType::ObjectAggregate => {
-                    tokens.push(TokenProperty::from(Token::Aggregate));
-
-                    // n.args is always an array with two nodes
-                    assert_eq!(n.args.len(), 2, "DefineStmt of type ObjectAggregate does not have exactly 2 args");
-                    // the first is either a List or a Node { node: None }
-
-                    if let Some(node) = &n.args.first() {
-                        if node.node.is_none() {
-                            // if first element is a Node { node: None }, then it's "*"
-                            tokens.push(TokenProperty::from(Token::Ascii42));
-                        }
-                    }
-                    // if its a list, we handle it in the handler for `List`
-                },
-                protobuf::ObjectType::ObjectType => {
-                    tokens.push(TokenProperty::from(Token::TypeP));
-                },
-                _ => panic!("Unknown DefineStmt {:#?}", n.kind()),
-            }
-        },
-        "CreateSchemaStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Create));
-            tokens.push(TokenProperty::from(Token::Schema));
-            if n.if_not_exists {
-                tokens.push(TokenProperty::from(Token::IfP));
-                tokens.push(TokenProperty::from(Token::Not));
-                tokens.push(TokenProperty::from(Token::Exists));
-            }
-            if n.authrole.is_some() {
-                tokens.push(TokenProperty::from(Token::Authorization));
-            }
-        },
-        "CreateEnumStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Create));
-            tokens.push(TokenProperty::from(Token::TypeP));
-            tokens.push(TokenProperty::from(Token::As));
-            tokens.push(TokenProperty::from(Token::EnumP));
-        },
-        "CreateCastStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Create));
-            tokens.push(TokenProperty::from(Token::Cast));
-            tokens.push(TokenProperty::from(Token::As));
-            if n.inout {
-                tokens.push(TokenProperty::from(Token::With));
-                tokens.push(TokenProperty::from(Token::Inout));
-            } else if n.func.is_some() {
-                tokens.push(TokenProperty::from(Token::With));
-                tokens.push(TokenProperty::from(Token::Function));
-            } else {
-                tokens.push(TokenProperty::from(Token::Without));
-                tokens.push(TokenProperty::from(Token::Function));
-            }
-            match n.context() {
-                protobuf::CoercionContext::CoercionImplicit => {
-                    tokens.push(TokenProperty::from(Token::As));
-                    tokens.push(TokenProperty::from(Token::ImplicitP));
-                },
-                protobuf::CoercionContext::CoercionAssignment => {
-                    tokens.push(TokenProperty::from(Token::As));
-                    tokens.push(TokenProperty::from(Token::Assignment));
-                },
-                protobuf::CoercionContext::CoercionPlpgsql => {},
-                protobuf::CoercionContext::CoercionExplicit => {},
-                _ => panic!("Unknown CreateCastStmt {:#?}", n.context())
-            }
-        },
-        "CreateRangeStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Create));
-            tokens.push(TokenProperty::from(Token::TypeP));
-            tokens.push(TokenProperty::from(Token::As));
-            tokens.push(TokenProperty::from(Token::Range));
-        },
-        "IndexStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Create));
-            if n.unique {
-                tokens.push(TokenProperty::from(Token::Unique));
-            }
-            tokens.push(TokenProperty::from(Token::Index));
-            if n.concurrent {
-                tokens.push(TokenProperty::from(Token::Concurrently));
-            }
-            if n.if_not_exists {
-                tokens.push(TokenProperty::from(Token::IfP));
-                tokens.push(TokenProperty::from(Token::Not));
-                tokens.push(TokenProperty::from(Token::Exists));
-            }
-            tokens.push(TokenProperty::from(Token::On));
-            // access_method is btree by default
-            if n.access_method.len() > 0 {
-                tokens.push(TokenProperty::from(Token::Using));
-            }
-            if n.index_including_params.len() > 0 {
-                tokens.push(TokenProperty::from(Token::Include));
-            }
-            if n.options.len() > 0 {
-                tokens.push(TokenProperty::from(Token::With));
-            }
-            // table_space is an empty string by default
-            if n.table_space.len() > 0 {
-                tokens.push(TokenProperty::from(Token::Tablespace));
-            }
-        },
-        "IndexElem" => quote! {
-            if n.collation.len() > 0 {
-                tokens.push(TokenProperty::from(Token::Collate));
-            }
-            match n.nulls_ordering() {
-                protobuf::SortByNulls::SortbyNullsDefault => {},
-                protobuf::SortByNulls::SortbyNullsFirst => {
-                    tokens.push(TokenProperty::from(Token::NullsP));
-                    tokens.push(TokenProperty::from(Token::FirstP));
-                },
-                protobuf::SortByNulls::SortbyNullsLast => {
-                    tokens.push(TokenProperty::from(Token::NullsP));
-                    tokens.push(TokenProperty::from(Token::LastP));
-                },
-                _ => panic!("Unknown IndexElem {:#?}", n.nulls_ordering()),
-            }
-        },
-        "CreateTableSpaceStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Create));
-            tokens.push(TokenProperty::from(Token::Tablespace));
-            tokens.push(TokenProperty::from(Token::Location));
-            if n.owner.is_some() {
-                tokens.push(TokenProperty::from(Token::Owner));
-            }
-            if n.options.len() > 0 {
-                tokens.push(TokenProperty::from(Token::With));
-            }
-        },
-        "CreatePublicationStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Create));
-            tokens.push(TokenProperty::from(Token::Publication));
-            if n.for_all_tables {
-                tokens.push(TokenProperty::from(Token::For));
-                tokens.push(TokenProperty::from(Token::All));
-                tokens.push(TokenProperty::from(Token::Tables));
-            }
-            if let Some(n) = n.options.first() {
-                tokens.push(TokenProperty::from(Token::With));
-            }
-            if let Some(n) = n.pubobjects.first() {
-                tokens.push(TokenProperty::from(Token::For));
-                if let Some(NodeEnum::PublicationObjSpec(n)) = &n.node {
-                    match n.pubobjtype() {
-                        protobuf::PublicationObjSpecType::PublicationobjTable => {
-                            tokens.push(TokenProperty::from(Token::Table));
-                        },
-                        protobuf::PublicationObjSpecType::PublicationobjTablesInSchema => {
-                            tokens.push(TokenProperty::from(Token::Tables));
-                            tokens.push(TokenProperty::from(Token::InP));
-                            tokens.push(TokenProperty::from(Token::Schema));
-                        },
-                        _ => panic!("Unknown CreatePublicationStmt {:#?}", n.pubobjtype())
-                    }
-                }
-            }
-            if let Some(n) = n.pubobjects.last() {
-                if let Some(NodeEnum::PublicationObjSpec(n)) = &n.node {
-                    match n.pubobjtype() {
-                        protobuf::PublicationObjSpecType::PublicationobjTablesInSchema => {
-                            tokens.push(TokenProperty::from(Token::Tables));
-                            tokens.push(TokenProperty::from(Token::InP));
-                            tokens.push(TokenProperty::from(Token::Schema));
-                        },
-                        _ => {}
-                    }
-                }
-            }
-        },
-        "PublicationTable" => quote! {
-            if n.where_clause.is_some() {
-                tokens.push(TokenProperty::from(Token::Where));
-            }
-        },
-        "BooleanTest" => quote! {
-            match n.booltesttype() {
-                protobuf::BoolTestType::IsTrue => {
-                    tokens.push(TokenProperty::from(Token::Is));
-                    tokens.push(TokenProperty::from(Token::TrueP));
-                },
-                protobuf::BoolTestType::IsNotTrue => {
-                    tokens.push(TokenProperty::from(Token::Is));
-                    tokens.push(TokenProperty::from(Token::Not));
-                    tokens.push(TokenProperty::from(Token::TrueP));
-                },
-                protobuf::BoolTestType::IsFalse => {
-                    tokens.push(TokenProperty::from(Token::Is));
-                    tokens.push(TokenProperty::from(Token::FalseP));
-                },
-                protobuf::BoolTestType::IsNotFalse => {
-                    tokens.push(TokenProperty::from(Token::Is));
-                    tokens.push(TokenProperty::from(Token::Not));
-                    tokens.push(TokenProperty::from(Token::FalseP));
-                },
-                _ => panic!("Unknown BooleanTest {:#?}", n.booltesttype()),
-            }
-        },
-        "CompositeTypeStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Create));
-            tokens.push(TokenProperty::from(Token::TypeP));
-            tokens.push(TokenProperty::from(Token::As));
-        },
-        "CreatedbStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Create));
-            tokens.push(TokenProperty::from(Token::Database));
-        },
-        "CreateExtensionStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Create));
-            tokens.push(TokenProperty::from(Token::Extension));
-            if n.if_not_exists {
-                tokens.push(TokenProperty::from(Token::IfP));
-                tokens.push(TokenProperty::from(Token::Not));
-                tokens.push(TokenProperty::from(Token::Exists));
-            }
-        },
-        "CreateConversionStmt" => quote! {
-            tokens.push(TokenProperty::from(Token::Create));
-            if n.def {
-                tokens.push(TokenProperty::from(Token::Default));
-            }
-            tokens.push(TokenProperty::from(Token::ConversionP));
-            if n.for_encoding_name.len() > 0 {
-                tokens.push(TokenProperty::from(Token::For));
-            }
-            if n.to_encoding_name.len() > 0 {
-                tokens.push(TokenProperty::from(Token::To));
-            }
-            if n.func_name.len() == 1 {
-                tokens.push(TokenProperty::from(Token::From));
-            } else if n.func_name.len() > 1 {
-                panic!("Encountered multiple defined func_name elements in CreateConversionStmt");
-            }
-        },
-        "CreateTransformStmt" => quote! {
{ - tokens.push(TokenProperty::from(Token::Create)); - if n.replace { - tokens.push(TokenProperty::from(Token::Or)); - tokens.push(TokenProperty::from(Token::Replace)); - } - tokens.push(TokenProperty::from(Token::Transform)); - if n.type_name.is_some() { - tokens.push(TokenProperty::from(Token::For)); - } - tokens.push(TokenProperty::from(Token::Language)); - if n.fromsql.is_some() { - tokens.push(TokenProperty::from(Token::From)); - tokens.push(TokenProperty::from(Token::SqlP)); - tokens.push(TokenProperty::from(Token::With)); - tokens.push(TokenProperty::from(Token::Function)); - } - if n.tosql.is_some() { - tokens.push(TokenProperty::from(Token::To)); - tokens.push(TokenProperty::from(Token::SqlP)); - tokens.push(TokenProperty::from(Token::With)); - tokens.push(TokenProperty::from(Token::Function)); - } - }, - "TypeName" => quote! { - let names = n.names - .iter() - .filter_map(|n| if let Some(NodeEnum::String(s)) = &n.node { Some(s.sval.clone()) } else { None }) - .collect::<Vec<_>>(); - - if names.len() == 2 && names[0] == "pg_catalog" { - match names[1].as_str() { - "float8" => { - tokens.push(TokenProperty::from(Token::DoubleP)); - tokens.push(TokenProperty::from(Token::Precision)); - }, - "interval" => { - // Adapted from https://github.com/postgres/postgres/blob/REL_15_STABLE/src/backend/utils/adt/timestamp.c#L1103 - const MONTH: i32 = 1; - const YEAR: i32 = 2; - const DAY: i32 = 3; - const HOUR: i32 = 10; - const MINUTE: i32 = 11; - const SECOND: i32 = 12; - - let fields = &n.typmods.first() - .and_then(|node| node.node.as_ref()) - .and_then(|node| if let NodeEnum::AConst(n) = node { n.val.clone() } else { None }) - .and_then(|node| if let protobuf::a_const::Val::Ival(n) = node { Some(n.ival) } else { None }); - - if let Some(fields) = fields { - match fields.clone() { - // YEAR TO MONTH - i if i == 1 << YEAR | 1 << MONTH => { - tokens.push(TokenProperty::from(Token::To)); - tokens.push(TokenProperty::from(Token::MonthP)); - }, - // DAY TO HOUR - i if i == 1 << DAY | 1 << HOUR => { - tokens.push(TokenProperty::from(Token::To)); - tokens.push(TokenProperty::from(Token::HourP)); - }, - // DAY TO MINUTE - i if i == 1 << DAY | 1 << HOUR | 1 << MINUTE => { - tokens.push(TokenProperty::from(Token::To)); - tokens.push(TokenProperty::from(Token::MinuteP)); - }, - // DAY TO SECOND - i if i == 1 << DAY | 1 << HOUR | 1 << MINUTE | 1 << SECOND => { - tokens.push(TokenProperty::from(Token::To)); - tokens.push(TokenProperty::from(Token::SecondP)); - }, - // HOUR TO MINUTE - i if i == 1 << HOUR | 1 << MINUTE => { - tokens.push(TokenProperty::from(Token::To)); - tokens.push(TokenProperty::from(Token::MinuteP)); - }, - // HOUR TO SECOND - i if i == 1 << HOUR | 1 << MINUTE | 1 << SECOND => { - tokens.push(TokenProperty::from(Token::To)); - tokens.push(TokenProperty::from(Token::SecondP)); - }, - // MINUTE TO SECOND - i if i == 1 << MINUTE | 1 << SECOND => { - tokens.push(TokenProperty::from(Token::To)); - tokens.push(TokenProperty::from(Token::SecondP)); - }, - _ => panic!("Unknown Interval fields {:#?}", fields), - } - } - }, - "timestamptz" => { - tokens.push(TokenProperty::from(Token::Timestamp)); - tokens.push(TokenProperty::from(Token::With)); - tokens.push(TokenProperty::from(Token::Time)); - tokens.push(TokenProperty::from(Token::Zone)); - } - "timetz" => { - tokens.push(TokenProperty::from(Token::Time)); - tokens.push(TokenProperty::from(Token::With)); - tokens.push(TokenProperty::from(Token::Time)); - tokens.push(TokenProperty::from(Token::Zone)); - } - _ => {} - } - } - }, - "TruncateStmt" =>
quote! { - tokens.push(TokenProperty::from(Token::Truncate)); - tokens.push(TokenProperty::from(Token::Table)); - if n.restart_seqs { - tokens.push(TokenProperty::from(Token::Restart)); - tokens.push(TokenProperty::from(Token::IdentityP)); - } else { - tokens.push(TokenProperty::from(Token::ContinueP)); - tokens.push(TokenProperty::from(Token::IdentityP)); - } - match n.behavior { - // DropRestrict - 1 => tokens.push(TokenProperty::from(Token::Restrict)), - // DropCascade - 2 => tokens.push(TokenProperty::from(Token::Cascade)), - _ => {} - } - }, - _ => quote! {}, - } -} - -fn string_property_handlers(node: &Node) -> Vec<TokenStream> { - node.fields - .iter() - .filter_map(|field| { - if field.repeated { - return None; - } - let field_name = format_ident!("{}", field.name.as_str()); - match field.field_type { - // just handle string values for now - FieldType::String => Some(quote! { - // most string values are never None, but an empty string - if n.#field_name.len() > 0 { - tokens.push(TokenProperty::from(n.#field_name.to_owned())); - } - }), - _ => None, - } - }) - .collect() -} diff --git a/crates/pgt_query_ext_codegen/src/get_nodes.rs b/crates/pgt_query_ext_codegen/src/get_nodes.rs deleted file mode 100644 index e0381331..00000000 --- a/crates/pgt_query_ext_codegen/src/get_nodes.rs +++ /dev/null @@ -1,141 +0,0 @@ -use pgt_query_proto_parser::{FieldType, Node, ProtoFile}; -use proc_macro2::{Ident, TokenStream}; -use quote::{format_ident, quote}; - -pub fn get_nodes_mod(proto_file: &ProtoFile) -> proc_macro2::TokenStream { - let manual_node_names = manual_node_names(); - - let node_identifiers = node_identifiers(&proto_file.nodes, &manual_node_names); - let node_handlers = node_handlers(&proto_file.nodes, &manual_node_names); - - quote! { - #[derive(Debug, Clone)] - pub struct Node { - pub inner: NodeEnum, - pub depth: usize, - pub properties: Vec<TokenProperty>, - pub location: Option<i32>, - } - - /// Returns all children of the node, recursively - /// location is resolved manually - pub fn get_nodes(node: &NodeEnum) -> StableGraph<Node, ()> { - let mut g = StableGraph::<Node, ()>::new(); - - let root_node_idx = g.add_node(Node { - inner: node.to_owned(), - depth: 0, - properties: get_node_properties(node, None), - location: get_location(node), - }); - - // Parent node idx, Node, depth - let mut stack: VecDeque<(NodeIndex, NodeEnum, usize)> = - VecDeque::from(vec![(root_node_idx, node.to_owned(), 0)]); - while !stack.is_empty() { - let (parent_idx, node, depth) = stack.pop_front().unwrap(); - let current_depth = depth + 1; - let mut handle_child = |c: NodeEnum| { - if match &c { - // all "simple nodes" are not handled individually but merged with their parent - NodeEnum::String(n) => true, - NodeEnum::Integer(n) => true, - NodeEnum::Float(n) => true, - NodeEnum::Boolean(n) => true, - NodeEnum::BitString(n) => true, - _ => false - } { - g[parent_idx].properties.extend(get_node_properties(&c, Some(&node))); - } else { - let node_idx = g.add_node(Node { - depth: current_depth, - properties: get_node_properties(&c, Some(&node)), - location: get_location(&c), - inner: c.to_owned(), - }); - g.add_edge(parent_idx, node_idx, ()); - stack.push_back((node_idx, c.to_owned(), current_depth)); - } - }; - match &node { - // `AConst` is the only node with a `one of` property, so we handle it manually - // if you need to handle other nodes manually, add them to the `manual_node_names` function below - NodeEnum::AConst(n) => { - if n.val.is_some() { - handle_child(match n.val.to_owned().unwrap() { - pg_query::protobuf::a_const::Val::Ival(v) =>
NodeEnum::Integer(v), - pg_query::protobuf::a_const::Val::Fval(v) => NodeEnum::Float(v), - pg_query::protobuf::a_const::Val::Boolval(v) => NodeEnum::Boolean(v), - pg_query::protobuf::a_const::Val::Sval(v) => NodeEnum::String(v), - pg_query::protobuf::a_const::Val::Bsval(v) => NodeEnum::BitString(v), - }); - } - } - #(NodeEnum::#node_identifiers(n) => {#node_handlers}),*, - }; - } - g - } - } -} - -fn manual_node_names() -> Vec<&'static str> { - vec!["AConst"] -} - -fn node_identifiers(nodes: &[Node], exclude_nodes: &[&str]) -> Vec<Ident> { - nodes - .iter() - .filter(|node| !exclude_nodes.contains(&node.name.as_str())) - .map(|node| format_ident!("{}", &node.name)) - .collect() -} - -fn node_handlers(nodes: &[Node], exclude_nodes: &[&str]) -> Vec<TokenStream> { - nodes - .iter() - .filter(|node| !exclude_nodes.contains(&node.name.as_str())) - .map(|node| { - let property_handlers = property_handlers(node); - quote! { - #(#property_handlers)* - } - }) - .collect() -} - -fn property_handlers(node: &Node) -> Vec<TokenStream> { - node.fields - .iter() - .filter_map(|field| { - let field_name = format_ident!("{}", field.name.as_str()); - if field.field_type == FieldType::Node && field.repeated { - Some(quote! { - n.#field_name - .iter() - .for_each(|x| if x.node.is_some() { - handle_child(x.node.as_ref().unwrap().to_owned()); - }); - }) - } else if field.field_type == FieldType::Node && !field.is_one_of { - if field.node_name == Some("Node".to_owned()) { - Some(quote! { - if n.#field_name.is_some() { - handle_child(n.#field_name.to_owned().unwrap().node.unwrap()); - } - }) - } else { - let enum_variant_name = - format_ident!("{}", field.enum_variant_name.as_ref().unwrap().as_str()); - Some(quote! { - if n.#field_name.is_some() { - handle_child(NodeEnum::#enum_variant_name(n.#field_name.to_owned().unwrap())); - } - }) - } - } else { - None - } - }) - .collect() -}
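For context on what the deleted `get_nodes` codegen produced: a minimal consumer sketch. Only the graph shape (a petgraph `StableGraph` whose weights are `Node { inner, depth, properties, location }`) comes from the code above; the `pg_query` parsing calls and the assumption that the generated `get_nodes` is in scope (it was emitted into the downstream crate by this proc macro) are illustrative.

use pg_query::NodeEnum;

// Sketch only: walk the node graph the deleted codegen used to emit.
fn print_statement_nodes(sql: &str) -> Result<(), pg_query::Error> {
    let parsed = pg_query::parse(sql)?;
    // Hypothetical extraction of the root NodeEnum; field names follow the
    // pg_query protobuf types used above.
    let root: NodeEnum = parsed.protobuf.stmts[0]
        .stmt
        .as_ref()
        .and_then(|s| s.node.clone())
        .expect("non-empty statement");

    // `get_nodes` is the generated function shown above.
    let graph = get_nodes(&root);
    for idx in graph.node_indices() {
        let node = &graph[idx];
        println!("depth {}: {} properties", node.depth, node.properties.len());
    }
    Ok(())
}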
diff --git a/crates/pgt_query_ext_codegen/src/lib.rs b/crates/pgt_query_ext_codegen/src/lib.rs deleted file mode 100644 index c4f39c0e..00000000 --- a/crates/pgt_query_ext_codegen/src/lib.rs +++ /dev/null @@ -1,48 +0,0 @@ -mod get_location; -mod get_node_properties; -mod get_nodes; -mod node_iterator; - -use get_location::get_location_mod; -use get_node_properties::get_node_properties_mod; -use get_nodes::get_nodes_mod; -use node_iterator::node_iterator_mod; -use pgt_query_proto_parser::ProtoParser; -use quote::quote; -use std::{env, path, path::Path}; - -#[proc_macro] -pub fn codegen(_input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let parser = ProtoParser::new(&proto_file_path()); - let proto_file = parser.parse(); - - let get_location = get_location_mod(&proto_file); - let get_node_properties = get_node_properties_mod(&proto_file); - let get_nodes = get_nodes_mod(&proto_file); - let iterator = node_iterator_mod(&proto_file); - - quote! { - use pgt_lexer::SyntaxKind; - use std::collections::VecDeque; - use pg_query::{protobuf, protobuf::ScanToken, protobuf::Token, NodeEnum, NodeRef}; - use std::cmp::{min, Ordering}; - use std::fmt::{Display, Formatter}; - use petgraph::stable_graph::{StableGraph}; - use petgraph::graph::{NodeIndex}; - - #get_location - #get_node_properties - #get_nodes - #iterator - } - .into() -} - -fn proto_file_path() -> path::PathBuf { - Path::new(env!("CARGO_MANIFEST_DIR")) - .ancestors() - .nth(2) - .unwrap() - .join("libpg_query/protobuf/pg_query.proto") - .to_path_buf() -} diff --git a/crates/pgt_query_ext_codegen/src/node_iterator.rs b/crates/pgt_query_ext_codegen/src/node_iterator.rs deleted file mode 100644 index 526966df..00000000 --- a/crates/pgt_query_ext_codegen/src/node_iterator.rs +++ /dev/null @@ -1,123 +0,0 @@ -use pgt_query_proto_parser::{FieldType, Node, ProtoFile}; -use proc_macro2::{Ident, TokenStream}; -use quote::{format_ident, quote}; - -pub fn node_iterator_mod(proto_file: &ProtoFile) -> proc_macro2::TokenStream { - let manual_node_names = manual_node_names(); - - let node_identifiers = node_identifiers(&proto_file.nodes, &manual_node_names); - let node_handlers = node_handlers(&proto_file.nodes, &manual_node_names); - - quote! { - #[derive(Debug, Clone)] - pub struct ChildrenIterator { - stack: VecDeque<(NodeEnum, usize)>, - nodes: Vec<NodeEnum>, - } - - impl ChildrenIterator { - pub fn new(root: NodeEnum) -> Self { - Self { - stack: VecDeque::from(vec![(root, 0)]), - nodes: Vec::new(), - } - } - } - - impl Iterator for ChildrenIterator { - type Item = NodeEnum; - - fn next(&mut self) -> Option<Self::Item> { - if self.stack.is_empty() { - return None; - } - - let (node, depth) = self.stack.pop_front().unwrap(); - - let current_depth = depth + 1; - - match &node { - // `AConst` is the only node with a `one of` property, so we handle it manually - // if you need to handle other nodes manually, add them to the `manual_node_names` function below - NodeEnum::AConst(n) => { - // if n.val.is_some() { - // let new_node = match n.val.as_ref().unwrap() { - // pg_query::protobuf::a_const::Val::Ival(v) => Box::new(NodeEnum::Integer(v.clone())), - // pg_query::protobuf::a_const::Val::Fval(v) => Box::new(NodeEnum::Float(v.clone())), - // pg_query::protobuf::a_const::Val::Boolval(v) => Box::new(NodeEnum::Boolean(v.clone())), - // pg_query::protobuf::a_const::Val::Sval(v) => Box::new(NodeEnum::String(v.clone())), - // pg_query::protobuf::a_const::Val::Bsval(v) => Box::new(NodeEnum::BitString(v.clone())), - // }; - // self.stack.push_back((&new_node, current_depth)); - // self.boxed_nodes.push(new_node); - // } - } - #(NodeEnum::#node_identifiers(n) => {#node_handlers}),*, - }; - - Some(node) - } - } - } -} - -fn manual_node_names() -> Vec<&'static str> { - vec!["AConst"] -} - -fn node_identifiers(nodes: &[Node], exclude_nodes: &[&str]) -> Vec<Ident> { - nodes - .iter() - .filter(|node| !exclude_nodes.contains(&node.name.as_str())) - .map(|node| format_ident!("{}", &node.name)) - .collect() -} - -fn node_handlers(nodes: &[Node], exclude_nodes: &[&str]) -> Vec<TokenStream> { - nodes - .iter() - .filter(|node| !exclude_nodes.contains(&node.name.as_str())) - .map(|node| { - let property_handlers = property_handlers(node); - quote! { - #(#property_handlers)* - } - }) - .collect() -} - -fn property_handlers(node: &Node) -> Vec<TokenStream> { - node.fields - .iter() - .filter_map(|field| { - let field_name = format_ident!("{}", field.name.as_str()); - if field.field_type == FieldType::Node && field.repeated { - Some(quote!
{ - n.#field_name - .iter() - .for_each(|x| if x.node.is_some() { - self.stack.push_back((x.node.as_ref().unwrap().to_owned(), current_depth)); - }); - }) - } else if field.field_type == FieldType::Node && !field.is_one_of { - if field.node_name == Some("Node".to_owned()) { - Some(quote! { - if n.#field_name.is_some() { - self.stack.push_back((n.#field_name.to_owned().unwrap().node.unwrap(), current_depth)); - } - }) - } else { - let enum_variant_name = - format_ident!("{}", field.enum_variant_name.as_ref().unwrap().as_str()); - Some(quote! { - if n.#field_name.is_some() { - self.stack.push_back((NodeEnum::#enum_variant_name(n.#field_name.to_owned().unwrap()), current_depth)); - } - }) - } - } else { - None - } - }) - .collect() -} diff --git a/crates/pgt_statement_splitter/src/diagnostics.rs b/crates/pgt_statement_splitter/src/diagnostics.rs index bcff6e80..d543d4e5 100644 --- a/crates/pgt_statement_splitter/src/diagnostics.rs +++ b/crates/pgt_statement_splitter/src/diagnostics.rs @@ -1,6 +1,9 @@ use pgt_diagnostics::{Diagnostic, MessageAndDescription}; +use pgt_lexer::{LexDiagnostic, Lexed}; use pgt_text_size::TextRange; +use crate::splitter::SplitError; + /// A specialized diagnostic for the statement splitter parser. /// /// Parser diagnostics are always **errors**. @@ -23,3 +26,22 @@ impl SplitDiagnostic { } } } + +impl From<LexDiagnostic> for SplitDiagnostic { + fn from(lex_diagnostic: LexDiagnostic) -> Self { + Self { + span: Some(lex_diagnostic.span), + message: lex_diagnostic.message, + } + } +} + +impl SplitDiagnostic { + pub fn from_split_error(split_error: SplitError, lexed: &Lexed) -> Self { + let range = lexed.range(split_error.token); + Self { + span: Some(range), + message: MessageAndDescription::from(split_error.msg), + } + } +} diff --git a/crates/pgt_statement_splitter/src/lib.rs b/crates/pgt_statement_splitter/src/lib.rs index c53ae78c..de028336 100644 --- a/crates/pgt_statement_splitter/src/lib.rs +++ b/crates/pgt_statement_splitter/src/lib.rs @@ -2,19 +2,40 @@ //! //! This crate provides a function to split a SQL source string into individual statements.
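A minimal sketch of consuming the reworked API that follows. `split`, `SplitResult.ranges`, and `SplitResult.errors` come from this patch; the `main` scaffolding and the sample SQL are illustrative only. Note that `split` is now infallible: lexer failures surface as diagnostics instead of an `Err`.

fn main() {
    let sql = "select 1;\nselect 2;";
    let result = pgt_statement_splitter::split(sql);

    // Statement text is recovered by slicing the input with each range.
    for range in &result.ranges {
        println!("statement: {:?}", &sql[*range]);
    }

    // Lexer and splitter errors arrive merged into one list.
    for err in &result.errors {
        eprintln!("diagnostic: {:?}", err);
    }
}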
pub mod diagnostics; -mod parser; +mod splitter; -use parser::{Parser, ParserResult, source}; -use pgt_lexer::diagnostics::ScanError; +use diagnostics::SplitDiagnostic; +use pgt_lexer::Lexer; +use pgt_text_size::TextRange; +use splitter::{Splitter, source}; -pub fn split(sql: &str) -> Result<ParserResult, Vec<ScanError>> { - let tokens = pgt_lexer::lex(sql)?; +pub struct SplitResult { + pub ranges: Vec<TextRange>, + pub errors: Vec<SplitDiagnostic>, +} + +pub fn split(sql: &str) -> SplitResult { + let lexed = Lexer::new(sql).lex(); + + let mut splitter = Splitter::new(&lexed); + + source(&mut splitter); + + let split_result = splitter.finish(); - let mut parser = Parser::new(tokens); + let mut errors: Vec<SplitDiagnostic> = lexed.errors().into_iter().map(Into::into).collect(); - source(&mut parser); + errors.extend( + split_result + .errors + .into_iter() + .map(|err| SplitDiagnostic::from_split_error(err, &lexed)), + ); - Ok(parser.finish()) + SplitResult { + ranges: split_result.ranges, + errors, + } } #[cfg(test)] @@ -28,13 +49,13 @@ mod tests { struct Tester { input: String, - parse: ParserResult, + result: SplitResult, } impl From<&str> for Tester { fn from(input: &str) -> Self { Tester { - parse: split(input).expect("Failed to split"), + result: split(input), input: input.to_string(), } } } @@ -43,25 +64,25 @@ mod tests { impl Tester { fn expect_statements(&self, expected: Vec<&str>) -> &Self { assert_eq!( - self.parse.ranges.len(), + self.result.ranges.len(), expected.len(), "Expected {} statements for input {}, got {}: {:?}", expected.len(), self.input, - self.parse.ranges.len(), - self.parse + self.result.ranges.len(), + self.result .ranges .iter() .map(|r| &self.input[*r]) .collect::<Vec<_>>() ); - for (range, expected) in self.parse.ranges.iter().zip(expected.iter()) { + for (range, expected) in self.result.ranges.iter().zip(expected.iter()) { assert_eq!(*expected, self.input[*range].to_string()); } assert!( - self.parse.ranges.is_sorted_by_key(|r| r.start()), + self.result.ranges.is_sorted_by_key(|r| r.start()), "Ranges are not sorted" ); @@ -70,15 +91,15 @@ fn expect_errors(&self, expected: Vec<SplitDiagnostic>) -> &Self { assert_eq!( - self.parse.errors.len(), + self.result.errors.len(), expected.len(), "Expected {} errors, got {}: {:?}", expected.len(), - self.parse.errors.len(), - self.parse.errors + self.result.errors.len(), + self.result.errors ); - for (err, expected) in self.parse.errors.iter().zip(expected.iter()) { + for (err, expected) in self.result.errors.iter().zip(expected.iter()) { assert_eq!(expected, err); } @@ -93,13 +114,6 @@ mod tests { ); } - #[test] - fn failing_lexer() { - let input = "select 1443ddwwd33djwdkjw13331333333333"; - let res = split(input).unwrap_err(); - assert!(!res.is_empty()); - } - #[test] #[timeout(1000)] fn basic() { @@ -161,7 +175,7 @@ mod tests { Tester::from("\ninsert select 1\n\nselect 3") .expect_statements(vec!["insert select 1", "select 3"]) .expect_errors(vec![SplitDiagnostic::new( - format!("Expected {:?}", SyntaxKind::Into), + format!("Expected {:?}", SyntaxKind::INTO_KW), TextRange::new(8.into(), 14.into()), )]); } diff --git a/crates/pgt_statement_splitter/src/parser.rs b/crates/pgt_statement_splitter/src/parser.rs deleted file mode 100644 index 241d0c70..00000000 --- a/crates/pgt_statement_splitter/src/parser.rs +++ /dev/null @@ -1,237 +0,0 @@ -mod common; -mod data; -mod ddl; -mod dml; - -pub use common::source; - -use pgt_lexer::{SyntaxKind, Token, WHITESPACE_TOKENS}; -use pgt_text_size::{TextRange, TextSize}; - -use crate::diagnostics::SplitDiagnostic; - -/// Main parser that exposes the `cstree` api, and
collects errors and statements -/// It is modelled after a Pratt Parser. For a gentle introduction to Pratt Parsing, see https://matklad.github.io/2020/04/13/simple-but-powerful-pratt-parsing.html -pub struct Parser { - /// The statement ranges are defined by the indices of the start/end tokens - stmt_ranges: Vec<(usize, usize)>, - - /// The syntax errors accumulated during parsing - errors: Vec<SplitDiagnostic>, - - current_stmt_start: Option<usize>, - - tokens: Vec<Token>, - - eof_token: Token, - - current_pos: usize, -} - -#[derive(Debug)] -pub struct ParserResult { - /// The ranges of the parsed statements - pub ranges: Vec<TextRange>, - /// The syntax errors accumulated during parsing - pub errors: Vec<SplitDiagnostic>, -} - -impl Parser { - pub fn new(tokens: Vec<Token>) -> Self { - let eof_token = Token::eof(usize::from( - tokens - .last() - .map(|t| t.span.end()) - .unwrap_or(TextSize::from(0)), - )); - - // Place `current_pos` on the first relevant token - let mut current_pos = 0; - while is_irrelevant_token(tokens.get(current_pos).unwrap_or(&eof_token)) { - current_pos += 1; - } - - Self { - stmt_ranges: Vec::new(), - eof_token, - errors: Vec::new(), - current_stmt_start: None, - tokens, - current_pos, - } - } - - pub fn finish(self) -> ParserResult { - ParserResult { - ranges: self - .stmt_ranges - .iter() - .map(|(start_token_pos, end_token_pos)| { - let from = self.tokens.get(*start_token_pos); - let to = self.tokens.get(*end_token_pos).unwrap_or(&self.eof_token); - - TextRange::new(from.unwrap().span.start(), to.span.end()) - }) - .collect(), - errors: self.errors, - } - } - - pub fn start_stmt(&mut self) { - assert!( - self.current_stmt_start.is_none(), - "cannot start statement within statement at {:?}", - self.tokens.get(self.current_stmt_start.unwrap()) - ); - self.current_stmt_start = Some(self.current_pos); - } - - pub fn close_stmt(&mut self) { - assert!( - self.current_stmt_start.is_some(), - "Must start statement before closing it." - ); - - let start_token_pos = self.current_stmt_start.unwrap(); - - assert!( - self.current_pos > start_token_pos, - "Must close the statement on a token that's later than the start token." - ); - - let (end_token_pos, _) = self.find_last_relevant().unwrap(); - - self.stmt_ranges.push((start_token_pos, end_token_pos)); - - self.current_stmt_start = None; - } - - fn current(&self) -> &Token { - match self.tokens.get(self.current_pos) { - Some(token) => token, - None => &self.eof_token, - } - } - - /// Advances the parser to the next relevant token and returns it. - /// - /// NOTE: This will skip irrelevant tokens. - fn advance(&mut self) -> &Token { - // can't reuse any `find_next_relevant` logic because of Mr. Borrow Checker - let (pos, token) = self - .tokens - .iter() - .enumerate() - .skip(self.current_pos + 1) - .find(|(_, t)| is_relevant(t)) - .unwrap_or((self.tokens.len(), &self.eof_token)); - - self.current_pos = pos; - token - } - - fn look_ahead(&self) -> Option<&Token> { - self.tokens - .iter() - .skip(self.current_pos + 1) - .find(|t| is_relevant(t)) - } - - /// Returns `None` if there are no previous relevant tokens - fn look_back(&self) -> Option<&Token> { - self.find_last_relevant().map(|it| it.1) - } - - /// Will advance if the `kind` matches the current token. - /// Otherwise, will add a diagnostic to the internal `errors`.
- pub fn expect(&mut self, kind: SyntaxKind) { - if self.current().kind == kind { - self.advance(); - } else { - self.errors.push(SplitDiagnostic::new( - format!("Expected {:#?}", kind), - self.current().span, - )); - } - } - - fn find_last_relevant(&self) -> Option<(usize, &Token)> { - self.tokens - .iter() - .enumerate() - .take(self.current_pos) - .rfind(|(_, t)| is_relevant(t)) - } -} - -#[cfg(windows)] -/// Returns true if the token is relevant for the parsing process -/// -/// On windows, a newline is represented by `\r\n` which is two characters. -fn is_irrelevant_token(t: &Token) -> bool { - WHITESPACE_TOKENS.contains(&t.kind) - // double new lines are relevant, single ones are not - && (t.kind != SyntaxKind::Newline || t.text == "\r\n" || t.text.chars().count() == 1) -} - -#[cfg(not(windows))] -/// Returns true if the token is relevant for the parsing process -fn is_irrelevant_token(t: &Token) -> bool { - WHITESPACE_TOKENS.contains(&t.kind) - // double new lines are relevant, single ones are not - && (t.kind != SyntaxKind::Newline || t.text.chars().count() == 1) -} - -fn is_relevant(t: &Token) -> bool { - !is_irrelevant_token(t) -} - -#[cfg(test)] -mod tests { - use pgt_lexer::SyntaxKind; - - use crate::parser::Parser; - - #[test] - fn advance_works_as_expected() { - let sql = r#" - create table users ( - id serial primary key, - name text, - email text - ); - "#; - let tokens = pgt_lexer::lex(sql).unwrap(); - let total_num_tokens = tokens.len(); - - let mut parser = Parser::new(tokens); - - let expected = vec![ - (SyntaxKind::Create, 2), - (SyntaxKind::Table, 4), - (SyntaxKind::Ident, 6), - (SyntaxKind::Ascii40, 8), - (SyntaxKind::Ident, 11), - (SyntaxKind::Ident, 13), - (SyntaxKind::Primary, 15), - (SyntaxKind::Key, 17), - (SyntaxKind::Ascii44, 18), - (SyntaxKind::NameP, 21), - (SyntaxKind::TextP, 23), - (SyntaxKind::Ascii44, 24), - (SyntaxKind::Ident, 27), - (SyntaxKind::TextP, 29), - (SyntaxKind::Ascii41, 32), - (SyntaxKind::Ascii59, 33), - ]; - - for (kind, pos) in expected { - assert_eq!(parser.current().kind, kind); - assert_eq!(parser.current_pos, pos); - parser.advance(); - } - - assert_eq!(parser.current().kind, SyntaxKind::Eof); - assert_eq!(parser.current_pos, total_num_tokens); - } -} diff --git a/crates/pgt_statement_splitter/src/parser/common.rs b/crates/pgt_statement_splitter/src/parser/common.rs deleted file mode 100644 index 4c4ab986..00000000 --- a/crates/pgt_statement_splitter/src/parser/common.rs +++ /dev/null @@ -1,307 +0,0 @@ -use pgt_lexer::{SyntaxKind, Token, TokenType, WHITESPACE_TOKENS}; - -use super::{ - Parser, - data::at_statement_start, - ddl::{alter, create}, - dml::{cte, delete, insert, select, update}, -}; - -pub fn source(p: &mut Parser) { - loop { - match p.current() { - Token { - kind: SyntaxKind::Eof, - .. - } => { - break; - } - Token { - // we might want to ignore TokenType::NoKeyword here too - // but this will lead to invalid statements to not being picked up - token_type: TokenType::Whitespace, - .. - } => { - p.advance(); - } - Token { - kind: SyntaxKind::Ascii92, - .. 
- } => { - plpgsql_command(p); - } - _ => { - statement(p); - } - } - } -} - -pub(crate) fn statement(p: &mut Parser) { - p.start_stmt(); - match p.current().kind { - SyntaxKind::With => { - cte(p); - } - SyntaxKind::Select => { - select(p); - } - SyntaxKind::Insert => { - insert(p); - } - SyntaxKind::Update => { - update(p); - } - SyntaxKind::DeleteP => { - delete(p); - } - SyntaxKind::Create => { - create(p); - } - SyntaxKind::Alter => { - alter(p); - } - _ => { - unknown(p, &[]); - } - } - p.close_stmt(); -} - -pub(crate) fn parenthesis(p: &mut Parser) { - p.expect(SyntaxKind::Ascii40); - - let mut depth = 1; - - loop { - match p.current().kind { - SyntaxKind::Ascii40 => { - p.advance(); - depth += 1; - } - SyntaxKind::Ascii41 | SyntaxKind::Eof => { - p.advance(); - depth -= 1; - if depth == 0 { - break; - } - } - _ => { - p.advance(); - } - } - } -} - -pub(crate) fn plpgsql_command(p: &mut Parser) { - p.expect(SyntaxKind::Ascii92); - - loop { - match p.current().kind { - SyntaxKind::Newline => { - p.advance(); - break; - } - _ => { - // advance the parser to the next token without ignoring irrelevant tokens - // we would skip a newline with `advance()` - p.current_pos += 1; - } - } - } -} - -pub(crate) fn case(p: &mut Parser) { - p.expect(SyntaxKind::Case); - - loop { - match p.current().kind { - SyntaxKind::EndP => { - p.advance(); - break; - } - _ => { - p.advance(); - } - } - } -} - -pub(crate) fn unknown(p: &mut Parser, exclude: &[SyntaxKind]) { - loop { - match p.current() { - Token { - kind: SyntaxKind::Ascii59, - .. - } => { - p.advance(); - break; - } - Token { - kind: SyntaxKind::Eof, - .. - } => { - break; - } - Token { - kind: SyntaxKind::Newline, - .. - } => { - if p.look_back().is_some_and(|t| t.kind == SyntaxKind::Ascii44) { - p.advance(); - } else { - break; - } - } - Token { - kind: SyntaxKind::Case, - .. - } => { - case(p); - } - Token { - kind: SyntaxKind::Ascii92, - .. - } => { - // pgsql commands e.g. - // - // ``` - // \if test - // ``` - // - // we wait for "\" and check if the previous token is a newline - - // newline is a whitespace, but we do not want to ignore it here - let irrelevant = WHITESPACE_TOKENS - .iter() - .filter(|t| **t != SyntaxKind::Newline) - .collect::<Vec<_>>(); - - // go back from the current position without ignoring irrelevant tokens - if p.tokens - .iter() - .take(p.current_pos) - .rev() - .find(|t| !irrelevant.contains(&&t.kind)) - .is_some_and(|t| t.kind == SyntaxKind::Newline) - { - break; - } - p.advance(); - } - Token { - kind: SyntaxKind::Ascii40, - .. - } => { - parenthesis(p); - } - t => match at_statement_start(t.kind, exclude) { - Some(SyntaxKind::Select) => { - let prev = p.look_back().map(|t| t.kind); - if [ - // for policies, with for select - SyntaxKind::For, - // for create view / table as - SyntaxKind::As, - // for create rule - SyntaxKind::On, - // for create rule - SyntaxKind::Also, - // for create rule - SyntaxKind::Instead, - // for UNION - SyntaxKind::Union, - // for UNION ALL - SyntaxKind::All, - // for UNION ... EXCEPT - SyntaxKind::Except, - // for grant - SyntaxKind::Grant, - SyntaxKind::Ascii44, - ] - .iter() - .all(|x| Some(x) != prev.as_ref()) - { - break; - } - - p.advance(); - } - Some(SyntaxKind::Insert) | Some(SyntaxKind::Update) | Some(SyntaxKind::DeleteP) => { - let prev = p.look_back().map(|t| t.kind); - if [ - // for create trigger - SyntaxKind::Before, - SyntaxKind::After, - // for policies, e.g. for insert - SyntaxKind::For, - // e.g. on insert or delete - SyntaxKind::Or, - // e.g.
INSTEAD OF INSERT - SyntaxKind::Of, - // for create rule - SyntaxKind::On, - // for create rule - SyntaxKind::Also, - // for create rule - SyntaxKind::Instead, - // for grant - SyntaxKind::Grant, - SyntaxKind::Ascii44, - // Do update in INSERT stmt - SyntaxKind::Do, - ] - .iter() - .all(|x| Some(x) != prev.as_ref()) - { - break; - } - p.advance(); - } - Some(SyntaxKind::With) => { - let next = p.look_ahead().map(|t| t.kind); - if [ - // WITH ORDINALITY should not start a new statement - SyntaxKind::Ordinality, - // WITH CHECK should not start a new statement - SyntaxKind::Check, - // TIMESTAMP WITH TIME ZONE should not start a new statement - SyntaxKind::Time, - SyntaxKind::Grant, - SyntaxKind::Admin, - SyntaxKind::Inherit, - SyntaxKind::Set, - ] - .iter() - .all(|x| Some(x) != next.as_ref()) - { - break; - } - p.advance(); - } - - Some(SyntaxKind::Create) => { - let prev = p.look_back().map(|t| t.kind); - if [ - // for grant - SyntaxKind::Grant, - SyntaxKind::Ascii44, - ] - .iter() - .all(|x| Some(x) != prev.as_ref()) - { - break; - } - - p.advance(); - } - Some(_) => { - break; - } - None => { - p.advance(); - } - }, - } - } -} diff --git a/crates/pgt_statement_splitter/src/parser/ddl.rs b/crates/pgt_statement_splitter/src/parser/ddl.rs deleted file mode 100644 index d9f233c2..00000000 --- a/crates/pgt_statement_splitter/src/parser/ddl.rs +++ /dev/null @@ -1,15 +0,0 @@ -use pgt_lexer::SyntaxKind; - -use super::{Parser, common::unknown}; - -pub(crate) fn create(p: &mut Parser) { - p.expect(SyntaxKind::Create); - - unknown(p, &[SyntaxKind::With]); -} - -pub(crate) fn alter(p: &mut Parser) { - p.expect(SyntaxKind::Alter); - - unknown(p, &[SyntaxKind::Alter]); -} diff --git a/crates/pgt_statement_splitter/src/parser/dml.rs b/crates/pgt_statement_splitter/src/parser/dml.rs deleted file mode 100644 index 015c50b6..00000000 --- a/crates/pgt_statement_splitter/src/parser/dml.rs +++ /dev/null @@ -1,59 +0,0 @@ -use pgt_lexer::SyntaxKind; - -use super::{ - Parser, - common::{parenthesis, unknown}, -}; - -pub(crate) fn cte(p: &mut Parser) { - p.expect(SyntaxKind::With); - - loop { - p.expect(SyntaxKind::Ident); - p.expect(SyntaxKind::As); - parenthesis(p); - - if p.current().kind == SyntaxKind::Ascii44 { - p.advance(); - } else { - break; - } - } - - unknown( - p, - &[ - SyntaxKind::Select, - SyntaxKind::Insert, - SyntaxKind::Update, - SyntaxKind::DeleteP, - SyntaxKind::Merge, - ], - ); -} - -pub(crate) fn select(p: &mut Parser) { - p.expect(SyntaxKind::Select); - - unknown(p, &[]); -} - -pub(crate) fn insert(p: &mut Parser) { - p.expect(SyntaxKind::Insert); - p.expect(SyntaxKind::Into); - - unknown(p, &[SyntaxKind::Select]); -} - -pub(crate) fn update(p: &mut Parser) { - p.expect(SyntaxKind::Update); - - unknown(p, &[]); -} - -pub(crate) fn delete(p: &mut Parser) { - p.expect(SyntaxKind::DeleteP); - p.expect(SyntaxKind::From); - - unknown(p, &[]); -} diff --git a/crates/pgt_statement_splitter/src/splitter.rs b/crates/pgt_statement_splitter/src/splitter.rs new file mode 100644 index 00000000..cfb4716d --- /dev/null +++ b/crates/pgt_statement_splitter/src/splitter.rs @@ -0,0 +1,168 @@ +mod common; +mod data; +mod ddl; +mod dml; + +pub use common::source; + +use pgt_lexer::{Lexed, SyntaxKind}; +use pgt_text_size::TextRange; + +pub struct SplitResult { + pub ranges: Vec<TextRange>, + pub errors: Vec<SplitError>, +} + +pub static TRIVIA_TOKENS: &[SyntaxKind] = &[ + SyntaxKind::SPACE, + SyntaxKind::TAB, + SyntaxKind::VERTICAL_TAB, + SyntaxKind::FORM_FEED, + SyntaxKind::COMMENT, + // LINE_ENDING is relevant +]; +
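Worth noting at this point: a single line break counts as trivia, but `is_trivia` (defined below) reports a LINE_ENDING token carrying two or more line breaks as relevant, which is what lets a blank line terminate an unterminated statement. A small illustrative test against the public API (the test scaffolding itself is not part of this patch):

#[test]
fn blank_line_ends_statement() {
    // No semicolons: the blank line alone separates the two statements,
    // because a double LINE_ENDING is not skipped as trivia.
    let result = pgt_statement_splitter::split("select 1\n\nselect 2");
    assert_eq!(result.ranges.len(), 2);
}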
+/// Internal error type used during splitting +#[derive(Debug, Clone)] +pub struct SplitError { + pub msg: String, + pub token: usize, +} + +pub struct Splitter<'a> { + lexed: &'a Lexed<'a>, + current_pos: usize, + stmt_ranges: Vec<(usize, usize)>, + errors: Vec<SplitError>, + current_stmt_start: Option<usize>, +} + +impl<'a> Splitter<'a> { + pub fn new(lexed: &'a Lexed<'a>) -> Self { + Self { + lexed, + current_pos: 0, + stmt_ranges: Vec::new(), + errors: Vec::new(), + current_stmt_start: None, + } + } + + pub fn finish(self) -> SplitResult { + let ranges = self + .stmt_ranges + .iter() + .map(|(start_token_pos, end_token_pos)| { + let from = self.lexed.range(*start_token_pos).start(); + let to = self.lexed.range(*end_token_pos).end(); + TextRange::new(from, to) + }) + .collect(); + + SplitResult { + ranges, + errors: self.errors, + } + } + + pub fn start_stmt(&mut self) { + assert!( + self.current_stmt_start.is_none(), + "cannot start statement within statement", + ); + self.current_stmt_start = Some(self.current_pos); + } + + pub fn close_stmt(&mut self) { + assert!( + self.current_stmt_start.is_some(), + "Must start statement before closing it." + ); + + let start_token_pos = self.current_stmt_start.unwrap(); + + assert!( + self.current_pos > start_token_pos, + "Must close the statement on a token that's later than the start token: {} > {}", + self.current_pos, + start_token_pos + ); + + let end_token_pos = (0..self.current_pos) + .rev() + .find(|&idx| !self.is_trivia(idx)) + .unwrap(); + + self.stmt_ranges.push((start_token_pos, end_token_pos)); + + self.current_stmt_start = None; + } + + fn current(&self) -> SyntaxKind { + self.lexed.kind(self.current_pos) + } + + fn kind(&self, idx: usize) -> SyntaxKind { + self.lexed.kind(idx) + } + + /// Advances the parser to the next relevant token and returns it. + /// + /// NOTE: This will skip trivia tokens. + fn advance(&mut self) -> SyntaxKind { + let pos = (self.current_pos + 1..self.lexed.len()) + .find(|&idx| !self.is_trivia(idx)) + .expect("lexed should have non-trivia eof token"); + + self.current_pos = pos; + self.lexed.kind(pos) + } + + fn look_ahead(&self, ignore_trivia: bool) -> SyntaxKind { + let pos = if ignore_trivia { + (self.current_pos + 1..self.lexed.len()) + .find(|&idx| !self.is_trivia(idx)) + .expect("lexed should have non-trivia eof token") + } else { + (self.current_pos + 1..self.lexed.len()) + .next() + .expect("lexed should have an eof token") + }; + self.lexed.kind(pos) + } + + /// Returns `None` if there are no previous relevant tokens + fn look_back(&self, ignore_trivia: bool) -> Option<SyntaxKind> { + if ignore_trivia { + (0..self.current_pos) + .rev() + .find(|&idx| !self.is_trivia(idx)) + .map(|idx| self.lexed.kind(idx)) + } else { + (0..self.current_pos) + .next_back() + .map(|idx| self.lexed.kind(idx)) + } + } + + fn is_trivia(&self, idx: usize) -> bool { + match self.lexed.kind(idx) { + k if TRIVIA_TOKENS.contains(&k) => true, + SyntaxKind::LINE_ENDING => self.lexed.line_ending_count(idx) < 2, + _ => false, + } + } + + /// Will advance if the `kind` matches the current token. + /// Otherwise, will add a diagnostic to the internal `errors`.
+ fn expect(&mut self, kind: SyntaxKind) { + if self.current() == kind { + self.advance(); + } else { + self.errors.push(SplitError { + msg: format!("Expected {:#?}", kind), + token: self.current_pos, + }); + } + } +} diff --git a/crates/pgt_statement_splitter/src/splitter/common.rs b/crates/pgt_statement_splitter/src/splitter/common.rs new file mode 100644 index 00000000..4f2cd069 --- /dev/null +++ b/crates/pgt_statement_splitter/src/splitter/common.rs @@ -0,0 +1,275 @@ +use super::TRIVIA_TOKENS; +use pgt_lexer::SyntaxKind; + +use super::{ + Splitter, + data::at_statement_start, + ddl::{alter, create}, + dml::{cte, delete, insert, select, update}, +}; + +pub fn source(p: &mut Splitter) { + loop { + match p.current() { + SyntaxKind::EOF => { + break; + } + kind if TRIVIA_TOKENS.contains(&kind) || kind == SyntaxKind::LINE_ENDING => { + p.advance(); + } + SyntaxKind::BACKSLASH => { + plpgsql_command(p); + } + _ => { + statement(p); + } + } + } +} + +pub(crate) fn statement(p: &mut Splitter) { + p.start_stmt(); + match p.current() { + SyntaxKind::WITH_KW => { + cte(p); + } + SyntaxKind::SELECT_KW => { + select(p); + } + SyntaxKind::INSERT_KW => { + insert(p); + } + SyntaxKind::UPDATE_KW => { + update(p); + } + SyntaxKind::DELETE_KW => { + delete(p); + } + SyntaxKind::CREATE_KW => { + create(p); + } + SyntaxKind::ALTER_KW => { + alter(p); + } + _ => { + unknown(p, &[]); + } + } + p.close_stmt(); +} + +pub(crate) fn parenthesis(p: &mut Splitter) { + p.expect(SyntaxKind::L_PAREN); + + let mut depth = 1; + + loop { + match p.current() { + SyntaxKind::L_PAREN => { + p.advance(); + depth += 1; + } + SyntaxKind::R_PAREN | SyntaxKind::EOF => { + p.advance(); + depth -= 1; + if depth == 0 { + break; + } + } + _ => { + p.advance(); + } + } + } +} + +pub(crate) fn plpgsql_command(p: &mut Splitter) { + p.expect(SyntaxKind::BACKSLASH); + + loop { + match p.current() { + SyntaxKind::LINE_ENDING => { + p.advance(); + break; + } + _ => { + // advance the splitter to the next token without ignoring irrelevant tokens + // we would skip a newline with `advance()` + p.current_pos += 1; + } + } + } +} + +pub(crate) fn case(p: &mut Splitter) { + p.expect(SyntaxKind::CASE_KW); + + loop { + match p.current() { + SyntaxKind::END_KW => { + p.advance(); + break; + } + _ => { + p.advance(); + } + } + } +} + +pub(crate) fn unknown(p: &mut Splitter, exclude: &[SyntaxKind]) { + loop { + match p.current() { + SyntaxKind::SEMICOLON => { + p.advance(); + break; + } + SyntaxKind::EOF => { + break; + } + SyntaxKind::LINE_ENDING => { + if p.look_back(true).is_some_and(|t| t == SyntaxKind::COMMA) { + p.advance(); + } else { + break; + } + } + SyntaxKind::CASE_KW => { + case(p); + } + SyntaxKind::BACKSLASH => { + // pgsql commands + // we want to check if the previous token non-trivia token is a LINE_ENDING + // we cannot use the is_trivia() method because that would exclude LINE_ENDINGs + // with count > 1 + if (0..p.current_pos) + .rev() + .find_map(|idx| { + let kind = p.kind(idx); + if !TRIVIA_TOKENS.contains(&kind) { + Some(kind) + } else { + None + } + }) + .is_some_and(|t| t == SyntaxKind::LINE_ENDING) + { + break; + } + p.advance(); + } + SyntaxKind::L_PAREN => { + parenthesis(p); + } + t => match at_statement_start(t, exclude) { + Some(SyntaxKind::SELECT_KW) => { + let prev = p.look_back(true); + if [ + // for policies, with for select + SyntaxKind::FOR_KW, + // for create view / table as + SyntaxKind::AS_KW, + // for create rule + SyntaxKind::ON_KW, + // for create rule + SyntaxKind::ALSO_KW, + // for create rule + 
SyntaxKind::INSTEAD_KW, + // for UNION + SyntaxKind::UNION_KW, + // for UNION ALL + SyntaxKind::ALL_KW, + // for UNION ... EXCEPT + SyntaxKind::EXCEPT_KW, + // for grant + SyntaxKind::GRANT_KW, + SyntaxKind::COMMA, + ] + .iter() + .all(|x| Some(x) != prev.as_ref()) + { + break; + } + + p.advance(); + } + Some(SyntaxKind::INSERT_KW) + | Some(SyntaxKind::UPDATE_KW) + | Some(SyntaxKind::DELETE_KW) => { + let prev = p.look_back(true); + if [ + // for create trigger + SyntaxKind::BEFORE_KW, + SyntaxKind::AFTER_KW, + // for policies, e.g. for insert + SyntaxKind::FOR_KW, + // e.g. on insert or delete + SyntaxKind::OR_KW, + // e.g. INSTEAD OF INSERT + SyntaxKind::OF_KW, + // for create rule + SyntaxKind::ON_KW, + // for create rule + SyntaxKind::ALSO_KW, + // for create rule + SyntaxKind::INSTEAD_KW, + // for grant + SyntaxKind::GRANT_KW, + SyntaxKind::COMMA, + // Do update in INSERT stmt + SyntaxKind::DO_KW, + ] + .iter() + .all(|x| Some(x) != prev.as_ref()) + { + break; + } + p.advance(); + } + Some(SyntaxKind::WITH_KW) => { + let next = p.look_ahead(true); + if [ + // WITH ORDINALITY should not start a new statement + SyntaxKind::ORDINALITY_KW, + // WITH CHECK should not start a new statement + SyntaxKind::CHECK_KW, + // TIMESTAMP WITH TIME ZONE should not start a new statement + SyntaxKind::TIME_KW, + SyntaxKind::GRANT_KW, + SyntaxKind::ADMIN_KW, + SyntaxKind::INHERIT_KW, + SyntaxKind::SET_KW, + ] + .iter() + .all(|x| x != &next) + { + break; + } + p.advance(); + } + + Some(SyntaxKind::CREATE_KW) => { + let prev = p.look_back(true); + if [ + // for grant + SyntaxKind::GRANT_KW, + SyntaxKind::COMMA, + ] + .iter() + .all(|x| Some(x) != prev.as_ref()) + { + break; + } + + p.advance(); + } + Some(_) => { + break; + } + None => { + p.advance(); + } + }, + } + } +} diff --git a/crates/pgt_statement_splitter/src/parser/data.rs b/crates/pgt_statement_splitter/src/splitter/data.rs similarity index 62% rename from crates/pgt_statement_splitter/src/parser/data.rs rename to crates/pgt_statement_splitter/src/splitter/data.rs index c0792c39..0827484b 100644 --- a/crates/pgt_statement_splitter/src/parser/data.rs +++ b/crates/pgt_statement_splitter/src/splitter/data.rs @@ -3,15 +3,15 @@ use pgt_lexer::SyntaxKind; // All tokens listed here must be explicitly handled in the `unknown` function to ensure that we do // not break in the middle of another statement that contains a statement start token. 
// -// All of these statements must have a dedicated parser function called from the `statement` function +// All of these statements must have a dedicated splitter function called from the `statement` function static STATEMENT_START_TOKENS: &[SyntaxKind] = &[ - SyntaxKind::With, - SyntaxKind::Select, - SyntaxKind::Insert, - SyntaxKind::Update, - SyntaxKind::DeleteP, - SyntaxKind::Create, - SyntaxKind::Alter, + SyntaxKind::WITH_KW, + SyntaxKind::SELECT_KW, + SyntaxKind::INSERT_KW, + SyntaxKind::UPDATE_KW, + SyntaxKind::DELETE_KW, + SyntaxKind::CREATE_KW, + SyntaxKind::ALTER_KW, ]; pub(crate) fn at_statement_start(kind: SyntaxKind, exclude: &[SyntaxKind]) -> Option<&SyntaxKind> { diff --git a/crates/pgt_statement_splitter/src/splitter/ddl.rs b/crates/pgt_statement_splitter/src/splitter/ddl.rs new file mode 100644 index 00000000..449288aa --- /dev/null +++ b/crates/pgt_statement_splitter/src/splitter/ddl.rs @@ -0,0 +1,15 @@ +use pgt_lexer::SyntaxKind; + +use super::{Splitter, common::unknown}; + +pub(crate) fn create(p: &mut Splitter) { + p.expect(SyntaxKind::CREATE_KW); + + unknown(p, &[SyntaxKind::WITH_KW]); +} + +pub(crate) fn alter(p: &mut Splitter) { + p.expect(SyntaxKind::ALTER_KW); + + unknown(p, &[SyntaxKind::ALTER_KW]); +} diff --git a/crates/pgt_statement_splitter/src/splitter/dml.rs b/crates/pgt_statement_splitter/src/splitter/dml.rs new file mode 100644 index 00000000..9c833301 --- /dev/null +++ b/crates/pgt_statement_splitter/src/splitter/dml.rs @@ -0,0 +1,59 @@ +use pgt_lexer::SyntaxKind; + +use super::{ + Splitter, + common::{parenthesis, unknown}, +}; + +pub(crate) fn cte(p: &mut Splitter) { + p.expect(SyntaxKind::WITH_KW); + + loop { + p.expect(SyntaxKind::IDENT); + p.expect(SyntaxKind::AS_KW); + parenthesis(p); + + if p.current() == SyntaxKind::COMMA { + p.advance(); + } else { + break; + } + } + + unknown( + p, + &[ + SyntaxKind::SELECT_KW, + SyntaxKind::INSERT_KW, + SyntaxKind::UPDATE_KW, + SyntaxKind::DELETE_KW, + SyntaxKind::MERGE_KW, + ], + ); +} + +pub(crate) fn select(p: &mut Splitter) { + p.expect(SyntaxKind::SELECT_KW); + + unknown(p, &[]); +} + +pub(crate) fn insert(p: &mut Splitter) { + p.expect(SyntaxKind::INSERT_KW); + p.expect(SyntaxKind::INTO_KW); + + unknown(p, &[SyntaxKind::SELECT_KW]); +} + +pub(crate) fn update(p: &mut Splitter) { + p.expect(SyntaxKind::UPDATE_KW); + + unknown(p, &[]); +} + +pub(crate) fn delete(p: &mut Splitter) { + p.expect(SyntaxKind::DELETE_KW); + p.expect(SyntaxKind::FROM_KW); + + unknown(p, &[]); +} diff --git a/crates/pgt_statement_splitter/tests/statement_splitter_tests.rs b/crates/pgt_statement_splitter/tests/statement_splitter_tests.rs index e0534725..a4cf3259 100644 --- a/crates/pgt_statement_splitter/tests/statement_splitter_tests.rs +++ b/crates/pgt_statement_splitter/tests/statement_splitter_tests.rs @@ -22,7 +22,7 @@ fn test_statement_splitter() { let contents = fs::read_to_string(&path).unwrap(); - let split = pgt_statement_splitter::split(&contents).expect("Failed to split"); + let split = pgt_statement_splitter::split(&contents); assert_eq!( split.ranges.len(), diff --git a/crates/pgt_query_ext_codegen/Cargo.toml b/crates/pgt_tokenizer/Cargo.toml similarity index 62% rename from crates/pgt_query_ext_codegen/Cargo.toml rename to crates/pgt_tokenizer/Cargo.toml index c3a0f20d..9cd4bf5e 100644 --- a/crates/pgt_query_ext_codegen/Cargo.toml +++ b/crates/pgt_tokenizer/Cargo.toml @@ -6,17 +6,14 @@ edition.workspace = true homepage.workspace = true keywords.workspace = true license.workspace = true -name = 
"pgt_query_ext_codegen" +name = "pgt_tokenizer" repository.workspace = true version = "0.0.0" [dependencies] -proc-macro2.workspace = true -quote.workspace = true -pgt_query_proto_parser.workspace = true +[dev-dependencies] +insta.workspace = true [lib] -doctest = false -proc-macro = true diff --git a/crates/pgt_tokenizer/README.md b/crates/pgt_tokenizer/README.md new file mode 100644 index 00000000..8fc21d34 --- /dev/null +++ b/crates/pgt_tokenizer/README.md @@ -0,0 +1 @@ +Heavily inspired by and copied from [squawk_lexer](https://github.com/sbdchd/squawk/tree/9acfecbbb7f3c7eedcbaf060e7b25f9afa136db3/crates/squawk_lexer). Thanks for making all the hard work MIT-licensed! diff --git a/crates/pgt_tokenizer/src/cursor.rs b/crates/pgt_tokenizer/src/cursor.rs new file mode 100644 index 00000000..64710f29 --- /dev/null +++ b/crates/pgt_tokenizer/src/cursor.rs @@ -0,0 +1,73 @@ +use std::str::Chars; + +/// Peekable iterator over a char sequence. +/// +/// Next characters can be peeked via `first` method, +/// and position can be shifted forward via `bump` method. +/// based on: +/// - +/// - +/// +pub(crate) struct Cursor<'a> { + /// Iterator over chars. Slightly faster than a &str. + chars: Chars<'a>, + len_remaining: usize, +} + +pub(crate) const EOF_CHAR: char = '\0'; + +impl<'a> Cursor<'a> { + pub(crate) fn new(input: &'a str) -> Cursor<'a> { + Cursor { + len_remaining: input.len(), + chars: input.chars(), + } + } + + /// Peeks the next symbol from the input stream without consuming it. + /// If requested position doesn't exist, `EOF_CHAR` is returned. + /// However, getting `EOF_CHAR` doesn't always mean actual end of file, + /// it should be checked with `is_eof` method. + pub(crate) fn first(&self) -> char { + // `.next()` optimizes better than `.nth(0)` + self.chars.clone().next().unwrap_or(EOF_CHAR) + } + + /// Peeks the second next symbol from the input stream without consuming it. + /// If requested position doesn't exist, `EOF_CHAR` is returned. + /// However, getting `EOF_CHAR` doesn't always mean actual end of file, + /// it should be checked with `is_eof` method. + pub(crate) fn second(&self) -> char { + self.chars.clone().nth(1).unwrap_or(EOF_CHAR) + } + + /// Checks if there is nothing more to consume. + pub(crate) fn is_eof(&self) -> bool { + self.chars.as_str().is_empty() + } + + /// Returns amount of already consumed symbols. + pub(crate) fn pos_within_token(&self) -> u32 { + (self.len_remaining - self.chars.as_str().len()) as u32 + } + + /// Resets the number of bytes consumed to 0. + pub(crate) fn reset_pos_within_token(&mut self) { + self.len_remaining = self.chars.as_str().len(); + } + + /// Moves to the next character. + pub(crate) fn bump(&mut self) -> Option { + let c = self.chars.next()?; + Some(c) + } + + /// Eats symbols while predicate returns true or until the end of file is reached. + pub(crate) fn eat_while(&mut self, mut predicate: impl FnMut(char) -> bool) { + // It was tried making optimized version of this for eg. line comments, but + // LLVM can inline all of this and compile it down to fast iteration over bytes. 
diff --git a/crates/pgt_tokenizer/src/lib.rs b/crates/pgt_tokenizer/src/lib.rs new file mode 100644 index 00000000..787adcaa --- /dev/null +++ b/crates/pgt_tokenizer/src/lib.rs @@ -0,0 +1,830 @@ +mod cursor; +mod token; +use cursor::{Cursor, EOF_CHAR}; +pub use token::{Base, LiteralKind, Token, TokenKind}; + +// via: https://github.com/postgres/postgres/blob/db0c96cc18aec417101e37e59fcc53d4bf647915/src/backend/parser/scan.l#L346 +// ident_start [A-Za-z\200-\377_] +const fn is_ident_start(c: char) -> bool { + matches!(c, 'a'..='z' | 'A'..='Z' | '_' | '\u{80}'..='\u{FF}') +} + +// ident_cont [A-Za-z\200-\377_0-9\$] +const fn is_ident_cont(c: char) -> bool { + matches!(c, 'a'..='z' | 'A'..='Z' | '_' | '0'..='9' | '$' | '\u{80}'..='\u{FF}') +} + +// whitespace +// - https://github.com/postgres/postgres/blob/db0c96cc18aec417101e37e59fcc53d4bf647915/src/backend/parser/scansup.c#L107-L128 +// - https://github.com/postgres/postgres/blob/db0c96cc18aec417101e37e59fcc53d4bf647915/src/backend/parser/scan.l#L204-L229 + +const fn is_space(c: char) -> bool { + matches!( + c, ' ' // space + ) +} + +const fn is_tab(c: char) -> bool { + matches!( + c, '\t' // tab + ) +} + +const fn is_line_ending(c: char) -> bool { + matches!( + c, + '\n' | '\r' // newline or carriage return + ) +} + +const fn is_vertical_tab(c: char) -> bool { + matches!( + c, '\u{000B}' // vertical tab + ) +} + +const fn is_form_feed(c: char) -> bool { + matches!( + c, '\u{000C}' // form feed + ) +} + +impl Cursor<'_> { + // see: https://github.com/rust-lang/rust/blob/ba1d7f4a083e6402679105115ded645512a7aea8/compiler/rustc_lexer/src/lib.rs#L339 + pub(crate) fn advance_token(&mut self) -> Token { + let Some(first_char) = self.bump() else { + return Token::new(TokenKind::Eof, 0); + }; + let token_kind = match first_char { + // Slash, comment or block comment. + '/' => match self.first() { + '*' => self.block_comment(), + _ => TokenKind::Slash, + }, + '-' => match self.first() { + '-' => self.line_comment(), + _ => TokenKind::Minus, + }, + + c if is_space(c) => { + self.eat_while(is_space); + TokenKind::Space + } + + c if is_tab(c) => { + self.eat_while(is_tab); + TokenKind::Tab + } + + c if is_line_ending(c) => self.line_ending_sequence(c), + + c if is_vertical_tab(c) => { + self.eat_while(is_vertical_tab); + TokenKind::VerticalTab + } + + c if is_form_feed(c) => { + self.eat_while(is_form_feed); + TokenKind::FormFeed + } + + // https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-STRINGS-UESCAPE + 'u' | 'U' => match self.first() { + '&' => { + self.bump(); + self.prefixed_string( + |terminated| LiteralKind::UnicodeEscStr { terminated }, + true, + ) + } + _ => self.ident_or_unknown_prefix(), + }, + + // escaped strings + 'e' | 'E' => { + self.prefixed_string(|terminated| LiteralKind::EscStr { terminated }, false) + } + + // bit string + 'b' | 'B' => { + self.prefixed_string(|terminated| LiteralKind::BitStr { terminated }, false) + } + + // hexadecimal byte string + 'x' | 'X' => { + self.prefixed_string(|terminated| LiteralKind::ByteStr { terminated }, false) + } + + // Identifier (this should be checked after other variant that can + // start as identifier). + c if is_ident_start(c) => self.ident(), + + // Numeric literal. + // see: https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-CONSTANTS-NUMERIC + c @ '0'..='9' => { + let literal_kind = self.number(c); + TokenKind::Literal { kind: literal_kind } + } + '.'
=> match self.first() { + '0'..='9' => { + let literal_kind = self.number('.'); + TokenKind::Literal { kind: literal_kind } + } + _ => TokenKind::Dot, + }, + // One-symbol tokens. + ';' => TokenKind::Semi, + '\\' => TokenKind::Backslash, + ',' => TokenKind::Comma, + '(' => TokenKind::OpenParen, + ')' => TokenKind::CloseParen, + '[' => TokenKind::OpenBracket, + ']' => TokenKind::CloseBracket, + '@' => TokenKind::At, + '#' => TokenKind::Pound, + '~' => TokenKind::Tilde, + '?' => TokenKind::Question, + ':' => TokenKind::Colon, + '$' => { + // Dollar quoted strings + if is_ident_start(self.first()) || self.first() == '$' { + self.dollar_quoted_string() + } else { + // Parameters + while self.first().is_ascii_digit() { + self.bump(); + } + TokenKind::PositionalParam + } + } + '`' => TokenKind::Backtick, + '=' => TokenKind::Eq, + '!' => TokenKind::Bang, + '<' => TokenKind::Lt, + '>' => TokenKind::Gt, + '&' => TokenKind::And, + '|' => TokenKind::Or, + '+' => TokenKind::Plus, + '*' => TokenKind::Star, + '^' => TokenKind::Caret, + '%' => TokenKind::Percent, + + // String literal + '\'' => { + let terminated = self.single_quoted_string(); + let kind = LiteralKind::Str { terminated }; + TokenKind::Literal { kind } + } + + // Quoted identifiers + '"' => { + let terminated = self.double_quoted_string(); + TokenKind::QuotedIdent { terminated } + } + _ => TokenKind::Unknown, + }; + let res = Token::new(token_kind, self.pos_within_token()); + self.reset_pos_within_token(); + res + } + pub(crate) fn ident(&mut self) -> TokenKind { + self.eat_while(is_ident_cont); + TokenKind::Ident + } + + fn ident_or_unknown_prefix(&mut self) -> TokenKind { + // Start is already eaten, eat the rest of identifier. + self.eat_while(is_ident_cont); + // Known prefixes must have been handled earlier. So if + // we see a prefix here, it is definitely an unknown prefix. + match self.first() { + '#' | '"' | '\'' => TokenKind::UnknownPrefix, + _ => TokenKind::Ident, + } + } + + // see: https://github.com/postgres/postgres/blob/db0c96cc18aec417101e37e59fcc53d4bf647915/src/backend/parser/scan.l#L227 + // comment ("--"{non_newline}*) + pub(crate) fn line_comment(&mut self) -> TokenKind { + self.bump(); + + self.eat_while(|c| c != '\n'); + TokenKind::LineComment + } + + // see: https://github.com/postgres/postgres/blob/db0c96cc18aec417101e37e59fcc53d4bf647915/src/backend/parser/scan.l#L324-L344 + pub(crate) fn block_comment(&mut self) -> TokenKind { + self.bump(); + + let mut depth = 1usize; + while let Some(c) = self.bump() { + match c { + '/' if self.first() == '*' => { + self.bump(); + depth += 1; + } + '*' if self.first() == '/' => { + self.bump(); + depth -= 1; + if depth == 0 { + // This block comment is closed, so for a construction like "/* */ */" + // there will be a successfully parsed block comment "/* */" + // and " */" will be processed separately. + break; + } + } + _ => (), + } + } + + TokenKind::BlockComment { + terminated: depth == 0, + } + } + + // invariant: we care about the number of consecutive newlines so we count them.
+ // + // Postgres considers a DOS-style \r\n sequence as two successive newlines, but we care about + // logical line breaks and consider \r\n as one logical line break + fn line_ending_sequence(&mut self, prev: char) -> TokenKind { + // already consumed first line ending character (\n or \r) + let mut line_breaks = 1; + + // started with \r, check if it's part of \r\n + if prev == '\r' && self.first() == '\n' { + // consume the \n - \r\n still counts as 1 logical line break + self.bump(); + } + + // continue checking for more line endings + loop { + match self.first() { + '\r' if self.second() == '\n' => { + self.bump(); // consume \r + self.bump(); // consume \n + line_breaks += 1; + } + '\n' => { + self.bump(); + line_breaks += 1; + } + '\r' => { + self.bump(); + line_breaks += 1; + } + _ => break, + } + } + + TokenKind::LineEnding { count: line_breaks } + } + + fn prefixed_string( + &mut self, + mk_kind: fn(bool) -> LiteralKind, + allows_double: bool, + ) -> TokenKind { + match self.first() { + '\'' => { + self.bump(); + let terminated = self.single_quoted_string(); + let kind = mk_kind(terminated); + TokenKind::Literal { kind } + } + '"' if allows_double => { + self.bump(); + let terminated = self.double_quoted_string(); + TokenKind::QuotedIdent { terminated } + } + _ => self.ident_or_unknown_prefix(), + } + } + + fn number(&mut self, first_digit: char) -> LiteralKind { + let mut base = Base::Decimal; + if first_digit == '0' { + // Attempt to parse encoding base. + match self.first() { + // https://github.com/postgres/postgres/blob/db0c96cc18aec417101e37e59fcc53d4bf647915/src/backend/parser/scan.l#L403 + 'b' | 'B' => { + base = Base::Binary; + self.bump(); + if !self.eat_decimal_digits() { + return LiteralKind::Int { + base, + empty_int: true, + }; + } + } + // https://github.com/postgres/postgres/blob/db0c96cc18aec417101e37e59fcc53d4bf647915/src/backend/parser/scan.l#L402 + 'o' | 'O' => { + base = Base::Octal; + self.bump(); + if !self.eat_decimal_digits() { + return LiteralKind::Int { + base, + empty_int: true, + }; + } + } + // https://github.com/postgres/postgres/blob/db0c96cc18aec417101e37e59fcc53d4bf647915/src/backend/parser/scan.l#L401 + 'x' | 'X' => { + base = Base::Hexadecimal; + self.bump(); + if !self.eat_hexadecimal_digits() { + return LiteralKind::Int { + base, + empty_int: true, + }; + } + } + // Not a base prefix; consume additional digits. + '0'..='9' | '_' => { + self.eat_decimal_digits(); + } + + // Also not a base prefix; nothing more to do here. + '.' | 'e' | 'E' => {} + + // Just a 0. + _ => { + return LiteralKind::Int { + base, + empty_int: false, + }; + } + } + } else { + // No base prefix, parse number in the usual way. + self.eat_decimal_digits(); + }; + + match self.first() { + '.' 
=> { + // might have stuff after the ., and if it does, it needs to start + // with a number + self.bump(); + let mut empty_exponent = false; + if self.first().is_ascii_digit() { + self.eat_decimal_digits(); + match self.first() { + 'e' | 'E' => { + self.bump(); + empty_exponent = !self.eat_float_exponent(); + } + _ => (), + } + } else { + match self.first() { + 'e' | 'E' => { + self.bump(); + empty_exponent = !self.eat_float_exponent(); + } + _ => (), + } + } + LiteralKind::Float { + base, + empty_exponent, + } + } + 'e' | 'E' => { + self.bump(); + let empty_exponent = !self.eat_float_exponent(); + LiteralKind::Float { + base, + empty_exponent, + } + } + _ => LiteralKind::Int { + base, + empty_int: false, + }, + } + } + + fn single_quoted_string(&mut self) -> bool { + // Parse until either quotes are terminated or error is detected. + loop { + match self.first() { + // Quotes might be terminated. + '\'' => { + self.bump(); + + match self.first() { + // encountered an escaped quote '' + '\'' => { + self.bump(); + } + // encountered terminating quote + _ => return true, + } + } + // End of file, stop parsing. + EOF_CHAR if self.is_eof() => break, + // Skip the character. + _ => { + self.bump(); + } + } + } + // String was not terminated. + false + } + + /// Eats double-quoted string and returns true + /// if string is terminated. + fn double_quoted_string(&mut self) -> bool { + while let Some(c) = self.bump() { + match c { + '"' if self.first() == '"' => { + // Bump again to skip escaped character. + self.bump(); + } + '"' => { + return true; + } + _ => (), + } + } + // End of file reached. + false + } + + // https://www.postgresql.org/docs/16/sql-syntax-lexical.html#SQL-SYNTAX-DOLLAR-QUOTING + fn dollar_quoted_string(&mut self) -> TokenKind { + // Get the start sequence of the dollar quote, i.e., 'foo' in + // $foo$hello$foo$ + let mut start = vec![]; + while let Some(c) = self.bump() { + match c { + '$' => { + self.bump(); + break; + } + _ => { + start.push(c); + } + } + } + + // we have a dollar quoted string delimited with `$$` + if start.is_empty() { + loop { + self.eat_while(|c| c != '$'); + if self.is_eof() { + return TokenKind::Literal { + kind: LiteralKind::DollarQuotedString { terminated: false }, + }; + } + // eat $ + self.bump(); + if self.first() == '$' { + self.bump(); + return TokenKind::Literal { + kind: LiteralKind::DollarQuotedString { terminated: true }, + }; + } + } + } else { + loop { + self.eat_while(|c| c != start[0]); + if self.is_eof() { + return TokenKind::Literal { + kind: LiteralKind::DollarQuotedString { terminated: false }, + }; + } + + // might be the start of our start/end sequence + let mut match_count = 0; + for start_char in &start { + if self.first() == *start_char { + self.bump(); + match_count += 1; + } else { + self.bump(); + break; + } + } + + // closing '$' + let terminated = match_count == start.len(); + if self.first() == '$' && terminated { + self.bump(); + return TokenKind::Literal { + kind: LiteralKind::DollarQuotedString { terminated }, + }; + } + } + } + } + + fn eat_decimal_digits(&mut self) -> bool { + let mut has_digits = false; + loop { + match self.first() { + '_' => { + self.bump(); + } + '0'..='9' => { + has_digits = true; + self.bump(); + } + _ => break, + } + } + has_digits + } + + fn eat_hexadecimal_digits(&mut self) -> bool { + let mut has_digits = false; + loop { + match self.first() { + '_' => { + self.bump(); + } + '0'..='9' | 'a'..='f' | 'A'..='F' => { + has_digits = true; + self.bump(); + } + _ => break, + } + } + has_digits
+ } + + /// Eats the float exponent. Returns true if at least one digit was met, + /// and returns false otherwise. + fn eat_float_exponent(&mut self) -> bool { + if self.first() == '-' || self.first() == '+' { + self.bump(); + } + self.eat_decimal_digits() + } +} + +/// Creates an iterator that produces tokens from the input string. +pub fn tokenize(input: &str) -> impl Iterator<Item = Token> + '_ { + let mut cursor = Cursor::new(input); + std::iter::from_fn(move || { + let token = cursor.advance_token(); + if token.kind != TokenKind::Eof { + Some(token) + } else { + None + } + }) +} + +#[cfg(test)] +mod tests { + use std::fmt; + + use super::*; + use insta::assert_debug_snapshot; + + struct TokenDebug<'a> { + content: &'a str, + token: Token, + } + impl fmt::Debug for TokenDebug<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?} @ {:?}", self.content, self.token.kind) + } + } + + impl<'a> TokenDebug<'a> { + fn new(token: Token, input: &'a str, start: u32) -> TokenDebug<'a> { + TokenDebug { + token, + content: &input[start as usize..(start + token.len) as usize], + } + } + } + + fn lex(input: &str) -> Vec<TokenDebug> { + let mut tokens = vec![]; + let mut start = 0; + + for token in tokenize(input) { + let length = token.len; + tokens.push(TokenDebug::new(token, input, start)); + start += length; + } + tokens + } + #[test] + fn lex_statement() { + let result = lex("select 1;"); + assert_debug_snapshot!(result); + } + + #[test] + fn block_comment() { + let result = lex(r#" +/* + * foo + * bar +*/"#); + assert_debug_snapshot!(result); + } + + #[test] + fn block_comment_unterminated() { + let result = lex(r#" +/* + * foo + * bar + /* +*/"#); + assert_debug_snapshot!(result); + } + + #[test] + fn line_comment() { + let result = lex(r#" +-- foooooooooooo bar buzz +"#); + assert_debug_snapshot!(result); + } + + #[test] + fn line_comment_whitespace() { + assert_debug_snapshot!(lex(r#" +select 'Hello' -- This is a comment +' World';"#)) + } + + #[test] + fn dollar_quoting() { + assert_debug_snapshot!(lex(r#" +$$Dianne's horse$$ +$SomeTag$Dianne's horse$SomeTag$ + +-- with dollar inside and matching tags +$foo$hello$world$bar$ +"#)) + } + + #[test] + fn dollar_strings_part2() { + assert_debug_snapshot!(lex(r#" +DO $doblock$ +end +$doblock$;"#)) + } + + #[test] + fn dollar_quote_mismatch_tags_simple() { + assert_debug_snapshot!(lex(r#" +-- dollar quoting with mismatched tags +$foo$hello world$bar$ +"#)); + } + + #[test] + fn dollar_quote_mismatch_tags_complex() { + assert_debug_snapshot!(lex(r#" +-- with dollar inside but mismatched tags +$foo$hello$world$bar$ +"#)); + } + + #[test] + fn numeric() { + assert_debug_snapshot!(lex(r#" +42 +3.5 +4.
+.001 +.123e10 +5e2 +1.925e-3 +1e-10 +1e+10 +1e10 +4664.E+5 +"#)) + } + + #[test] + fn numeric_non_decimal() { + assert_debug_snapshot!(lex(r#" +0b100101 +0B10011001 +0o273 +0O755 +0x42f +0XFFFF +"#)) + } + + #[test] + fn numeric_with_seperators() { + assert_debug_snapshot!(lex(r#" +1_500_000_000 +0b10001000_00000000 +0o_1_755 +0xFFFF_FFFF +1.618_034 +"#)) + } + + #[test] + fn select_with_period() { + assert_debug_snapshot!(lex(r#" +select public.users; +"#)) + } + + #[test] + fn bitstring() { + assert_debug_snapshot!(lex(r#" +B'1001' +b'1001' +X'1FF' +x'1FF' +"#)) + } + + #[test] + fn string() { + assert_debug_snapshot!(lex(r#" +'Dianne''s horse' + +select 'foo '' +bar'; + +select 'foooo' + 'bar'; + + +'foo \\ \n \tbar' + +'forgot to close the string +"#)) + } + + #[test] + fn params() { + assert_debug_snapshot!(lex(r#" +select $1 + $2; + +select $1123123123123; + +select $; +"#)) + } + + #[test] + fn string_with_escapes() { + // https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-STRINGS-ESCAPE + + assert_debug_snapshot!(lex(r#" +E'foo' + +e'bar' + +e'\b\f\n\r\t' + +e'\0\11\777' + +e'\x0\x11\xFF' + +e'\uAAAA \UFFFFFFFF' + +"#)) + } + + #[test] + fn string_unicode_escape() { + // https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-STRINGS-UESCAPE + + assert_debug_snapshot!(lex(r#" +U&"d\0061t\+000061" + +U&"\0441\043B\043E\043D" + +u&'\0441\043B' + +U&"d!0061t!+000061" UESCAPE '!' +"#)) + } + + #[test] + fn quoted_ident() { + assert_debug_snapshot!(lex(r#" +"hello &1 -world"; + + +"hello-world +"#)) + } + + #[test] + fn quoted_ident_with_escape_quote() { + assert_debug_snapshot!(lex(r#" +"foo "" bar" +"#)) + } +} diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__bitstring.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__bitstring.snap new file mode 100644 index 00000000..ff3eec09 --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__bitstring.snap @@ -0,0 +1,16 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: "lex(r#\"\nB'1001'\nb'1001'\nX'1FF'\nx'1FF'\n\"#)" +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "B'1001'" @ Literal { kind: BitStr { terminated: true } }, + "\n" @ LineEnding { count: 1 }, + "b'1001'" @ Literal { kind: BitStr { terminated: true } }, + "\n" @ LineEnding { count: 1 }, + "X'1FF'" @ Literal { kind: ByteStr { terminated: true } }, + "\n" @ LineEnding { count: 1 }, + "x'1FF'" @ Literal { kind: ByteStr { terminated: true } }, + "\n" @ LineEnding { count: 1 }, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__block_comment.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__block_comment.snap new file mode 100644 index 00000000..22961ecf --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__block_comment.snap @@ -0,0 +1,9 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: result +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "/*\n * foo\n * bar\n*/" @ BlockComment { terminated: true }, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__block_comment_unterminated.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__block_comment_unterminated.snap new file mode 100644 index 00000000..4dd6957e --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__block_comment_unterminated.snap @@ -0,0 +1,9 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: result +snapshot_kind: text +--- +[ + "\n" @ 
LineEnding { count: 1 }, + "/*\n * foo\n * bar\n /*\n*/" @ BlockComment { terminated: false }, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__dollar_quote_mismatch_tags_complex.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__dollar_quote_mismatch_tags_complex.snap new file mode 100644 index 00000000..7f6a6649 --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__dollar_quote_mismatch_tags_complex.snap @@ -0,0 +1,11 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: "lex(r#\"\n-- with dollar inside but mismatched tags\n$foo$hello$world$bar$\n\"#)" +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "-- with dollar inside but mismatched tags" @ LineComment, + "\n" @ LineEnding { count: 1 }, + "$foo$hello$world$bar$\n" @ Literal { kind: DollarQuotedString { terminated: false } }, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__dollar_quote_mismatch_tags_simple.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__dollar_quote_mismatch_tags_simple.snap new file mode 100644 index 00000000..9d6d43a0 --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__dollar_quote_mismatch_tags_simple.snap @@ -0,0 +1,11 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: "lex(r#\"\n-- dollar quoting with mismatched tags\n$foo$hello world$bar$\n\"#)" +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "-- dollar quoting with mismatched tags" @ LineComment, + "\n" @ LineEnding { count: 1 }, + "$foo$hello world$bar$\n" @ Literal { kind: DollarQuotedString { terminated: false } }, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__dollar_quoting.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__dollar_quoting.snap new file mode 100644 index 00000000..ad1aa07d --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__dollar_quoting.snap @@ -0,0 +1,15 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: "lex(r#\"\n$$Dianne's horse$$\n$SomeTag$Dianne's horse$SomeTag$\n\n-- with dollar inside and matching tags\n$foo$hello$world$bar$\n\"#)" +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "$$Dianne's horse$$" @ Literal { kind: DollarQuotedString { terminated: true } }, + "\n" @ LineEnding { count: 1 }, + "$SomeTag$Dianne's horse$SomeTag$" @ Literal { kind: DollarQuotedString { terminated: true } }, + "\n\n" @ LineEnding { count: 2 }, + "-- with dollar inside and matching tags" @ LineComment, + "\n" @ LineEnding { count: 1 }, + "$foo$hello$world$bar$\n" @ Literal { kind: DollarQuotedString { terminated: false } }, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__dollar_strings_part2.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__dollar_strings_part2.snap new file mode 100644 index 00000000..9aa49446 --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__dollar_strings_part2.snap @@ -0,0 +1,12 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: "lex(r#\"\nDO $doblock$\nend\n$doblock$;\"#)" +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "DO" @ Ident, + " " @ Space, + "$doblock$\nend\n$doblock$" @ Literal { kind: DollarQuotedString { terminated: true } }, + ";" @ Semi, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__lex_statement.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__lex_statement.snap new file mode 100644 index 00000000..5679f2a7 --- 
/dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__lex_statement.snap @@ -0,0 +1,11 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: result +snapshot_kind: text +--- +[ + "select" @ Ident, + " " @ Space, + "1" @ Literal { kind: Int { base: Decimal, empty_int: false } }, + ";" @ Semi, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__line_comment.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__line_comment.snap new file mode 100644 index 00000000..1cd8782a --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__line_comment.snap @@ -0,0 +1,10 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: result +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "-- foooooooooooo bar buzz" @ LineComment, + "\n" @ LineEnding { count: 1 }, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__line_comment_whitespace.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__line_comment_whitespace.snap new file mode 100644 index 00000000..3cf5fb50 --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__line_comment_whitespace.snap @@ -0,0 +1,16 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: "lex(r#\"\nselect 'Hello' -- This is a comment\n' World';\"#)" +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "select" @ Ident, + " " @ Space, + "'Hello'" @ Literal { kind: Str { terminated: true } }, + " " @ Space, + "-- This is a comment" @ LineComment, + "\n" @ LineEnding { count: 1 }, + "' World'" @ Literal { kind: Str { terminated: true } }, + ";" @ Semi, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__numeric.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__numeric.snap new file mode 100644 index 00000000..95fdb27a --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__numeric.snap @@ -0,0 +1,30 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: "lex(r#\"\n42\n3.5\n4.\n.001\n.123e10\n5e2\n1.925e-3\n1e-10\n1e+10\n1e10\n4664.E+5\n\"#)" +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "42" @ Literal { kind: Int { base: Decimal, empty_int: false } }, + "\n" @ LineEnding { count: 1 }, + "3.5" @ Literal { kind: Float { base: Decimal, empty_exponent: false } }, + "\n" @ LineEnding { count: 1 }, + "4." 
@ Literal { kind: Float { base: Decimal, empty_exponent: false } }, + "\n" @ LineEnding { count: 1 }, + ".001" @ Literal { kind: Int { base: Decimal, empty_int: false } }, + "\n" @ LineEnding { count: 1 }, + ".123e10" @ Literal { kind: Float { base: Decimal, empty_exponent: false } }, + "\n" @ LineEnding { count: 1 }, + "5e2" @ Literal { kind: Float { base: Decimal, empty_exponent: false } }, + "\n" @ LineEnding { count: 1 }, + "1.925e-3" @ Literal { kind: Float { base: Decimal, empty_exponent: false } }, + "\n" @ LineEnding { count: 1 }, + "1e-10" @ Literal { kind: Float { base: Decimal, empty_exponent: false } }, + "\n" @ LineEnding { count: 1 }, + "1e+10" @ Literal { kind: Float { base: Decimal, empty_exponent: false } }, + "\n" @ LineEnding { count: 1 }, + "1e10" @ Literal { kind: Float { base: Decimal, empty_exponent: false } }, + "\n" @ LineEnding { count: 1 }, + "4664.E+5" @ Literal { kind: Float { base: Decimal, empty_exponent: false } }, + "\n" @ LineEnding { count: 1 }, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__numeric_non_decimal.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__numeric_non_decimal.snap new file mode 100644 index 00000000..e4430348 --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__numeric_non_decimal.snap @@ -0,0 +1,20 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: "lex(r#\"\n0b100101\n0B10011001\n0o273\n0O755\n0x42f\n0XFFFF\n\"#)" +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "0b100101" @ Literal { kind: Int { base: Binary, empty_int: false } }, + "\n" @ LineEnding { count: 1 }, + "0B10011001" @ Literal { kind: Int { base: Binary, empty_int: false } }, + "\n" @ LineEnding { count: 1 }, + "0o273" @ Literal { kind: Int { base: Octal, empty_int: false } }, + "\n" @ LineEnding { count: 1 }, + "0O755" @ Literal { kind: Int { base: Octal, empty_int: false } }, + "\n" @ LineEnding { count: 1 }, + "0x42f" @ Literal { kind: Int { base: Hexadecimal, empty_int: false } }, + "\n" @ LineEnding { count: 1 }, + "0XFFFF" @ Literal { kind: Int { base: Hexadecimal, empty_int: false } }, + "\n" @ LineEnding { count: 1 }, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__numeric_with_seperators.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__numeric_with_seperators.snap new file mode 100644 index 00000000..cd0ecb21 --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__numeric_with_seperators.snap @@ -0,0 +1,18 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: "lex(r#\"\n1_500_000_000\n0b10001000_00000000\n0o_1_755\n0xFFFF_FFFF\n1.618_034\n\"#)" +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "1_500_000_000" @ Literal { kind: Int { base: Decimal, empty_int: false } }, + "\n" @ LineEnding { count: 1 }, + "0b10001000_00000000" @ Literal { kind: Int { base: Binary, empty_int: false } }, + "\n" @ LineEnding { count: 1 }, + "0o_1_755" @ Literal { kind: Int { base: Octal, empty_int: false } }, + "\n" @ LineEnding { count: 1 }, + "0xFFFF_FFFF" @ Literal { kind: Int { base: Hexadecimal, empty_int: false } }, + "\n" @ LineEnding { count: 1 }, + "1.618_034" @ Literal { kind: Float { base: Decimal, empty_exponent: false } }, + "\n" @ LineEnding { count: 1 }, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__params.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__params.snap new file mode 100644 index 00000000..6a436417 --- /dev/null +++ 
b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__params.snap @@ -0,0 +1,27 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: "lex(r#\"\nselect $1 + $2;\n\nselect $1123123123123;\n\nselect $;\n\"#)" +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "select" @ Ident, + " " @ Space, + "$1" @ PositionalParam, + " " @ Space, + "+" @ Plus, + " " @ Space, + "$2" @ PositionalParam, + ";" @ Semi, + "\n\n" @ LineEnding { count: 2 }, + "select" @ Ident, + " " @ Space, + "$1123123123123" @ PositionalParam, + ";" @ Semi, + "\n\n" @ LineEnding { count: 2 }, + "select" @ Ident, + " " @ Space, + "$" @ PositionalParam, + ";" @ Semi, + "\n" @ LineEnding { count: 1 }, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__quoted_ident.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__quoted_ident.snap new file mode 100644 index 00000000..e1dffb06 --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__quoted_ident.snap @@ -0,0 +1,12 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: "lex(r#\"\n\"hello &1 -world\";\n\n\n\"hello-world\n\"#)" +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "\"hello &1 -world\"" @ QuotedIdent { terminated: true }, + ";" @ Semi, + "\n\n\n" @ LineEnding { count: 3 }, + "\"hello-world\n" @ QuotedIdent { terminated: false }, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__quoted_ident_with_escape_quote.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__quoted_ident_with_escape_quote.snap new file mode 100644 index 00000000..44ff06e5 --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__quoted_ident_with_escape_quote.snap @@ -0,0 +1,10 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: "lex(r#\"\n\"foo \"\" bar\"\n\"#)" +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "\"foo \"\" bar\"" @ QuotedIdent { terminated: true }, + "\n" @ LineEnding { count: 1 }, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__select_with_period.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__select_with_period.snap new file mode 100644 index 00000000..bc03da6a --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__select_with_period.snap @@ -0,0 +1,15 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: "lex(r#\"\nselect public.users;\n\"#)" +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "select" @ Ident, + " " @ Space, + "public" @ Ident, + "." 
@ Dot, + "users" @ Ident, + ";" @ Semi, + "\n" @ LineEnding { count: 1 }, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__string.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__string.snap new file mode 100644 index 00000000..c7e5b8ba --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__string.snap @@ -0,0 +1,26 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: "lex(r#\"\n'Dianne''s horse'\n\nselect 'foo ''\nbar';\n\nselect 'foooo'\n 'bar';\n\n\n'foo \\\\ \\n \\tbar'\n\n'forgot to close the string\n\"#)" +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "'Dianne''s horse'" @ Literal { kind: Str { terminated: true } }, + "\n\n" @ LineEnding { count: 2 }, + "select" @ Ident, + " " @ Space, + "'foo ''\nbar'" @ Literal { kind: Str { terminated: true } }, + ";" @ Semi, + "\n\n" @ LineEnding { count: 2 }, + "select" @ Ident, + " " @ Space, + "'foooo'" @ Literal { kind: Str { terminated: true } }, + "\n" @ LineEnding { count: 1 }, + " " @ Space, + "'bar'" @ Literal { kind: Str { terminated: true } }, + ";" @ Semi, + "\n\n\n" @ LineEnding { count: 3 }, + "'foo \\\\ \\n \\tbar'" @ Literal { kind: Str { terminated: true } }, + "\n\n" @ LineEnding { count: 2 }, + "'forgot to close the string\n" @ Literal { kind: Str { terminated: false } }, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__string_unicode_escape.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__string_unicode_escape.snap new file mode 100644 index 00000000..225a208a --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__string_unicode_escape.snap @@ -0,0 +1,20 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: "lex(r#\"\nU&\"d\\0061t\\+000061\"\n\nU&\"\\0441\\043B\\043E\\043D\"\n\nu&'\\0441\\043B'\n\nU&\"d!0061t!+000061\" UESCAPE '!'\n\"#)" +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "U&\"d\\0061t\\+000061\"" @ QuotedIdent { terminated: true }, + "\n\n" @ LineEnding { count: 2 }, + "U&\"\\0441\\043B\\043E\\043D\"" @ QuotedIdent { terminated: true }, + "\n\n" @ LineEnding { count: 2 }, + "u&'\\0441\\043B'" @ Literal { kind: UnicodeEscStr { terminated: true } }, + "\n\n" @ LineEnding { count: 2 }, + "U&\"d!0061t!+000061\"" @ QuotedIdent { terminated: true }, + " " @ Space, + "UESCAPE" @ Ident, + " " @ Space, + "'!'" @ Literal { kind: Str { terminated: true } }, + "\n" @ LineEnding { count: 1 }, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__string_with_escapes.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__string_with_escapes.snap new file mode 100644 index 00000000..bbc94048 --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__string_with_escapes.snap @@ -0,0 +1,20 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: "lex(r#\"\nE'foo'\n\ne'bar'\n\ne'\\b\\f\\n\\r\\t'\n\ne'\\0\\11\\777'\n\ne'\\x0\\x11\\xFF'\n\ne'\\uAAAA \\UFFFFFFFF'\n\n\"#)" +snapshot_kind: text +--- +[ + "\n" @ LineEnding { count: 1 }, + "E'foo'" @ Literal { kind: EscStr { terminated: true } }, + "\n\n" @ LineEnding { count: 2 }, + "e'bar'" @ Literal { kind: EscStr { terminated: true } }, + "\n\n" @ LineEnding { count: 2 }, + "e'\\b\\f\\n\\r\\t'" @ Literal { kind: EscStr { terminated: true } }, + "\n\n" @ LineEnding { count: 2 }, + "e'\\0\\11\\777'" @ Literal { kind: EscStr { terminated: true } }, + "\n\n" @ LineEnding { count: 2 }, + "e'\\x0\\x11\\xFF'" @ Literal { kind: EscStr { terminated: true } }, + "\n\n" @ 
LineEnding { count: 2 }, + "e'\\uAAAA \\UFFFFFFFF'" @ Literal { kind: EscStr { terminated: true } }, + "\n\n" @ LineEnding { count: 2 }, +] diff --git a/crates/pgt_tokenizer/src/token.rs b/crates/pgt_tokenizer/src/token.rs new file mode 100644 index 00000000..50a7d12a --- /dev/null +++ b/crates/pgt_tokenizer/src/token.rs @@ -0,0 +1,170 @@ +// based on: https://github.com/rust-lang/rust/blob/d1b7355d3d7b4ead564dbecb1d240fcc74fff21b/compiler/rustc_lexer/src/lib.rs#L58 +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum TokenKind { + /// Used when there's an error of some sort while lexing. + Unknown, + /// Examples: `12u8`, `1.0e-40`, `b"123"`. Note that `_` is an invalid + /// suffix, but may be present here on string and float literals. Users of + /// this type will need to check for and reject that case. + /// + /// See [`LiteralKind`] for more details. + Literal { + kind: LiteralKind, + }, + /// Whitespace characters. + Space, + Tab, + VerticalTab, + FormFeed, + // Handles \n, \r, and sequences + LineEnding { + count: usize, + }, + /// Identifier + /// + /// case-sensitive + Ident, + /// `;` + Semi, + /// End of file + Eof, + /// `/` + Slash, + /// `\` + Backslash, + /// `-- foo` + LineComment, + /// ``` + /// /* + /// foo + /// */ + /// ``` + BlockComment { + terminated: bool, + }, + /// `-` + Minus, + /// `:` + Colon, + /// `.` + Dot, + /// `=` + Eq, + /// `>` + Gt, + /// `&` + And, + /// `<` + Lt, + /// `!` + Bang, + /// `+` + Plus, + /// `~` + Tilde, + /// `#` + Pound, + /// `?` + Question, + /// `|` + Or, + /// `%` + Percent, + /// `^` + Caret, + /// `*` + Star, + /// `` ` `` + Backtick, + /// `@` + At, + /// `]` + CloseBracket, + /// `[` + OpenBracket, + /// `)` + CloseParen, + /// `(` + OpenParen, + /// `,` + Comma, + /// Error case that we need to report later on. + UnknownPrefix, + /// Positional Parameter, e.g., `$1` + /// + /// see: + PositionalParam, + /// Quoted Identifier, e.g., `"update"` in `update "my_table" set "a" = 5;` + /// + /// These are case-sensitive, unlike [`TokenKind::Ident`] + /// + /// see: + QuotedIdent { + terminated: bool, + }, +} + +/// Parsed token. +/// It doesn't contain information about data that has been parsed, +/// only the type of the token and its size. +#[derive(Debug, Clone, Copy)] +pub struct Token { + pub kind: TokenKind, + pub len: u32, +} + +impl Token { + pub(crate) fn new(kind: TokenKind, len: u32) -> Token { + Token { kind, len } + } +} + +/// Base of numeric literal encoding according to its prefix. +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub enum Base { + /// Literal starts with "0b". + Binary = 2, + /// Literal starts with "0o". + Octal = 8, + /// Literal doesn't contain a prefix. + Decimal = 10, + /// Literal starts with "0x". + Hexadecimal = 16, +} + +// Enum representing the literal types supported by the lexer. 
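+// e.g. `42` lexes to Int, `1.925e-3` to Float, and `$$Dianne's horse$$` to DollarQuotedString, as exercised by the lexer snapshots above.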
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub enum LiteralKind { + /// Integer Numeric, e.g., `42` + /// + /// see: + Int { base: Base, empty_int: bool }, + /// Float Numeric, e.g., `1.925e-3` + /// + /// see: + Float { base: Base, empty_exponent: bool }, + /// String, e.g., `'foo'` + /// + /// see: + Str { terminated: bool }, + /// Hexadecimal Bit String, e.g., `X'1FF'` + /// + /// see: + ByteStr { terminated: bool }, + /// Bit String, e.g., `B'1001'` + /// + /// see: + BitStr { terminated: bool }, + /// Dollar Quoted String, e.g., `$$Dianne's horse$$` + /// + /// see: + DollarQuotedString { terminated: bool }, + /// Unicode Escape String, e.g., `U&'d\0061t\+000061'` + /// + /// see: + UnicodeEscStr { terminated: bool }, + /// Escape String, e.g., `E'foo'` + /// + /// see: + EscStr { terminated: bool }, +} diff --git a/crates/pgt_workspace/src/workspace/server/annotation.rs b/crates/pgt_workspace/src/workspace/server/annotation.rs index 2fdf32eb..db6a8b3b 100644 --- a/crates/pgt_workspace/src/workspace/server/annotation.rs +++ b/crates/pgt_workspace/src/workspace/server/annotation.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use dashmap::DashMap; -use pgt_lexer::{SyntaxKind, WHITESPACE_TOKENS}; +use pgt_lexer::SyntaxKind; use super::statement_identifier::StatementId; @@ -11,9 +11,18 @@ pub struct StatementAnnotations { } pub struct AnnotationStore { - db: DashMap<StatementId, Option<Arc<StatementAnnotations>>>, + db: DashMap<StatementId, Arc<StatementAnnotations>>, } +const WHITESPACE_TOKENS: [SyntaxKind; 6] = [ + SyntaxKind::SPACE, + SyntaxKind::TAB, + SyntaxKind::VERTICAL_TAB, + SyntaxKind::FORM_FEED, + SyntaxKind::LINE_ENDING, + SyntaxKind::EOF, +]; + impl AnnotationStore { pub fn new() -> AnnotationStore { AnnotationStore { db: DashMap::new() } @@ -24,26 +33,26 @@ impl AnnotationStore { &self, statement_id: &StatementId, content: &str, - ) -> Option<Arc<StatementAnnotations>> { + ) -> Arc<StatementAnnotations> { if let Some(existing) = self.db.get(statement_id).map(|x| x.clone()) { return existing; } - // we swallow the error here because the lexing within the document would have already - // thrown and we wont even get here if that happened.
- let annotations = pgt_lexer::lex(content).ok().map(|tokens| { - let ends_with_semicolon = tokens - .iter() - .rev() - .find(|token| !WHITESPACE_TOKENS.contains(&token.kind)) - .is_some_and(|token| token.kind == SyntaxKind::Ascii59); - - Arc::new(StatementAnnotations { - ends_with_semicolon, - }) + let lexed = pgt_lexer::lex(content); + + let ends_with_semicolon = (0..lexed.len()) + // Iterate through tokens in reverse to find the last non-whitespace token + .filter(|t| !WHITESPACE_TOKENS.contains(&lexed.kind(*t))) + .next_back() + .map(|t| lexed.kind(t) == SyntaxKind::SEMICOLON) + .unwrap_or(false); + + let annotations = Arc::new(StatementAnnotations { + ends_with_semicolon, }); - self.db.insert(statement_id.clone(), None); + self.db.insert(statement_id.clone(), annotations.clone()); + annotations } @@ -80,8 +89,7 @@ mod tests { let annotations = store.get_annotations(&statement_id, content); - assert!(annotations.is_some()); - assert_eq!(annotations.unwrap().ends_with_semicolon, *expected); + assert_eq!(annotations.ends_with_semicolon, *expected); } } } diff --git a/crates/pgt_workspace/src/workspace/server/change.rs b/crates/pgt_workspace/src/workspace/server/change.rs index 62e3da03..cc455134 100644 --- a/crates/pgt_workspace/src/workspace/server/change.rs +++ b/crates/pgt_workspace/src/workspace/server/change.rs @@ -445,9 +445,7 @@ fn get_affected(content: &str, range: TextRange) -> &str { #[cfg(test)] mod tests { - use super::*; - use pgt_diagnostics::Diagnostic; use pgt_text_size::TextRange; use crate::workspace::{ChangeFileParams, ChangeParams}; @@ -462,9 +460,7 @@ mod tests { } fn assert_document_integrity(d: &Document) { - let ranges = pgt_statement_splitter::split(&d.content) - .expect("Unexpected scan error") - .ranges; + let ranges = pgt_statement_splitter::split(&d.content).ranges; assert!( ranges.len() == d.positions.len(), @@ -479,16 +475,6 @@ mod tests { ); } - #[test] - fn open_doc_with_scan_error() { - let input = "select id from users;\n\n\n\nselect 1443ddwwd33djwdkjw13331333333333;"; - - let d = Document::new(input.to_string(), 0); - - assert_eq!(d.positions.len(), 0); - assert!(d.has_fatal_error()); - } - #[test] fn comments_at_begin() { let path = PgTPath::new("test.sql"); @@ -621,149 +607,6 @@ mod tests { assert_document_integrity(&d); } - #[test] - fn change_into_scan_error_within_statement() { - let path = PgTPath::new("test.sql"); - let input = "select id from users;\n\n\n\nselect 1;"; - - let mut d = Document::new(input.to_string(), 0); - - assert_eq!(d.positions.len(), 2); - assert!(!d.has_fatal_error()); - - let change = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: "d".to_string(), - range: Some(TextRange::new(33.into(), 33.into())), - }], - }; - - let changed = d.apply_file_change(&change); - - assert_eq!(d.content, "select id from users;\n\n\n\nselect 1d;"); - assert!( - changed - .iter() - .all(|c| matches!(c, StatementChange::Deleted(_))), - "should delete all statements" - ); - assert!(d.positions.is_empty(), "should clear all positions"); - assert_eq!(d.diagnostics.len(), 1, "should return a scan error"); - assert_eq!( - d.diagnostics[0].location().span, - Some(TextRange::new(32.into(), 34.into())), - "should have correct span" - ); - assert!(d.has_fatal_error()); - } - - #[test] - fn change_into_scan_error_across_statements() { - let path = PgTPath::new("test.sql"); - let input = "select id from users;\n\n\n\nselect 1;"; - - let mut d = Document::new(input.to_string(), 0); - - 
assert_eq!(d.positions.len(), 2); - assert!(!d.has_fatal_error()); - - let change = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: "1d".to_string(), - range: Some(TextRange::new(7.into(), 33.into())), - }], - }; - - let changed = d.apply_file_change(&change); - - assert_eq!(d.content, "select 1d;"); - assert!( - changed - .iter() - .all(|c| matches!(c, StatementChange::Deleted(_))), - "should delete all statements" - ); - assert!(d.positions.is_empty(), "should clear all positions"); - assert_eq!(d.diagnostics.len(), 1, "should return a scan error"); - assert_eq!( - d.diagnostics[0].location().span, - Some(TextRange::new(7.into(), 9.into())), - "should have correct span" - ); - assert!(d.has_fatal_error()); - } - - #[test] - fn change_from_invalid_to_invalid() { - let path = PgTPath::new("test.sql"); - let input = "select 1d;"; - - let mut d = Document::new(input.to_string(), 0); - - assert_eq!(d.positions.len(), 0); - assert!(d.has_fatal_error()); - assert_eq!(d.diagnostics.len(), 1); - - let change = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: "2e".to_string(), - range: Some(TextRange::new(7.into(), 9.into())), - }], - }; - - let changed = d.apply_file_change(&change); - - assert_eq!(d.content, "select 2e;"); - assert!(changed.is_empty(), "should not emit any changes"); - assert!(d.positions.is_empty(), "should keep positions empty"); - assert_eq!(d.diagnostics.len(), 1, "should still have a scan error"); - assert_eq!( - d.diagnostics[0].location().span, - Some(TextRange::new(7.into(), 9.into())), - "should have updated span" - ); - assert!(d.has_fatal_error()); - } - - #[test] - fn change_from_invalid_to_valid() { - let path = PgTPath::new("test.sql"); - let input = "select 1d;"; - - let mut d = Document::new(input.to_string(), 0); - - assert_eq!(d.positions.len(), 0); - assert!(d.has_fatal_error()); - assert_eq!(d.diagnostics.len(), 1); - - let change = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: "1".to_string(), - range: Some(TextRange::new(7.into(), 9.into())), - }], - }; - - let changed = d.apply_file_change(&change); - - assert_eq!(d.content, "select 1;"); - assert_eq!(changed.len(), 1, "should emit one change"); - assert!(matches!( - changed[0], - StatementChange::Added(AddedStatement { .. 
}) - )); - assert_eq!(d.positions.len(), 1, "should have one position"); - assert!(d.diagnostics.is_empty(), "should have no diagnostics"); - assert!(!d.has_fatal_error()); - } - #[test] fn within_statements() { let path = PgTPath::new("test.sql"); diff --git a/crates/pgt_workspace/src/workspace/server/document.rs b/crates/pgt_workspace/src/workspace/server/document.rs index ed0ca40f..89516b23 100644 --- a/crates/pgt_workspace/src/workspace/server/document.rs +++ b/crates/pgt_workspace/src/workspace/server/document.rs @@ -62,32 +62,21 @@ pub(crate) fn split_with_diagnostics( offset: Option<TextSize>, ) -> (Vec<TextRange>, Vec<SDiagnostic>) { let o = offset.unwrap_or_else(|| 0.into()); - match pgt_statement_splitter::split(content) { - Ok(parse) => ( - parse.ranges, - parse - .errors - .into_iter() - .map(|err| { - SDiagnostic::new( - err.clone() - .with_file_span(err.location().span.map(|r| r + o)), - ) - }) - .collect(), - ), - Err(errs) => ( - vec![], - errs.into_iter() - .map(|err| { - SDiagnostic::new( - err.clone() - .with_file_span(err.location().span.map(|r| r + o)), - ) - }) - .collect(), - ), - } + let result = pgt_statement_splitter::split(content); + + ( + result.ranges, + result + .errors + .into_iter() + .map(|err| { + SDiagnostic::new( + err.clone() + .with_file_span(err.location().span.map(|r| r + o)), + ) + }) + .collect(), + ) } pub struct StatementIterator<'a> { diff --git a/docs/codegen/src/rules_docs.rs b/docs/codegen/src/rules_docs.rs index 92f0dc42..68db53db 100644 --- a/docs/codegen/src/rules_docs.rs +++ b/docs/codegen/src/rules_docs.rs @@ -442,7 +442,7 @@ fn print_diagnostics( }); // split and parse each statement - let stmts = pgt_statement_splitter::split(code).expect("unexpected parse error"); + let stmts = pgt_statement_splitter::split(code); for stmt in stmts.ranges { match pgt_query_ext::parse(&code[stmt]) { Ok(ast) => { diff --git a/xtask/rules_check/src/lib.rs b/xtask/rules_check/src/lib.rs index 68a6d650..da4b4c73 100644 --- a/xtask/rules_check/src/lib.rs +++ b/xtask/rules_check/src/lib.rs @@ -126,52 +126,47 @@ fn assert_lint( filter, }); - // split and parse each statement - match pgt_statement_splitter::split(code) { - Ok(stmts) => { - for stmt in stmts.ranges { - match pgt_query_ext::parse(&code[stmt]) { - Ok(ast) => { - for rule_diag in analyser.run(pgt_analyser::AnalyserContext { root: &ast }) - { - let diag = pgt_diagnostics::serde::Diagnostic::new(rule_diag); - - let category = diag.category().expect("linter diagnostic has no code"); - let severity = settings.get_severity_from_rule_code(category).expect( "If you see this error, it means you need to run cargo codegen-configuration", ); - let error = diag - .with_severity(severity) - .with_file_path(&file_path) - .with_file_source_code(code); - - write_diagnostic(code, error)?; - } - } - Err(e) => { - let error = SyntaxDiagnostic::from(e) - .with_file_path(&file_path) - .with_file_source_code(code); - write_diagnostic(code, error)?; - } - }; + let result = pgt_statement_splitter::split(code); + for stmt in result.ranges { match pgt_query_ext::parse(&code[stmt]) { Ok(ast) => { + for rule_diag in analyser.run(pgt_analyser::AnalyserContext { root: &ast }) { + let diag = pgt_diagnostics::serde::Diagnostic::new(rule_diag); + + let category = diag.category().expect("linter diagnostic has no code"); + let severity = settings.get_severity_from_rule_code(category).expect( "If you see this error, it means you need to run cargo codegen-configuration", ); - let error = diag + .with_severity(severity) + .with_file_path(&file_path) + .with_file_source_code(code); + + write_diagnostic(code, error)?; + } } - } - Err(errs) => {
- // Print all diagnostics to help the user - let mut console = pgt_console::EnvConsole::default(); - for err in errs { - console.println( - pgt_console::LogLevel::Error, - markup! { - {PrintDiagnostic::verbose(&err)} - }, - ); + Err(e) => { + let error = SyntaxDiagnostic::from(e) + .with_file_path(&file_path) + .with_file_source_code(code); + write_diagnostic(code, error)?; } - bail!("Analysis of '{group}/{rule}' on the following code block returned a scan diagnostic.\n\n{code}"); + }; + } + if !result.errors.is_empty() { + // Print all diagnostics to help the user + let mut console = pgt_console::EnvConsole::default(); + for err in result.errors { + console.println( + pgt_console::LogLevel::Error, + markup! { + {PrintDiagnostic::verbose(&err)} + }, + ); } - }; + bail!("Analysis of '{group}/{rule}' on the following code block returned a scan diagnostic.\n\n{code}"); + } Ok(()) } From ffeb33f678dced3667586753287d946071ff1dc1 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Mon, 7 Jul 2025 08:41:00 +0200 Subject: [PATCH 084/114] feat: linter suppressions (#440) --- Cargo.lock | 11 + Cargo.toml | 1 + crates/pgt_analyse/src/categories.rs | 21 + crates/pgt_diagnostics/src/location.rs | 4 +- crates/pgt_suppressions/Cargo.toml | 18 + crates/pgt_suppressions/src/lib.rs | 351 ++++++++++++++ crates/pgt_suppressions/src/line_index.rs | 43 ++ crates/pgt_suppressions/src/parser.rs | 353 ++++++++++++++ crates/pgt_suppressions/src/suppression.rs | 459 ++++++++++++++++++ crates/pgt_workspace/Cargo.toml | 1 + crates/pgt_workspace/src/workspace/server.rs | 20 + .../src/workspace/server/analyser.rs | 1 + .../src/workspace/server/change.rs | 2 + .../src/workspace/server/document.rs | 6 + .../src/workspace/server/parsed_document.rs | 5 + 15 files changed, 1294 insertions(+), 2 deletions(-) create mode 100644 crates/pgt_suppressions/Cargo.toml create mode 100644 crates/pgt_suppressions/src/lib.rs create mode 100644 crates/pgt_suppressions/src/line_index.rs create mode 100644 crates/pgt_suppressions/src/parser.rs create mode 100644 crates/pgt_suppressions/src/suppression.rs diff --git a/Cargo.lock b/Cargo.lock index 074ed19b..ce816de8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2867,6 +2867,16 @@ dependencies = [ "regex", ] +[[package]] +name = "pgt_suppressions" +version = "0.0.0" +dependencies = [ + "pgt_analyse", + "pgt_diagnostics", + "pgt_text_size", + "tracing", +] + [[package]] name = "pgt_test_macros" version = "0.0.0" @@ -2976,6 +2986,7 @@ dependencies = [ "pgt_query_ext", "pgt_schema_cache", "pgt_statement_splitter", + "pgt_suppressions", "pgt_text_size", "pgt_typecheck", "rustc-hash 2.1.0", diff --git a/Cargo.toml b/Cargo.toml index b5d6dd01..15c6f02f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -78,6 +78,7 @@ pgt_query_ext = { path = "./crates/pgt_query_ext", version = "0.0.0 pgt_query_proto_parser = { path = "./crates/pgt_query_proto_parser", version = "0.0.0" } pgt_schema_cache = { path = "./crates/pgt_schema_cache", version = "0.0.0" } pgt_statement_splitter = { path = "./crates/pgt_statement_splitter", version = "0.0.0" } +pgt_suppressions = { path = "./crates/pgt_suppressions", version = "0.0.0" } pgt_text_edit = { path = "./crates/pgt_text_edit", version = "0.0.0" } pgt_text_size = { path = "./crates/pgt_text_size", version = "0.0.0" } pgt_tokenizer = { path = "./crates/pgt_tokenizer", version = "0.0.0" } diff --git a/crates/pgt_analyse/src/categories.rs b/crates/pgt_analyse/src/categories.rs index e5dd51c2..02819a4e 100644 --- 
a/crates/pgt_analyse/src/categories.rs +++ b/crates/pgt_analyse/src/categories.rs @@ -16,6 +16,27 @@ pub enum RuleCategory { Transformation, } +impl TryFrom<String> for RuleCategory { + type Error = String; + + fn try_from(value: String) -> Result<Self, Self::Error> { + value.as_str().try_into() + } +} + +impl TryFrom<&str> for RuleCategory { + type Error = String; + + fn try_from(value: &str) -> Result<Self, Self::Error> { + match value { + "lint" => Ok(Self::Lint), + "action" => Ok(Self::Action), + "transformation" => Ok(Self::Transformation), + _ => Err(format!("Invalid Rule Category: {}", value)), + } + } +} + /// Actions that suppress rules should start with this string pub const SUPPRESSION_ACTION_CATEGORY: &str = "quickfix.suppressRule"; diff --git a/crates/pgt_diagnostics/src/location.rs b/crates/pgt_diagnostics/src/location.rs index cbd8e646..e17ace9c 100644 --- a/crates/pgt_diagnostics/src/location.rs +++ b/crates/pgt_diagnostics/src/location.rs @@ -41,13 +41,13 @@ impl Eq for Location<'_> {} #[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] #[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] #[serde(rename_all = "camelCase")] -pub enum Resource<P> { +pub enum Resource<Path> { /// The diagnostic is related to the content of the command line arguments. Argv, /// The diagnostic is related to the content of a memory buffer. Memory, /// The diagnostic is related to a file on the filesystem. - File(P), + File(Path), } impl<P> Resource<P> { diff --git a/crates/pgt_suppressions/Cargo.toml b/crates/pgt_suppressions/Cargo.toml new file mode 100644 index 00000000..ee723b3b --- /dev/null +++ b/crates/pgt_suppressions/Cargo.toml @@ -0,0 +1,18 @@ + +[package] +authors.workspace = true +categories.workspace = true +description = "Provides an API that parses suppressions from SQL files, and provides a way to check if a diagnostic is suppressed." +edition.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +name = "pgt_suppressions" +repository.workspace = true +version = "0.0.0" + +[dependencies] +pgt_analyse = { workspace = true } +pgt_diagnostics = { workspace = true } +pgt_text_size = { workspace = true } +tracing = { workspace = true } diff --git a/crates/pgt_suppressions/src/lib.rs b/crates/pgt_suppressions/src/lib.rs new file mode 100644 index 00000000..2577ea41 --- /dev/null +++ b/crates/pgt_suppressions/src/lib.rs @@ -0,0 +1,351 @@ +use std::collections::HashMap; +pub mod parser; +pub mod suppression; + +use pgt_analyse::RuleFilter; +use pgt_diagnostics::{Diagnostic, MessageAndDescription}; + +pub mod line_index; + +use line_index::LineIndex; + +use crate::{ + parser::SuppressionsParser, + suppression::{RangeSuppression, RuleSpecifier, Suppression, SuppressionDiagnostic}, +}; + +type Line = usize; + +#[derive(Debug, Default, Clone)] +pub struct Suppressions { + file_suppressions: Vec<Suppression>, + line_suppressions: std::collections::HashMap<Line, Suppression>, + range_suppressions: Vec<RangeSuppression>, + pub diagnostics: Vec<SuppressionDiagnostic>, + line_index: LineIndex, +} + +impl From<&str> for Suppressions { + fn from(doc: &str) -> Self { + SuppressionsParser::parse(doc) + } +} +impl From<String> for Suppressions { + fn from(doc: String) -> Self { + SuppressionsParser::parse(doc.as_str()) + } +} + +impl Suppressions { + /// Some diagnostics can be turned off via the configuration. + /// This will mark suppressions that try to suppress these disabled diagnostics as errors.
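+ /// e.g., if `lint/safety/banDropTable` is disabled in the configuration, a `-- pgt-ignore lint/safety/banDropTable` comment is reported as having no effect.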
+ pub fn get_disabled_diagnostic_suppressions_as_errors( + &self, + disabled_rules: &[RuleFilter<'_>], + ) -> Vec<SuppressionDiagnostic> { + let mut diagnostics = vec![]; + + { + let disabled = self + .file_suppressions + .iter() + .filter(|s| s.rule_specifier.is_disabled(disabled_rules)); + + for suppr in disabled { + diagnostics.push(suppr.to_disabled_diagnostic()); + } + } + + { + let disabled = self + .line_suppressions + .iter() + .filter(|(_, s)| s.rule_specifier.is_disabled(disabled_rules)); + + for (_, suppr) in disabled { + diagnostics.push(suppr.to_disabled_diagnostic()); + } + } + + { + let disabled = self.range_suppressions.iter().filter(|s| { + s.start_suppression + .rule_specifier + .is_disabled(disabled_rules) + }); + + for range_suppr in disabled { + diagnostics.push(range_suppr.start_suppression.to_disabled_diagnostic()); + } + } + + diagnostics + } + + pub fn get_unused_suppressions_as_errors<D: Diagnostic>( + &self, + diagnostics: &[D], + ) -> Vec<SuppressionDiagnostic> { + let mut results = vec![]; + + let mut diagnostics_by_line: HashMap<usize, Vec<&D>> = HashMap::new(); + for diag in diagnostics { + if let Some(line) = diag + .location() + .span + .and_then(|sp| self.line_index.line_for_offset(sp.start())) + { + let entry = diagnostics_by_line.entry(line); + entry + .and_modify(|current| { + current.push(diag); + }) + .or_insert(vec![diag]); + } + } + + // Users may use many suppressions for a single diagnostic, like so: + // ``` + // -- pgt-ignore lint/safety/banDropTable + // -- pgt-ignore lint/safety/banDropColumn + // <statement> + // ``` + // So to find a matching diagnostic for any suppression, we're moving + // down lines until we find a line where there's no suppression. + for (line, suppr) in &self.line_suppressions { + let mut expected_diagnostic_line = line + 1; + while self + .line_suppressions + .contains_key(&expected_diagnostic_line) + { + expected_diagnostic_line += 1; + } + + if diagnostics_by_line + .get(&expected_diagnostic_line) + .is_some_and(|diags| { + diags.iter().any(|d| { + d.category() + .is_some_and(|cat| match RuleSpecifier::try_from(cat.name()) { + Ok(spec) => suppr.matches(&spec), + Err(_) => false, + }) + }) + }) + { + continue; + } else { + results.push(SuppressionDiagnostic { + span: suppr.suppression_range, + message: MessageAndDescription::from( + "This suppression has no effect.".to_string(), + ), + }) + } + } + + results + } + + pub fn is_suppressed<D: Diagnostic>(&self, diagnostic: &D) -> bool { + diagnostic + .category() + .map(|c| match RuleSpecifier::try_from(c.name()) { + Ok(specifier) => { + self.by_file_suppression(&specifier) + || self.by_range_suppression(diagnostic, &specifier) + || self.by_line_suppression(diagnostic, &specifier) + } + Err(_) => false, + }) + .unwrap_or(false) + } + + fn by_file_suppression(&self, specifier: &RuleSpecifier) -> bool { + self.file_suppressions.iter().any(|s| s.matches(specifier)) + } + + fn by_line_suppression<D: Diagnostic>( + &self, + diagnostic: &D, + specifier: &RuleSpecifier, + ) -> bool { + self.get_eligible_line_suppressions_for_diagnostic(diagnostic) + .iter() + .any(|s| s.matches(specifier)) + } + + fn by_range_suppression<D: Diagnostic>( + &self, + diagnostic: &D, + specifier: &RuleSpecifier, + ) -> bool { + self.range_suppressions.iter().any(|range_suppr| { + range_suppr.start_suppression.matches(specifier) + && diagnostic + .location() + .span + .is_some_and(|sp| range_suppr.suppressed_range.contains_range(sp)) + }) + } + + fn get_eligible_line_suppressions_for_diagnostic<D: Diagnostic>( + &self, + diagnostic: &D, + ) -> Vec<&Suppression> { + diagnostic + .location() + .span + .and_then(|span| 
self.line_index.line_for_offset(span.start())) + .filter(|line_no| *line_no > 0) + .map(|mut line_no| { + let mut eligible = vec![]; + + // one-for-one, we're checking the lines above a diagnostic location + // until there are no more suppressions + line_no -= 1; + while let Some(suppr) = self.line_suppressions.get(&line_no) { + eligible.push(suppr); + line_no -= 1; + } + + eligible + }) + .unwrap_or_default() + } +} + +#[cfg(test)] +mod tests { + use pgt_diagnostics::{Diagnostic, MessageAndDescription}; + use pgt_text_size::TextRange; + + use crate::suppression::SuppressionDiagnostic; + + #[derive(Clone, Debug, Diagnostic)] + #[diagnostic(category = "lint", severity = Error)] + pub struct TestDiagnostic { + #[location(span)] + pub span: TextRange, + } + + #[test] + fn correctly_suppresses_diagnostics_at_top_level() { + let doc = r#" + -- pgt-ignore-all lint + + select 1; + "#; + + let len_doc: u32 = doc.len().try_into().unwrap(); + + let suppressions = super::Suppressions::from(doc); + + assert!(suppressions.is_suppressed(&TestDiagnostic { + span: TextRange::new((len_doc - 10).into(), len_doc.into()), + })); + } + + #[test] + fn correctly_suppresses_diagnostics_at_line() { + let doc = r#" + select 2; + + -- pgt-ignore lint + select 1; + "#; + + let suppressions = super::Suppressions::from(doc); + + assert!(suppressions.is_suppressed(&TestDiagnostic { + span: TextRange::new(67.into(), 76.into()), + })); + } + + #[test] + fn correctly_suppresses_with_multiple_line_diagnostics() { + let doc = r#" + select 2; + + -- pgt-ignore lint + -- pgt-ignore syntax + select 1; + "#; + + let suppressions = super::Suppressions::from(doc); + + assert!(suppressions.is_suppressed(&TestDiagnostic { + span: TextRange::new(100.into(), 109.into()), + })); + } + + #[test] + fn correctly_suppresses_diagnostics_with_ranges() { + let doc = r#" + select 2; + + -- pgt-ignore-start lint + select 1; + -- pgt-ignore-end lint + "#; + + let suppressions = super::Suppressions::from(doc); + + assert!(suppressions.is_suppressed(&TestDiagnostic { + span: TextRange::new(73.into(), 82.into()), + })); + } + + #[test] + fn marks_disabled_rule_suppressions_as_errors() { + let doc = r#" + select 2; + + -- pgt-ignore lint/safety/banDropTable + select 1; + "#; + + let suppressions = super::Suppressions::from(doc); + + let disabled_diagnostics = suppressions.get_disabled_diagnostic_suppressions_as_errors(&[ + pgt_analyse::RuleFilter::Group("safety"), + ]); + + assert_eq!(disabled_diagnostics.len(), 1); + + assert_eq!( + disabled_diagnostics[0], + SuppressionDiagnostic { + span: TextRange::new(36.into(), 74.into()), + message: MessageAndDescription::from("This rule has been disabled via the configuration. 
The suppression has no effect.".to_string()) + } + ); + } + + #[test] + fn marks_unused_suppressions_as_errors() { + let doc = r#" + select 2; + + -- pgt-ignore lint + select 1; + "#; + + // no diagnostics + let diagnostics: Vec<SuppressionDiagnostic> = vec![]; + + let suppressions = super::Suppressions::from(doc); + + let unused_diagnostics = suppressions.get_unused_suppressions_as_errors(&diagnostics); + + assert_eq!(unused_diagnostics.len(), 1); + + assert_eq!( + unused_diagnostics[0], + SuppressionDiagnostic { + span: TextRange::new(36.into(), 54.into()), + message: MessageAndDescription::from("This suppression has no effect.".to_string()) + } + ); + } +} diff --git a/crates/pgt_suppressions/src/line_index.rs b/crates/pgt_suppressions/src/line_index.rs new file mode 100644 index 00000000..16af72dd --- /dev/null +++ b/crates/pgt_suppressions/src/line_index.rs @@ -0,0 +1,43 @@ +use pgt_text_size::TextSize; + +#[derive(Debug, Default, Clone)] +pub(crate) struct LineIndex { + line_offset: Vec<TextSize>, +} + +impl LineIndex { + pub fn new(doc: &str) -> Self { + let line_offset = std::iter::once(0) + .chain(doc.match_indices(&['\n', '\r']).filter_map(|(i, _)| { + let bytes = doc.as_bytes(); + + match bytes[i] { + // Filter out the `\r` in `\r\n` to avoid counting the line break twice + b'\r' if i + 1 < bytes.len() && bytes[i + 1] == b'\n' => None, + _ => Some(i + 1), + } + })) + .map(|i| TextSize::try_from(i).expect("integer overflow")) + .collect(); + + Self { line_offset } + } + + pub fn offset_for_line(&self, idx: usize) -> Option<&pgt_text_size::TextSize> { + self.line_offset.get(idx) + } + + pub fn line_for_offset(&self, offset: TextSize) -> Option<usize> { + self.line_offset + .iter() + .enumerate() + .filter_map(|(i, line_offset)| { + if offset >= *line_offset { + Some(i) + } else { + None + } + }) + .next_back() + } +} diff --git a/crates/pgt_suppressions/src/parser.rs b/crates/pgt_suppressions/src/parser.rs new file mode 100644 index 00000000..663e52fe --- /dev/null +++ b/crates/pgt_suppressions/src/parser.rs @@ -0,0 +1,353 @@ +use std::{ + iter::{Enumerate, Peekable}, + str::Lines, +}; + +use pgt_diagnostics::MessageAndDescription; +use pgt_text_size::TextRange; + +use crate::{ + Suppressions, + line_index::LineIndex, + suppression::{RangeSuppression, Suppression, SuppressionDiagnostic, SuppressionKind}, +}; + +#[derive(Debug)] +pub(crate) struct SuppressionsParser<'a> { + file_suppressions: Vec<Suppression>, + line_suppressions: std::collections::HashMap<usize, Suppression>, + range_suppressions: Vec<RangeSuppression>, + diagnostics: Vec<SuppressionDiagnostic>, + lines: Peekable<Enumerate<Lines<'a>>>, + line_index: LineIndex, + + start_suppressions_stack: Vec<Suppression>, +} + +impl<'a> SuppressionsParser<'a> { + pub fn new(doc: &'a str) -> Self { + let lines = doc.lines().enumerate().peekable(); + + Self { + file_suppressions: vec![], + line_suppressions: std::collections::HashMap::default(), + range_suppressions: vec![], + diagnostics: vec![], + lines, + line_index: LineIndex::new(doc), + start_suppressions_stack: vec![], + } + } + + pub fn parse(doc: &str) -> Suppressions { + let mut parser = SuppressionsParser::new(doc); + + parser.parse_file_suppressions(); + parser.parse_suppressions(); + parser.handle_unmatched_start_suppressions(); + + Suppressions { + file_suppressions: parser.file_suppressions, + line_suppressions: parser.line_suppressions, + range_suppressions: parser.range_suppressions, + diagnostics: parser.diagnostics, + line_index: parser.line_index, + } + } + + /// Will parse the suppressions at the start of the file.
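+ /// These are file-level `-- pgt-ignore-all` comments; empty lines between them are skipped.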
+ /// As soon as anything is encountered that's not a `pgt-ignore-all` + /// suppression or an empty line, this will stop. + fn parse_file_suppressions(&mut self) { + while let Some((_, preview)) = self.lines.peek() { + if preview.trim().is_empty() { + self.lines.next(); + continue; + } + + if !preview.trim().starts_with("-- pgt-ignore-all") { + return; + } + + let (idx, line) = self.lines.next().unwrap(); + + let offset = self.line_index.offset_for_line(idx).unwrap(); + + match Suppression::from_line(line, offset) { + Ok(suppr) => self.file_suppressions.push(suppr), + Err(diag) => self.diagnostics.push(diag), + } + } + } + + fn parse_suppressions(&mut self) { + for (idx, line) in self.lines.by_ref() { + if !line.trim().starts_with("-- pgt-ignore") { + continue; + } + + let offset = self.line_index.offset_for_line(idx).unwrap(); + + let suppr = match Suppression::from_line(line, offset) { + Ok(suppr) => suppr, + Err(diag) => { + self.diagnostics.push(diag); + continue; + } + }; + + match suppr.kind { + SuppressionKind::File => { + self.diagnostics.push(SuppressionDiagnostic { + span: suppr.suppression_range, + message: MessageAndDescription::from( + "File suppressions should be at the top of the file.".to_string(), + ), + }); + } + + SuppressionKind::Line => { + self.line_suppressions.insert(idx, suppr); + } + + SuppressionKind::Start => self.start_suppressions_stack.push(suppr), + SuppressionKind::End => { + let matching_start_idx = self + .start_suppressions_stack + .iter() + .enumerate() + .filter_map(|(idx, s)| { + if s.rule_specifier == suppr.rule_specifier { + Some(idx) + } else { + None + } + }) + .next_back(); + + if let Some(start_idx) = matching_start_idx { + let start = self.start_suppressions_stack.remove(start_idx); + + let full_range = TextRange::new( + start.suppression_range.start(), + suppr.suppression_range.end(), + ); + + self.range_suppressions.push(RangeSuppression { + suppressed_range: full_range, + start_suppression: start, + }); + } else { + self.diagnostics.push(SuppressionDiagnostic { + span: suppr.suppression_range, + message: MessageAndDescription::from( + "This end suppression does not have a matching start.".to_string(), + ), + }); + } + } + } + } + } + + /// If we have `pgt-ignore-start` suppressions without matching end tags after parsing the entire file, + /// we'll report diagnostics for those. 
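+ /// e.g., a `-- pgt-ignore-start lint` without a matching `-- pgt-ignore-end lint` yields a "does not have a matching end" diagnostic.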
+ fn handle_unmatched_start_suppressions(&mut self) { + let start_suppressions = std::mem::take(&mut self.start_suppressions_stack); + + for suppr in start_suppressions { + self.diagnostics.push(SuppressionDiagnostic { + span: suppr.suppression_range, + message: MessageAndDescription::from( + "This start suppression does not have a matching end.".to_string(), + ), + }); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::suppression::{RuleSpecifier, SuppressionKind}; + + #[test] + fn test_parse_line_suppressions() { + let doc = r#" +SELECT 1; +-- pgt-ignore lint/safety/banDropColumn +SELECT 2; +"#; + let suppressions = SuppressionsParser::parse(doc); + + // Should have a line suppression on line 1 (0-based index) + let suppression = suppressions + .line_suppressions + .get(&2) + .expect("no suppression found"); + + assert_eq!(suppression.kind, SuppressionKind::Line); + assert_eq!( + suppression.rule_specifier, + RuleSpecifier::Rule( + "lint".to_string(), + "safety".to_string(), + "banDropColumn".to_string() + ) + ); + } + + #[test] + fn test_parse_multiple_line_suppressions() { + let doc = r#" +SELECT 1; +-- pgt-ignore lint/safety/banDropColumn +-- pgt-ignore lint/safety/banDropTable +-- pgt-ignore lint/safety/banDropNotNull +"#; + + let suppressions = SuppressionsParser::parse(doc); + + assert_eq!(suppressions.line_suppressions.len(), 3); + + assert_eq!( + suppressions + .line_suppressions + .get(&2) + .unwrap() + .rule_specifier + .rule(), + Some("banDropColumn") + ); + + assert_eq!( + suppressions + .line_suppressions + .get(&3) + .unwrap() + .rule_specifier + .rule(), + Some("banDropTable") + ); + + assert_eq!( + suppressions + .line_suppressions + .get(&4) + .unwrap() + .rule_specifier + .rule(), + Some("banDropNotNull") + ); + } + + #[test] + fn parses_file_level_suppressions() { + let doc = r#" +-- pgt-ignore-all lint +-- pgt-ignore-all typecheck + +SELECT 1; +-- pgt-ignore-all lint/safety +"#; + + let suppressions = SuppressionsParser::parse(doc); + + assert_eq!(suppressions.diagnostics.len(), 1); + assert_eq!(suppressions.file_suppressions.len(), 2); + + assert_eq!( + suppressions.file_suppressions[0].rule_specifier, + RuleSpecifier::Category("lint".to_string()) + ); + assert_eq!( + suppressions.file_suppressions[1].rule_specifier, + RuleSpecifier::Category("typecheck".to_string()) + ); + + assert_eq!( + suppressions.diagnostics[0].message.to_string(), + String::from("File suppressions should be at the top of the file.") + ); + } + + #[test] + fn parses_range_suppressions() { + let doc = r#" +-- pgt-ignore-start lint/safety/banDropTable +drop table users; +drop table auth; +drop table posts; +-- pgt-ignore-end lint/safety/banDropTable +"#; + + let suppressions = SuppressionsParser::parse(doc); + + assert_eq!(suppressions.range_suppressions.len(), 1); + + assert_eq!( + suppressions.range_suppressions[0], + RangeSuppression { + suppressed_range: TextRange::new(1.into(), 141.into()), + start_suppression: Suppression { + kind: SuppressionKind::Start, + rule_specifier: RuleSpecifier::Rule( + "lint".to_string(), + "safety".to_string(), + "banDropTable".to_string() + ), + suppression_range: TextRange::new(1.into(), 45.into()), + explanation: None, + }, + } + ); + } + + #[test] + fn parses_range_suppressions_with_errors() { + let doc = r#" +-- pgt-ignore-start lint/safety/banDropTable +drop table users; +-- pgt-ignore-start lint/safety/banDropTable +drop table auth; +drop table posts; +-- pgt-ignore-end lint/safety/banDropTable +-- pgt-ignore-end lint/safety/banDropColumn 
+"#; + + let suppressions = SuppressionsParser::parse(doc); + + assert_eq!(suppressions.range_suppressions.len(), 1); + assert_eq!(suppressions.diagnostics.len(), 2); + + // the inner, nested start/end combination is recognized. + assert_eq!( + suppressions.range_suppressions[0], + RangeSuppression { + suppressed_range: TextRange::new(64.into(), 186.into()), + start_suppression: Suppression { + kind: SuppressionKind::Start, + rule_specifier: RuleSpecifier::Rule( + "lint".to_string(), + "safety".to_string(), + "banDropTable".to_string() + ), + suppression_range: TextRange::new(64.into(), 108.into()), + explanation: None, + }, + } + ); + + // the outer end is an error + assert_eq!( + suppressions.diagnostics[0].message.to_string(), + String::from("This end suppression does not have a matching start.") + ); + + // the outer start is an error + assert_eq!( + suppressions.diagnostics[1].message.to_string(), + String::from("This start suppression does not have a matching end.") + ); + } +} diff --git a/crates/pgt_suppressions/src/suppression.rs b/crates/pgt_suppressions/src/suppression.rs new file mode 100644 index 00000000..6ebaf25c --- /dev/null +++ b/crates/pgt_suppressions/src/suppression.rs @@ -0,0 +1,459 @@ +use pgt_analyse::RuleFilter; +use pgt_diagnostics::{Category, Diagnostic, MessageAndDescription}; +use pgt_text_size::{TextRange, TextSize}; + +/// A specialized diagnostic for the typechecker. +/// +/// Type diagnostics are always **errors**. +#[derive(Clone, Debug, Diagnostic, PartialEq)] +#[diagnostic(category = "lint", severity = Warning)] +pub struct SuppressionDiagnostic { + #[location(span)] + pub span: TextRange, + #[description] + #[message] + pub message: MessageAndDescription, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) enum SuppressionKind { + File, + Line, + Start, + End, +} + +#[derive(Debug, PartialEq, Clone, Eq)] +/// Represents the suppressed rule, as written in the suppression comment. +/// e.g. `lint/safety/banDropColumn`, or `lint/safety`, or just `lint`. +/// The format of a rule specifier string is `(/(/))`. +/// +/// `RuleSpecifier` can only be constructed from a `&str` that matches a valid +/// [pgt_diagnostics::Category]. 
+pub(crate) enum RuleSpecifier {
+ Category(String),
+ Group(String, String),
+ Rule(String, String, String),
+}
+
+impl RuleSpecifier {
+ pub(crate) fn category(&self) -> &str {
+ match self {
+ RuleSpecifier::Category(rule_category) => rule_category,
+ RuleSpecifier::Group(rule_category, _) => rule_category,
+ RuleSpecifier::Rule(rule_category, _, _) => rule_category,
+ }
+ }
+
+ pub(crate) fn group(&self) -> Option<&str> {
+ match self {
+ RuleSpecifier::Category(_) => None,
+ RuleSpecifier::Group(_, gr) => Some(gr),
+ RuleSpecifier::Rule(_, gr, _) => Some(gr),
+ }
+ }
+
+ pub(crate) fn rule(&self) -> Option<&str> {
+ match self {
+ RuleSpecifier::Rule(_, _, ru) => Some(ru),
+ _ => None,
+ }
+ }
+
+ pub(crate) fn is_disabled(&self, disabled_rules: &[RuleFilter<'_>]) -> bool {
+ // note: it is not possible to disable entire categories via the config
+ let group = self.group();
+ let rule = self.rule();
+
+ disabled_rules.iter().any(|r| match r {
+ RuleFilter::Group(gr) => group.is_some_and(|specifier_group| specifier_group == *gr),
+ RuleFilter::Rule(gr, ru) => group.is_some_and(|specifier_group| {
+ rule.is_some_and(|specifier_rule| specifier_group == *gr && specifier_rule == *ru)
+ }),
+ })
+ }
+}
+
+impl From<&Category> for RuleSpecifier {
+ fn from(category: &Category) -> Self {
+ let mut specifiers = category.name().split('/').map(|s| s.to_string());
+
+ let category_str = specifiers.next();
+ let group = specifiers.next();
+ let rule = specifiers.next();
+
+ match (category_str, group, rule) {
+ (Some(c), Some(g), Some(r)) => RuleSpecifier::Rule(c, g, r),
+ (Some(c), Some(g), None) => RuleSpecifier::Group(c, g),
+ (Some(c), None, None) => RuleSpecifier::Category(c),
+ _ => unreachable!(),
+ }
+ }
+}
+
+impl TryFrom<&str> for RuleSpecifier {
+ type Error = String;
+
+ fn try_from(specifier_str: &str) -> Result<Self, Self::Error> {
+ let cat = specifier_str
+ .parse::<&Category>()
+ .map_err(|_| "Invalid rule.".to_string())?;
+
+ Ok(RuleSpecifier::from(cat))
+ }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub(crate) struct Suppression {
+ pub(crate) suppression_range: TextRange,
+ pub(crate) kind: SuppressionKind,
+ pub(crate) rule_specifier: RuleSpecifier,
+ #[allow(unused)]
+ pub(crate) explanation: Option<String>,
+}
+
+impl Suppression {
+ /// Creates a suppression from a suppression comment line.
+ /// The line must start with `-- pgt-ignore`, otherwise this will panic.
+ /// Leading whitespace is ignored.
+ pub(crate) fn from_line(line: &str, offset: &TextSize) -> Result<Self, SuppressionDiagnostic> {
+ let start_trimmed = line.trim_ascii_start();
+ let leading_whitespace_offset = line.len() - start_trimmed.len();
+ let trimmed = start_trimmed.trim_ascii_end();
+
+ assert!(
+ start_trimmed.starts_with("-- pgt-ignore"),
+ "Only try parsing suppressions from lines starting with `-- pgt-ignore`."
+ ); + + let full_offset = *offset + TextSize::new(leading_whitespace_offset.try_into().unwrap()); + let span = TextRange::new( + full_offset, + pgt_text_size::TextSize::new(trimmed.len().try_into().unwrap()) + full_offset, + ); + + let (line, explanation) = match trimmed.split_once(':') { + Some((suppr, explanation)) => (suppr, Some(explanation.trim())), + None => (trimmed, None), + }; + + let mut parts = line.split_ascii_whitespace(); + + let _ = parts.next(); + let kind = match parts.next().unwrap() { + "pgt-ignore-all" => SuppressionKind::File, + "pgt-ignore-start" => SuppressionKind::Start, + "pgt-ignore-end" => SuppressionKind::End, + "pgt-ignore" => SuppressionKind::Line, + k => { + return Err(SuppressionDiagnostic { + span, + message: MessageAndDescription::from(format!( + "'{}' is not a valid suppression tag.", + k, + )), + }); + } + }; + + let specifier_str = match parts.next() { + Some(it) => it, + None => { + return Err(SuppressionDiagnostic { + span, + message: MessageAndDescription::from( + "You must specify which lints to suppress.".to_string(), + ), + }); + } + }; + + let rule_specifier = + RuleSpecifier::try_from(specifier_str).map_err(|e| SuppressionDiagnostic { + span, + message: MessageAndDescription::from(e), + })?; + + Ok(Self { + rule_specifier, + kind, + suppression_range: span, + explanation: explanation.map(|e| e.to_string()), + }) + } + + pub(crate) fn matches(&self, diagnostic_specifier: &RuleSpecifier) -> bool { + let d_category = diagnostic_specifier.category(); + let d_group = diagnostic_specifier.group(); + let d_rule = diagnostic_specifier.rule(); + + match &self.rule_specifier { + // Check if we suppress the entire category + RuleSpecifier::Category(cat) if cat == d_category => return true, + + // Check if we suppress the category & group + RuleSpecifier::Group(cat, group) => { + if cat == d_category && Some(group.as_str()) == d_group { + return true; + } + } + + // Check if we suppress the category & group & specific rule + RuleSpecifier::Rule(cat, group, rule) => { + if cat == d_category + && Some(group.as_str()) == d_group + && Some(rule.as_str()) == d_rule + { + return true; + } + } + + _ => {} + } + + false + } + + pub(crate) fn to_disabled_diagnostic(&self) -> SuppressionDiagnostic { + SuppressionDiagnostic { + span: self.suppression_range, + message: MessageAndDescription::from( + "This rule has been disabled via the configuration. The suppression has no effect." 
+ .to_string(), + ), + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) struct RangeSuppression { + pub(crate) suppressed_range: TextRange, + pub(crate) start_suppression: Suppression, +} + +#[cfg(test)] +mod tests { + use super::*; + use pgt_text_size::{TextRange, TextSize}; + + #[test] + fn test_suppression_from_line_rule() { + let line = "-- pgt-ignore lint/safety/banDropColumn: explanation"; + let offset = &TextSize::new(0); + let suppression = Suppression::from_line(line, offset).unwrap(); + + assert_eq!(suppression.kind, SuppressionKind::Line); + assert_eq!( + suppression.rule_specifier, + RuleSpecifier::Rule( + "lint".to_string(), + "safety".to_string(), + "banDropColumn".to_string() + ) + ); + assert_eq!(suppression.explanation.as_deref(), Some("explanation")); + } + + #[test] + fn test_suppression_from_line_group() { + let line = "-- pgt-ignore lint/safety: explanation"; + let offset = &TextSize::new(0); + let suppression = Suppression::from_line(line, offset).unwrap(); + + assert_eq!(suppression.kind, SuppressionKind::Line); + assert_eq!( + suppression.rule_specifier, + RuleSpecifier::Group("lint".to_string(), "safety".to_string()) + ); + assert_eq!(suppression.explanation.as_deref(), Some("explanation")); + } + + #[test] + fn test_suppression_from_line_category() { + let line = "-- pgt-ignore lint"; + let offset = &TextSize::new(0); + let suppression = Suppression::from_line(line, offset).unwrap(); + + assert_eq!(suppression.kind, SuppressionKind::Line); + assert_eq!( + suppression.rule_specifier, + RuleSpecifier::Category("lint".to_string()) + ); + } + + #[test] + fn test_suppression_from_line_category_with_explanation() { + let line = "-- pgt-ignore lint: explanation"; + let offset = &TextSize::new(0); + let suppression = Suppression::from_line(line, offset).unwrap(); + + assert_eq!(suppression.kind, SuppressionKind::Line); + assert_eq!( + suppression.rule_specifier, + RuleSpecifier::Category("lint".to_string()) + ); + assert_eq!(suppression.explanation.as_deref(), Some("explanation")); + } + + #[test] + fn test_suppression_from_line_file_kind() { + let line = "-- pgt-ignore-all lint/safety/banDropColumn: explanation"; + let offset = &TextSize::new(0); + let suppression = Suppression::from_line(line, offset).unwrap(); + + assert_eq!(suppression.kind, SuppressionKind::File); + assert_eq!( + suppression.rule_specifier, + RuleSpecifier::Rule( + "lint".to_string(), + "safety".to_string(), + "banDropColumn".to_string() + ) + ); + assert_eq!(suppression.explanation.as_deref(), Some("explanation")); + } + + #[test] + fn test_suppression_from_line_start_kind() { + let line = "-- pgt-ignore-start lint/safety/banDropColumn: explanation"; + let offset = &TextSize::new(0); + let suppression = Suppression::from_line(line, offset).unwrap(); + + assert_eq!(suppression.kind, SuppressionKind::Start); + assert_eq!( + suppression.rule_specifier, + RuleSpecifier::Rule( + "lint".to_string(), + "safety".to_string(), + "banDropColumn".to_string() + ) + ); + assert_eq!(suppression.explanation.as_deref(), Some("explanation")); + } + + #[test] + fn test_suppression_from_line_end_kind() { + let line = "-- pgt-ignore-end lint/safety/banDropColumn: explanation"; + let offset = &TextSize::new(0); + let suppression = Suppression::from_line(line, offset).unwrap(); + + assert_eq!(suppression.kind, SuppressionKind::End); + assert_eq!( + suppression.rule_specifier, + RuleSpecifier::Rule( + "lint".to_string(), + "safety".to_string(), + "banDropColumn".to_string() + ) + ); + 
assert_eq!(suppression.explanation.as_deref(), Some("explanation")); + } + + #[test] + fn test_suppression_span_with_offset() { + let line = " \n-- pgt-ignore lint/safety/banDropColumn: explanation"; + let offset = TextSize::new(5); + let suppression = Suppression::from_line(line, &offset).unwrap(); + + let expected_start = offset + TextSize::new(5); + let expected_len = TextSize::new(line.trim_ascii().len() as u32); + + let expected_end = expected_start + expected_len; + let expected_span = TextRange::new(expected_start, expected_end); + + assert_eq!(suppression.suppression_range, expected_span); + } + + #[test] + fn test_suppression_from_line_invalid_tag_and_missing_specifier() { + let lines = vec![ + "-- pgt-ignore-foo lint/safety/banDropColumn: explanation", + "-- pgt-ignore foo lint/safety/banDropColumn: explanation", + "-- pgt-ignore xyz lint/safety/banDropColumn: explanation", + "-- pgt-ignore", + ]; + let offset = &TextSize::new(0); + for line in lines { + let result = Suppression::from_line(line, offset); + assert!(result.is_err(), "Expected error for line: {}", line); + } + } + + #[test] + fn test_suppression_matches() { + let cases = vec![ + // the category works for all groups & rules + ("-- pgt-ignore lint", "lint/safety/banDropNotNull", true), + ("-- pgt-ignore lint", "lint/safety/banDropColumn", true), + // the group works for all rules in that group + ( + "-- pgt-ignore lint/safety", + "lint/safety/banDropColumn", + true, + ), + ("-- pgt-ignore lint", "typecheck", false), + ("-- pgt-ignore lint/safety", "typecheck", false), + // a specific supppression only works for that same rule + ( + "-- pgt-ignore lint/safety/banDropColumn", + "lint/safety/banDropColumn", + true, + ), + ( + "-- pgt-ignore lint/safety/banDropColumn", + "lint/safety/banDropTable", + false, + ), + ]; + + let offset = &TextSize::new(0); + + for (suppr_line, specifier_str, expected) in cases { + let suppression = Suppression::from_line(suppr_line, offset).unwrap(); + let specifier = RuleSpecifier::try_from(specifier_str).unwrap(); + assert_eq!( + suppression.matches(&specifier), + expected, + "Suppression line '{}' vs specifier '{}' should be {}", + suppr_line, + specifier_str, + expected + ); + } + } + + #[test] + fn test_rule_specifier_is_disabled() { + use pgt_analyse::RuleFilter; + + // Group filter disables all rules in that group + let spec = RuleSpecifier::Rule( + "lint".to_string(), + "safety".to_string(), + "banDropColumn".to_string(), + ); + let disabled = vec![RuleFilter::Group("safety")]; + assert!(spec.is_disabled(&disabled)); + + let spec2 = RuleSpecifier::Rule( + "lint".to_string(), + "safety".to_string(), + "banDropColumn".to_string(), + ); + let disabled2 = vec![RuleFilter::Rule("safety", "banDropColumn")]; + assert!(spec2.is_disabled(&disabled2)); + + let disabled3 = vec![RuleFilter::Rule("safety", "otherRule")]; + assert!(!spec2.is_disabled(&disabled3)); + + let disabled4 = vec![RuleFilter::Group("perf")]; + assert!(!spec.is_disabled(&disabled4)); + + // one match is enough + let disabled5 = vec![ + RuleFilter::Group("perf"), + RuleFilter::Rule("safety", "banDropColumn"), + ]; + assert!(spec.is_disabled(&disabled5)); + } +} diff --git a/crates/pgt_workspace/Cargo.toml b/crates/pgt_workspace/Cargo.toml index bfa413e3..6b0cc065 100644 --- a/crates/pgt_workspace/Cargo.toml +++ b/crates/pgt_workspace/Cargo.toml @@ -29,6 +29,7 @@ pgt_lexer = { workspace = true } pgt_query_ext = { workspace = true } pgt_schema_cache = { workspace = true } pgt_statement_splitter = { workspace = true } 
+pgt_suppressions = { workspace = true }
 pgt_text_size.workspace = true
 pgt_typecheck = { workspace = true }
 rustc-hash = { workspace = true }
diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs
index d0c8d13a..5f2a9ebf 100644
--- a/crates/pgt_workspace/src/workspace/server.rs
+++ b/crates/pgt_workspace/src/workspace/server.rs
@@ -570,6 +570,26 @@ impl Workspace for WorkspaceServer {
 },
 ));
 
+ let suppressions = parser.document_suppressions();
+
+ let disabled_suppression_errors =
+ suppressions.get_disabled_diagnostic_suppressions_as_errors(&disabled_rules);
+
+ let unused_suppression_errors =
+ suppressions.get_unused_suppressions_as_errors(&diagnostics);
+
+ let suppression_errors: Vec<Error> = suppressions
+ .diagnostics
+ .iter()
+ .chain(disabled_suppression_errors.iter())
+ .chain(unused_suppression_errors.iter())
+ .cloned()
+ .map(Error::from)
+ .collect::<Vec<Error>>();
+
+ diagnostics.retain(|d| !suppressions.is_suppressed(d));
+ diagnostics.extend(suppression_errors.into_iter().map(SDiagnostic::new));
+
 let errors = diagnostics
 .iter()
 .filter(|d| d.severity() == Severity::Error || d.severity() == Severity::Fatal)
diff --git a/crates/pgt_workspace/src/workspace/server/analyser.rs b/crates/pgt_workspace/src/workspace/server/analyser.rs
index 4defc79e..86e3d076 100644
--- a/crates/pgt_workspace/src/workspace/server/analyser.rs
+++ b/crates/pgt_workspace/src/workspace/server/analyser.rs
@@ -75,6 +75,7 @@ impl<'a, 'b> LintVisitor<'a, 'b> {
 .as_linter_rules()
 .map(|rules| rules.as_enabled_rules())
 .unwrap_or_default();
+
 self.enabled_rules.extend(enabled_rules);
 
 let disabled_rules = self
diff --git a/crates/pgt_workspace/src/workspace/server/change.rs b/crates/pgt_workspace/src/workspace/server/change.rs
index cc455134..d4a39aa3 100644
--- a/crates/pgt_workspace/src/workspace/server/change.rs
+++ b/crates/pgt_workspace/src/workspace/server/change.rs
@@ -1,3 +1,4 @@
+use pgt_suppressions::Suppressions;
 use pgt_text_size::{TextLen, TextRange, TextSize};
 use std::ops::{Add, Sub};
 
@@ -85,6 +86,7 @@ impl Document {
 }
 
 self.version = change.version;
+ self.suppressions = Suppressions::from(self.content.as_str());
 
 changes
 }
diff --git a/crates/pgt_workspace/src/workspace/server/document.rs b/crates/pgt_workspace/src/workspace/server/document.rs
index 89516b23..28514f51 100644
--- a/crates/pgt_workspace/src/workspace/server/document.rs
+++ b/crates/pgt_workspace/src/workspace/server/document.rs
@@ -1,4 +1,5 @@
 use pgt_diagnostics::{Diagnostic, DiagnosticExt, Severity, serde::Diagnostic as SDiagnostic};
+use pgt_suppressions::Suppressions;
 use pgt_text_size::{TextRange, TextSize};
 
 use super::statement_identifier::{StatementId, StatementIdGenerator};
@@ -10,6 +11,8 @@ pub(crate) struct Document {
 pub(crate) version: i32,
 pub(super) diagnostics: Vec<SDiagnostic>,
+
+ pub(super) suppressions: Suppressions,
+
 /// List of statements sorted by range.start()
 pub(super) positions: Vec<(StatementId, TextRange)>,
 
@@ -22,6 +25,8 @@ impl Document {
 
 let (ranges, diagnostics) = split_with_diagnostics(&content, None);
 
+ let suppressions = Suppressions::from(content.as_str());
+
 Self {
 positions: ranges
 .into_iter()
@@ -31,6 +36,7 @@ impl Document {
 version,
 diagnostics,
 id_generator,
+ suppressions,
 }
 }
 
diff --git a/crates/pgt_workspace/src/workspace/server/parsed_document.rs b/crates/pgt_workspace/src/workspace/server/parsed_document.rs
index 2b81faba..8b515128 100644
--- a/crates/pgt_workspace/src/workspace/server/parsed_document.rs
+++ b/crates/pgt_workspace/src/workspace/server/parsed_document.rs
@@ -3,6 +3,7 @@
 use std::sync::Arc;
 
 use pgt_diagnostics::serde::Diagnostic as SDiagnostic;
 use pgt_fs::PgTPath;
 use pgt_query_ext::diagnostics::SyntaxDiagnostic;
+use pgt_suppressions::Suppressions;
 use pgt_text_size::{TextRange, TextSize};
 
 use crate::workspace::ChangeFileParams;
@@ -98,6 +99,10 @@ impl ParsedDocument {
 &self.doc.diagnostics
 }
 
+ pub fn document_suppressions(&self) -> &Suppressions {
+ &self.doc.suppressions
+ }
+
 pub fn find<'a, M>(&'a self, id: StatementId, mapper: M) -> Option<M::Output>
 where
 M: StatementMapper<'a>,

From 155272a445d8e417666db908ddceaad9e8c8a196 Mon Sep 17 00:00:00 2001
From: Julian Domke <68325451+juleswritescode@users.noreply.github.com>
Date: Sat, 12 Jul 2025 19:37:55 +0200
Subject: [PATCH 085/114] docs: rule suppressions (#443)

---
 docs/rule_suppressions.md | 94 +++++++++++++++++++++++++++++++++++++++
 mkdocs.yml | 25 ++++++-----
 2 files changed, 107 insertions(+), 12 deletions(-)
 create mode 100644 docs/rule_suppressions.md

diff --git a/docs/rule_suppressions.md b/docs/rule_suppressions.md
new file mode 100644
index 00000000..af5890e7
--- /dev/null
+++ b/docs/rule_suppressions.md
@@ -0,0 +1,94 @@
+# Rule Suppressions
+
+You can suppress specific diagnostics or rules in your code using suppression comments. This is useful when you want to ignore a particular rule for an entire file, a line, or a block of code.
+
+## How to Suppress a Rule
+
+To suppress a rule, add a comment above the line causing the diagnostic with the following format:
+
+```sql
+-- pgt-ignore lint/safety/banDropTable
+drop table users;
+```
+
+You can suppress single rules, groups of rules, or entire categories. The format of the rule to suppress is:
+
+`category(/group(/specific-rule))`
+
+The group and the specific rule are optional.
+
+So, to suppress the `lint/safety/banDropTable` diagnostic, all of these would work:
+
+```sql
+-- pgt-ignore lint
+-- pgt-ignore lint/safety
+-- pgt-ignore lint/safety/banDropTable
+```
+
+You can also add an explanation to the suppression by adding a `:` and the explanation text:
+
+```sql
+-- pgt-ignore lint/safety/banDropTable: My startup never had any users.
+drop table users;
+```
+
+### Suppressing Rules for a Block of Code
+
+You can suppress rules for blocks of code.
+
+```sql
+create table users (
+ -- ...
+);
+
+-- pgt-ignore-start typecheck: The `users` table will be created with this migration.
+alter table users drop constraint users_pkey;
+
+alter table users add primary key (user_id);
+-- pgt-ignore-end typecheck
+```
+
+Every `pgt-ignore-start` needs a `pgt-ignore-end` suppression comment, and the suppressed rules must match exactly.
+
+This _won't_ work, because the start tag suppresses a different diagnostic:
+
+```sql
+-- pgt-ignore-start lint/safety/banDropColumn
+-- pgt-ignore-end lint/safety
+```
+
+Nesting is allowed, so this works fine:
+
+```sql
+-- pgt-ignore-start typecheck: outer
+-- pgt-ignore-start lint/safety: inner
+-- pgt-ignore-end lint/safety: inner
+-- pgt-ignore-end typecheck: outer
+```
+
+### Suppressing Rules for Entire Files
+
+Instead of repeating the same suppression on multiple lines, you can suppress for an entire file.
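+Note that file suppressions are only recognized at the very top of the file, where only empty lines may precede them; a `pgt-ignore-all` comment placed after the first statement is reported with a warning instead.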
+
+```sql
+-- pgt-ignore-all lint/safety/banDropTable
+
+drop table tasks;
+drop table projects;
+drop table users;
+```
+
+## Suppressing Multiple Rules
+
+You can suppress multiple rules by adding multiple suppression comments above a statement:
+
+```sql
+-- pgt-ignore lint/safety/banDropColumn
+-- pgt-ignore typecheck
+alter table tasks drop column created_at;
+```
+
+## Notes
+
+- Trying to suppress diagnostics that have already been disabled in your [configuration file](/#configuration) will show a warning.
+- Trying to suppress diagnostics that haven't been raised will also show a warning.
diff --git a/mkdocs.yml b/mkdocs.yml
index b4520ce4..572642c6 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -6,22 +6,23 @@ repo_name: supabase-community/postgres-language-server
 repo_url: https://github.com/supabase-community/postgres-language-server
 
 theme:
- name: 'readthedocs'
- features:
- - navigation.expand
- palette:
- primary: grey
- accent: red
+ name: "readthedocs"
+ features:
+ - navigation.expand
+ palette:
+ primary: grey
+ accent: red
 
 nav:
 - Introduction: index.md
 - Guides:
- - Linting Migrations: checking_migrations.md
- - Troubleshooting: troubleshooting.md
+ - Linting Migrations: checking_migrations.md
+ - Troubleshooting: troubleshooting.md
 - Reference:
- - Rules: rules.md
- - Rule Sources: rule_sources.md
- - CLI: cli_reference.md
- - Environment Variables: env_variables.md
+ - Rules: rules.md
+ - Rule Sources: rule_sources.md
+ - Rule Suppressions: rule_suppressions.md
+ - CLI: cli_reference.md
+ - Environment Variables: env_variables.md
 
 plugins:
 - gh-admonitions

From 27b6e7f64fb1862ff714a928770706c0267fdc45 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?=
Date: Sat, 12 Jul 2025 20:25:47 +0200
Subject: [PATCH 086/114] refactor: drop change.rs <3 (#447)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

![patrick-zen png](https://github.com/user-attachments/assets/bc491335-4f23-47d0-ace6-2c408eb1e93e)

as per PR title. notable changes:

- drop `change.rs` in favour of a simple `update_document` api that replaces the content
- simplify `StatementId` to just be a simple wrapper around the actual statement string. The content is wrapped in an `Arc` to make cloning cheap. turns out string interning only makes sense when we have a lot of duplicate strings. `Arc` is much more efficient and simpler in our case.
- Remove `document.rs` and merge it into `parsed_document` to then rename it to `Document`
- replaces the `strum` usage to get the available commands, because it didn't feel right to make an empty string the default for a StatementId. replaced it with a simple manual implementation.

I ran a few benchmarks and the statement splitter performance seems to be good enough to run it on every keystroke on the entire file.
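
For reference, the cheap-to-clone `StatementId` described above boils down to something like this (a minimal sketch: the field name is assumed, while `new()` and `content()` match call sites elsewhere in this patch):

```rust
use std::sync::Arc;

#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct StatementId {
    // Shared, immutable statement text: cloning the id bumps a
    // reference count instead of copying the SQL string.
    content: Arc<str>,
}

impl StatementId {
    pub fn new(statement: &str) -> Self {
        Self {
            content: Arc::from(statement),
        }
    }

    pub fn content(&self) -> &str {
        &self.content
    }
}
```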
```sh large statement with length 863000 time: [23.180 ms 23.278 ms 23.386 ms] small statement with length 19000 time: [886.69 µs 890.91 µs 896.35 µs] ``` ToDo - [ ] add some tests - [x] cleanup --- .claude/settings.local.json | 3 +- Cargo.lock | 126 +- .../execute/process_file/workspace_file.rs | 18 +- crates/pgt_lsp/src/capabilities.rs | 4 +- crates/pgt_lsp/src/handlers/text_document.rs | 31 +- crates/pgt_statement_splitter/Cargo.toml | 7 +- .../benches/splitter.rs | 85 + .../pgt_workspace/src/features/completions.rs | 15 +- crates/pgt_workspace/src/workspace.rs | 27 +- crates/pgt_workspace/src/workspace/server.rs | 158 +- .../src/workspace/server/annotation.rs | 12 +- .../src/workspace/server/change.rs | 1650 ----------------- .../src/workspace/server/document.rs | 389 +++- .../src/workspace/server/parsed_document.rs | 447 ----- .../src/workspace/server/pg_query.rs | 11 +- .../workspace/server/statement_identifier.rs | 121 +- .../src/workspace/server/tree_sitter.rs | 131 +- 17 files changed, 690 insertions(+), 2545 deletions(-) create mode 100644 crates/pgt_statement_splitter/benches/splitter.rs delete mode 100644 crates/pgt_workspace/src/workspace/server/change.rs delete mode 100644 crates/pgt_workspace/src/workspace/server/parsed_document.rs diff --git a/.claude/settings.local.json b/.claude/settings.local.json index 85429d0c..591b9119 100644 --- a/.claude/settings.local.json +++ b/.claude/settings.local.json @@ -6,7 +6,8 @@ "Bash(cargo test:*)", "Bash(cargo run:*)", "Bash(cargo check:*)", - "Bash(cargo fmt:*)" + "Bash(cargo fmt:*)", + "Bash(cargo doc:*)" ], "deny": [] } diff --git a/Cargo.lock b/Cargo.lock index ce816de8..a89dbfbe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -290,6 +290,17 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi 0.1.19", + "libc", + "winapi", +] + [[package]] name = "auto_impl" version = "1.2.0" @@ -808,7 +819,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" dependencies = [ "ciborium-io", - "half", + "half 2.6.0", ] [[package]] @@ -822,6 +833,17 @@ dependencies = [ "libloading", ] +[[package]] +name = "clap" +version = "2.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" +dependencies = [ + "bitflags 1.3.2", + "textwrap", + "unicode-width", +] + [[package]] name = "clap" version = "4.5.23" @@ -943,6 +965,32 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "criterion" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" +dependencies = [ + "atty", + "cast", + "clap 2.34.0", + "criterion-plot 0.4.5", + "csv", + "itertools 0.10.5", + "lazy_static", + "num-traits", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_cbor", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + [[package]] name = "criterion" version = "0.5.1" @@ -952,8 +1000,8 @@ dependencies = [ "anes", "cast", "ciborium", - "clap", - "criterion-plot", + "clap 4.5.23", + "criterion-plot 0.5.0", "is-terminal", "itertools 
0.10.5", "num-traits", @@ -969,6 +1017,16 @@ dependencies = [ "walkdir", ] +[[package]] +name = "criterion-plot" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" +dependencies = [ + "cast", + "itertools 0.10.5", +] + [[package]] name = "criterion-plot" version = "0.5.0" @@ -1051,6 +1109,27 @@ dependencies = [ "typenum", ] +[[package]] +name = "csv" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf" +dependencies = [ + "csv-core", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d02f3b0da4c6504f86e9cd789d8dbafab48c2321be74e9987593de5a894d93d" +dependencies = [ + "memchr", +] + [[package]] name = "dashmap" version = "5.5.3" @@ -1615,6 +1694,12 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "half" +version = "1.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" + [[package]] name = "half" version = "2.6.0" @@ -1672,6 +1757,15 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + [[package]] name = "hermit-abi" version = "0.3.9" @@ -2643,7 +2737,7 @@ name = "pgt_completions" version = "0.0.0" dependencies = [ "async-std", - "criterion", + "criterion 0.5.1", "fuzzy-matcher", "pgt_schema_cache", "pgt_test_utils", @@ -2859,6 +2953,7 @@ dependencies = [ name = "pgt_statement_splitter" version = "0.0.0" dependencies = [ + "criterion 0.3.6", "ntest", "pgt_diagnostics", "pgt_lexer", @@ -2893,7 +2988,7 @@ name = "pgt_test_utils" version = "0.0.0" dependencies = [ "anyhow", - "clap", + "clap 4.5.23", "dotenv", "sqlx", "tree-sitter", @@ -2932,7 +3027,7 @@ dependencies = [ name = "pgt_treesitter_queries" version = "0.0.0" dependencies = [ - "clap", + "clap 4.5.23", "tree-sitter", "tree_sitter_sql", ] @@ -3729,6 +3824,16 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde_cbor" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" +dependencies = [ + "half 1.8.3", + "serde", +] + [[package]] name = "serde_derive" version = "1.0.215" @@ -4316,6 +4421,15 @@ dependencies = [ "syn 2.0.90", ] +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width", +] + [[package]] name = "thiserror" version = "1.0.69" diff --git a/crates/pgt_cli/src/execute/process_file/workspace_file.rs b/crates/pgt_cli/src/execute/process_file/workspace_file.rs index 790176b9..9f78c7cf 100644 --- a/crates/pgt_cli/src/execute/process_file/workspace_file.rs +++ b/crates/pgt_cli/src/execute/process_file/workspace_file.rs @@ -2,13 +2,14 @@ use crate::execute::diagnostics::{ResultExt, ResultIoExt}; use crate::execute::process_file::SharedTraversalOptions; use 
pgt_diagnostics::{Error, category}; use pgt_fs::{File, OpenOptions, PgTPath}; -use pgt_workspace::workspace::{ChangeParams, FileGuard, OpenFileParams}; +use pgt_workspace::workspace::{FileGuard, OpenFileParams}; use pgt_workspace::{Workspace, WorkspaceError}; use std::path::{Path, PathBuf}; /// Small wrapper that holds information and operations around the current processed file pub(crate) struct WorkspaceFile<'ctx, 'app> { guard: FileGuard<'app, dyn Workspace + 'ctx>, + #[allow(dead_code)] file: Box, pub(crate) path: PathBuf, } @@ -57,19 +58,4 @@ impl<'ctx, 'app> WorkspaceFile<'ctx, 'app> { pub(crate) fn input(&self) -> Result { self.guard().get_file_content() } - - /// It updates the workspace file with `new_content` - #[allow(dead_code)] - pub(crate) fn update_file(&mut self, new_content: impl Into) -> Result<(), Error> { - let new_content = new_content.into(); - - self.file - .set_content(new_content.as_bytes()) - .with_file_path(self.path.display().to_string())?; - self.guard.change_file( - self.file.file_version(), - vec![ChangeParams::overwrite(new_content)], - )?; - Ok(()) - } } diff --git a/crates/pgt_lsp/src/capabilities.rs b/crates/pgt_lsp/src/capabilities.rs index acfc60ed..3b473eb7 100644 --- a/crates/pgt_lsp/src/capabilities.rs +++ b/crates/pgt_lsp/src/capabilities.rs @@ -1,4 +1,5 @@ use crate::adapters::{PositionEncoding, WideEncoding, negotiated_encoding}; +use crate::handlers::code_actions::command_id; use pgt_workspace::features::code_actions::CommandActionCategory; use strum::IntoEnumIterator; use tower_lsp::lsp_types::{ @@ -7,8 +8,6 @@ use tower_lsp::lsp_types::{ TextDocumentSyncOptions, TextDocumentSyncSaveOptions, WorkDoneProgressOptions, }; -use crate::handlers::code_actions::command_id; - /// The capabilities to send from server as part of [`InitializeResult`] /// /// [`InitializeResult`]: lspower::lsp::InitializeResult @@ -54,7 +53,6 @@ pub(crate) fn server_capabilities(capabilities: &ClientCapabilities) -> ServerCa commands: CommandActionCategory::iter() .map(|c| command_id(&c)) .collect::>(), - ..Default::default() }), document_formatting_provider: None, diff --git a/crates/pgt_lsp/src/handlers/text_document.rs b/crates/pgt_lsp/src/handlers/text_document.rs index 63250ef5..cc2efb4b 100644 --- a/crates/pgt_lsp/src/handlers/text_document.rs +++ b/crates/pgt_lsp/src/handlers/text_document.rs @@ -1,10 +1,9 @@ -use crate::adapters::from_lsp; use crate::{ diagnostics::LspError, documents::Document, session::Session, utils::apply_document_changes, }; use anyhow::Result; use pgt_workspace::workspace::{ - ChangeFileParams, ChangeParams, CloseFileParams, GetFileContentParams, OpenFileParams, + ChangeFileParams, CloseFileParams, GetFileContentParams, OpenFileParams, }; use tower_lsp::lsp_types; use tracing::error; @@ -48,40 +47,28 @@ pub(crate) async fn did_change( let pgt_path = session.file_path(&url)?; - let old_doc = session.document(&url)?; let old_text = session.workspace.get_file_content(GetFileContentParams { path: pgt_path.clone(), })?; - - let start = params - .content_changes - .iter() - .rev() - .position(|change| change.range.is_none()) - .map_or(0, |idx| params.content_changes.len() - idx - 1); + tracing::trace!("old document: {:?}", old_text); + tracing::trace!("content changes: {:?}", params.content_changes); let text = apply_document_changes( session.position_encoding(), old_text, - ¶ms.content_changes[start..], + ¶ms.content_changes, ); + tracing::trace!("new document: {:?}", text); + + session.insert_document(url.clone(), Document::new(version, &text)); + 
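+ // hand the same text to the workspace so its copy of the document
+ // stays in sync with the one stored in the session above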
session.workspace.change_file(ChangeFileParams { path: pgt_path, version, - changes: params.content_changes[start..] - .iter() - .map(|c| ChangeParams { - range: c.range.and_then(|r| { - from_lsp::text_range(&old_doc.line_index, r, session.position_encoding()).ok() - }), - text: c.text.clone(), - }) - .collect(), + content: text, })?; - session.insert_document(url.clone(), Document::new(version, &text)); - if let Err(err) = session.update_diagnostics(url).await { error!("Failed to update diagnostics: {}", err); } diff --git a/crates/pgt_statement_splitter/Cargo.toml b/crates/pgt_statement_splitter/Cargo.toml index deea07bb..bdd892a6 100644 --- a/crates/pgt_statement_splitter/Cargo.toml +++ b/crates/pgt_statement_splitter/Cargo.toml @@ -19,4 +19,9 @@ pgt_text_size.workspace = true regex.workspace = true [dev-dependencies] -ntest = "0.9.3" +criterion = "0.3" +ntest = "0.9.3" + +[[bench]] +harness = false +name = "splitter" diff --git a/crates/pgt_statement_splitter/benches/splitter.rs b/crates/pgt_statement_splitter/benches/splitter.rs new file mode 100644 index 00000000..e7cdeeef --- /dev/null +++ b/crates/pgt_statement_splitter/benches/splitter.rs @@ -0,0 +1,85 @@ +use criterion::{Criterion, black_box, criterion_group, criterion_main}; +use pgt_statement_splitter::split; + +pub fn splitter_benchmark(c: &mut Criterion) { + let large_statement = r#"with + available_tables as ( + select + c.relname as table_name, + c.oid as table_oid, + c.relkind as class_kind, + n.nspname as schema_name + from + pg_catalog.pg_class c + join pg_catalog.pg_namespace n on n.oid = c.relnamespace + where + -- r: normal tables + -- v: views + -- m: materialized views + -- f: foreign tables + -- p: partitioned tables + c.relkind in ('r', 'v', 'm', 'f', 'p') + ), + available_indexes as ( + select + unnest (ix.indkey) as attnum, + ix.indisprimary as is_primary, + ix.indisunique as is_unique, + ix.indrelid as table_oid + from + pg_catalog.pg_class c + join pg_catalog.pg_index ix on c.oid = ix.indexrelid + where + c.relkind = 'i' + ) +select + atts.attname as name, + ts.table_name, + ts.table_oid :: int8 as "table_oid!", + ts.class_kind :: char as "class_kind!", + ts.schema_name, + atts.atttypid :: int8 as "type_id!", + not atts.attnotnull as "is_nullable!", + nullif( + information_schema._pg_char_max_length (atts.atttypid, atts.atttypmod), + -1 + ) as varchar_length, + pg_get_expr (def.adbin, def.adrelid) as default_expr, + coalesce(ix.is_primary, false) as "is_primary_key!", + coalesce(ix.is_unique, false) as "is_unique!", + pg_catalog.col_description (ts.table_oid, atts.attnum) as comment +from + pg_catalog.pg_attribute atts + join available_tables ts on atts.attrelid = ts.table_oid + left join available_indexes ix on atts.attrelid = ix.table_oid + and atts.attnum = ix.attnum + left join pg_catalog.pg_attrdef def on atts.attrelid = def.adrelid + and atts.attnum = def.adnum +where + -- system columns, such as `cmax` or `tableoid`, have negative `attnum`s + atts.attnum >= 0; + +"#; + + let large_content = large_statement.repeat(500); + + c.bench_function( + format!("large statement with length {}", large_content.len()).as_str(), + |b| { + b.iter(|| black_box(split(&large_content))); + }, + ); + + let small_statement = r#"select 1 from public.user where id = 1"#; + let small_content = small_statement.repeat(500); + + c.bench_function( + format!("small statement with length {}", small_content.len()).as_str(), + |b| { + b.iter(|| black_box(split(&small_content))); + }, + ); +} + +criterion_group!(benches, 
splitter_benchmark); +criterion_main!(benches); diff --git a/crates/pgt_workspace/src/features/completions.rs b/crates/pgt_workspace/src/features/completions.rs index 53eb9eab..c6f05c6e 100644 --- a/crates/pgt_workspace/src/features/completions.rs +++ b/crates/pgt_workspace/src/features/completions.rs @@ -4,7 +4,7 @@ use pgt_completions::CompletionItem; use pgt_fs::PgTPath; use pgt_text_size::{TextRange, TextSize}; -use crate::workspace::{GetCompletionsFilter, GetCompletionsMapper, ParsedDocument, StatementId}; +use crate::workspace::{Document, GetCompletionsFilter, GetCompletionsMapper, StatementId}; #[derive(Debug, serde::Serialize, serde::Deserialize)] #[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] @@ -30,7 +30,7 @@ impl IntoIterator for CompletionsResult { } pub(crate) fn get_statement_for_completions( - doc: &ParsedDocument, + doc: &Document, position: TextSize, ) -> Option<(StatementId, TextRange, String, Arc)> { let count = doc.count(); @@ -76,16 +76,15 @@ pub(crate) fn get_statement_for_completions( #[cfg(test)] mod tests { - use pgt_fs::PgTPath; use pgt_text_size::TextSize; - use crate::workspace::ParsedDocument; + use crate::workspace::Document; use super::get_statement_for_completions; static CURSOR_POSITION: &str = "€"; - fn get_doc_and_pos(sql: &str) -> (ParsedDocument, TextSize) { + fn get_doc_and_pos(sql: &str) -> (Document, TextSize) { let pos = sql .find(CURSOR_POSITION) .expect("Please add cursor position to test sql"); @@ -93,11 +92,7 @@ mod tests { let pos: u32 = pos.try_into().unwrap(); ( - ParsedDocument::new( - PgTPath::new("test.sql"), - sql.replace(CURSOR_POSITION, ""), - 5, - ), + Document::new(sql.replace(CURSOR_POSITION, ""), 5), TextSize::new(pos), ) } diff --git a/crates/pgt_workspace/src/workspace.rs b/crates/pgt_workspace/src/workspace.rs index 61d60a49..9206b39d 100644 --- a/crates/pgt_workspace/src/workspace.rs +++ b/crates/pgt_workspace/src/workspace.rs @@ -4,7 +4,6 @@ pub use self::client::{TransportRequest, WorkspaceClient, WorkspaceTransport}; use pgt_analyse::RuleCategories; use pgt_configuration::{PartialConfiguration, RuleSelector}; use pgt_fs::PgTPath; -use pgt_text_size::TextRange; #[cfg(feature = "schema")] use schemars::{JsonSchema, SchemaGenerator, schema::Schema}; use serde::{Deserialize, Serialize}; @@ -25,7 +24,7 @@ mod client; mod server; pub use server::StatementId; -pub(crate) use server::parsed_document::*; +pub(crate) use server::document::*; #[derive(Debug, serde::Serialize, serde::Deserialize)] #[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] @@ -46,21 +45,7 @@ pub struct CloseFileParams { pub struct ChangeFileParams { pub path: PgTPath, pub version: i32, - pub changes: Vec, -} - -#[derive(Debug, serde::Serialize, serde::Deserialize)] -#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] -pub struct ChangeParams { - /// The range of the file that changed. If `None`, the whole file changed. 
- pub range: Option, - pub text: String, -} - -impl ChangeParams { - pub fn overwrite(text: String) -> Self { - Self { range: None, text } - } + pub content: String, } #[derive(Debug, serde::Serialize, serde::Deserialize)] @@ -205,15 +190,11 @@ impl<'app, W: Workspace + ?Sized> FileGuard<'app, W> { Ok(Self { workspace, path }) } - pub fn change_file( - &self, - version: i32, - changes: Vec, - ) -> Result<(), WorkspaceError> { + pub fn change_file(&self, version: i32, content: String) -> Result<(), WorkspaceError> { self.workspace.change_file(ChangeFileParams { path: self.path.clone(), version, - changes, + content, }) } diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs index 5f2a9ebf..81aa99ab 100644 --- a/crates/pgt_workspace/src/workspace/server.rs +++ b/crates/pgt_workspace/src/workspace/server.rs @@ -9,12 +9,11 @@ use analyser::AnalyserVisitorBuilder; use async_helper::run_async; use connection_manager::ConnectionManager; use dashmap::DashMap; -use document::Document; -use futures::{StreamExt, stream}; -use parsed_document::{ - AsyncDiagnosticsMapper, CursorPositionFilter, DefaultMapper, ExecuteStatementMapper, - ParsedDocument, SyncDiagnosticsMapper, +use document::{ + AsyncDiagnosticsMapper, CursorPositionFilter, DefaultMapper, Document, ExecuteStatementMapper, + SyncDiagnosticsMapper, }; +use futures::{StreamExt, stream}; use pgt_analyse::{AnalyserOptions, AnalysisFilter}; use pgt_analyser::{Analyser, AnalyserConfig, AnalyserContext}; use pgt_diagnostics::{ @@ -51,12 +50,10 @@ pub use statement_identifier::StatementId; mod analyser; mod annotation; mod async_helper; -mod change; mod connection_key; mod connection_manager; pub(crate) mod document; mod migration; -pub(crate) mod parsed_document; mod pg_query; mod schema_cache_manager; mod sql_function; @@ -70,7 +67,7 @@ pub(super) struct WorkspaceServer { /// Stores the schema cache for this workspace schema_cache: SchemaCacheManager, - parsed_documents: DashMap, + documents: DashMap, connection: ConnectionManager, } @@ -92,7 +89,7 @@ impl WorkspaceServer { pub(crate) fn new() -> Self { Self { settings: RwLock::default(), - parsed_documents: DashMap::default(), + documents: DashMap::default(), schema_cache: SchemaCacheManager::new(), connection: ConnectionManager::new(), } @@ -265,11 +262,9 @@ impl Workspace for WorkspaceServer { /// Add a new file to the workspace #[tracing::instrument(level = "info", skip_all, fields(path = params.path.as_path().as_os_str().to_str()), err)] fn open_file(&self, params: OpenFileParams) -> Result<(), WorkspaceError> { - self.parsed_documents + self.documents .entry(params.path.clone()) - .or_insert_with(|| { - ParsedDocument::new(params.path.clone(), params.content, params.version) - }); + .or_insert_with(|| Document::new(params.content, params.version)); if let Some(project_key) = self.path_belongs_to_current_workspace(¶ms.path) { self.set_current_project(project_key); @@ -280,7 +275,7 @@ impl Workspace for WorkspaceServer { /// Remove a file from the workspace fn close_file(&self, params: super::CloseFileParams) -> Result<(), WorkspaceError> { - self.parsed_documents + self.documents .remove(¶ms.path) .ok_or_else(WorkspaceError::not_found)?; @@ -293,16 +288,16 @@ impl Workspace for WorkspaceServer { version = params.version ), err)] fn change_file(&self, params: super::ChangeFileParams) -> Result<(), WorkspaceError> { - let mut parser = - self.parsed_documents - .entry(params.path.clone()) - .or_insert(ParsedDocument::new( - params.path.clone(), 
- "".to_string(), - params.version, - )); - - parser.apply_change(params); + match self.documents.entry(params.path.clone()) { + dashmap::mapref::entry::Entry::Occupied(mut entry) => { + entry + .get_mut() + .update_content(params.content, params.version); + } + dashmap::mapref::entry::Entry::Vacant(entry) => { + entry.insert(Document::new(params.content, params.version)); + } + } Ok(()) } @@ -313,7 +308,7 @@ impl Workspace for WorkspaceServer { fn get_file_content(&self, params: GetFileContentParams) -> Result { let document = self - .parsed_documents + .documents .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; Ok(document.get_document_content().to_string()) @@ -328,7 +323,7 @@ impl Workspace for WorkspaceServer { params: code_actions::CodeActionsParams, ) -> Result { let parser = self - .parsed_documents + .documents .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; @@ -370,7 +365,7 @@ impl Workspace for WorkspaceServer { params: ExecuteStatementParams, ) -> Result { let parser = self - .parsed_documents + .documents .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; @@ -427,8 +422,8 @@ impl Workspace for WorkspaceServer { } }; - let parser = self - .parsed_documents + let doc = self + .documents .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; @@ -436,7 +431,7 @@ impl Workspace for WorkspaceServer { * The statements in the document might already have associated diagnostics, * e.g. if they contain syntax errors that surfaced while parsing/splitting the statements */ - let mut diagnostics: Vec = parser.document_diagnostics().to_vec(); + let mut diagnostics: Vec = doc.document_diagnostics().to_vec(); /* * Type-checking against database connection @@ -444,11 +439,11 @@ impl Workspace for WorkspaceServer { if let Some(pool) = self.get_current_connection() { let path_clone = params.path.clone(); let schema_cache = self.schema_cache.load(pool.clone())?; - let input = parser.iter(AsyncDiagnosticsMapper).collect::>(); + let input = doc.iter(AsyncDiagnosticsMapper).collect::>(); // sorry for the ugly code :( let async_results = run_async(async move { stream::iter(input) - .map(|(_id, range, content, ast, cst, sign)| { + .map(|(id, range, ast, cst, sign)| { let pool = pool.clone(); let path = path_clone.clone(); let schema_cache = Arc::clone(&schema_cache); @@ -456,7 +451,7 @@ impl Workspace for WorkspaceServer { if let Some(ast) = ast { pgt_typecheck::check_sql(TypecheckParams { conn: &pool, - sql: &content, + sql: id.content(), ast: &ast, tree: &cst, schema_cache: schema_cache.as_ref(), @@ -527,50 +522,51 @@ impl Workspace for WorkspaceServer { filter, }); - diagnostics.extend(parser.iter(SyncDiagnosticsMapper).flat_map( - |(_id, range, ast, diag)| { - let mut errors: Vec = vec![]; - - if let Some(diag) = diag { - errors.push(diag.into()); - } - - if let Some(ast) = ast { - errors.extend( - analyser - .run(AnalyserContext { root: &ast }) - .into_iter() - .map(Error::from) - .collect::>(), - ); - } - - errors - .into_iter() - .map(|d| { - let severity = d - .category() - .filter(|category| category.name().starts_with("lint/")) - .map_or_else( - || d.severity(), - |category| { - settings - .get_severity_from_rule_code(category) - .unwrap_or(Severity::Warning) - }, - ); - - SDiagnostic::new( - d.with_file_path(params.path.as_path().display().to_string()) - .with_file_span(range) - .with_severity(severity), - ) - }) - .collect::>() - }, - )); + diagnostics.extend( + doc.iter(SyncDiagnosticsMapper) + .flat_map(|(_id, range, ast, diag)| { + let mut errors: Vec = vec![]; + + if let 
Some(diag) = diag { + errors.push(diag.into()); + } + + if let Some(ast) = ast { + errors.extend( + analyser + .run(AnalyserContext { root: &ast }) + .into_iter() + .map(Error::from) + .collect::>(), + ); + } + + errors + .into_iter() + .map(|d| { + let severity = d + .category() + .filter(|category| category.name().starts_with("lint/")) + .map_or_else( + || d.severity(), + |category| { + settings + .get_severity_from_rule_code(category) + .unwrap_or(Severity::Warning) + }, + ); + + SDiagnostic::new( + d.with_file_path(params.path.as_path().display().to_string()) + .with_file_span(range) + .with_severity(severity), + ) + }) + .collect::>() + }), + ); - let suppressions = parser.document_suppressions(); + let suppressions = doc.suppressions(); let disabled_suppression_errors = suppressions.get_disabled_diagnostic_suppressions_as_errors(&disabled_rules); @@ -612,7 +608,7 @@ impl Workspace for WorkspaceServer { params: GetCompletionsParams, ) -> Result { let parsed_doc = self - .parsed_documents + .documents .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; @@ -630,7 +626,7 @@ impl Workspace for WorkspaceServer { tracing::debug!("No statement found."); Ok(CompletionsResult::default()) } - Some((id, range, content, cst)) => { + Some((_id, range, content, cst)) => { let position = params.position - range.start(); let items = pgt_completions::complete(pgt_completions::CompletionParams { @@ -640,12 +636,6 @@ impl Workspace for WorkspaceServer { text: content, }); - tracing::debug!( - "Found {} completion items for statement with id {}", - items.len(), - id.raw() - ); - Ok(CompletionsResult { items }) } } diff --git a/crates/pgt_workspace/src/workspace/server/annotation.rs b/crates/pgt_workspace/src/workspace/server/annotation.rs index db6a8b3b..20710521 100644 --- a/crates/pgt_workspace/src/workspace/server/annotation.rs +++ b/crates/pgt_workspace/src/workspace/server/annotation.rs @@ -55,14 +55,6 @@ impl AnnotationStore { annotations } - - pub fn clear_statement(&self, id: &StatementId) { - self.db.remove(id); - - if let Some(child_id) = id.get_child_id() { - self.db.remove(&child_id); - } - } } #[cfg(test)] @@ -84,8 +76,8 @@ mod tests { ("SELECT * FROM foo\n", false), ]; - for (idx, (content, expected)) in test_cases.iter().enumerate() { - let statement_id = StatementId::Root(idx.into()); + for (content, expected) in test_cases.iter() { + let statement_id = StatementId::new(content); let annotations = store.get_annotations(&statement_id, content); diff --git a/crates/pgt_workspace/src/workspace/server/change.rs b/crates/pgt_workspace/src/workspace/server/change.rs deleted file mode 100644 index d4a39aa3..00000000 --- a/crates/pgt_workspace/src/workspace/server/change.rs +++ /dev/null @@ -1,1650 +0,0 @@ -use pgt_suppressions::Suppressions; -use pgt_text_size::{TextLen, TextRange, TextSize}; -use std::ops::{Add, Sub}; - -use crate::workspace::{ChangeFileParams, ChangeParams}; - -use super::{Document, document, statement_identifier::StatementId}; - -#[derive(Debug, PartialEq, Eq)] -pub enum StatementChange { - Added(AddedStatement), - Deleted(StatementId), - Modified(ModifiedStatement), -} - -#[derive(Debug, PartialEq, Eq)] -pub struct AddedStatement { - pub stmt: StatementId, - pub text: String, -} - -#[derive(Debug, PartialEq, Eq)] -pub struct ModifiedStatement { - pub old_stmt: StatementId, - pub old_stmt_text: String, - - pub new_stmt: StatementId, - pub new_stmt_text: String, - - pub change_range: TextRange, - pub change_text: String, -} - -impl StatementChange { - #[allow(dead_code)] - 
pub fn statement(&self) -> &StatementId { - match self { - StatementChange::Added(stmt) => &stmt.stmt, - StatementChange::Deleted(stmt) => stmt, - StatementChange::Modified(changed) => &changed.new_stmt, - } - } -} - -/// Returns all relevant details about the change and its effects on the current state of the document. -struct Affected { - /// Full range of the change, including the range of all statements that intersect with the change - affected_range: TextRange, - /// All indices of affected statement positions - affected_indices: Vec, - /// The index of the first statement position before the change, if any - prev_index: Option, - /// The index of the first statement position after the change, if any - next_index: Option, - /// the full affected range includng the prev and next statement - full_affected_range: TextRange, -} - -impl Document { - /// Applies a file change to the document and returns the affected statements - pub fn apply_file_change(&mut self, change: &ChangeFileParams) -> Vec { - // cleanup all diagnostics with every change because we cannot guarantee that they are still valid - // this is because we know their ranges only by finding slices within the content which is - // very much not guaranteed to result in correct ranges - self.diagnostics.clear(); - - // when we recieive more than one change, we need to push back the changes based on the - // total range of the previous ones. This is because the ranges are always related to the original state. - // BUT: only for the statement range changes, not for the text changes - // this is why we pass both varaints to apply_change - let mut changes = Vec::new(); - - let mut change_indices: Vec = (0..change.changes.len()).collect(); - change_indices.sort_by(|&a, &b| { - match (change.changes[a].range, change.changes[b].range) { - (Some(range_a), Some(range_b)) => range_b.start().cmp(&range_a.start()), - (Some(_), None) => std::cmp::Ordering::Greater, // full changes will never be sent in a batch so this does not matter - (None, Some(_)) => std::cmp::Ordering::Less, - (None, None) => std::cmp::Ordering::Equal, - } - }); - - // Sort changes by start position and process from last to first to avoid position invalidation - for &idx in &change_indices { - changes.extend(self.apply_change(&change.changes[idx])); - } - - self.version = change.version; - self.suppressions = Suppressions::from(self.content.as_str()); - - changes - } - - /// Helper method to drain all positions and return them as deleted statements - fn drain_positions(&mut self) -> Vec { - self.positions - .drain(..) 
- .map(|(id, _)| StatementChange::Deleted(id)) - .collect() - } - - /// Applies a change to the document and returns the affected statements - /// - /// Will always assume it's a full change and reparse the whole document - fn apply_full_change(&mut self, change: &ChangeParams) -> Vec<StatementChange> { - let mut changes = Vec::new(); - - changes.extend(self.drain_positions()); - - self.content = change.apply_to_text(&self.content); - - let (ranges, diagnostics) = document::split_with_diagnostics(&self.content, None); - - self.diagnostics = diagnostics; - - // Do not add any statements if there is a fatal error - if self.has_fatal_error() { - return changes; - } - - changes.extend(ranges.into_iter().map(|range| { - let id = self.id_generator.next(); - let text = self.content[range].to_string(); - self.positions.push((id.clone(), range)); - - StatementChange::Added(AddedStatement { stmt: id, text }) - })); - - changes - } - - fn insert_statement(&mut self, range: TextRange) -> StatementId { - let pos = self - .positions - .binary_search_by(|(_, r)| r.start().cmp(&range.start())) - .unwrap_err(); - - let new_id = self.id_generator.next(); - self.positions.insert(pos, (new_id.clone(), range)); - - new_id - } - - /// Returns all relevant details about the change and its effects on the current state of the document. - /// - The affected range is the full range of the change, including the range of all statements that intersect with the change - /// - All indices of affected statement positions - /// - The index of the first statement position before the change, if any - /// - The index of the first statement position after the change, if any - /// - the full affected range including the prev and next statement - fn get_affected( - &self, - change_range: TextRange, - content_size: TextSize, - diff_size: TextSize, - is_addition: bool, - ) -> Affected { - let mut start = change_range.start(); - let mut end = change_range.end().min(content_size); - - let is_trim = change_range.start() >= content_size; - - let mut affected_indices = Vec::new(); - let mut prev_index = None; - let mut next_index = None; - - for (index, (_, pos_range)) in self.positions.iter().enumerate() { - if pos_range.intersect(change_range).is_some() { - affected_indices.push(index); - start = start.min(pos_range.start()); - end = end.max(pos_range.end()); - } else if pos_range.end() <= change_range.start() { - prev_index = Some(index); - } else if pos_range.start() >= change_range.end() && next_index.is_none() { - next_index = Some(index); - break; - } - } - - if affected_indices.is_empty() && prev_index.is_none() { - // if there is no prev_index and no intersection -> use 0 - start = 0.into(); - } - - if affected_indices.is_empty() && next_index.is_none() { - // if there is no next_index and no intersection -> use content_size - end = content_size; - } - - let first_affected_stmt_start = prev_index - .map(|i| self.positions[i].1.start()) - .unwrap_or(start); - - let mut last_affected_stmt_end = next_index - .map(|i| self.positions[i].1.end()) - .unwrap_or_else(|| end); - - if is_addition { - end = end.add(diff_size); - last_affected_stmt_end = last_affected_stmt_end.add(diff_size); - } else if !is_trim { - end = end.sub(diff_size); - last_affected_stmt_end = last_affected_stmt_end.sub(diff_size) - }; - - Affected { - affected_range: { - let end = end.min(content_size); - TextRange::new(start.min(end), end) - }, - affected_indices, - prev_index, - next_index, - full_affected_range: TextRange::new( - first_affected_stmt_start, - 
last_affected_stmt_end - .min(content_size) - .max(first_affected_stmt_start), - ), - } - } - - fn move_ranges(&mut self, offset: TextSize, diff_size: TextSize, is_addition: bool) { - self.positions - .iter_mut() - .skip_while(|(_, r)| offset > r.start()) - .for_each(|(_, range)| { - let new_range = if is_addition { - range.add(diff_size) - } else { - range.sub(diff_size) - }; - - *range = new_range; - }); - } - - /// Applies a single change to the document and returns the affected statements - /// - /// * `change`: The range-adjusted change to use for statement changes - /// * `original_change`: The original change to use for text changes (yes, this is a bit confusing, and we might want to refactor this entire thing at some point.) - fn apply_change(&mut self, change: &ChangeParams) -> Vec<StatementChange> { - // if range is none, we have a full change - if change.range.is_none() { - // doesn't matter what change since range is null - return self.apply_full_change(change); - } - - // I spent a relatively large amount of time thinking about how to handle range changes - // properly. there are quite a few edge cases to consider. I eventually skipped most of - // them, because the complexity is not worth the return for now. we might want to revisit - // this later though. - - let mut changed: Vec<StatementChange> = Vec::with_capacity(self.positions.len()); - - let change_range = change.range.unwrap(); - let previous_content = self.content.clone(); - let new_content = change.apply_to_text(&self.content); - - // we first need to determine the affected range and all affected statements, as well as - // the index of the prev and the next statement, if any. The full affected range is the - // affected range expanded to the start of the previous statement and the end of the next - let Affected { - affected_range, - affected_indices, - prev_index, - next_index, - full_affected_range, - } = self.get_affected( - change_range, - new_content.text_len(), - change.diff_size(), - change.is_addition(), - ); - - // if within a statement, we can modify it if the change results in also a single statement - if affected_indices.len() == 1 { - let changed_content = get_affected(&new_content, affected_range); - - let (new_ranges, diags) = - document::split_with_diagnostics(changed_content, Some(affected_range.start())); - - self.diagnostics = diags; - - if self.has_fatal_error() { - // cleanup all positions if there is a fatal error - changed.extend(self.drain_positions()); - // still process text change - self.content = new_content; - return changed; - } - - if new_ranges.len() == 1 { - let affected_idx = affected_indices[0]; - let new_range = new_ranges[0].add(affected_range.start()); - let (old_id, old_range) = self.positions[affected_idx].clone(); - - // move all statements after the affected range - self.move_ranges(old_range.end(), change.diff_size(), change.is_addition()); - - let new_id = self.id_generator.next(); - self.positions[affected_idx] = (new_id.clone(), new_range); - - changed.push(StatementChange::Modified(ModifiedStatement { - old_stmt: old_id.clone(), - old_stmt_text: previous_content[old_range].to_string(), - - new_stmt: new_id, - new_stmt_text: changed_content[new_ranges[0]].to_string(), - // change must be relative to the statement - change_text: change.text.clone(), - // make sure we always have a valid range >= 0 - change_range: change_range - .checked_sub(old_range.start()) - .unwrap_or(change_range.sub(change_range.start())), - })); - - self.content = new_content; - - return changed; - } - } - - // in any other case, 
parse the full affected range - let changed_content = get_affected(&new_content, full_affected_range); - - let (new_ranges, diags) = - document::split_with_diagnostics(changed_content, Some(full_affected_range.start())); - - self.diagnostics = diags; - - if self.has_fatal_error() { - // cleanup all positions if there is a fatal error - changed.extend(self.drain_positions()); - // still process text change - self.content = new_content; - return changed; - } - - // delete and add new ones - if let Some(next_index) = next_index { - changed.push(StatementChange::Deleted( - self.positions[next_index].0.clone(), - )); - self.positions.remove(next_index); - } - for idx in affected_indices.iter().rev() { - changed.push(StatementChange::Deleted(self.positions[*idx].0.clone())); - self.positions.remove(*idx); - } - if let Some(prev_index) = prev_index { - changed.push(StatementChange::Deleted( - self.positions[prev_index].0.clone(), - )); - self.positions.remove(prev_index); - } - - new_ranges.iter().for_each(|range| { - let actual_range = range.add(full_affected_range.start()); - let new_id = self.insert_statement(actual_range); - changed.push(StatementChange::Added(AddedStatement { - stmt: new_id, - text: new_content[actual_range].to_string(), - })); - }); - - // move all statements after the affected range - self.move_ranges( - full_affected_range.end(), - change.diff_size(), - change.is_addition(), - ); - - self.content = new_content; - - changed - } -} - -impl ChangeParams { - /// For lack of a better name, this returns the change in size of the text compared to the range - pub fn change_size(&self) -> i64 { - match self.range { - Some(range) => { - let range_length: usize = range.len().into(); - let text_length = self.text.chars().count(); - text_length as i64 - range_length as i64 - } - None => i64::try_from(self.text.chars().count()).unwrap(), - } - } - - pub fn diff_size(&self) -> TextSize { - match self.range { - Some(range) => { - let range_length: usize = range.len().into(); - let text_length = self.text.chars().count(); - let diff = (text_length as i64 - range_length as i64).abs(); - TextSize::from(u32::try_from(diff).unwrap()) - } - None => TextSize::from(u32::try_from(self.text.chars().count()).unwrap()), - } - } - - pub fn is_addition(&self) -> bool { - self.range.is_some() && self.text.len() > self.range.unwrap().len().into() - } - - pub fn is_deletion(&self) -> bool { - self.range.is_some() && self.text.len() < self.range.unwrap().len().into() - } - - pub fn apply_to_text(&self, text: &str) -> String { - if self.range.is_none() { - return self.text.clone(); - } - - let range = self.range.unwrap(); - let start = usize::from(range.start()); - let end = usize::from(range.end()); - - let mut new_text = String::new(); - new_text.push_str(&text[..start]); - new_text.push_str(&self.text); - if end < text.len() { - new_text.push_str(&text[end..]); - } - - new_text - } -} - -fn get_affected(content: &str, range: TextRange) -> &str { - let start_byte = content - .char_indices() - .nth(usize::from(range.start())) - .map(|(i, _)| i) - .unwrap_or(content.len()); - - let end_byte = content - .char_indices() - .nth(usize::from(range.end())) - .map(|(i, _)| i) - .unwrap_or(content.len()); - - &content[start_byte..end_byte] -} - -#[cfg(test)] -mod tests { - use super::*; - use pgt_text_size::TextRange; - - use crate::workspace::{ChangeFileParams, ChangeParams}; - - use pgt_fs::PgTPath; - - impl Document { - pub fn get_text(&self, idx: usize) -> String { - 
self.content[self.positions[idx].1.start().into()..self.positions[idx].1.end().into()] - .to_string() - } - } - - fn assert_document_integrity(d: &Document) { - let ranges = pgt_statement_splitter::split(&d.content).ranges; - - assert!( - ranges.len() == d.positions.len(), - "should have the correct amount of positions" - ); - - assert!( - ranges - .iter() - .all(|r| { d.positions.iter().any(|(_, stmt_range)| stmt_range == r) }), - "all ranges should be in positions" - ); - } - - #[test] - fn comments_at_begin() { - let path = PgTPath::new("test.sql"); - let input = "\nselect id from users;\n"; - - let mut d = Document::new(input.to_string(), 0); - - let change1 = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: "-".to_string(), - range: Some(TextRange::new(0.into(), 0.into())), - }], - }; - - let _changed1 = d.apply_file_change(&change1); - - assert_eq!(d.content, "-\nselect id from users;\n"); - assert_eq!(d.positions.len(), 2); - - let change2 = ChangeFileParams { - path: path.clone(), - version: 2, - changes: vec![ChangeParams { - text: "-".to_string(), - range: Some(TextRange::new(1.into(), 1.into())), - }], - }; - - let _changed2 = d.apply_file_change(&change2); - - assert_eq!(d.content, "--\nselect id from users;\n"); - assert_eq!(d.positions.len(), 1); - - let change3 = ChangeFileParams { - path: path.clone(), - version: 3, - changes: vec![ChangeParams { - text: " ".to_string(), - range: Some(TextRange::new(2.into(), 2.into())), - }], - }; - - let _changed3 = d.apply_file_change(&change3); - - assert_eq!(d.content, "-- \nselect id from users;\n"); - assert_eq!(d.positions.len(), 1); - - let change4 = ChangeFileParams { - path: path.clone(), - version: 3, - changes: vec![ChangeParams { - text: "t".to_string(), - range: Some(TextRange::new(3.into(), 3.into())), - }], - }; - - let _changed4 = d.apply_file_change(&change4); - - assert_eq!(d.content, "-- t\nselect id from users;\n"); - assert_eq!(d.positions.len(), 1); - - assert_document_integrity(&d); - } - - #[test] - fn typing_comments() { - let path = PgTPath::new("test.sql"); - let input = "select id from users;\n"; - - let mut d = Document::new(input.to_string(), 0); - - let change1 = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: "-".to_string(), - range: Some(TextRange::new(22.into(), 23.into())), - }], - }; - - let _changed1 = d.apply_file_change(&change1); - - assert_eq!(d.content, "select id from users;\n-"); - assert_eq!(d.positions.len(), 2); - - let change2 = ChangeFileParams { - path: path.clone(), - version: 2, - changes: vec![ChangeParams { - text: "-".to_string(), - range: Some(TextRange::new(23.into(), 24.into())), - }], - }; - - let _changed2 = d.apply_file_change(&change2); - - assert_eq!(d.content, "select id from users;\n--"); - assert_eq!(d.positions.len(), 1); - - let change3 = ChangeFileParams { - path: path.clone(), - version: 3, - changes: vec![ChangeParams { - text: " ".to_string(), - range: Some(TextRange::new(24.into(), 25.into())), - }], - }; - - let _changed3 = d.apply_file_change(&change3); - - assert_eq!(d.content, "select id from users;\n-- "); - assert_eq!(d.positions.len(), 1); - - let change4 = ChangeFileParams { - path: path.clone(), - version: 3, - changes: vec![ChangeParams { - text: "t".to_string(), - range: Some(TextRange::new(25.into(), 26.into())), - }], - }; - - let _changed4 = d.apply_file_change(&change4); - - assert_eq!(d.content, "select id from users;\n-- t"); - assert_eq!(d.positions.len(), 
1); - - assert_document_integrity(&d); - } - - #[test] - fn within_statements() { - let path = PgTPath::new("test.sql"); - let input = "select id from users;\n\n\n\nselect * from contacts;"; - - let mut d = Document::new(input.to_string(), 0); - - assert_eq!(d.positions.len(), 2); - - let change = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: "select 1;".to_string(), - range: Some(TextRange::new(23.into(), 23.into())), - }], - }; - - let changed = d.apply_file_change(&change); - - assert_eq!(changed.len(), 5); - assert_eq!( - changed - .iter() - .filter(|c| matches!(c, StatementChange::Deleted(_))) - .count(), - 2 - ); - assert_eq!( - changed - .iter() - .filter(|c| matches!(c, StatementChange::Added(_))) - .count(), - 3 - ); - - assert_document_integrity(&d); - } - - #[test] - fn within_statements_2() { - let path = PgTPath::new("test.sql"); - let input = "alter table deal alter column value drop not null;\n"; - let mut d = Document::new(input.to_string(), 0); - - assert_eq!(d.positions.len(), 1); - - let change1 = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: " ".to_string(), - range: Some(TextRange::new(17.into(), 17.into())), - }], - }; - - let changed1 = d.apply_file_change(&change1); - assert_eq!(changed1.len(), 1); - assert_eq!( - d.content, - "alter table deal alter column value drop not null;\n" - ); - assert_document_integrity(&d); - - let change2 = ChangeFileParams { - path: path.clone(), - version: 2, - changes: vec![ChangeParams { - text: " ".to_string(), - range: Some(TextRange::new(18.into(), 18.into())), - }], - }; - - let changed2 = d.apply_file_change(&change2); - assert_eq!(changed2.len(), 1); - assert_eq!( - d.content, - "alter table deal alter column value drop not null;\n" - ); - assert_document_integrity(&d); - - let change3 = ChangeFileParams { - path: path.clone(), - version: 3, - changes: vec![ChangeParams { - text: " ".to_string(), - range: Some(TextRange::new(19.into(), 19.into())), - }], - }; - - let changed3 = d.apply_file_change(&change3); - assert_eq!(changed3.len(), 1); - assert_eq!( - d.content, - "alter table deal alter column value drop not null;\n" - ); - assert_document_integrity(&d); - - let change4 = ChangeFileParams { - path: path.clone(), - version: 4, - changes: vec![ChangeParams { - text: " ".to_string(), - range: Some(TextRange::new(20.into(), 20.into())), - }], - }; - - let changed4 = d.apply_file_change(&change4); - assert_eq!(changed4.len(), 1); - assert_eq!( - d.content, - "alter table deal alter column value drop not null;\n" - ); - assert_document_integrity(&d); - } - - #[test] - fn julians_sample() { - let path = PgTPath::new("test.sql"); - let input = "select\n *\nfrom\n test;\n\nselect\n\nalter table test\n\ndrop column id;"; - let mut d = Document::new(input.to_string(), 0); - - assert_eq!(d.positions.len(), 4); - - let change1 = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: " ".to_string(), - range: Some(TextRange::new(31.into(), 31.into())), - }], - }; - - let changed1 = d.apply_file_change(&change1); - assert_eq!(changed1.len(), 1); - assert_eq!( - d.content, - "select\n *\nfrom\n test;\n\nselect \n\nalter table test\n\ndrop column id;" - ); - assert_document_integrity(&d); - - // problem: this creates a new statement - let change2 = ChangeFileParams { - path: path.clone(), - version: 2, - changes: vec![ChangeParams { - text: ";".to_string(), - range: Some(TextRange::new(32.into(), 
32.into())), - }], - }; - - let changed2 = d.apply_file_change(&change2); - assert_eq!(changed2.len(), 4); - assert_eq!( - changed2 - .iter() - .filter(|c| matches!(c, StatementChange::Deleted(_))) - .count(), - 2 - ); - assert_eq!( - changed2 - .iter() - .filter(|c| matches!(c, StatementChange::Added(_))) - .count(), - 2 - ); - assert_document_integrity(&d); - - let change3 = ChangeFileParams { - path: path.clone(), - version: 3, - changes: vec![ChangeParams { - text: "".to_string(), - range: Some(TextRange::new(32.into(), 33.into())), - }], - }; - - let changed3 = d.apply_file_change(&change3); - assert_eq!(changed3.len(), 1); - assert!(matches!(&changed3[0], StatementChange::Modified(_))); - assert_eq!( - d.content, - "select\n *\nfrom\n test;\n\nselect \n\nalter table test\n\ndrop column id;" - ); - match &changed3[0] { - StatementChange::Modified(changed) => { - assert_eq!(changed.old_stmt_text, "select ;"); - assert_eq!(changed.new_stmt_text, "select"); - assert_eq!(changed.change_text, ""); - assert_eq!(changed.change_range, TextRange::new(7.into(), 8.into())); - } - _ => panic!("expected modified statement"), - } - assert_document_integrity(&d); - } - - #[test] - fn across_statements() { - let path = PgTPath::new("test.sql"); - let input = "select id from users;\nselect * from contacts;"; - - let mut d = Document::new(input.to_string(), 0); - - assert_eq!(d.positions.len(), 2); - - let change = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: ",test from users;\nselect 1;".to_string(), - range: Some(TextRange::new(9.into(), 45.into())), - }], - }; - - let changed = d.apply_file_change(&change); - - assert_eq!(changed.len(), 4); - assert!(matches!(changed[0], StatementChange::Deleted(_))); - assert_eq!(changed[0].statement().raw(), 1); - assert!(matches!( - changed[1], - StatementChange::Deleted(StatementId::Root(_)) - )); - assert_eq!(changed[1].statement().raw(), 0); - assert!( - matches!(&changed[2], StatementChange::Added(AddedStatement { stmt: _, text }) if text == "select id,test from users;") - ); - assert!( - matches!(&changed[3], StatementChange::Added(AddedStatement { stmt: _, text }) if text == "select 1;") - ); - - assert_document_integrity(&d); - } - - #[test] - fn append_whitespace_to_statement() { - let path = PgTPath::new("test.sql"); - let input = "select id"; - - let mut d = Document::new(input.to_string(), 0); - - assert_eq!(d.positions.len(), 1); - - let change = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: " ".to_string(), - range: Some(TextRange::new(9.into(), 10.into())), - }], - }; - - let changed = d.apply_file_change(&change); - - assert_eq!(changed.len(), 1); - - assert_document_integrity(&d); - } - - #[test] - fn apply_changes() { - let path = PgTPath::new("test.sql"); - let input = "select id from users;\nselect * from contacts;"; - - let mut d = Document::new(input.to_string(), 0); - - assert_eq!(d.positions.len(), 2); - - let change = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: ",test from users\nselect 1;".to_string(), - range: Some(TextRange::new(9.into(), 45.into())), - }], - }; - - let changed = d.apply_file_change(&change); - - assert_eq!(changed.len(), 4); - - assert!(matches!( - changed[0], - StatementChange::Deleted(StatementId::Root(_)) - )); - assert_eq!(changed[0].statement().raw(), 1); - assert!(matches!( - changed[1], - StatementChange::Deleted(StatementId::Root(_)) - )); - 
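// Editor's note: a self-contained sketch, not part of the patch, of the
// "process ranged edits from last to first" technique that `apply_file_change`
// above relies on. The names and the `(start..end, String)` edit shape are
// illustrative; the real code works on `TextRange`s and emits `StatementChange`s.
fn apply_edits_in_reverse(text: &str, mut edits: Vec<(std::ops::Range<usize>, String)>) -> String {
    // Sorting by start offset, descending, means the tail of the string is
    // rewritten first, so the offsets of the remaining edits stay valid.
    edits.sort_by(|a, b| b.0.start.cmp(&a.0.start));
    let mut out = text.to_string();
    for (range, replacement) in edits {
        out.replace_range(range, &replacement);
    }
    out
}
// apply_edits_in_reverse("select 1;\nselect 2;", vec![(7..8, "42".into()), (17..18, "7".into())])
// yields "select 42;\nselect 7;".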
assert_eq!(changed[1].statement().raw(), 0); - assert_eq!( - changed[2], - StatementChange::Added(AddedStatement { - stmt: StatementId::Root(2.into()), - text: "select id,test from users".to_string() - }) - ); - assert_eq!( - changed[3], - StatementChange::Added(AddedStatement { - stmt: StatementId::Root(3.into()), - text: "select 1;".to_string() - }) - ); - - assert_eq!("select id,test from users\nselect 1;", d.content); - - assert_document_integrity(&d); - } - - #[test] - fn removing_newline_at_the_beginning() { - let path = PgTPath::new("test.sql"); - let input = "\n"; - - let mut d = Document::new(input.to_string(), 1); - - assert_eq!(d.positions.len(), 0); - - let change = ChangeFileParams { - path: path.clone(), - version: 2, - changes: vec![ChangeParams { - text: "\nbegin;\n\nselect 1\n\nrollback;\n".to_string(), - range: Some(TextRange::new(0.into(), 1.into())), - }], - }; - - let changes = d.apply_file_change(&change); - - assert_eq!(changes.len(), 3); - - assert_document_integrity(&d); - - let change2 = ChangeFileParams { - path: path.clone(), - version: 3, - changes: vec![ChangeParams { - text: "".to_string(), - range: Some(TextRange::new(0.into(), 1.into())), - }], - }; - - let changes2 = d.apply_file_change(&change2); - - assert_eq!(changes2.len(), 1); - - assert_document_integrity(&d); - } - - #[test] - fn apply_changes_at_end_of_statement() { - let path = PgTPath::new("test.sql"); - let input = "select id from\nselect * from contacts;"; - - let mut d = Document::new(input.to_string(), 1); - - assert_eq!(d.positions.len(), 2); - - let change = ChangeFileParams { - path: path.clone(), - version: 2, - changes: vec![ChangeParams { - text: " contacts;".to_string(), - range: Some(TextRange::new(14.into(), 14.into())), - }], - }; - - let changes = d.apply_file_change(&change); - - assert_eq!(changes.len(), 1); - - assert!(matches!(changes[0], StatementChange::Modified(_))); - - assert_eq!( - "select id from contacts;\nselect * from contacts;", - d.content - ); - - assert_document_integrity(&d); - } - - #[test] - fn apply_changes_replacement() { - let path = PgTPath::new("test.sql"); - - let mut doc = Document::new("".to_string(), 0); - - let change = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: "select 1;\nselect 2;".to_string(), - range: None, - }], - }; - - doc.apply_file_change(&change); - - assert_eq!(doc.get_text(0), "select 1;".to_string()); - assert_eq!(doc.get_text(1), "select 2;".to_string()); - assert_eq!( - doc.positions[0].1, - TextRange::new(TextSize::new(0), TextSize::new(9)) - ); - assert_eq!( - doc.positions[1].1, - TextRange::new(TextSize::new(10), TextSize::new(19)) - ); - - let change_2 = ChangeFileParams { - path: path.clone(), - version: 2, - changes: vec![ChangeParams { - text: "".to_string(), - range: Some(TextRange::new(7.into(), 8.into())), - }], - }; - - doc.apply_file_change(&change_2); - - assert_eq!(doc.content, "select ;\nselect 2;"); - assert_eq!(doc.positions.len(), 2); - assert_eq!(doc.get_text(0), "select ;".to_string()); - assert_eq!(doc.get_text(1), "select 2;".to_string()); - assert_eq!( - doc.positions[0].1, - TextRange::new(TextSize::new(0), TextSize::new(8)) - ); - assert_eq!( - doc.positions[1].1, - TextRange::new(TextSize::new(9), TextSize::new(18)) - ); - - let change_3 = ChangeFileParams { - path: path.clone(), - version: 3, - changes: vec![ChangeParams { - text: "!".to_string(), - range: Some(TextRange::new(7.into(), 7.into())), - }], - }; - - doc.apply_file_change(&change_3); - - 
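// Editor's note: a minimal, self-contained model, not part of the patch, of how
// `ChangeParams::apply_to_text` (defined above) splices a ranged edit into the
// document: `None` replaces the whole text, `Some((start, end))` replaces that
// slice. Byte offsets are used here for brevity; the real code uses `TextRange`.
fn splice(text: &str, range: Option<(usize, usize)>, new: &str) -> String {
    match range {
        None => new.to_string(),
        Some((start, end)) => {
            let mut out = String::with_capacity(text.len() + new.len());
            out.push_str(&text[..start]);
            out.push_str(new);
            if end < text.len() {
                out.push_str(&text[end..]);
            }
            out
        }
    }
}
// splice("select ;\nselect 2;", Some((7, 7)), "!") yields "select !;\nselect 2;",
// matching the `change_3` assertion below.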
assert_eq!(doc.content, "select !;\nselect 2;"); - assert_eq!(doc.positions.len(), 2); - assert_eq!( - doc.positions[0].1, - TextRange::new(TextSize::new(0), TextSize::new(9)) - ); - assert_eq!( - doc.positions[1].1, - TextRange::new(TextSize::new(10), TextSize::new(19)) - ); - - let change_4 = ChangeFileParams { - path: path.clone(), - version: 4, - changes: vec![ChangeParams { - text: "".to_string(), - range: Some(TextRange::new(7.into(), 8.into())), - }], - }; - - doc.apply_file_change(&change_4); - - assert_eq!(doc.content, "select ;\nselect 2;"); - assert_eq!(doc.positions.len(), 2); - assert_eq!( - doc.positions[0].1, - TextRange::new(TextSize::new(0), TextSize::new(8)) - ); - assert_eq!( - doc.positions[1].1, - TextRange::new(TextSize::new(9), TextSize::new(18)) - ); - - let change_5 = ChangeFileParams { - path: path.clone(), - version: 5, - changes: vec![ChangeParams { - text: "1".to_string(), - range: Some(TextRange::new(7.into(), 7.into())), - }], - }; - - doc.apply_file_change(&change_5); - - assert_eq!(doc.content, "select 1;\nselect 2;"); - assert_eq!(doc.positions.len(), 2); - assert_eq!( - doc.positions[0].1, - TextRange::new(TextSize::new(0), TextSize::new(9)) - ); - assert_eq!( - doc.positions[1].1, - TextRange::new(TextSize::new(10), TextSize::new(19)) - ); - - assert_document_integrity(&doc); - } - - #[test] - fn comment_at_begin() { - let path = PgTPath::new("test.sql"); - - let mut doc = Document::new( - "-- Add new schema named \"private\"\nCREATE SCHEMA \"private\";".to_string(), - 0, - ); - - let change = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: "".to_string(), - range: Some(TextRange::new(0.into(), 1.into())), - }], - }; - - let changed = doc.apply_file_change(&change); - - assert_eq!( - doc.content, - "- Add new schema named \"private\"\nCREATE SCHEMA \"private\";" - ); - assert_eq!(changed.len(), 3); - assert!(matches!(&changed[0], StatementChange::Deleted(_))); - assert!(matches!( - changed[1], - StatementChange::Added(AddedStatement { .. }) - )); - assert!(matches!( - changed[2], - StatementChange::Added(AddedStatement { .. }) - )); - - let change_2 = ChangeFileParams { - path: path.clone(), - version: 2, - changes: vec![ChangeParams { - text: "-".to_string(), - range: Some(TextRange::new(0.into(), 0.into())), - }], - }; - - let changed_2 = doc.apply_file_change(&change_2); - - assert_eq!( - doc.content, - "-- Add new schema named \"private\"\nCREATE SCHEMA \"private\";" - ); - - assert_eq!(changed_2.len(), 3); - assert!(matches!( - changed_2[0], - StatementChange::Deleted(StatementId::Root(_)) - )); - assert!(matches!( - changed_2[1], - StatementChange::Deleted(StatementId::Root(_)) - )); - assert!(matches!( - changed_2[2], - StatementChange::Added(AddedStatement { .. 
}) - )); - - assert_document_integrity(&doc); - } - - #[test] - fn apply_changes_within_statement() { - let input = "select id from users;\nselect * from contacts;"; - let path = PgTPath::new("test.sql"); - - let mut doc = Document::new(input.to_string(), 0); - - assert_eq!(doc.positions.len(), 2); - - let stmt_1_range = doc.positions[0].clone(); - let stmt_2_range = doc.positions[1].clone(); - - let update_text = ",test"; - - let update_range = TextRange::new(9.into(), 10.into()); - - let update_text_len = u32::try_from(update_text.chars().count()).unwrap(); - let update_addition = update_text_len - u32::from(update_range.len()); - - let change = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: update_text.to_string(), - range: Some(update_range), - }], - }; - - doc.apply_file_change(&change); - - assert_eq!( - "select id,test from users;\nselect * from contacts;", - doc.content - ); - assert_eq!(doc.positions.len(), 2); - assert_eq!(doc.positions[0].1.start(), stmt_1_range.1.start()); - assert_eq!( - u32::from(doc.positions[0].1.end()), - u32::from(stmt_1_range.1.end()) + update_addition - ); - assert_eq!( - u32::from(doc.positions[1].1.start()), - u32::from(stmt_2_range.1.start()) + update_addition - ); - assert_eq!( - u32::from(doc.positions[1].1.end()), - u32::from(stmt_2_range.1.end()) + update_addition - ); - - assert_document_integrity(&doc); - } - - #[test] - fn remove_outside_of_content() { - let path = PgTPath::new("test.sql"); - let input = "select id from contacts;\n\nselect * from contacts;"; - - let mut d = Document::new(input.to_string(), 1); - - assert_eq!(d.positions.len(), 2); - - let change1 = ChangeFileParams { - path: path.clone(), - version: 2, - changes: vec![ChangeParams { - text: "\n".to_string(), - range: Some(TextRange::new(49.into(), 49.into())), - }], - }; - - d.apply_file_change(&change1); - - assert_eq!( - d.content, - "select id from contacts;\n\nselect * from contacts;\n" - ); - - let change2 = ChangeFileParams { - path: path.clone(), - version: 3, - changes: vec![ChangeParams { - text: "\n".to_string(), - range: Some(TextRange::new(50.into(), 50.into())), - }], - }; - - d.apply_file_change(&change2); - - assert_eq!( - d.content, - "select id from contacts;\n\nselect * from contacts;\n\n" - ); - - let change5 = ChangeFileParams { - path: path.clone(), - version: 6, - changes: vec![ChangeParams { - text: "".to_string(), - range: Some(TextRange::new(51.into(), 52.into())), - }], - }; - - let changes = d.apply_file_change(&change5); - - assert!(matches!( - changes[0], - StatementChange::Deleted(StatementId::Root(_)) - )); - - assert!(matches!( - changes[1], - StatementChange::Added(AddedStatement { .. }) - )); - - assert_eq!(changes.len(), 2); - - assert_eq!( - d.content, - "select id from contacts;\n\nselect * from contacts;\n\n" - ); - - assert_document_integrity(&d); - } - - #[test] - fn remove_trailing_whitespace() { - let path = PgTPath::new("test.sql"); - - let mut doc = Document::new("select * from ".to_string(), 0); - - let change = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: "".to_string(), - range: Some(TextRange::new(13.into(), 14.into())), - }], - }; - - let changed = doc.apply_file_change(&change); - - assert_eq!(doc.content, "select * from"); - - assert_eq!(changed.len(), 1); - - match &changed[0] { - StatementChange::Modified(stmt) => { - let ModifiedStatement { - change_range, - change_text, - new_stmt_text, - old_stmt_text, - .. 
- } = stmt; - - assert_eq!(change_range, &TextRange::new(13.into(), 14.into())); - assert_eq!(change_text, ""); - assert_eq!(new_stmt_text, "select * from"); - - // the whitespace was not considered - // to be a part of the statement - assert_eq!(old_stmt_text, "select * from"); - } - - _ => unreachable!("Did not yield a modified statement."), - } - - assert_document_integrity(&doc); - } - - #[test] - fn remove_trailing_whitespace_and_last_char() { - let path = PgTPath::new("test.sql"); - - let mut doc = Document::new("select * from ".to_string(), 0); - - let change = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: "".to_string(), - range: Some(TextRange::new(12.into(), 14.into())), - }], - }; - - let changed = doc.apply_file_change(&change); - - assert_eq!(doc.content, "select * fro"); - - assert_eq!(changed.len(), 1); - - match &changed[0] { - StatementChange::Modified(stmt) => { - let ModifiedStatement { - change_range, - change_text, - new_stmt_text, - old_stmt_text, - .. - } = stmt; - - assert_eq!(change_range, &TextRange::new(12.into(), 14.into())); - assert_eq!(change_text, ""); - assert_eq!(new_stmt_text, "select * fro"); - - // the whitespace was not considered - // to be a part of the statement - assert_eq!(old_stmt_text, "select * from"); - } - - _ => unreachable!("Did not yield a modified statement."), - } - - assert_document_integrity(&doc); - } - - #[test] - fn multiple_deletions_at_once() { - let path = PgTPath::new("test.sql"); - - let mut doc = Document::new("ALTER TABLE ONLY public.omni_channel_message ADD CONSTRAINT omni_channel_message_organisation_id_fkey FOREIGN KEY (organisation_id) REFERENCES public.organisation(id) ON UPDATE RESTRICT ON DELETE CASCADE;".to_string(), 0); - - let change = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ - ChangeParams { - range: Some(TextRange::new(60.into(), 80.into())), - text: "sendout".to_string(), - }, - ChangeParams { - range: Some(TextRange::new(24.into(), 44.into())), - text: "sendout".to_string(), - }, - ], - }; - - let changed = doc.apply_file_change(&change); - - assert_eq!( - doc.content, - "ALTER TABLE ONLY public.sendout ADD CONSTRAINT sendout_organisation_id_fkey FOREIGN KEY (organisation_id) REFERENCES public.organisation(id) ON UPDATE RESTRICT ON DELETE CASCADE;" - ); - - assert_eq!(changed.len(), 2); - - assert_document_integrity(&doc); - } - - #[test] - fn multiple_additions_at_once() { - let path = PgTPath::new("test.sql"); - - let mut doc = Document::new("ALTER TABLE ONLY public.sendout ADD CONSTRAINT sendout_organisation_id_fkey FOREIGN KEY (organisation_id) REFERENCES public.organisation(id) ON UPDATE RESTRICT ON DELETE CASCADE;".to_string(), 0); - - let change = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ - ChangeParams { - range: Some(TextRange::new(47.into(), 54.into())), - text: "omni_channel_message".to_string(), - }, - ChangeParams { - range: Some(TextRange::new(24.into(), 31.into())), - text: "omni_channel_message".to_string(), - }, - ], - }; - - let changed = doc.apply_file_change(&change); - - assert_eq!( - doc.content, - "ALTER TABLE ONLY public.omni_channel_message ADD CONSTRAINT omni_channel_message_organisation_id_fkey FOREIGN KEY (organisation_id) REFERENCES public.organisation(id) ON UPDATE RESTRICT ON DELETE CASCADE;" - ); - - assert_eq!(changed.len(), 2); - - assert_document_integrity(&doc); - } - - #[test] - fn remove_inbetween_whitespace() { - let path = PgTPath::new("test.sql"); - - let mut doc = 
Document::new("select * from users".to_string(), 0); - - let change = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ChangeParams { - text: "".to_string(), - range: Some(TextRange::new(9.into(), 11.into())), - }], - }; - - let changed = doc.apply_file_change(&change); - - assert_eq!(doc.content, "select * from users"); - - assert_eq!(changed.len(), 1); - - match &changed[0] { - StatementChange::Modified(stmt) => { - let ModifiedStatement { - change_range, - change_text, - new_stmt_text, - old_stmt_text, - .. - } = stmt; - - assert_eq!(change_range, &TextRange::new(9.into(), 11.into())); - assert_eq!(change_text, ""); - assert_eq!(old_stmt_text, "select * from users"); - assert_eq!(new_stmt_text, "select * from users"); - } - - _ => unreachable!("Did not yield a modified statement."), - } - - assert_document_integrity(&doc); - } - - #[test] - fn test_another_issue() { - let path = PgTPath::new("test.sql"); - let initial_content = r#" - - - -ALTER TABLE ONLY "public"."campaign_contact_list" - ADD CONSTRAINT "campaign_contact_list_contact_list_id_fkey" FOREIGN KEY ("contact_list_id") REFERENCES "public"."contact_list"("id") ON UPDATE RESTRICT ON DELETE CASCADE; -"#; - - let mut doc = Document::new(initial_content.to_string(), 0); - - let change1 = ChangeFileParams { - path: path.clone(), - version: 1, - changes: vec![ - ChangeParams { - range: Some(TextRange::new(31.into(), 39.into())), - text: "journey_node".to_string(), - }, - ChangeParams { - range: Some(TextRange::new(74.into(), 82.into())), - text: "journey_node".to_string(), - }, - ], - }; - - let _changes = doc.apply_file_change(&change1); - - let expected_content = r#" - - - -ALTER TABLE ONLY "public"."journey_node_contact_list" - ADD CONSTRAINT "journey_node_contact_list_contact_list_id_fkey" FOREIGN KEY ("contact_list_id") REFERENCES "public"."contact_list"("id") ON UPDATE RESTRICT ON DELETE CASCADE; -"#; - - assert_eq!(doc.content, expected_content); - - assert_document_integrity(&doc); - } - - #[test] - fn test_comments_only() { - let path = PgTPath::new("test.sql"); - let initial_content = "-- atlas:import async_trigger/setup.sql\n-- atlas:import public/setup.sql\n-- atlas:import private/setup.sql\n-- atlas:import api/setup.sql\n-- atlas:import async_trigger/index.sql\n-- atlas:import public/enums/index.sql\n-- atlas:import public/types/index.sql\n-- atlas:import private/enums/index.sql\n-- atlas:import private/functions/index.sql\n-- atlas:import public/tables/index.sql\n-- atlas:import public/index.sql\n-- atlas:import private/index.sql\n-- atlas:import api/index.sql\n\n\n\n"; - - // Create a new document - let mut doc = Document::new(initial_content.to_string(), 0); - - // First change: Delete some text at line 2, character 24-29 - let change1 = ChangeFileParams { - path: path.clone(), - version: 3, - changes: vec![ChangeParams { - text: "".to_string(), - range: Some(TextRange::new( - // Calculate the correct position based on the content - // Line 2, character 24 - 98.into(), - // Line 2, character 29 - 103.into(), - )), - }], - }; - - let _changes1 = doc.apply_file_change(&change1); - - // Second change: Add 't' at line 2, character 24 - let change2 = ChangeFileParams { - path: path.clone(), - version: 4, - changes: vec![ChangeParams { - text: "t".to_string(), - range: Some(TextRange::new(98.into(), 98.into())), - }], - }; - - let _changes2 = doc.apply_file_change(&change2); - - assert_eq!( - doc.positions.len(), - 0, - "Document should have no statement after adding 't'" - ); - - // Third change: Add 
'e' at line 2, character 25 - let change3 = ChangeFileParams { - path: path.clone(), - version: 5, - changes: vec![ChangeParams { - text: "e".to_string(), - range: Some(TextRange::new(99.into(), 99.into())), - }], - }; - - let _changes3 = doc.apply_file_change(&change3); - assert_eq!( - doc.positions.len(), - 0, - "Document should still have no statement" - ); - - // Fourth change: Add 's' at line 2, character 26 - let change4 = ChangeFileParams { - path: path.clone(), - version: 6, - changes: vec![ChangeParams { - text: "s".to_string(), - range: Some(TextRange::new(100.into(), 100.into())), - }], - }; - - let _changes4 = doc.apply_file_change(&change4); - assert_eq!( - doc.positions.len(), - 0, - "Document should still have no statement" - ); - - // Fifth change: Add 't' at line 2, character 27 - let change5 = ChangeFileParams { - path: path.clone(), - version: 7, - changes: vec![ChangeParams { - text: "t".to_string(), - range: Some(TextRange::new(101.into(), 101.into())), - }], - }; - - let _changes5 = doc.apply_file_change(&change5); - assert_eq!( - doc.positions.len(), - 0, - "Document should still have no statement" - ); - - assert_document_integrity(&doc); - } -} diff --git a/crates/pgt_workspace/src/workspace/server/document.rs b/crates/pgt_workspace/src/workspace/server/document.rs index 28514f51..f8ab639d 100644 --- a/crates/pgt_workspace/src/workspace/server/document.rs +++ b/crates/pgt_workspace/src/workspace/server/document.rs @@ -1,63 +1,338 @@ -use pgt_diagnostics::{Diagnostic, DiagnosticExt, Severity, serde::Diagnostic as SDiagnostic}; +use std::sync::Arc; + +use pgt_diagnostics::{Diagnostic, DiagnosticExt, serde::Diagnostic as SDiagnostic}; +use pgt_query_ext::diagnostics::SyntaxDiagnostic; use pgt_suppressions::Suppressions; use pgt_text_size::{TextRange, TextSize}; -use super::statement_identifier::{StatementId, StatementIdGenerator}; +use super::{ + annotation::AnnotationStore, + pg_query::PgQueryStore, + sql_function::{SQLFunctionSignature, get_sql_fn_body, get_sql_fn_signature}, + statement_identifier::StatementId, + tree_sitter::TreeSitterStore, +}; + +pub struct Document { + content: String, + version: i32, + ranges: Vec, + diagnostics: Vec, + ast_db: PgQueryStore, + cst_db: TreeSitterStore, + #[allow(dead_code)] + annotation_db: AnnotationStore, + suppressions: Suppressions, +} + +impl Document { + pub fn new(content: String, version: i32) -> Document { + let cst_db = TreeSitterStore::new(); + let ast_db = PgQueryStore::new(); + let annotation_db = AnnotationStore::new(); + let suppressions = Suppressions::from(content.as_str()); + + let (ranges, diagnostics) = split_with_diagnostics(&content, None); -type StatementPos = (StatementId, TextRange); + Document { + ranges, + diagnostics, + content, + version, + ast_db, + cst_db, + annotation_db, + suppressions, + } + } -pub(crate) struct Document { - pub(crate) content: String, - pub(crate) version: i32, + pub fn update_content(&mut self, content: String, version: i32) { + self.content = content; + self.version = version; - pub(super) diagnostics: Vec, - pub(super) suppressions: Suppressions, + let (ranges, diagnostics) = split_with_diagnostics(&self.content, None); - /// List of statements sorted by range.start() - pub(super) positions: Vec, + self.ranges = ranges; + self.diagnostics = diagnostics; + self.suppressions = Suppressions::from(self.content.as_str()); + } + + pub fn suppressions(&self) -> &Suppressions { + &self.suppressions + } + + pub fn get_document_content(&self) -> &str { + &self.content + } + + pub fn 
document_diagnostics(&self) -> &Vec { + &self.diagnostics + } + + pub fn find<'a, M>(&'a self, id: StatementId, mapper: M) -> Option + where + M: StatementMapper<'a>, + { + self.iter_with_filter(mapper, IdFilter::new(id)).next() + } + + pub fn iter<'a, M>(&'a self, mapper: M) -> ParseIterator<'a, M, NoFilter> + where + M: StatementMapper<'a>, + { + self.iter_with_filter(mapper, NoFilter) + } + + pub fn iter_with_filter<'a, M, F>(&'a self, mapper: M, filter: F) -> ParseIterator<'a, M, F> + where + M: StatementMapper<'a>, + F: StatementFilter<'a>, + { + ParseIterator::new(self, mapper, filter) + } - pub(super) id_generator: StatementIdGenerator, + #[allow(dead_code)] + pub fn count(&self) -> usize { + self.iter(DefaultMapper).count() + } } -impl Document { - pub(crate) fn new(content: String, version: i32) -> Self { - let mut id_generator = StatementIdGenerator::new(); +pub trait StatementMapper<'a> { + type Output; - let (ranges, diagnostics) = split_with_diagnostics(&content, None); + fn map(&self, parsed: &'a Document, id: StatementId, range: TextRange) -> Self::Output; +} - let suppressions = Suppressions::from(content.as_str()); +pub trait StatementFilter<'a> { + fn predicate(&self, id: &StatementId, range: &TextRange, content: &str) -> bool; +} + +pub struct ParseIterator<'a, M, F> { + parser: &'a Document, + mapper: M, + filter: F, + ranges: std::slice::Iter<'a, TextRange>, + pending_sub_statements: Vec<(StatementId, TextRange, String)>, +} +impl<'a, M, F> ParseIterator<'a, M, F> { + pub fn new(parser: &'a Document, mapper: M, filter: F) -> Self { Self { - positions: ranges - .into_iter() - .map(|range| (id_generator.next(), range)) - .collect(), - content, - version, - diagnostics, - id_generator, - suppressions, + parser, + mapper, + filter, + ranges: parser.ranges.iter(), + pending_sub_statements: Vec::new(), } } +} + +impl<'a, M, F> Iterator for ParseIterator<'a, M, F> +where + M: StatementMapper<'a>, + F: StatementFilter<'a>, +{ + type Item = M::Output; - pub fn statement_content(&self, id: &StatementId) -> Option<&str> { - self.positions - .iter() - .find(|(statement_id, _)| statement_id == id) - .map(|(_, range)| &self.content[*range]) + fn next(&mut self) -> Option { + // First check if we have any pending sub-statements to process + if let Some((id, range, content)) = self.pending_sub_statements.pop() { + if self.filter.predicate(&id, &range, content.as_str()) { + return Some(self.mapper.map(self.parser, id, range)); + } + // If the sub-statement doesn't pass the filter, continue to the next item + return self.next(); + } + + // Process the next top-level statement + let next_range = self.ranges.next(); + + if let Some(range) = next_range { + // If we should include sub-statements and this statement has an AST + + let content = &self.parser.content[*range]; + let root_id = StatementId::new(content); + + if let Ok(ast) = self.parser.ast_db.get_or_cache_ast(&root_id).as_ref() { + // Check if this is a SQL function definition with a body + if let Some(sub_statement) = get_sql_fn_body(ast, content) { + // Add sub-statements to our pending queue + self.pending_sub_statements.push(( + root_id.create_child(&sub_statement.body), + // adjust range to document + sub_statement.range + range.start(), + sub_statement.body.clone(), + )); + } + } + + // Return the current statement if it passes the filter + if self.filter.predicate(&root_id, range, content) { + return Some(self.mapper.map(self.parser, root_id, *range)); + } + + // If the current statement doesn't pass the filter, try the 
next one + return self.next(); + } + + None } +} + +pub struct DefaultMapper; +impl<'a> StatementMapper<'a> for DefaultMapper { + type Output = (StatementId, TextRange, String); - /// Returns true if there is at least one fatal error in the diagnostics - /// - /// A fatal error is a scan error that prevents the document from being used - pub(super) fn has_fatal_error(&self) -> bool { - self.diagnostics - .iter() - .any(|d| d.severity() == Severity::Fatal) + fn map(&self, _parser: &'a Document, id: StatementId, range: TextRange) -> Self::Output { + (id.clone(), range, id.content().to_string()) } +} - pub fn iter(&self) -> StatementIterator<'_> { - StatementIterator::new(self) +pub struct ExecuteStatementMapper; +impl<'a> StatementMapper<'a> for ExecuteStatementMapper { + type Output = ( + StatementId, + TextRange, + String, + Option, + ); + + fn map(&self, parser: &'a Document, id: StatementId, range: TextRange) -> Self::Output { + let ast_result = parser.ast_db.get_or_cache_ast(&id); + let ast_option = match &*ast_result { + Ok(node) => Some(node.clone()), + Err(_) => None, + }; + + (id.clone(), range, id.content().to_string(), ast_option) + } +} + +pub struct AsyncDiagnosticsMapper; +impl<'a> StatementMapper<'a> for AsyncDiagnosticsMapper { + type Output = ( + StatementId, + TextRange, + Option, + Arc, + Option, + ); + + fn map(&self, parser: &'a Document, id: StatementId, range: TextRange) -> Self::Output { + let ast_result = parser.ast_db.get_or_cache_ast(&id); + + let ast_option = match &*ast_result { + Ok(node) => Some(node.clone()), + Err(_) => None, + }; + + let cst_result = parser.cst_db.get_or_cache_tree(&id); + + let sql_fn_sig = id.parent().and_then(|root| { + let ast_option = parser.ast_db.get_or_cache_ast(&root).as_ref().clone().ok(); + + let ast_option = ast_option.as_ref()?; + + get_sql_fn_signature(ast_option) + }); + + (id.clone(), range, ast_option, cst_result, sql_fn_sig) + } +} + +pub struct SyncDiagnosticsMapper; +impl<'a> StatementMapper<'a> for SyncDiagnosticsMapper { + type Output = ( + StatementId, + TextRange, + Option, + Option, + ); + + fn map(&self, parser: &'a Document, id: StatementId, range: TextRange) -> Self::Output { + let ast_result = parser.ast_db.get_or_cache_ast(&id); + + let (ast_option, diagnostics) = match &*ast_result { + Ok(node) => (Some(node.clone()), None), + Err(diag) => (None, Some(diag.clone())), + }; + + (id.clone(), range, ast_option, diagnostics) + } +} + +pub struct GetCompletionsMapper; +impl<'a> StatementMapper<'a> for GetCompletionsMapper { + type Output = (StatementId, TextRange, String, Arc); + + fn map(&self, parser: &'a Document, id: StatementId, range: TextRange) -> Self::Output { + let tree = parser.cst_db.get_or_cache_tree(&id); + (id.clone(), range, id.content().to_string(), tree) + } +} + +/* + * We allow an offset of two for the statement: + * + * select * from | <-- we want to suggest items for the next token. + * + * However, if the current statement is terminated by a semicolon, we don't apply any + * offset. + * + * select * from users; | <-- no autocompletions here. 
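 * (Editor's note, an illustrative walk-through added for clarity: with the
 * unterminated statement "select * from" at range 0..13, the filter below
 * measures 0..15, so a cursor at offset 14 still gets completions; with the
 * terminated "select * from users;" at range 0..20, no expansion happens and
 * a cursor at offset 21 matches nothing.)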
+ */ +pub struct GetCompletionsFilter { + pub cursor_position: TextSize, +} +impl StatementFilter<'_> for GetCompletionsFilter { + fn predicate(&self, _id: &StatementId, range: &TextRange, content: &str) -> bool { + let is_terminated_by_semi = content.chars().last().is_some_and(|c| c == ';'); + + let measuring_range = if is_terminated_by_semi { + *range + } else { + range.checked_expand_end(2.into()).unwrap_or(*range) + }; + measuring_range.contains(self.cursor_position) + } +} + +pub struct NoFilter; +impl StatementFilter<'_> for NoFilter { + fn predicate(&self, _id: &StatementId, _range: &TextRange, _content: &str) -> bool { + true + } +} + +pub struct CursorPositionFilter { + pos: TextSize, +} + +impl CursorPositionFilter { + pub fn new(pos: TextSize) -> Self { + Self { pos } + } +} + +impl StatementFilter<'_> for CursorPositionFilter { + fn predicate(&self, _id: &StatementId, range: &TextRange, _content: &str) -> bool { + range.contains(self.pos) + } +} + +pub struct IdFilter { + id: StatementId, +} + +impl IdFilter { + pub fn new(id: StatementId) -> Self { + Self { id } + } +} + +impl StatementFilter<'_> for IdFilter { + fn predicate(&self, id: &StatementId, _range: &TextRange, _content: &str) -> bool { + *id == self.id } } @@ -85,29 +360,23 @@ pub(crate) fn split_with_diagnostics( ) } -pub struct StatementIterator<'a> { - document: &'a Document, - positions: std::slice::Iter<'a, StatementPos>, -} +#[cfg(test)] +mod tests { + use super::*; -impl<'a> StatementIterator<'a> { - pub fn new(document: &'a Document) -> Self { - Self { - document, - positions: document.positions.iter(), - } - } -} + #[test] + fn sql_function_body() { + let input = "CREATE FUNCTION add(test0 integer, test1 integer) RETURNS integer + AS 'select $1 + $2;' + LANGUAGE SQL + IMMUTABLE + RETURNS NULL ON NULL INPUT;"; -impl<'a> Iterator for StatementIterator<'a> { - type Item = (StatementId, TextRange, &'a str); + let d = Document::new(input.to_string(), 1); - fn next(&mut self) -> Option { - self.positions.next().map(|(id, range)| { - let range = *range; - let doc = self.document; - let id = id.clone(); - (id, range, &doc.content[range]) - }) + let stmts = d.iter(DefaultMapper).collect::>(); + + assert_eq!(stmts.len(), 2); + assert_eq!(stmts[1].2, "select $1 + $2;"); } } diff --git a/crates/pgt_workspace/src/workspace/server/parsed_document.rs b/crates/pgt_workspace/src/workspace/server/parsed_document.rs deleted file mode 100644 index 8b515128..00000000 --- a/crates/pgt_workspace/src/workspace/server/parsed_document.rs +++ /dev/null @@ -1,447 +0,0 @@ -use std::sync::Arc; - -use pgt_diagnostics::serde::Diagnostic as SDiagnostic; -use pgt_fs::PgTPath; -use pgt_query_ext::diagnostics::SyntaxDiagnostic; -use pgt_suppressions::Suppressions; -use pgt_text_size::{TextRange, TextSize}; - -use crate::workspace::ChangeFileParams; - -use super::{ - annotation::AnnotationStore, - change::StatementChange, - document::{Document, StatementIterator}, - pg_query::PgQueryStore, - sql_function::{SQLFunctionSignature, get_sql_fn_body, get_sql_fn_signature}, - statement_identifier::StatementId, - tree_sitter::TreeSitterStore, -}; - -pub struct ParsedDocument { - #[allow(dead_code)] - path: PgTPath, - - doc: Document, - ast_db: PgQueryStore, - cst_db: TreeSitterStore, - annotation_db: AnnotationStore, -} - -impl ParsedDocument { - pub fn new(path: PgTPath, content: String, version: i32) -> ParsedDocument { - let doc = Document::new(content, version); - - let cst_db = TreeSitterStore::new(); - let ast_db = PgQueryStore::new(); - let 
annotation_db = AnnotationStore::new(); - - doc.iter().for_each(|(stmt, _, content)| { - cst_db.add_statement(&stmt, content); - }); - - ParsedDocument { - path, - doc, - ast_db, - cst_db, - annotation_db, - } - } - - /// Applies a change to the document and updates the CST and AST databases accordingly. - /// - /// Note that only tree-sitter cares about statement modifications vs remove + add. - /// Hence, we just clear the AST for the old statements and lazily load them when requested. - /// - /// * `params`: ChangeFileParams - The parameters for the change to be applied. - pub fn apply_change(&mut self, params: ChangeFileParams) { - for c in &self.doc.apply_file_change(¶ms) { - match c { - StatementChange::Added(added) => { - tracing::debug!( - "Adding statement: id:{:?}, text:{:?}", - added.stmt, - added.text - ); - self.cst_db.add_statement(&added.stmt, &added.text); - } - StatementChange::Deleted(s) => { - tracing::debug!("Deleting statement: id {:?}", s,); - self.cst_db.remove_statement(s); - self.ast_db.clear_statement(s); - self.annotation_db.clear_statement(s); - } - StatementChange::Modified(s) => { - tracing::debug!( - "Modifying statement with id {:?} (new id {:?}). Range {:?}, Changed from '{:?}' to '{:?}', changed text: {:?}", - s.old_stmt, - s.new_stmt, - s.change_range, - s.old_stmt_text, - s.new_stmt_text, - s.change_text - ); - - self.cst_db.modify_statement(s); - self.ast_db.clear_statement(&s.old_stmt); - self.annotation_db.clear_statement(&s.old_stmt); - } - } - } - } - - pub fn get_document_content(&self) -> &str { - &self.doc.content - } - - pub fn document_diagnostics(&self) -> &Vec { - &self.doc.diagnostics - } - - pub fn document_suppressions(&self) -> &Suppressions { - &self.doc.suppressions - } - - pub fn find<'a, M>(&'a self, id: StatementId, mapper: M) -> Option - where - M: StatementMapper<'a>, - { - self.iter_with_filter(mapper, IdFilter::new(id)).next() - } - - pub fn iter<'a, M>(&'a self, mapper: M) -> ParseIterator<'a, M, NoFilter> - where - M: StatementMapper<'a>, - { - self.iter_with_filter(mapper, NoFilter) - } - - pub fn iter_with_filter<'a, M, F>(&'a self, mapper: M, filter: F) -> ParseIterator<'a, M, F> - where - M: StatementMapper<'a>, - F: StatementFilter<'a>, - { - ParseIterator::new(self, mapper, filter) - } - - #[allow(dead_code)] - pub fn count(&self) -> usize { - self.iter(DefaultMapper).count() - } -} - -pub trait StatementMapper<'a> { - type Output; - - fn map( - &self, - parsed: &'a ParsedDocument, - id: StatementId, - range: TextRange, - content: &str, - ) -> Self::Output; -} - -pub trait StatementFilter<'a> { - fn predicate(&self, id: &StatementId, range: &TextRange, content: &str) -> bool; -} - -pub struct ParseIterator<'a, M, F> { - parser: &'a ParsedDocument, - statements: StatementIterator<'a>, - mapper: M, - filter: F, - pending_sub_statements: Vec<(StatementId, TextRange, String)>, -} - -impl<'a, M, F> ParseIterator<'a, M, F> { - pub fn new(parser: &'a ParsedDocument, mapper: M, filter: F) -> Self { - Self { - parser, - statements: parser.doc.iter(), - mapper, - filter, - pending_sub_statements: Vec::new(), - } - } -} - -impl<'a, M, F> Iterator for ParseIterator<'a, M, F> -where - M: StatementMapper<'a>, - F: StatementFilter<'a>, -{ - type Item = M::Output; - - fn next(&mut self) -> Option { - // First check if we have any pending sub-statements to process - if let Some((id, range, content)) = self.pending_sub_statements.pop() { - if self.filter.predicate(&id, &range, content.as_str()) { - return Some(self.mapper.map(self.parser, 
id, range, &content)); - } - // If the sub-statement doesn't pass the filter, continue to the next item - return self.next(); - } - - // Process the next top-level statement - let next_statement = self.statements.next(); - - if let Some((root_id, range, content)) = next_statement { - // If we should include sub-statements and this statement has an AST - let content_owned = content.to_string(); - if let Ok(ast) = self - .parser - .ast_db - .get_or_cache_ast(&root_id, &content_owned) - .as_ref() - { - // Check if this is a SQL function definition with a body - if let Some(sub_statement) = get_sql_fn_body(ast, &content_owned) { - // Add sub-statements to our pending queue - self.pending_sub_statements.push(( - root_id.create_child(), - // adjust range to document - sub_statement.range + range.start(), - sub_statement.body.clone(), - )); - } - } - - // Return the current statement if it passes the filter - if self.filter.predicate(&root_id, &range, content) { - return Some(self.mapper.map(self.parser, root_id, range, content)); - } - - // If the current statement doesn't pass the filter, try the next one - return self.next(); - } - - None - } -} - -pub struct DefaultMapper; -impl<'a> StatementMapper<'a> for DefaultMapper { - type Output = (StatementId, TextRange, String); - - fn map( - &self, - _parser: &'a ParsedDocument, - id: StatementId, - range: TextRange, - content: &str, - ) -> Self::Output { - (id, range, content.to_string()) - } -} - -pub struct ExecuteStatementMapper; -impl<'a> StatementMapper<'a> for ExecuteStatementMapper { - type Output = ( - StatementId, - TextRange, - String, - Option, - ); - - fn map( - &self, - parser: &'a ParsedDocument, - id: StatementId, - range: TextRange, - content: &str, - ) -> Self::Output { - let ast_result = parser.ast_db.get_or_cache_ast(&id, content); - let ast_option = match &*ast_result { - Ok(node) => Some(node.clone()), - Err(_) => None, - }; - - (id, range, content.to_string(), ast_option) - } -} - -pub struct AsyncDiagnosticsMapper; -impl<'a> StatementMapper<'a> for AsyncDiagnosticsMapper { - type Output = ( - StatementId, - TextRange, - String, - Option, - Arc, - Option, - ); - - fn map( - &self, - parser: &'a ParsedDocument, - id: StatementId, - range: TextRange, - content: &str, - ) -> Self::Output { - let content_owned = content.to_string(); - let ast_result = parser.ast_db.get_or_cache_ast(&id, &content_owned); - - let ast_option = match &*ast_result { - Ok(node) => Some(node.clone()), - Err(_) => None, - }; - - let cst_result = parser.cst_db.get_or_cache_tree(&id, &content_owned); - - let sql_fn_sig = id - .parent() - .and_then(|root| { - let c = parser.doc.statement_content(&root)?; - Some((root, c)) - }) - .and_then(|(root, c)| { - let ast_option = parser - .ast_db - .get_or_cache_ast(&root, c) - .as_ref() - .clone() - .ok(); - - let ast_option = ast_option.as_ref()?; - - get_sql_fn_signature(ast_option) - }); - - (id, range, content_owned, ast_option, cst_result, sql_fn_sig) - } -} - -pub struct SyncDiagnosticsMapper; -impl<'a> StatementMapper<'a> for SyncDiagnosticsMapper { - type Output = ( - StatementId, - TextRange, - Option, - Option, - ); - - fn map( - &self, - parser: &'a ParsedDocument, - id: StatementId, - range: TextRange, - content: &str, - ) -> Self::Output { - let ast_result = parser.ast_db.get_or_cache_ast(&id, content); - - let (ast_option, diagnostics) = match &*ast_result { - Ok(node) => (Some(node.clone()), None), - Err(diag) => (None, Some(diag.clone())), - }; - - (id, range, ast_option, diagnostics) - } -} - -pub 
struct GetCompletionsMapper; -impl<'a> StatementMapper<'a> for GetCompletionsMapper { - type Output = (StatementId, TextRange, String, Arc); - - fn map( - &self, - parser: &'a ParsedDocument, - id: StatementId, - range: TextRange, - content: &str, - ) -> Self::Output { - let tree = parser.cst_db.get_or_cache_tree(&id, content); - (id, range, content.into(), tree) - } -} - -/* - * We allow an offset of two for the statement: - * - * select * from | <-- we want to suggest items for the next token. - * - * However, if the current statement is terminated by a semicolon, we don't apply any - * offset. - * - * select * from users; | <-- no autocompletions here. - */ -pub struct GetCompletionsFilter { - pub cursor_position: TextSize, -} -impl StatementFilter<'_> for GetCompletionsFilter { - fn predicate(&self, _id: &StatementId, range: &TextRange, content: &str) -> bool { - let is_terminated_by_semi = content.chars().last().is_some_and(|c| c == ';'); - - let measuring_range = if is_terminated_by_semi { - *range - } else { - range.checked_expand_end(2.into()).unwrap_or(*range) - }; - measuring_range.contains(self.cursor_position) - } -} - -pub struct NoFilter; -impl StatementFilter<'_> for NoFilter { - fn predicate(&self, _id: &StatementId, _range: &TextRange, _content: &str) -> bool { - true - } -} - -pub struct CursorPositionFilter { - pos: TextSize, -} - -impl CursorPositionFilter { - pub fn new(pos: TextSize) -> Self { - Self { pos } - } -} - -impl StatementFilter<'_> for CursorPositionFilter { - fn predicate(&self, _id: &StatementId, range: &TextRange, _content: &str) -> bool { - range.contains(self.pos) - } -} - -pub struct IdFilter { - id: StatementId, -} - -impl IdFilter { - pub fn new(id: StatementId) -> Self { - Self { id } - } -} - -impl StatementFilter<'_> for IdFilter { - fn predicate(&self, id: &StatementId, _range: &TextRange, _content: &str) -> bool { - *id == self.id - } -} - -#[cfg(test)] -mod tests { - use super::*; - - use pgt_fs::PgTPath; - - #[test] - fn sql_function_body() { - let input = "CREATE FUNCTION add(test0 integer, test1 integer) RETURNS integer - AS 'select $1 + $2;' - LANGUAGE SQL - IMMUTABLE - RETURNS NULL ON NULL INPUT;"; - - let path = PgTPath::new("test.sql"); - - let d = ParsedDocument::new(path, input.to_string(), 0); - - let stmts = d.iter(DefaultMapper).collect::>(); - - assert_eq!(stmts.len(), 2); - assert_eq!(stmts[1].2, "select $1 + $2;"); - } -} diff --git a/crates/pgt_workspace/src/workspace/server/pg_query.rs b/crates/pgt_workspace/src/workspace/server/pg_query.rs index e5c0cac8..45af96e7 100644 --- a/crates/pgt_workspace/src/workspace/server/pg_query.rs +++ b/crates/pgt_workspace/src/workspace/server/pg_query.rs @@ -17,22 +17,13 @@ impl PgQueryStore { pub fn get_or_cache_ast( &self, statement: &StatementId, - content: &str, ) -> Arc> { if let Some(existing) = self.db.get(statement).map(|x| x.clone()) { return existing; } - let r = Arc::new(pgt_query_ext::parse(content).map_err(SyntaxDiagnostic::from)); + let r = Arc::new(pgt_query_ext::parse(statement.content()).map_err(SyntaxDiagnostic::from)); self.db.insert(statement.clone(), r.clone()); r } - - pub fn clear_statement(&self, id: &StatementId) { - self.db.remove(id); - - if let Some(child_id) = id.get_child_id() { - self.db.remove(&child_id); - } - } } diff --git a/crates/pgt_workspace/src/workspace/server/statement_identifier.rs b/crates/pgt_workspace/src/workspace/server/statement_identifier.rs index 627ff261..59259690 100644 --- 
a/crates/pgt_workspace/src/workspace/server/statement_identifier.rs +++ b/crates/pgt_workspace/src/workspace/server/statement_identifier.rs @@ -1,24 +1,6 @@ -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] -pub struct RootId { - inner: usize, -} +use std::sync::Arc; -#[cfg(test)] -impl From for usize { - fn from(val: RootId) -> Self { - val.inner - } -} - -#[cfg(test)] -impl From for RootId { - fn from(inner: usize) -> Self { - RootId { inner } - } -} +use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] #[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] @@ -35,91 +17,82 @@ impl From for RootId { /// ``` /// /// For now, we only support SQL functions – no complex, nested statements. -/// -/// An SQL function only ever has ONE child, that's why the inner `RootId` of a `Root` -/// is the same as the one of its `Child`. pub enum StatementId { - Root(RootId), - // StatementId is the same as the root id since we can only have a single sql function body per Root - Child(RootId), + Root { + content: Arc, + }, + Child { + content: Arc, // child's actual content + parent_content: Arc, // parent's content for lookups + }, } +// this is only here for strum to work on the code actions enum impl Default for StatementId { fn default() -> Self { - StatementId::Root(RootId { inner: 0 }) + StatementId::Root { content: "".into() } } } impl StatementId { - pub fn raw(&self) -> usize { - match self { - StatementId::Root(s) => s.inner, - StatementId::Child(s) => s.inner, + pub fn new(statement: &str) -> Self { + StatementId::Root { + content: statement.into(), } } - pub fn is_root(&self) -> bool { - matches!(self, StatementId::Root(_)) - } - - pub fn is_child(&self) -> bool { - matches!(self, StatementId::Child(_)) - } - - pub fn is_child_of(&self, maybe_parent: &StatementId) -> bool { + /// Use this if you need to create a matching `StatementId::Child` for `Root`. + /// You cannot create a `Child` of a `Child`. + /// Note: This method requires the child content to be provided. + pub fn create_child(&self, child_content: &str) -> StatementId { match self { - StatementId::Root(_) => false, - StatementId::Child(child_root) => match maybe_parent { - StatementId::Root(parent_rood) => child_root == parent_rood, - // TODO: can we have multiple nested statements? - StatementId::Child(_) => false, + StatementId::Root { content } => StatementId::Child { + content: child_content.into(), + parent_content: content.clone(), }, + StatementId::Child { .. } => panic!("Cannot create child from a child statement id"), } } - pub fn parent(&self) -> Option { + pub fn content(&self) -> &str { match self { - StatementId::Root(_) => None, - StatementId::Child(id) => Some(StatementId::Root(id.clone())), + StatementId::Root { content } => content, + StatementId::Child { content, .. } => content, } } -} -/// Helper struct to generate unique statement ids -pub struct StatementIdGenerator { - next_id: usize, -} + /// Returns the parent content if this is a child statement + pub fn parent_content(&self) -> Option<&str> { + match self { + StatementId::Root { .. } => None, + StatementId::Child { parent_content, .. } => Some(parent_content), + } + } -impl StatementIdGenerator { - pub fn new() -> Self { - Self { next_id: 0 } + pub fn is_root(&self) -> bool { + matches!(self, StatementId::Root { .. 
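The refactor above makes statement identity content-addressed: two ids are equal exactly when their `Arc<str>` contents are equal, and a child keeps its parent's content around for lookups. A reduced sketch of that shape (a simplified local enum, not the real `StatementId`):

```rust
use std::sync::Arc;

#[derive(Debug, Clone, PartialEq, Eq)]
enum Id {
    Root { content: Arc<str> },
    Child { content: Arc<str>, parent_content: Arc<str> },
}

fn main() {
    let root = Id::Root { content: "create function f() ... $$select 1;$$".into() };

    // Deriving a child clones the parent's Arc pointer, not the string data.
    let child = match &root {
        Id::Root { content } => Id::Child {
            content: "select 1;".into(),
            parent_content: Arc::clone(content),
        },
        Id::Child { .. } => unreachable!("children cannot have children"),
    };

    // Identity is purely content-based: rebuilding the same text yields an equal id.
    assert_eq!(root, Id::Root { content: "create function f() ... $$select 1;$$".into() });

    if let Id::Child { parent_content, .. } = &child {
        assert_eq!(parent_content.as_ref(), "create function f() ... $$select 1;$$");
    }
}
```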
}) } - pub fn next(&mut self) -> StatementId { - let id = self.next_id; - self.next_id += 1; - StatementId::Root(RootId { inner: id }) + pub fn is_child(&self) -> bool { + matches!(self, StatementId::Child { .. }) } -} -impl StatementId { - /// Use this to get the matching `StatementId::Child` for - /// a `StatementId::Root`. - /// If the `StatementId` was already a `Child`, this will return `None`. - /// It is not guaranteed that the `Root` actually has a `Child` statement in the workspace. - pub fn get_child_id(&self) -> Option { + pub fn is_child_of(&self, maybe_parent: &StatementId) -> bool { match self { - StatementId::Root(id) => Some(StatementId::Child(RootId { inner: id.inner })), - StatementId::Child(_) => None, + StatementId::Root { .. } => false, + StatementId::Child { parent_content, .. } => match maybe_parent { + StatementId::Root { content } => parent_content == content, + StatementId::Child { .. } => false, + }, } } - /// Use this if you need to create a matching `StatementId::Child` for `Root`. - /// You cannot create a `Child` of a `Child`. - pub fn create_child(&self) -> StatementId { + pub fn parent(&self) -> Option { match self { - StatementId::Root(id) => StatementId::Child(RootId { inner: id.inner }), - StatementId::Child(_) => panic!("Cannot create child from a child statement id"), + StatementId::Root { .. } => None, + StatementId::Child { parent_content, .. } => Some(StatementId::Root { + content: parent_content.clone(), + }), } } } diff --git a/crates/pgt_workspace/src/workspace/server/tree_sitter.rs b/crates/pgt_workspace/src/workspace/server/tree_sitter.rs index a8932535..2cd73133 100644 --- a/crates/pgt_workspace/src/workspace/server/tree_sitter.rs +++ b/crates/pgt_workspace/src/workspace/server/tree_sitter.rs @@ -1,9 +1,8 @@ use std::sync::{Arc, Mutex}; use dashmap::DashMap; -use tree_sitter::InputEdit; -use super::{change::ModifiedStatement, statement_identifier::StatementId}; +use super::statement_identifier::StatementId; pub struct TreeSitterStore { db: DashMap>, @@ -23,139 +22,15 @@ impl TreeSitterStore { } } - pub fn get_or_cache_tree( - &self, - statement: &StatementId, - content: &str, - ) -> Arc { + pub fn get_or_cache_tree(&self, statement: &StatementId) -> Arc { if let Some(existing) = self.db.get(statement).map(|x| x.clone()) { return existing; } let mut parser = self.parser.lock().expect("Failed to lock parser"); - let tree = Arc::new(parser.parse(content, None).unwrap()); + let tree = Arc::new(parser.parse(statement.content(), None).unwrap()); self.db.insert(statement.clone(), tree.clone()); tree } - - pub fn add_statement(&self, statement: &StatementId, content: &str) { - let mut parser = self.parser.lock().expect("Failed to lock parser"); - let tree = parser.parse(content, None).unwrap(); - self.db.insert(statement.clone(), Arc::new(tree)); - } - - pub fn remove_statement(&self, id: &StatementId) { - self.db.remove(id); - - if let Some(child_id) = id.get_child_id() { - self.db.remove(&child_id); - } - } - - pub fn modify_statement(&self, change: &ModifiedStatement) { - let old = self.db.remove(&change.old_stmt); - - if old.is_none() { - self.add_statement(&change.new_stmt, &change.change_text); - return; - } - - // we clone the three for now, lets see if that is sufficient or if we need to mutate the - // original tree instead but that will require some kind of locking - let mut tree = old.unwrap().1.as_ref().clone(); - - let edit = edit_from_change( - change.old_stmt_text.as_str(), - usize::from(change.change_range.start()), - 
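For context on the `edit_from_change` helper being removed below: tree-sitter's `InputEdit` wants byte offsets, while the change ranges here are character positions, hence the `char_indices` walk. The distinction only matters once multibyte characters appear; a minimal standalone demonstration:

```rust
// Mirrors the char→byte conversion inside `edit_from_change`.
fn char_to_byte(text: &str, char_pos: usize) -> usize {
    text.char_indices()
        .nth(char_pos)
        .map(|(byte_idx, _)| byte_idx)
        .unwrap_or(text.len()) // position at the very end of the text
}

fn main() {
    let text = "select 'ä';";
    // Up to the 'ä', char and byte positions agree...
    assert_eq!(char_to_byte(text, 8), 8); // the 'ä' itself
    // ...but 'ä' is one char and two UTF-8 bytes, so they diverge after it.
    assert_eq!(char_to_byte(text, 9), 10); // the closing quote
}
```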
usize::from(change.change_range.end()), - change.change_text.as_str(), - ); - - tree.edit(&edit); - - let mut parser = self.parser.lock().expect("Failed to lock parser"); - // todo handle error - self.db.insert( - change.new_stmt.clone(), - Arc::new(parser.parse(&change.new_stmt_text, Some(&tree)).unwrap()), - ); - } -} - -// Converts character positions and replacement text into a tree-sitter InputEdit -pub(crate) fn edit_from_change( - text: &str, - start_char: usize, - end_char: usize, - replacement_text: &str, -) -> InputEdit { - let mut start_byte = 0; - let mut end_byte = 0; - let mut chars_counted = 0; - - let mut line = 0; - let mut current_line_char_start = 0; // Track start of the current line in characters - let mut column_start = 0; - let mut column_end = 0; - - // Find the byte positions corresponding to the character positions - for (idx, c) in text.char_indices() { - if chars_counted == start_char { - start_byte = idx; - column_start = chars_counted - current_line_char_start; - } - if chars_counted == end_char { - end_byte = idx; - column_end = chars_counted - current_line_char_start; - break; // Found both start and end - } - if c == '\n' { - line += 1; - current_line_char_start = chars_counted + 1; // Next character starts a new line - } - chars_counted += 1; - } - - // Handle case where end_char is at the end of the text - if end_char == chars_counted && end_byte == 0 { - end_byte = text.len(); - column_end = chars_counted - current_line_char_start; - } - - let start_point = tree_sitter::Point::new(line, column_start); - let old_end_point = tree_sitter::Point::new(line, column_end); - - // Calculate the new end byte after the edit - let new_end_byte = start_byte + replacement_text.len(); - - // Calculate the new end position - let new_lines = replacement_text.matches('\n').count(); - let last_line_length = if new_lines > 0 { - replacement_text - .split('\n') - .next_back() - .unwrap_or("") - .chars() - .count() - } else { - replacement_text.chars().count() - }; - - let new_end_position = if new_lines > 0 { - // If there are new lines, the row is offset by the number of new lines, and the column is the length of the last line - tree_sitter::Point::new(start_point.row + new_lines, last_line_length) - } else { - // If there are no new lines, the row remains the same, and the column is offset by the length of the insertion - tree_sitter::Point::new(start_point.row, start_point.column + last_line_length) - }; - - InputEdit { - start_byte, - old_end_byte: end_byte, - new_end_byte, - start_position: start_point, - old_end_position: old_end_point, - new_end_position, - } } From c06ab001f9112ab14f312213cf3d6b4c3ad48eab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Sat, 12 Jul 2025 20:45:03 +0200 Subject: [PATCH 087/114] refactor: simplify caches (#448) --- Cargo.lock | 22 ++++++++- crates/pgt_workspace/Cargo.toml | 2 +- crates/pgt_workspace/src/workspace/server.rs | 42 +++++++++-------- .../src/workspace/server/annotation.rs | 23 ++++++--- .../workspace/server/connection_manager.rs | 28 ++++++++--- .../src/workspace/server/pg_query.rs | 23 ++++++--- .../workspace/server/schema_cache_manager.rs | 47 ++++++++++--------- .../src/workspace/server/tree_sitter.rs | 30 +++++++++--- 8 files changed, 147 insertions(+), 70 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a89dbfbe..4da985a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1454,6 +1454,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -1731,6 +1737,11 @@ name = "hashbrown" version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", +] [[package]] name = "hashlink" @@ -2253,6 +2264,15 @@ dependencies = [ "logos-codegen", ] +[[package]] +name = "lru" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +dependencies = [ + "hashbrown 0.15.2", +] + [[package]] name = "lsp-types" version = "0.94.1" @@ -3066,10 +3086,10 @@ dependencies = [ "biome_js_factory", "biome_js_syntax", "biome_rowan", - "dashmap 5.5.3", "futures", "globset", "ignore", + "lru", "pgt_analyse", "pgt_analyser", "pgt_completions", diff --git a/crates/pgt_workspace/Cargo.toml b/crates/pgt_workspace/Cargo.toml index 6b0cc065..f535e505 100644 --- a/crates/pgt_workspace/Cargo.toml +++ b/crates/pgt_workspace/Cargo.toml @@ -13,9 +13,9 @@ version = "0.0.0" [dependencies] biome_deserialize = "0.6.0" -dashmap = "5.5.3" futures = "0.3.31" globset = "0.4.16" +lru = "0.12" ignore = { workspace = true } pgt_analyse = { workspace = true, features = ["serde"] } diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs index 81aa99ab..f7ace3c2 100644 --- a/crates/pgt_workspace/src/workspace/server.rs +++ b/crates/pgt_workspace/src/workspace/server.rs @@ -1,4 +1,5 @@ use std::{ + collections::HashMap, fs, panic::RefUnwindSafe, path::{Path, PathBuf}, @@ -8,7 +9,6 @@ use std::{ use analyser::AnalyserVisitorBuilder; use async_helper::run_async; use connection_manager::ConnectionManager; -use dashmap::DashMap; use document::{ AsyncDiagnosticsMapper, CursorPositionFilter, DefaultMapper, Document, ExecuteStatementMapper, SyncDiagnosticsMapper, @@ -67,7 +67,7 @@ pub(super) struct WorkspaceServer { /// Stores the schema cache for this workspace schema_cache: SchemaCacheManager, - documents: DashMap, + documents: RwLock>, connection: ConnectionManager, } @@ -89,7 +89,7 @@ impl WorkspaceServer { pub(crate) fn new() -> Self { Self { settings: RwLock::default(), - documents: DashMap::default(), + documents: RwLock::new(HashMap::new()), schema_cache: SchemaCacheManager::new(), connection: ConnectionManager::new(), } @@ -262,7 +262,8 @@ impl Workspace for WorkspaceServer { /// Add a new file to the workspace #[tracing::instrument(level = "info", skip_all, fields(path = params.path.as_path().as_os_str().to_str()), err)] fn open_file(&self, params: OpenFileParams) -> Result<(), WorkspaceError> { - self.documents + let mut documents = self.documents.write().unwrap(); + documents .entry(params.path.clone()) .or_insert_with(|| Document::new(params.content, params.version)); @@ -275,7 +276,8 @@ impl Workspace for WorkspaceServer { /// Remove a file from the workspace fn close_file(&self, params: super::CloseFileParams) -> Result<(), WorkspaceError> { - self.documents + let mut documents = self.documents.write().unwrap(); + documents .remove(¶ms.path) .ok_or_else(WorkspaceError::not_found)?; @@ -288,13 +290,15 @@ impl Workspace for WorkspaceServer { 
version = params.version ), err)] fn change_file(&self, params: super::ChangeFileParams) -> Result<(), WorkspaceError> { - match self.documents.entry(params.path.clone()) { - dashmap::mapref::entry::Entry::Occupied(mut entry) => { + let mut documents = self.documents.write().unwrap(); + + match documents.entry(params.path.clone()) { + std::collections::hash_map::Entry::Occupied(mut entry) => { entry .get_mut() .update_content(params.content, params.version); } - dashmap::mapref::entry::Entry::Vacant(entry) => { + std::collections::hash_map::Entry::Vacant(entry) => { entry.insert(Document::new(params.content, params.version)); } } @@ -307,8 +311,8 @@ impl Workspace for WorkspaceServer { } fn get_file_content(&self, params: GetFileContentParams) -> Result { - let document = self - .documents + let documents = self.documents.read().unwrap(); + let document = documents .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; Ok(document.get_document_content().to_string()) @@ -322,8 +326,8 @@ impl Workspace for WorkspaceServer { &self, params: code_actions::CodeActionsParams, ) -> Result { - let parser = self - .documents + let documents = self.documents.read().unwrap(); + let parser = documents .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; @@ -364,8 +368,8 @@ impl Workspace for WorkspaceServer { &self, params: ExecuteStatementParams, ) -> Result { - let parser = self - .documents + let documents = self.documents.read().unwrap(); + let parser = documents .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; @@ -422,8 +426,8 @@ impl Workspace for WorkspaceServer { } }; - let doc = self - .documents + let documents = self.documents.read().unwrap(); + let doc = documents .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; @@ -607,8 +611,8 @@ impl Workspace for WorkspaceServer { &self, params: GetCompletionsParams, ) -> Result { - let parsed_doc = self - .documents + let documents = self.documents.read().unwrap(); + let parsed_doc = documents .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; @@ -621,7 +625,7 @@ impl Workspace for WorkspaceServer { let schema_cache = self.schema_cache.load(pool)?; - match get_statement_for_completions(&parsed_doc, params.position) { + match get_statement_for_completions(parsed_doc, params.position) { None => { tracing::debug!("No statement found."); Ok(CompletionsResult::default()) diff --git a/crates/pgt_workspace/src/workspace/server/annotation.rs b/crates/pgt_workspace/src/workspace/server/annotation.rs index 20710521..0ff6cc0a 100644 --- a/crates/pgt_workspace/src/workspace/server/annotation.rs +++ b/crates/pgt_workspace/src/workspace/server/annotation.rs @@ -1,17 +1,20 @@ -use std::sync::Arc; +use std::num::NonZeroUsize; +use std::sync::{Arc, Mutex}; -use dashmap::DashMap; +use lru::LruCache; use pgt_lexer::SyntaxKind; use super::statement_identifier::StatementId; +const DEFAULT_CACHE_SIZE: usize = 1000; + #[derive(Debug, Clone, PartialEq, Eq)] pub struct StatementAnnotations { ends_with_semicolon: bool, } pub struct AnnotationStore { - db: DashMap>, + db: Mutex>>, } const WHITESPACE_TOKENS: [SyntaxKind; 6] = [ @@ -25,7 +28,11 @@ const WHITESPACE_TOKENS: [SyntaxKind; 6] = [ impl AnnotationStore { pub fn new() -> AnnotationStore { - AnnotationStore { db: DashMap::new() } + AnnotationStore { + db: Mutex::new(LruCache::new( + NonZeroUsize::new(DEFAULT_CACHE_SIZE).unwrap(), + )), + } } #[allow(unused)] @@ -34,8 +41,10 @@ impl AnnotationStore { statement_id: &StatementId, content: &str, ) -> Arc { - if let Some(existing) = self.db.get(statement_id).map(|x| 
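The converted stores in this patch share one shape: a `Mutex<LruCache<…>>` with a get-or-compute method, so memory stays bounded without the per-entry locking `DashMap` provided. A minimal sketch of that shape (assumes the `lru` crate this patch adds; `String` stands in for the parsed value):

```rust
use std::num::NonZeroUsize;
use std::sync::{Arc, Mutex};

use lru::LruCache;

struct Store {
    db: Mutex<LruCache<String, Arc<String>>>,
}

impl Store {
    fn get_or_compute(&self, key: &str) -> Arc<String> {
        let mut cache = self.db.lock().unwrap();
        if let Some(existing) = cache.get(key) {
            return existing.clone(); // cache hit: cheap Arc clone
        }
        let value = Arc::new(key.to_uppercase()); // stand-in for real parsing work
        cache.put(key.to_string(), value.clone());
        value
    }
}

fn main() {
    let store = Store {
        db: Mutex::new(LruCache::new(NonZeroUsize::new(2).unwrap())),
    };
    let first = store.get_or_compute("select 1;");
    let second = store.get_or_compute("select 1;");
    assert!(Arc::ptr_eq(&first, &second)); // the second call was a hit
}
```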
x.clone()) { - return existing; + let mut cache = self.db.lock().unwrap(); + + if let Some(existing) = cache.get(statement_id) { + return existing.clone(); } let lexed = pgt_lexer::lex(content); @@ -51,7 +60,7 @@ impl AnnotationStore { ends_with_semicolon, }); - self.db.insert(statement_id.clone(), annotations.clone()); + cache.put(statement_id.clone(), annotations.clone()); annotations } diff --git a/crates/pgt_workspace/src/workspace/server/connection_manager.rs b/crates/pgt_workspace/src/workspace/server/connection_manager.rs index d21988f0..8955b378 100644 --- a/crates/pgt_workspace/src/workspace/server/connection_manager.rs +++ b/crates/pgt_workspace/src/workspace/server/connection_manager.rs @@ -1,6 +1,7 @@ +use std::collections::HashMap; +use std::sync::RwLock; use std::time::{Duration, Instant}; -use dashmap::DashMap; use sqlx::{PgPool, Postgres, pool::PoolOptions, postgres::PgConnectOptions}; use crate::settings::DatabaseSettings; @@ -16,13 +17,13 @@ struct CachedPool { #[derive(Default)] pub struct ConnectionManager { - pools: DashMap, + pools: RwLock>, } impl ConnectionManager { pub fn new() -> Self { Self { - pools: DashMap::new(), + pools: RwLock::new(HashMap::new()), } } @@ -41,8 +42,19 @@ impl ConnectionManager { return None; } - // If we have a cached pool, update its last_accessed time and return it - if let Some(mut cached_pool) = self.pools.get_mut(&key) { + // Try read lock first for cache hit + if let Ok(pools) = self.pools.read() { + if let Some(cached_pool) = pools.get(&key) { + // Can't update last_accessed with read lock, but that's okay for occasional misses + return Some(cached_pool.pool.clone()); + } + } + + // Cache miss or need to update timestamp - use write lock + let mut pools = self.pools.write().unwrap(); + + // Double-check after acquiring write lock + if let Some(cached_pool) = pools.get_mut(&key) { cached_pool.last_accessed = Instant::now(); return Some(cached_pool.pool.clone()); } @@ -69,7 +81,7 @@ impl ConnectionManager { idle_timeout: Duration::from_secs(60 * 5), }; - self.pools.insert(key, cached_pool); + pools.insert(key, cached_pool); Some(pool) } @@ -78,8 +90,10 @@ impl ConnectionManager { fn cleanup_idle_pools(&self, ignore_key: &ConnectionKey) { let now = Instant::now(); + let mut pools = self.pools.write().unwrap(); + // Use retain to keep only non-idle connections - self.pools.retain(|key, cached_pool| { + pools.retain(|key, cached_pool| { let idle_duration = now.duration_since(cached_pool.last_accessed); if idle_duration > cached_pool.idle_timeout && key != ignore_key { tracing::debug!( diff --git a/crates/pgt_workspace/src/workspace/server/pg_query.rs b/crates/pgt_workspace/src/workspace/server/pg_query.rs index 45af96e7..6f1fa2c1 100644 --- a/crates/pgt_workspace/src/workspace/server/pg_query.rs +++ b/crates/pgt_workspace/src/workspace/server/pg_query.rs @@ -1,29 +1,38 @@ -use std::sync::Arc; +use std::num::NonZeroUsize; +use std::sync::{Arc, Mutex}; -use dashmap::DashMap; +use lru::LruCache; use pgt_query_ext::diagnostics::*; use super::statement_identifier::StatementId; +const DEFAULT_CACHE_SIZE: usize = 1000; + pub struct PgQueryStore { - db: DashMap>>, + db: Mutex>>>, } impl PgQueryStore { pub fn new() -> PgQueryStore { - PgQueryStore { db: DashMap::new() } + PgQueryStore { + db: Mutex::new(LruCache::new( + NonZeroUsize::new(DEFAULT_CACHE_SIZE).unwrap(), + )), + } } pub fn get_or_cache_ast( &self, statement: &StatementId, ) -> Arc> { - if let Some(existing) = self.db.get(statement).map(|x| x.clone()) { - return existing; + let mut 
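The connection manager above uses the classic read-then-write double-check: a shared read lock for the hot path, then a re-check under the write lock so two racing threads don't both build the value. The same idea, reduced to std-only types:

```rust
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

struct Cache {
    inner: RwLock<HashMap<String, Arc<String>>>,
}

impl Cache {
    fn load(&self, key: &str) -> Arc<String> {
        // Fast path: many readers may hold this lock at once.
        if let Some(v) = self.inner.read().unwrap().get(key) {
            return v.clone();
        }

        // Slow path: exclusive access...
        let mut map = self.inner.write().unwrap();

        // ...but another writer may have filled the slot between our read
        // unlock and write lock, so check again before doing the work.
        if let Some(v) = map.get(key) {
            return v.clone();
        }

        let v = Arc::new(format!("expensive value for {key}")); // stand-in for the real load
        map.insert(key.to_string(), v.clone());
        v
    }
}

fn main() {
    let cache = Cache { inner: RwLock::new(HashMap::new()) };
    let a = cache.load("db-1");
    let b = cache.load("db-1");
    assert!(Arc::ptr_eq(&a, &b));
}
```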
cache = self.db.lock().unwrap(); + + if let Some(existing) = cache.get(statement) { + return existing.clone(); } let r = Arc::new(pgt_query_ext::parse(statement.content()).map_err(SyntaxDiagnostic::from)); - self.db.insert(statement.clone(), r.clone()); + cache.put(statement.clone(), r.clone()); r } } diff --git a/crates/pgt_workspace/src/workspace/server/schema_cache_manager.rs b/crates/pgt_workspace/src/workspace/server/schema_cache_manager.rs index b42dfc34..007ebb78 100644 --- a/crates/pgt_workspace/src/workspace/server/schema_cache_manager.rs +++ b/crates/pgt_workspace/src/workspace/server/schema_cache_manager.rs @@ -1,6 +1,6 @@ -use std::sync::Arc; +use std::collections::HashMap; +use std::sync::{Arc, RwLock}; -use dashmap::DashMap; use pgt_schema_cache::SchemaCache; use sqlx::PgPool; @@ -10,38 +10,41 @@ use super::{async_helper::run_async, connection_key::ConnectionKey}; #[derive(Default)] pub struct SchemaCacheManager { - schemas: DashMap>, + schemas: RwLock>>, } impl SchemaCacheManager { pub fn new() -> Self { Self { - schemas: DashMap::new(), + schemas: RwLock::new(HashMap::new()), } } pub fn load(&self, pool: PgPool) -> Result, WorkspaceError> { let key: ConnectionKey = (&pool).into(); - if let Some(cache) = self.schemas.get(&key) { - return Ok(Arc::clone(&*cache)); + // Try read lock first for cache hit + if let Ok(schemas) = self.schemas.read() { + if let Some(cache) = schemas.get(&key) { + return Ok(Arc::clone(cache)); + } } - let schema_cache = self - .schemas - .entry(key) - .or_try_insert_with::(|| { - // This closure will only be called once per key if multiple threads - // try to access the same key simultaneously - let pool_clone = pool.clone(); - let schema_cache = - Arc::new(run_async( - async move { SchemaCache::load(&pool_clone).await }, - )??); - - Ok(schema_cache) - })?; - - Ok(Arc::clone(&schema_cache)) + // Cache miss - need write lock to insert + let mut schemas = self.schemas.write().unwrap(); + + // Double-check after acquiring write lock + if let Some(cache) = schemas.get(&key) { + return Ok(Arc::clone(cache)); + } + + // Load schema cache + let pool_clone = pool.clone(); + let schema_cache = Arc::new(run_async( + async move { SchemaCache::load(&pool_clone).await }, + )??); + + schemas.insert(key, schema_cache.clone()); + Ok(schema_cache) } } diff --git a/crates/pgt_workspace/src/workspace/server/tree_sitter.rs b/crates/pgt_workspace/src/workspace/server/tree_sitter.rs index 2cd73133..b8f62b63 100644 --- a/crates/pgt_workspace/src/workspace/server/tree_sitter.rs +++ b/crates/pgt_workspace/src/workspace/server/tree_sitter.rs @@ -1,11 +1,14 @@ +use std::num::NonZeroUsize; use std::sync::{Arc, Mutex}; -use dashmap::DashMap; +use lru::LruCache; use super::statement_identifier::StatementId; +const DEFAULT_CACHE_SIZE: usize = 1000; + pub struct TreeSitterStore { - db: DashMap>, + db: Mutex>>, parser: Mutex, } @@ -17,20 +20,35 @@ impl TreeSitterStore { .expect("Error loading sql language"); TreeSitterStore { - db: DashMap::new(), + db: Mutex::new(LruCache::new( + NonZeroUsize::new(DEFAULT_CACHE_SIZE).unwrap(), + )), parser: Mutex::new(parser), } } pub fn get_or_cache_tree(&self, statement: &StatementId) -> Arc { - if let Some(existing) = self.db.get(statement).map(|x| x.clone()) { - return existing; + let mut cache = self.db.lock().expect("Failed to lock cache"); + + if let Some(existing) = cache.get(statement) { + return existing.clone(); } + // Cache miss - drop cache lock, parse, then re-acquire to insert + drop(cache); + let mut parser = 
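What the switch to `LruCache` buys over the old `DashMap`: the map can no longer grow without bound, because inserting beyond capacity evicts the least-recently-used entry. With the capacity shrunk from `DEFAULT_CACHE_SIZE` (1000) to 2 for illustration (again assuming the `lru` crate):

```rust
use std::num::NonZeroUsize;

use lru::LruCache;

fn main() {
    let mut cache: LruCache<&str, &str> = LruCache::new(NonZeroUsize::new(2).unwrap());

    cache.put("select 1;", "tree-1");
    cache.put("select 2;", "tree-2");

    cache.get("select 1;"); // touch: "select 1;" is now the most recently used

    cache.put("select 3;", "tree-3"); // at capacity: evicts "select 2;", the LRU entry

    assert!(cache.contains(&"select 1;"));
    assert!(!cache.contains(&"select 2;"));
    assert!(cache.contains(&"select 3;"));
}
```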
self.parser.lock().expect("Failed to lock parser"); let tree = Arc::new(parser.parse(statement.content(), None).unwrap()); - self.db.insert(statement.clone(), tree.clone()); + drop(parser); + + let mut cache = self.db.lock().expect("Failed to lock cache"); + + // Double-check after re-acquiring lock + if let Some(existing) = cache.get(statement) { + return existing.clone(); + } + cache.put(statement.clone(), tree.clone()); tree } } From 9f480dcff246018a7b1f2cacfc47188b396e418f Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Sun, 13 Jul 2025 14:49:53 +0200 Subject: [PATCH 088/114] chore(docs): fix schema links (#451) --- docs/index.md | 4 +++- postgrestools.jsonc | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/index.md b/docs/index.md index 14490385..842d935e 100644 --- a/docs/index.md +++ b/docs/index.md @@ -90,7 +90,7 @@ You’ll now have a `postgrestools.jsonc` file in your directory: ```json { - "$schema": "https://pgtools.dev/schemas/0.0.0/schema.json", + "$schema": "https://pgtools.dev/latest/schema.json", "vcs": { "enabled": false, "clientKind": "git", @@ -121,6 +121,8 @@ You’ll now have a `postgrestools.jsonc` file in your directory: Make sure to edit the database connection settings to connect to your local development database. To see all options, run `postgrestools --help`. +You can use your current `postgrestools` version instead of "latest" in the `$schema` URL, e.g. `https://pgtools.dev/0.8.1/schema.json`. + ## Usage You can use Postgres Tools via the command line or a using a code editor that supports an LSP. diff --git a/postgrestools.jsonc b/postgrestools.jsonc index 325c7861..47d08c72 100644 --- a/postgrestools.jsonc +++ b/postgrestools.jsonc @@ -1,5 +1,5 @@ { - "$schema": "./docs/schemas/latest/schema.json", + "$schema": "./docs/schema.json", "vcs": { "enabled": false, "clientKind": "git", From 549248dd2ba028e347f7b62520ab9a09348fcad9 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Sun, 13 Jul 2025 15:29:58 +0200 Subject: [PATCH 089/114] chore: fix doc command (#450) --- docs/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.md b/docs/index.md index 842d935e..328e7c77 100644 --- a/docs/index.md +++ b/docs/index.md @@ -157,7 +157,7 @@ postgrestools start Then, every command needs to add the `--use-server` options, e.g.: ```sh -echo "select 1" | biome check --use-server --stdin-file-path=dummy.sql +echo "select 1" | postgrestools check --use-server --stdin-file-path=dummy.sql ``` #### Daemon logs From e3fd6b48b76fa4ef800f11e6efadfb69b0ccf8a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Tue, 15 Jul 2025 08:13:43 +0200 Subject: [PATCH 090/114] feat: plpgsql syntax errors (#452) Screenshot 2025-07-14 at 20 38 14 ToDo: - [x] tests - [x] fix range of returned diagnostic --- crates/pgt_lsp/tests/server.rs | 81 +++++ crates/pgt_query_ext/src/diagnostics.rs | 10 + crates/pgt_query_ext/src/lib.rs | 35 +++ crates/pgt_workspace/src/workspace/server.rs | 8 +- .../src/workspace/server/document.rs | 282 +++++++++++++++++- .../src/workspace/server/function_utils.rs | 57 ++++ .../src/workspace/server/pg_query.rs | 198 +++++++++++- .../src/workspace/server/sql_function.rs | 60 +--- test.sql | 15 + 9 files changed, 680 insertions(+), 66 deletions(-) create mode 100644 crates/pgt_workspace/src/workspace/server/function_utils.rs diff --git a/crates/pgt_lsp/tests/server.rs 
b/crates/pgt_lsp/tests/server.rs index 96ff566c..353e80ae 100644 --- a/crates/pgt_lsp/tests/server.rs +++ b/crates/pgt_lsp/tests/server.rs @@ -1678,3 +1678,84 @@ ALTER TABLE ONLY "public"."campaign_contact_list" Ok(()) } + +#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] +async fn test_plpgsql(test_db: PgPool) -> Result<()> { + let factory = ServerFactory::default(); + let mut fs = MemoryFileSystem::default(); + + let mut conf = PartialConfiguration::init(); + conf.merge_with(PartialConfiguration { + db: Some(PartialDatabaseConfiguration { + database: Some( + test_db + .connect_options() + .get_database() + .unwrap() + .to_string(), + ), + ..Default::default() + }), + ..Default::default() + }); + fs.insert( + url!("postgrestools.jsonc").to_file_path().unwrap(), + serde_json::to_string_pretty(&conf).unwrap(), + ); + + let (service, client) = factory + .create_with_fs(None, DynRef::Owned(Box::new(fs))) + .into_inner(); + + let (stream, sink) = client.split(); + let mut server = Server::new(service); + + let (sender, mut receiver) = channel(CHANNEL_BUFFER_SIZE); + let reader = tokio::spawn(client_handler(stream, sink, sender)); + + server.initialize().await?; + server.initialized().await?; + + server.load_configuration().await?; + + let initial_content = r#" +create function test_organisation_id () + returns setof text + language plpgsql + security invoker + as $$ + declre + v_organisation_id uuid; +begin + return next is(private.organisation_id(), v_organisation_id, 'should return organisation_id of token'); +end +$$; +"#; + + server.open_document(initial_content).await?; + + let notification = tokio::time::timeout(Duration::from_secs(5), async { + loop { + match receiver.next().await { + Some(ServerNotification::PublishDiagnostics(msg)) => { + if msg.diagnostics.iter().any(|d| { + d.message + .contains("Invalid statement: syntax error at or near \"declre\"") + }) { + return true; + } + } + _ => continue, + } + } + }) + .await + .is_ok(); + + assert!(notification, "expected diagnostics for unknown column"); + + server.shutdown().await?; + reader.abort(); + + Ok(()) +} diff --git a/crates/pgt_query_ext/src/diagnostics.rs b/crates/pgt_query_ext/src/diagnostics.rs index aa16db81..7e3f0a37 100644 --- a/crates/pgt_query_ext/src/diagnostics.rs +++ b/crates/pgt_query_ext/src/diagnostics.rs @@ -15,6 +15,16 @@ pub struct SyntaxDiagnostic { pub message: MessageAndDescription, } +impl SyntaxDiagnostic { + /// Create a new syntax diagnostic with the given message and optional span. + pub fn new(message: impl Into, span: Option) -> Self { + SyntaxDiagnostic { + span, + message: MessageAndDescription::from(message.into()), + } + } +} + impl From for SyntaxDiagnostic { fn from(err: pg_query::Error) -> Self { SyntaxDiagnostic { diff --git a/crates/pgt_query_ext/src/lib.rs b/crates/pgt_query_ext/src/lib.rs index a087ec60..5882a778 100644 --- a/crates/pgt_query_ext/src/lib.rs +++ b/crates/pgt_query_ext/src/lib.rs @@ -25,3 +25,38 @@ pub fn parse(sql: &str) -> Result { .ok_or_else(|| Error::Parse("Unable to find root node".to_string())) })? } + +/// This function parses a PL/pgSQL function. +/// +/// It expects the entire `CREATE FUNCTION` statement. 
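A minimal usage sketch for the new helper (a hypothetical call site, not taken from the test suite; assumes `pgt_query_ext` as a dependency):

```rust
fn main() {
    let sql = "
CREATE FUNCTION noop() RETURNS void LANGUAGE plpgsql AS $$
BEGIN
    RETURN;
END;
$$;";
    // A syntactically valid PL/pgSQL body parses cleanly.
    assert!(pgt_query_ext::parse_plpgsql(sql).is_ok());

    // A body that is not valid PL/pgSQL surfaces as Err instead.
    let broken = sql.replace("BEGIN", "BGIN");
    assert!(pgt_query_ext::parse_plpgsql(&broken).is_err());
}
```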
+pub fn parse_plpgsql(sql: &str) -> Result<()> { + // we swallow the error until we have a proper binding + let _ = pg_query::parse_plpgsql(sql)?; + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_plpgsql_err() { + let input = " +create function test_organisation_id () + returns setof text + language plpgsql + security invoker + as $$ + -- syntax error here + decare + v_organisation_id uuid; +begin + select 1; +end +$$; + "; + + assert!(parse_plpgsql(input).is_err()); + } +} diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs index f7ace3c2..399f2ec6 100644 --- a/crates/pgt_workspace/src/workspace/server.rs +++ b/crates/pgt_workspace/src/workspace/server.rs @@ -53,6 +53,7 @@ mod async_helper; mod connection_key; mod connection_manager; pub(crate) mod document; +mod function_utils; mod migration; mod pg_query; mod schema_cache_manager; @@ -528,7 +529,7 @@ impl Workspace for WorkspaceServer { diagnostics.extend( doc.iter(SyncDiagnosticsMapper) - .flat_map(|(_id, range, ast, diag)| { + .flat_map(|(range, ast, diag)| { let mut errors: Vec = vec![]; if let Some(diag) = diag { @@ -560,9 +561,12 @@ impl Workspace for WorkspaceServer { }, ); + // adjust the span of the diagnostics to the statement (if it has one) + let span = d.location().span.map(|s| s + range.start()); + SDiagnostic::new( d.with_file_path(params.path.as_path().display().to_string()) - .with_file_span(range) + .with_file_span(span.unwrap_or(range)) .with_severity(severity), ) }) diff --git a/crates/pgt_workspace/src/workspace/server/document.rs b/crates/pgt_workspace/src/workspace/server/document.rs index f8ab639d..9d3700df 100644 --- a/crates/pgt_workspace/src/workspace/server/document.rs +++ b/crates/pgt_workspace/src/workspace/server/document.rs @@ -243,7 +243,6 @@ impl<'a> StatementMapper<'a> for AsyncDiagnosticsMapper { pub struct SyncDiagnosticsMapper; impl<'a> StatementMapper<'a> for SyncDiagnosticsMapper { type Output = ( - StatementId, TextRange, Option, Option, @@ -253,11 +252,18 @@ impl<'a> StatementMapper<'a> for SyncDiagnosticsMapper { let ast_result = parser.ast_db.get_or_cache_ast(&id); let (ast_option, diagnostics) = match &*ast_result { - Ok(node) => (Some(node.clone()), None), + Ok(node) => { + let plpgsql_result = parser.ast_db.get_or_cache_plpgsql_parse(&id); + if let Some(Err(diag)) = plpgsql_result { + (Some(node.clone()), Some(diag.clone())) + } else { + (Some(node.clone()), None) + } + } Err(diag) => (None, Some(diag.clone())), }; - (id.clone(), range, ast_option, diagnostics) + (range, ast_option, diagnostics) } } @@ -379,4 +385,274 @@ mod tests { assert_eq!(stmts.len(), 2); assert_eq!(stmts[1].2, "select $1 + $2;"); } + + #[test] + fn test_sync_diagnostics_mapper_plpgsql_syntax_error() { + let input = " +CREATE FUNCTION test_func() + RETURNS void + LANGUAGE plpgsql + AS $$ +BEGIN + -- syntax error: missing semicolon and typo + DECLAR x integer + x := 10; +END; +$$;"; + + let d = Document::new(input.to_string(), 1); + let results = d.iter(SyncDiagnosticsMapper).collect::>(); + + assert_eq!(results.len(), 1); + let (_range, ast, diagnostic) = &results[0]; + + // Should have parsed the CREATE FUNCTION statement + assert!(ast.is_some()); + + // Should have a PL/pgSQL syntax error + assert!(diagnostic.is_some()); + assert_eq!( + format!("{:?}", diagnostic.as_ref().unwrap().message), + "Invalid statement: syntax error at or near \"DECLAR\"" + ); + } + + #[test] + fn test_sync_diagnostics_mapper_plpgsql_valid() { + let 
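The span fix in `server.rs` above is plain offset arithmetic on text ranges: a diagnostic computed against a statement's own text is shifted by the statement's start to become document-absolute. In isolation (using the `text-size` crate, assumed here to be what `pgt_text_size` re-exports):

```rust
use text_size::{TextRange, TextSize};

fn main() {
    // The statement occupies bytes 100..160 of the document.
    let stmt_range = TextRange::new(TextSize::from(100), TextSize::from(160));

    // A diagnostic span relative to the statement's own text...
    let local = TextRange::new(TextSize::from(7), TextSize::from(13));

    // ...becomes document-absolute by adding the statement's start offset,
    // exactly like `s + range.start()` above.
    let absolute = local + stmt_range.start();
    assert_eq!(absolute, TextRange::new(TextSize::from(107), TextSize::from(113)));
}
```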
input = " +CREATE FUNCTION valid_func() + RETURNS integer + LANGUAGE plpgsql + AS $$ +DECLARE + x integer := 5; +BEGIN + RETURN x * 2; +END; +$$;"; + + let d = Document::new(input.to_string(), 1); + let results = d.iter(SyncDiagnosticsMapper).collect::>(); + + assert_eq!(results.len(), 1); + let (_range, ast, diagnostic) = &results[0]; + + // Should have parsed the CREATE FUNCTION statement + assert!(ast.is_some()); + + // Should NOT have any PL/pgSQL syntax errors + assert!(diagnostic.is_none()); + } + + #[test] + fn test_sync_diagnostics_mapper_plpgsql_caching() { + let input = " +CREATE FUNCTION cached_func() + RETURNS void + LANGUAGE plpgsql + AS $$ +BEGIN + RAISE NOTICE 'Testing cache'; +END; +$$;"; + + let d = Document::new(input.to_string(), 1); + + let results1 = d.iter(SyncDiagnosticsMapper).collect::>(); + assert_eq!(results1.len(), 1); + assert!(results1[0].1.is_some()); + assert!(results1[0].2.is_none()); + + let results2 = d.iter(SyncDiagnosticsMapper).collect::>(); + assert_eq!(results2.len(), 1); + assert!(results2[0].1.is_some()); + assert!(results2[0].2.is_none()); + } + + #[test] + fn test_default_mapper() { + let input = "SELECT 1; INSERT INTO users VALUES (1);"; + let d = Document::new(input.to_string(), 1); + + let results = d.iter(DefaultMapper).collect::>(); + assert_eq!(results.len(), 2); + + assert_eq!(results[0].2, "SELECT 1;"); + assert_eq!(results[1].2, "INSERT INTO users VALUES (1);"); + + assert_eq!(results[0].1.start(), 0.into()); + assert_eq!(results[0].1.end(), 9.into()); + assert_eq!(results[1].1.start(), 10.into()); + assert_eq!(results[1].1.end(), 39.into()); + } + + #[test] + fn test_execute_statement_mapper() { + let input = "SELECT 1; INVALID SYNTAX HERE;"; + let d = Document::new(input.to_string(), 1); + + let results = d.iter(ExecuteStatementMapper).collect::>(); + assert_eq!(results.len(), 2); + + // First statement should parse successfully + assert_eq!(results[0].2, "SELECT 1;"); + assert!(results[0].3.is_some()); + + // Second statement should fail to parse + assert_eq!(results[1].2, "INVALID SYNTAX HERE;"); + assert!(results[1].3.is_none()); + } + + #[test] + fn test_async_diagnostics_mapper() { + let input = " +CREATE FUNCTION test_fn() RETURNS integer AS $$ +BEGIN + RETURN 42; +END; +$$ LANGUAGE plpgsql;"; + + let d = Document::new(input.to_string(), 1); + let results = d.iter(AsyncDiagnosticsMapper).collect::>(); + + assert_eq!(results.len(), 1); + let (_id, _range, ast, cst, sql_fn_sig) = &results[0]; + + // Should have both AST and CST + assert!(ast.is_some()); + assert_eq!(cst.root_node().kind(), "program"); + + // Should not have SQL function signature for top-level statement + assert!(sql_fn_sig.is_none()); + } + + #[test] + fn test_async_diagnostics_mapper_with_sql_function_body() { + let input = + "CREATE FUNCTION add(a int, b int) RETURNS int AS 'SELECT $1 + $2;' LANGUAGE sql;"; + let d = Document::new(input.to_string(), 1); + + let results = d.iter(AsyncDiagnosticsMapper).collect::>(); + assert_eq!(results.len(), 2); + + // Check the function body + let (_id, _range, ast, _cst, sql_fn_sig) = &results[1]; + assert_eq!(_id.content(), "SELECT $1 + $2;"); + assert!(ast.is_some()); + assert!(sql_fn_sig.is_some()); + + let sig = sql_fn_sig.as_ref().unwrap(); + assert_eq!(sig.name, "add"); + assert_eq!(sig.args.len(), 2); + assert_eq!(sig.args[0].name, Some("a".to_string())); + assert_eq!(sig.args[1].name, Some("b".to_string())); + } + + #[test] + fn test_get_completions_mapper() { + let input = "SELECT * FROM users;"; + let d = 
Document::new(input.to_string(), 1); + + let results = d.iter(GetCompletionsMapper).collect::>(); + assert_eq!(results.len(), 1); + + let (_id, _range, content, tree) = &results[0]; + assert_eq!(content, "SELECT * FROM users;"); + assert_eq!(tree.root_node().kind(), "program"); + } + + #[test] + fn test_get_completions_filter() { + let input = "SELECT * FROM users; INSERT INTO"; + let d = Document::new(input.to_string(), 1); + + // Test cursor at end of first statement (terminated with semicolon) + let filter1 = GetCompletionsFilter { + cursor_position: 20.into(), + }; + let results1 = d + .iter_with_filter(DefaultMapper, filter1) + .collect::>(); + assert_eq!(results1.len(), 0); // No completions after semicolon + + // Test cursor at end of second statement (not terminated) + let filter2 = GetCompletionsFilter { + cursor_position: 32.into(), + }; + let results2 = d + .iter_with_filter(DefaultMapper, filter2) + .collect::>(); + assert_eq!(results2.len(), 1); + assert_eq!(results2[0].2, "INSERT INTO"); + } + + #[test] + fn test_cursor_position_filter() { + let input = "SELECT 1; INSERT INTO users VALUES (1);"; + let d = Document::new(input.to_string(), 1); + + // Cursor in first statement + let filter1 = CursorPositionFilter::new(5.into()); + let results1 = d + .iter_with_filter(DefaultMapper, filter1) + .collect::>(); + assert_eq!(results1.len(), 1); + assert_eq!(results1[0].2, "SELECT 1;"); + + // Cursor in second statement + let filter2 = CursorPositionFilter::new(25.into()); + let results2 = d + .iter_with_filter(DefaultMapper, filter2) + .collect::>(); + assert_eq!(results2.len(), 1); + assert_eq!(results2[0].2, "INSERT INTO users VALUES (1);"); + } + + #[test] + fn test_id_filter() { + let input = "SELECT 1; SELECT 2;"; + let d = Document::new(input.to_string(), 1); + + // Get all statements first to get their IDs + let all_results = d.iter(DefaultMapper).collect::>(); + assert_eq!(all_results.len(), 2); + + // Filter by first statement ID + let filter = IdFilter::new(all_results[0].0.clone()); + let results = d + .iter_with_filter(DefaultMapper, filter) + .collect::>(); + assert_eq!(results.len(), 1); + assert_eq!(results[0].2, "SELECT 1;"); + } + + #[test] + fn test_no_filter() { + let input = "SELECT 1; SELECT 2; SELECT 3;"; + let d = Document::new(input.to_string(), 1); + + let results = d + .iter_with_filter(DefaultMapper, NoFilter) + .collect::>(); + assert_eq!(results.len(), 3); + } + + #[test] + fn test_find_method() { + let input = "SELECT 1; SELECT 2;"; + let d = Document::new(input.to_string(), 1); + + // Get all statements to get their IDs + let all_results = d.iter(DefaultMapper).collect::>(); + + // Find specific statement + let result = d.find(all_results[1].0.clone(), DefaultMapper); + assert!(result.is_some()); + assert_eq!(result.unwrap().2, "SELECT 2;"); + + // Try to find non-existent statement + let fake_id = StatementId::new("SELECT 3;"); + let result = d.find(fake_id, DefaultMapper); + assert!(result.is_none()); + } } diff --git a/crates/pgt_workspace/src/workspace/server/function_utils.rs b/crates/pgt_workspace/src/workspace/server/function_utils.rs new file mode 100644 index 00000000..cf02ceb1 --- /dev/null +++ b/crates/pgt_workspace/src/workspace/server/function_utils.rs @@ -0,0 +1,57 @@ +/// Helper function to find a specific option value from function options +pub fn find_option_value( + create_fn: &pgt_query_ext::protobuf::CreateFunctionStmt, + option_name: &str, +) -> Option { + create_fn + .options + .iter() + .filter_map(|opt_wrapper| 
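The protobuf traversal begun above reduces to "find the first option with this name and pull out its string value". A flattened stand-in on plain tuples (hypothetical flat representation, not the real `DefElem` nodes):

```rust
// (defname, optional string argument)
fn find_option_value(options: &[(&str, Option<&str>)], name: &str) -> Option<String> {
    options
        .iter()
        .filter(|(defname, _)| *defname == name)
        .find_map(|(_, arg)| arg.map(str::to_string))
}

fn main() {
    let options = [
        ("language", Some("plpgsql")),
        ("volatility", Some("immutable")),
        ("window", None),
    ];
    assert_eq!(find_option_value(&options, "language"), Some("plpgsql".into()));
    assert_eq!(find_option_value(&options, "window"), None); // present but valueless
    assert_eq!(find_option_value(&options, "security"), None); // absent
}
```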
opt_wrapper.node.as_ref()) + .find_map(|opt| { + if let pgt_query_ext::NodeEnum::DefElem(def_elem) = opt { + if def_elem.defname == option_name { + def_elem + .arg + .iter() + .filter_map(|arg_wrapper| arg_wrapper.node.as_ref()) + .find_map(|arg| { + if let pgt_query_ext::NodeEnum::String(s) = arg { + Some(s.sval.clone()) + } else if let pgt_query_ext::NodeEnum::List(l) = arg { + l.items.iter().find_map(|item_wrapper| { + if let Some(pgt_query_ext::NodeEnum::String(s)) = + item_wrapper.node.as_ref() + { + Some(s.sval.clone()) + } else { + None + } + }) + } else { + None + } + }) + } else { + None + } + } else { + None + } + }) +} + +pub fn parse_name(nodes: &[pgt_query_ext::protobuf::Node]) -> Option<(Option, String)> { + let names = nodes + .iter() + .map(|n| match &n.node { + Some(pgt_query_ext::NodeEnum::String(s)) => Some(s.sval.clone()), + _ => None, + }) + .collect::>(); + + match names.as_slice() { + [Some(schema), Some(name)] => Some((Some(schema.clone()), name.clone())), + [Some(name)] => Some((None, name.clone())), + _ => None, + } +} diff --git a/crates/pgt_workspace/src/workspace/server/pg_query.rs b/crates/pgt_workspace/src/workspace/server/pg_query.rs index 6f1fa2c1..ba471dfa 100644 --- a/crates/pgt_workspace/src/workspace/server/pg_query.rs +++ b/crates/pgt_workspace/src/workspace/server/pg_query.rs @@ -3,19 +3,25 @@ use std::sync::{Arc, Mutex}; use lru::LruCache; use pgt_query_ext::diagnostics::*; +use pgt_text_size::TextRange; +use super::function_utils::find_option_value; use super::statement_identifier::StatementId; const DEFAULT_CACHE_SIZE: usize = 1000; pub struct PgQueryStore { - db: Mutex>>>, + ast_db: Mutex>>>, + plpgsql_db: Mutex>>, } impl PgQueryStore { pub fn new() -> PgQueryStore { PgQueryStore { - db: Mutex::new(LruCache::new( + ast_db: Mutex::new(LruCache::new( + NonZeroUsize::new(DEFAULT_CACHE_SIZE).unwrap(), + )), + plpgsql_db: Mutex::new(LruCache::new( NonZeroUsize::new(DEFAULT_CACHE_SIZE).unwrap(), )), } @@ -25,7 +31,7 @@ impl PgQueryStore { &self, statement: &StatementId, ) -> Arc> { - let mut cache = self.db.lock().unwrap(); + let mut cache = self.ast_db.lock().unwrap(); if let Some(existing) = cache.get(statement) { return existing.clone(); @@ -35,4 +41,190 @@ impl PgQueryStore { cache.put(statement.clone(), r.clone()); r } + + pub fn get_or_cache_plpgsql_parse( + &self, + statement: &StatementId, + ) -> Option> { + let ast = self.get_or_cache_ast(statement); + + let create_fn = match ast.as_ref() { + Ok(pgt_query_ext::NodeEnum::CreateFunctionStmt(node)) => node, + _ => return None, + }; + + let language = find_option_value(create_fn, "language")?; + + if language != "plpgsql" { + return None; + } + + let mut cache = self.plpgsql_db.lock().unwrap(); + + if let Some(existing) = cache.get(statement) { + return Some(existing.clone()); + } + + let sql_body = find_option_value(create_fn, "as")?; + + let start = statement.content().find(&sql_body)?; + let end = start + sql_body.len(); + + let range = TextRange::new(start.try_into().unwrap(), end.try_into().unwrap()); + + let r = pgt_query_ext::parse_plpgsql(statement.content()) + .map_err(|err| SyntaxDiagnostic::new(err.to_string(), Some(range))); + cache.put(statement.clone(), r.clone()); + + Some(r) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_plpgsql_syntax_error() { + let input = " +create function test_organisation_id () + returns setof text + language plpgsql + security invoker + as $$ + -- syntax error here + delare + v_organisation_id uuid; +begin + return next 
is(private.organisation_id(), v_organisation_id, 'should return organisation_id of token'); +end +$$; +"; + + let store = PgQueryStore::new(); + + let res = store.get_or_cache_plpgsql_parse(&StatementId::new(input)); + + assert!(matches!(res, Some(Err(_)))); + } + + #[test] + fn test_plpgsql_valid() { + let input = " +CREATE FUNCTION test_function() + RETURNS integer + LANGUAGE plpgsql + AS $$ +DECLARE + counter integer := 0; +BEGIN + counter := counter + 1; + RETURN counter; +END; +$$; +"; + + let store = PgQueryStore::new(); + + let res = store.get_or_cache_plpgsql_parse(&StatementId::new(input)); + + assert!(matches!(res, Some(Ok(_)))); + } + + #[test] + fn test_non_plpgsql_function() { + let input = " +CREATE FUNCTION add_numbers(a integer, b integer) + RETURNS integer + LANGUAGE sql + AS $$ + SELECT a + b; + $$; +"; + + let store = PgQueryStore::new(); + + let res = store.get_or_cache_plpgsql_parse(&StatementId::new(input)); + + assert!(res.is_none()); + } + + #[test] + fn test_non_function_statement() { + let input = "SELECT * FROM users WHERE id = 1;"; + + let store = PgQueryStore::new(); + + let res = store.get_or_cache_plpgsql_parse(&StatementId::new(input)); + + assert!(res.is_none()); + } + + #[test] + fn test_cache_behavior() { + let input = " +CREATE FUNCTION cached_function() + RETURNS void + LANGUAGE plpgsql + AS $$ +BEGIN + RAISE NOTICE 'Hello from cache test'; +END; +$$; +"; + + let store = PgQueryStore::new(); + let statement_id = StatementId::new(input); + + // First call should parse + let res1 = store.get_or_cache_plpgsql_parse(&statement_id); + assert!(matches!(res1, Some(Ok(_)))); + + // Second call should return cached result + let res2 = store.get_or_cache_plpgsql_parse(&statement_id); + assert!(matches!(res2, Some(Ok(_)))); + } + + #[test] + fn test_plpgsql_with_complex_body() { + let input = " +CREATE FUNCTION complex_function(p_id integer) + RETURNS TABLE(id integer, name text, status boolean) + LANGUAGE plpgsql + AS $$ +DECLARE + v_count integer; + v_status boolean := true; +BEGIN + SELECT COUNT(*) INTO v_count FROM users WHERE user_id = p_id; + + IF v_count > 0 THEN + RETURN QUERY + SELECT u.id, u.name, v_status + FROM users u + WHERE u.user_id = p_id; + ELSE + RAISE EXCEPTION 'User not found'; + END IF; +END; +$$; +"; + + let store = PgQueryStore::new(); + + let res = store.get_or_cache_plpgsql_parse(&StatementId::new(input)); + + assert!(matches!(res, Some(Ok(_)))); + } + + #[test] + fn test_invalid_ast() { + let input = "CREATE FUNCTION invalid syntax here"; + + let store = PgQueryStore::new(); + + let res = store.get_or_cache_plpgsql_parse(&StatementId::new(input)); + + assert!(res.is_none()); + } } diff --git a/crates/pgt_workspace/src/workspace/server/sql_function.rs b/crates/pgt_workspace/src/workspace/server/sql_function.rs index bc2c6c3b..6161dda7 100644 --- a/crates/pgt_workspace/src/workspace/server/sql_function.rs +++ b/crates/pgt_workspace/src/workspace/server/sql_function.rs @@ -1,5 +1,7 @@ use pgt_text_size::TextRange; +use super::function_utils::{find_option_value, parse_name}; + #[derive(Debug, Clone)] pub struct ArgType { pub schema: Option, @@ -106,64 +108,6 @@ pub fn get_sql_fn_body(ast: &pgt_query_ext::NodeEnum, content: &str) -> Option Option { - create_fn - .options - .iter() - .filter_map(|opt_wrapper| opt_wrapper.node.as_ref()) - .find_map(|opt| { - if let pgt_query_ext::NodeEnum::DefElem(def_elem) = opt { - if def_elem.defname == option_name { - def_elem - .arg - .iter() - .filter_map(|arg_wrapper| arg_wrapper.node.as_ref()) - 
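Its sibling helper `parse_name` (moved into `function_utils` above, and removed from `sql_function.rs` below) splits a one- or two-part qualified name into `(schema, name)`. The same match, on plain strings instead of protobuf nodes:

```rust
fn parse_name(parts: &[Option<&str>]) -> Option<(Option<String>, String)> {
    match parts {
        [Some(schema), Some(name)] => Some((Some(schema.to_string()), name.to_string())),
        [Some(name)] => Some((None, name.to_string())),
        _ => None, // empty, too many parts, or a non-string node
    }
}

fn main() {
    assert_eq!(
        parse_name(&[Some("private"), Some("organisation_id")]),
        Some((Some("private".into()), "organisation_id".into()))
    );
    assert_eq!(parse_name(&[Some("add")]), Some((None, "add".into())));
    assert_eq!(parse_name(&[None]), None);
    assert_eq!(parse_name(&[]), None);
}
```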
.find_map(|arg| { - if let pgt_query_ext::NodeEnum::String(s) = arg { - Some(s.sval.clone()) - } else if let pgt_query_ext::NodeEnum::List(l) = arg { - l.items.iter().find_map(|item_wrapper| { - if let Some(pgt_query_ext::NodeEnum::String(s)) = - item_wrapper.node.as_ref() - { - Some(s.sval.clone()) - } else { - None - } - }) - } else { - None - } - }) - } else { - None - } - } else { - None - } - }) -} - -fn parse_name(nodes: &[pgt_query_ext::protobuf::Node]) -> Option<(Option, String)> { - let names = nodes - .iter() - .map(|n| match &n.node { - Some(pgt_query_ext::NodeEnum::String(s)) => Some(s.sval.clone()), - _ => None, - }) - .collect::>(); - - match names.as_slice() { - [Some(schema), Some(name)] => Some((Some(schema.clone()), name.clone())), - [Some(name)] => Some((None, name.clone())), - _ => None, - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/test.sql b/test.sql index 88b7310d..97c8e639 100644 --- a/test.sql +++ b/test.sql @@ -9,3 +9,18 @@ from unknown_users; sel 1; + + + +create function test_organisation_id () + returns setof text + language plpgsql + security invoker + as $$ + declre + v_organisation_id uuid; +begin + return next is(private.organisation_id(), v_organisation_id, 'should return organisation_id of token'); +end +$$; + From f6cea4beaa506d70f1d8897a5d4f2690d5f28be2 Mon Sep 17 00:00:00 2001 From: psteinroe Date: Tue, 15 Jul 2025 08:43:51 +0200 Subject: [PATCH 091/114] chore: update git cliff version --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 96760298..07dd6ecd 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -122,7 +122,7 @@ jobs: fetch-depth: 0 - name: 📝 Create Changelog - uses: orhun/git-cliff-action@v3 + uses: orhun/git-cliff-action@v4 id: create_changelog with: config: cliff.toml From f61317697cf7857ad2d5163c66893beed5039105 Mon Sep 17 00:00:00 2001 From: psteinroe Date: Tue, 15 Jul 2025 10:13:56 +0200 Subject: [PATCH 092/114] chore: temp publish --- .github/workflows/publish.reusable.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.github/workflows/publish.reusable.yml b/.github/workflows/publish.reusable.yml index 31e625d7..d2650f33 100644 --- a/.github/workflows/publish.reusable.yml +++ b/.github/workflows/publish.reusable.yml @@ -56,6 +56,18 @@ jobs: env: NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} # + - name: Temp publish + if: inputs.is-prerelease != 'true' + run: | + npm publish "@postgrestools/cli-aarch64-apple-darwin" --tag latest --access public --provenance + npm publish "@postgrestools/cli-aarch64-windows-msvc" --tag latest --access public --provenance + npm publish "@postgrestools/cli-aarch64-linux-gnu" --tag latest --access public --provenance + npm publish "@postgrestools/cli-x86_64-apple-darwin" --tag latest --access public --provenance + npm publish "@postgrestools/cli-x86_64-windows-msvc" --tag latest --access public --provenance + npm publish "@postgrestools/cli-x86_64-linux-gnu" --tag latest --access public --provenance + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} + - name: Publish npm packages as latest if: inputs.is-prerelease != 'true' run: | From 9f8939f16f96c0173ac91add4e43e81027d6b588 Mon Sep 17 00:00:00 2001 From: psteinroe Date: Tue, 15 Jul 2025 10:29:12 +0200 Subject: [PATCH 093/114] chore: temp publish --- .github/workflows/publish.reusable.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/publish.reusable.yml 
b/.github/workflows/publish.reusable.yml index d2650f33..690c5aa0 100644 --- a/.github/workflows/publish.reusable.yml +++ b/.github/workflows/publish.reusable.yml @@ -47,6 +47,10 @@ jobs: run: | cat packages/@postgrestools/postgrestools/package.json + - name: Print package.json + run: | + cat packages/@postgrestools/cli-aarch64-apple-darwin/package.json + - name: Publish npm packages as nightly if: inputs.is-prerelease == 'true' run: | From 9eb2aac21b4e4ff2a21ed0de9656197e49587714 Mon Sep 17 00:00:00 2001 From: psteinroe Date: Tue, 15 Jul 2025 11:13:52 +0200 Subject: [PATCH 094/114] chore: temp publish --- .github/workflows/publish.reusable.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.reusable.yml b/.github/workflows/publish.reusable.yml index 690c5aa0..ead8a1b8 100644 --- a/.github/workflows/publish.reusable.yml +++ b/.github/workflows/publish.reusable.yml @@ -49,7 +49,7 @@ jobs: - name: Print package.json run: | - cat packages/@postgrestools/cli-aarch64-apple-darwin/package.json + cat packages/@postgrestools/postgrestools_aarch64-apple-darwin/package.json - name: Publish npm packages as nightly if: inputs.is-prerelease == 'true' From 8d776e9b6a60b7c6ca5bc14b814352ee90086942 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Tue, 15 Jul 2025 11:45:13 +0200 Subject: [PATCH 095/114] chore: temp publish (#457) --- .github/workflows/publish.reusable.yml | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/.github/workflows/publish.reusable.yml b/.github/workflows/publish.reusable.yml index ead8a1b8..0d9095ed 100644 --- a/.github/workflows/publish.reusable.yml +++ b/.github/workflows/publish.reusable.yml @@ -60,15 +60,21 @@ jobs: env: NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} # + - name: Temp LS packages + run: | + cd packages + ls @postgrestools/ + - name: Temp publish if: inputs.is-prerelease != 'true' run: | - npm publish "@postgrestools/cli-aarch64-apple-darwin" --tag latest --access public --provenance - npm publish "@postgrestools/cli-aarch64-windows-msvc" --tag latest --access public --provenance - npm publish "@postgrestools/cli-aarch64-linux-gnu" --tag latest --access public --provenance - npm publish "@postgrestools/cli-x86_64-apple-darwin" --tag latest --access public --provenance - npm publish "@postgrestools/cli-x86_64-windows-msvc" --tag latest --access public --provenance - npm publish "@postgrestools/cli-x86_64-linux-gnu" --tag latest --access public --provenance + cd packages + npm publish "@postgrestools/postgrestools_aarch64-apple-darwin" --tag latest --access public --provenance + npm publish "@postgrestools/postgrestools_aarch64-windows-msvc" --tag latest --access public --provenance + npm publish "@postgrestools/postgrestools_aarch64-linux-gnu" --tag latest --access public --provenance + npm publish "@postgrestools/postgrestools_x86_64-apple-darwin" --tag latest --access public --provenance + npm publish "@postgrestools/postgrestools_x86_64-windows-msvc" --tag latest --access public --provenance + npm publish "@postgrestools/postgrestools_x86_64-linux-gnu" --tag latest --access public --provenance env: NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} From 5254806d924b42c1cc883fe2f3c1894797585b37 Mon Sep 17 00:00:00 2001 From: psteinroe Date: Tue, 15 Jul 2025 11:50:07 +0200 Subject: [PATCH 096/114] chore: temp publish --- .../@postgrestools/postgrestools/scripts/generate-packages.mjs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/packages/@postgrestools/postgrestools/scripts/generate-packages.mjs b/packages/@postgrestools/postgrestools/scripts/generate-packages.mjs index 34193a92..8d49bd25 100644 --- a/packages/@postgrestools/postgrestools/scripts/generate-packages.mjs +++ b/packages/@postgrestools/postgrestools/scripts/generate-packages.mjs @@ -163,7 +163,7 @@ function copyBinaryToNativePackage(platform, arch, os) { const ext = getBinaryExt(os); const manifestPath = resolve(packageRoot, "package.json"); - console.info(`Update manifest ${manifestPath}`); + console.info(`Update manifest ${manifestPath} to ${JSON.stringify(manifest)}`); fs.writeFileSync(manifestPath, manifest); // Copy the CLI binary From a6d5d13def1103cf94c4b15a09e44df2816f163c Mon Sep 17 00:00:00 2001 From: psteinroe Date: Tue, 15 Jul 2025 11:50:34 +0200 Subject: [PATCH 097/114] chore: temp publish --- .github/workflows/publish.reusable.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/publish.reusable.yml b/.github/workflows/publish.reusable.yml index 0d9095ed..31b00f74 100644 --- a/.github/workflows/publish.reusable.yml +++ b/.github/workflows/publish.reusable.yml @@ -68,7 +68,6 @@ jobs: - name: Temp publish if: inputs.is-prerelease != 'true' run: | - cd packages npm publish "@postgrestools/postgrestools_aarch64-apple-darwin" --tag latest --access public --provenance npm publish "@postgrestools/postgrestools_aarch64-windows-msvc" --tag latest --access public --provenance npm publish "@postgrestools/postgrestools_aarch64-linux-gnu" --tag latest --access public --provenance From 0dc25fdeeed2cab6b37127a9c1770294dbf5a713 Mon Sep 17 00:00:00 2001 From: psteinroe Date: Tue, 15 Jul 2025 11:59:36 +0200 Subject: [PATCH 098/114] chore: temp publish --- .github/workflows/publish.reusable.yml | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/.github/workflows/publish.reusable.yml b/.github/workflows/publish.reusable.yml index 31b00f74..8ab4a298 100644 --- a/.github/workflows/publish.reusable.yml +++ b/.github/workflows/publish.reusable.yml @@ -60,28 +60,13 @@ jobs: env: NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} # - - name: Temp LS packages - run: | - cd packages - ls @postgrestools/ - - - name: Temp publish - if: inputs.is-prerelease != 'true' - run: | - npm publish "@postgrestools/postgrestools_aarch64-apple-darwin" --tag latest --access public --provenance - npm publish "@postgrestools/postgrestools_aarch64-windows-msvc" --tag latest --access public --provenance - npm publish "@postgrestools/postgrestools_aarch64-linux-gnu" --tag latest --access public --provenance - npm publish "@postgrestools/postgrestools_x86_64-apple-darwin" --tag latest --access public --provenance - npm publish "@postgrestools/postgrestools_x86_64-windows-msvc" --tag latest --access public --provenance - npm publish "@postgrestools/postgrestools_x86_64-linux-gnu" --tag latest --access public --provenance - env: - NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} - - name: Publish npm packages as latest if: inputs.is-prerelease != 'true' run: | for package in packages/@postgrestools/*; do - npm publish "$package" --tag latest --access public --provenance + if [[ "$package_name" != "backend-jsonrpc" && "$package_name" != "postgrestools" ]]; then + npm publish "$package" --tag latest --access public --provenance + fi done env: NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} From 3263f34318ea5c06bbe15591a039e1b4aca8ca52 Mon Sep 17 00:00:00 2001 From: psteinroe Date: Tue, 15 Jul 2025 12:02:32 +0200 Subject: [PATCH 099/114] chore: temp 
publish --- .github/workflows/publish.reusable.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/publish.reusable.yml b/.github/workflows/publish.reusable.yml index 8ab4a298..ce41abf9 100644 --- a/.github/workflows/publish.reusable.yml +++ b/.github/workflows/publish.reusable.yml @@ -64,6 +64,7 @@ jobs: if: inputs.is-prerelease != 'true' run: | for package in packages/@postgrestools/*; do + package_name=$(basename "$package") if [[ "$package_name" != "backend-jsonrpc" && "$package_name" != "postgrestools" ]]; then npm publish "$package" --tag latest --access public --provenance fi From 5dd0981e628b438da45d15b959936aa62d367e27 Mon Sep 17 00:00:00 2001 From: psteinroe Date: Tue, 15 Jul 2025 12:04:32 +0200 Subject: [PATCH 100/114] chore: cleanup --- .github/workflows/publish.reusable.yml | 22 +------------------ .../scripts/generate-packages.mjs | 2 +- 2 files changed, 2 insertions(+), 22 deletions(-) diff --git a/.github/workflows/publish.reusable.yml b/.github/workflows/publish.reusable.yml index ce41abf9..5be2e12f 100644 --- a/.github/workflows/publish.reusable.yml +++ b/.github/workflows/publish.reusable.yml @@ -34,23 +34,6 @@ jobs: RELEASE_TAG: ${{ inputs.release-tag }} PRERELEASE: ${{ inputs.is-prerelease }} - - name: Verify NPM TOKEN exists - run: | - if [ -z "${{ secrets.NPM_TOKEN }}" ]; then - echo "Secret is not defined" - exit 1 - else - echo "Secret is defined" - fi - - - name: Print package.json - run: | - cat packages/@postgrestools/postgrestools/package.json - - - name: Print package.json - run: | - cat packages/@postgrestools/postgrestools_aarch64-apple-darwin/package.json - - name: Publish npm packages as nightly if: inputs.is-prerelease == 'true' run: | @@ -64,10 +47,7 @@ jobs: if: inputs.is-prerelease != 'true' run: | for package in packages/@postgrestools/*; do - package_name=$(basename "$package") - if [[ "$package_name" != "backend-jsonrpc" && "$package_name" != "postgrestools" ]]; then - npm publish "$package" --tag latest --access public --provenance - fi + npm publish "$package" --tag latest --access public --provenance done env: NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} diff --git a/packages/@postgrestools/postgrestools/scripts/generate-packages.mjs b/packages/@postgrestools/postgrestools/scripts/generate-packages.mjs index 8d49bd25..34193a92 100644 --- a/packages/@postgrestools/postgrestools/scripts/generate-packages.mjs +++ b/packages/@postgrestools/postgrestools/scripts/generate-packages.mjs @@ -163,7 +163,7 @@ function copyBinaryToNativePackage(platform, arch, os) { const ext = getBinaryExt(os); const manifestPath = resolve(packageRoot, "package.json"); - console.info(`Update manifest ${manifestPath} to ${JSON.stringify(manifest)}`); + console.info(`Update manifest ${manifestPath}`); fs.writeFileSync(manifestPath, manifest); // Copy the CLI binary From c323eb03d8c444bcdb08adf96455d432b849f2fc Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Tue, 15 Jul 2025 12:22:55 +0200 Subject: [PATCH 101/114] chore: expose schema_cache & file_context in lint rules (#449) --- Cargo.lock | 4 + crates/pgt_analyse/Cargo.toml | 9 +- .../pgt_analyse/src/analysed_file_context.rs | 7 ++ crates/pgt_analyse/src/context.rs | 27 ++++- crates/pgt_analyse/src/lib.rs | 2 + crates/pgt_analyse/src/registry.rs | 12 ++- crates/pgt_analyse/src/rule.rs | 9 +- crates/pgt_analyser/Cargo.toml | 12 ++- crates/pgt_analyser/src/lib.rs | 58 ++++++++--- crates/pgt_analyser/tests/rules_tests.rs | 12 ++- 
crates/pgt_lsp/tests/server.rs | 18 +++- crates/pgt_query_ext/src/diagnostics.rs | 7 +- crates/pgt_workspace/Cargo.toml | 4 +- crates/pgt_workspace/src/workspace/server.rs | 96 +++++++++--------- .../src/workspace/server.tests.rs | 99 +++++++++++++++++++ .../src/workspace/server/document.rs | 52 +++++----- docs/codegen/src/rules_docs.rs | 14 ++- docs/rules/ban-drop-column.md | 6 +- docs/rules/ban-drop-not-null.md | 6 +- docs/rules/ban-drop-table.md | 6 +- .../backend-jsonrpc/src/workspace.ts | 9 +- .../codegen/src/generate_new_analyser_rule.rs | 9 +- xtask/rules_check/src/lib.rs | 14 ++- 23 files changed, 365 insertions(+), 127 deletions(-) create mode 100644 crates/pgt_analyse/src/analysed_file_context.rs create mode 100644 crates/pgt_workspace/src/workspace/server.tests.rs diff --git a/Cargo.lock b/Cargo.lock index 4da985a3..16b1de5e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2693,6 +2693,7 @@ dependencies = [ "pgt_console", "pgt_diagnostics", "pgt_query_ext", + "pgt_schema_cache", "pgt_text_size", "rustc-hash 2.1.0", "schemars", @@ -2708,7 +2709,9 @@ dependencies = [ "pgt_console", "pgt_diagnostics", "pgt_query_ext", + "pgt_schema_cache", "pgt_test_macros", + "pgt_text_size", "serde", "termcolor", ] @@ -3102,6 +3105,7 @@ dependencies = [ "pgt_schema_cache", "pgt_statement_splitter", "pgt_suppressions", + "pgt_test_utils", "pgt_text_size", "pgt_typecheck", "rustc-hash 2.1.0", diff --git a/crates/pgt_analyse/Cargo.toml b/crates/pgt_analyse/Cargo.toml index 75eb0211..4d30784c 100644 --- a/crates/pgt_analyse/Cargo.toml +++ b/crates/pgt_analyse/Cargo.toml @@ -13,10 +13,11 @@ version = "0.0.0" [dependencies] -pgt_console.workspace = true -pgt_diagnostics.workspace = true -pgt_query_ext.workspace = true -rustc-hash = { workspace = true } +pgt_console.workspace = true +pgt_diagnostics.workspace = true +pgt_query_ext.workspace = true +pgt_schema_cache.workspace = true +rustc-hash = { workspace = true } biome_deserialize = { workspace = true, optional = true } biome_deserialize_macros = { workspace = true, optional = true } diff --git a/crates/pgt_analyse/src/analysed_file_context.rs b/crates/pgt_analyse/src/analysed_file_context.rs new file mode 100644 index 00000000..cba53eeb --- /dev/null +++ b/crates/pgt_analyse/src/analysed_file_context.rs @@ -0,0 +1,7 @@ +#[derive(Default)] +pub struct AnalysedFileContext {} + +impl AnalysedFileContext { + #[allow(unused)] + pub fn update_from(&mut self, stmt_root: &pgt_query_ext::NodeEnum) {} +} diff --git a/crates/pgt_analyse/src/context.rs b/crates/pgt_analyse/src/context.rs index cd069657..7447c0bb 100644 --- a/crates/pgt_analyse/src/context.rs +++ b/crates/pgt_analyse/src/context.rs @@ -1,4 +1,7 @@ +use pgt_schema_cache::SchemaCache; + use crate::{ + AnalysedFileContext, categories::RuleCategory, rule::{GroupCategory, Rule, RuleGroup, RuleMetadata}, }; @@ -6,6 +9,8 @@ use crate::{ pub struct RuleContext<'a, R: Rule> { stmt: &'a pgt_query_ext::NodeEnum, options: &'a R::Options, + schema_cache: Option<&'a SchemaCache>, + file_context: &'a AnalysedFileContext, } impl<'a, R> RuleContext<'a, R> @@ -13,8 +18,18 @@ where R: Rule + Sized + 'static, { #[allow(clippy::too_many_arguments)] - pub fn new(stmt: &'a pgt_query_ext::NodeEnum, options: &'a R::Options) -> Self { - Self { stmt, options } + pub fn new( + stmt: &'a pgt_query_ext::NodeEnum, + options: &'a R::Options, + schema_cache: Option<&'a SchemaCache>, + file_context: &'a AnalysedFileContext, + ) -> Self { + Self { + stmt, + options, + schema_cache, + file_context, + } } /// Returns the group that 
belongs to the current rule @@ -32,6 +47,14 @@ where self.stmt } + pub fn file_context(&self) -> &AnalysedFileContext { + self.file_context + } + + pub fn schema_cache(&self) -> Option<&SchemaCache> { + self.schema_cache + } + /// Returns the metadata of the rule /// /// The metadata contains information about the rule, such as the name, version, language, and whether it is recommended. diff --git a/crates/pgt_analyse/src/lib.rs b/crates/pgt_analyse/src/lib.rs index f312de45..1d4ec6ae 100644 --- a/crates/pgt_analyse/src/lib.rs +++ b/crates/pgt_analyse/src/lib.rs @@ -1,3 +1,4 @@ +mod analysed_file_context; mod categories; pub mod context; mod filter; @@ -9,6 +10,7 @@ mod rule; // Re-exported for use in the `declare_group` macro pub use pgt_diagnostics::category_concat; +pub use crate::analysed_file_context::AnalysedFileContext; pub use crate::categories::{ ActionCategory, RefactorKind, RuleCategories, RuleCategoriesBuilder, RuleCategory, SUPPRESSION_ACTION_CATEGORY, SourceActionKind, diff --git a/crates/pgt_analyse/src/registry.rs b/crates/pgt_analyse/src/registry.rs index 48b73b15..d43d7711 100644 --- a/crates/pgt_analyse/src/registry.rs +++ b/crates/pgt_analyse/src/registry.rs @@ -2,6 +2,7 @@ use std::{borrow, collections::BTreeSet}; use crate::{ AnalyserOptions, + analysed_file_context::AnalysedFileContext, context::RuleContext, filter::{AnalysisFilter, GroupKey, RuleKey}, rule::{GroupCategory, Rule, RuleDiagnostic, RuleGroup}, @@ -158,6 +159,8 @@ impl RuleRegistry { pub struct RegistryRuleParams<'a> { pub root: &'a pgt_query_ext::NodeEnum, pub options: &'a AnalyserOptions, + pub analysed_file_context: &'a AnalysedFileContext, + pub schema_cache: Option<&'a pgt_schema_cache::SchemaCache>, } /// Executor for rule as a generic function pointer @@ -174,7 +177,14 @@ impl RegistryRule { R: Rule + 'static, { let options = params.options.rule_options::().unwrap_or_default(); - let ctx = RuleContext::new(params.root, &options); + + let ctx = RuleContext::new( + params.root, + &options, + params.schema_cache, + params.analysed_file_context, + ); + R::run(&ctx) } diff --git a/crates/pgt_analyse/src/rule.rs b/crates/pgt_analyse/src/rule.rs index fae3cda3..1760ce97 100644 --- a/crates/pgt_analyse/src/rule.rs +++ b/crates/pgt_analyse/src/rule.rs @@ -102,7 +102,8 @@ pub trait GroupCategory { pub trait Rule: RuleMeta + Sized { type Options: Default + Clone + Debug; - fn run(ctx: &RuleContext) -> Vec; + /// `schema_cache` will only be available if the user has a working database connection. + fn run(rule_context: &RuleContext) -> Vec; } /// Diagnostic object returned by a single analysis rule @@ -208,6 +209,12 @@ impl RuleDiagnostic { self } + /// Sets the span of this diagnostic. + pub fn span(mut self, span: TextRange) -> Self { + self.span = Some(span); + self + } + /// Marks this diagnostic as unnecessary code, which will /// be displayed in the language server. 
/// diff --git a/crates/pgt_analyser/Cargo.toml b/crates/pgt_analyser/Cargo.toml index e77aaa4f..5f65b978 100644 --- a/crates/pgt_analyser/Cargo.toml +++ b/crates/pgt_analyser/Cargo.toml @@ -12,11 +12,13 @@ repository.workspace = true version = "0.0.0" [dependencies] -pgt_analyse = { workspace = true } -pgt_console = { workspace = true } -pgt_diagnostics = { workspace = true } -pgt_query_ext = { workspace = true } -serde = { workspace = true } +pgt_analyse = { workspace = true } +pgt_console = { workspace = true } +pgt_diagnostics = { workspace = true } +pgt_query_ext = { workspace = true } +pgt_schema_cache = { workspace = true } +pgt_text_size = { workspace = true } +serde = { workspace = true } [dev-dependencies] insta = { version = "1.42.1" } diff --git a/crates/pgt_analyser/src/lib.rs b/crates/pgt_analyser/src/lib.rs index 248fe22b..f96b6f6d 100644 --- a/crates/pgt_analyser/src/lib.rs +++ b/crates/pgt_analyser/src/lib.rs @@ -1,8 +1,8 @@ use std::{ops::Deref, sync::LazyLock}; use pgt_analyse::{ - AnalyserOptions, AnalysisFilter, MetadataRegistry, RegistryRuleParams, RuleDiagnostic, - RuleRegistry, + AnalysedFileContext, AnalyserOptions, AnalysisFilter, MetadataRegistry, RegistryRuleParams, + RuleDiagnostic, RuleRegistry, }; pub use registry::visit_registry; @@ -30,8 +30,15 @@ pub struct Analyser<'a> { registry: RuleRegistry, } -pub struct AnalyserContext<'a> { - pub root: &'a pgt_query_ext::NodeEnum, +#[derive(Debug)] +pub struct AnalysableStatement { + pub root: pgt_query_ext::NodeEnum, + pub range: pgt_text_size::TextRange, +} + +pub struct AnalyserParams<'a> { + pub stmts: Vec, + pub schema_cache: Option<&'a pgt_schema_cache::SchemaCache>, } pub struct AnalyserConfig<'a> { @@ -52,17 +59,31 @@ impl<'a> Analyser<'a> { } } - pub fn run(&self, ctx: AnalyserContext) -> Vec { - let params = RegistryRuleParams { - root: ctx.root, - options: self.options, - }; + pub fn run(&self, params: AnalyserParams) -> Vec { + let mut diagnostics = vec![]; + + let mut file_context = AnalysedFileContext::default(); + + for stmt in params.stmts { + let rule_params = RegistryRuleParams { + root: &stmt.root, + options: self.options, + analysed_file_context: &file_context, + schema_cache: params.schema_cache, + }; - self.registry - .rules - .iter() - .flat_map(|rule| (rule.run)(¶ms)) - .collect::>() + diagnostics.extend( + self.registry + .rules + .iter() + .flat_map(|rule| (rule.run)(&rule_params)) + .map(|r| r.span(stmt.range)), + ); + + file_context.update_from(&stmt.root); + } + + diagnostics } } @@ -77,9 +98,10 @@ mod tests { markup, }; use pgt_diagnostics::PrintDiagnostic; + use pgt_text_size::TextRange; use termcolor::NoColor; - use crate::Analyser; + use crate::{AnalysableStatement, Analyser}; #[ignore] #[test] @@ -102,6 +124,7 @@ mod tests { }; let ast = pgt_query_ext::parse(SQL).expect("failed to parse SQL"); + let range = TextRange::new(0.into(), u32::try_from(SQL.len()).unwrap().into()); let options = AnalyserOptions::default(); @@ -110,7 +133,10 @@ mod tests { filter, }); - let results = analyser.run(crate::AnalyserContext { root: &ast }); + let results = analyser.run(crate::AnalyserParams { + stmts: vec![AnalysableStatement { root: ast, range }], + schema_cache: None, + }); println!("*******************"); for result in &results { diff --git a/crates/pgt_analyser/tests/rules_tests.rs b/crates/pgt_analyser/tests/rules_tests.rs index 247c02b0..0a6b47ec 100644 --- a/crates/pgt_analyser/tests/rules_tests.rs +++ b/crates/pgt_analyser/tests/rules_tests.rs @@ -2,7 +2,7 @@ use core::slice; use 
std::{fmt::Write, fs::read_to_string, path::Path}; use pgt_analyse::{AnalyserOptions, AnalysisFilter, RuleDiagnostic, RuleFilter}; -use pgt_analyser::{Analyser, AnalyserConfig, AnalyserContext}; +use pgt_analyser::{AnalysableStatement, Analyser, AnalyserConfig, AnalyserParams}; use pgt_console::StdDisplay; use pgt_diagnostics::PrintDiagnostic; @@ -32,7 +32,15 @@ fn rule_test(full_path: &'static str, _: &str, _: &str) { filter, }); - let results = analyser.run(AnalyserContext { root: &ast }); + let stmt = AnalysableStatement { + root: ast, + range: pgt_text_size::TextRange::new(0.into(), u32::try_from(query.len()).unwrap().into()), + }; + + let results = analyser.run(AnalyserParams { + stmts: vec![stmt], + schema_cache: None, + }); let mut snapshot = String::new(); write_snapshot(&mut snapshot, query.as_str(), results.as_slice()); diff --git a/crates/pgt_lsp/tests/server.rs b/crates/pgt_lsp/tests/server.rs index 353e80ae..176868f5 100644 --- a/crates/pgt_lsp/tests/server.rs +++ b/crates/pgt_lsp/tests/server.rs @@ -1734,13 +1734,24 @@ $$; server.open_document(initial_content).await?; - let notification = tokio::time::timeout(Duration::from_secs(5), async { + let got_notification = tokio::time::timeout(Duration::from_secs(5), async { loop { match receiver.next().await { Some(ServerNotification::PublishDiagnostics(msg)) => { if msg.diagnostics.iter().any(|d| { d.message .contains("Invalid statement: syntax error at or near \"declre\"") + && d.range + == Range { + start: Position { + line: 5, + character: 9, + }, + end: Position { + line: 11, + character: 0, + }, + } }) { return true; } @@ -1752,7 +1763,10 @@ $$; .await .is_ok(); - assert!(notification, "expected diagnostics for unknown column"); + assert!( + got_notification, + "expected diagnostics for invalid declare statement" + ); server.shutdown().await?; reader.abort(); diff --git a/crates/pgt_query_ext/src/diagnostics.rs b/crates/pgt_query_ext/src/diagnostics.rs index 7e3f0a37..1a068dc0 100644 --- a/crates/pgt_query_ext/src/diagnostics.rs +++ b/crates/pgt_query_ext/src/diagnostics.rs @@ -9,7 +9,7 @@ use pgt_text_size::TextRange; pub struct SyntaxDiagnostic { /// The location where the error is occurred #[location(span)] - span: Option, + pub span: Option, #[message] #[description] pub message: MessageAndDescription, @@ -23,6 +23,11 @@ impl SyntaxDiagnostic { message: MessageAndDescription::from(message.into()), } } + + pub fn span(mut self, span: TextRange) -> Self { + self.span = Some(span); + self + } } impl From for SyntaxDiagnostic { diff --git a/crates/pgt_workspace/Cargo.toml b/crates/pgt_workspace/Cargo.toml index f535e505..3ef4936b 100644 --- a/crates/pgt_workspace/Cargo.toml +++ b/crates/pgt_workspace/Cargo.toml @@ -62,7 +62,9 @@ schema = [ ] [dev-dependencies] -tempfile = "3.15.0" +pgt_test_utils = { workspace = true } +sqlx = { workspace = true } +tempfile = "3.15.0" [lib] doctest = false diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs index 399f2ec6..e6456afc 100644 --- a/crates/pgt_workspace/src/workspace/server.rs +++ b/crates/pgt_workspace/src/workspace/server.rs @@ -10,12 +10,12 @@ use analyser::AnalyserVisitorBuilder; use async_helper::run_async; use connection_manager::ConnectionManager; use document::{ - AsyncDiagnosticsMapper, CursorPositionFilter, DefaultMapper, Document, ExecuteStatementMapper, - SyncDiagnosticsMapper, + CursorPositionFilter, DefaultMapper, Document, ExecuteStatementMapper, + TypecheckDiagnosticsMapper, }; use futures::{StreamExt, stream}; 
use pgt_analyse::{AnalyserOptions, AnalysisFilter}; -use pgt_analyser::{Analyser, AnalyserConfig, AnalyserContext}; +use pgt_analyser::{Analyser, AnalyserConfig, AnalyserParams}; use pgt_diagnostics::{ Diagnostic, DiagnosticExt, Error, Severity, serde::Diagnostic as SDiagnostic, }; @@ -37,6 +37,7 @@ use crate::{ diagnostics::{PullDiagnosticsParams, PullDiagnosticsResult}, }, settings::{WorkspaceSettings, WorkspaceSettingsHandle, WorkspaceSettingsHandleMut}, + workspace::AnalyserDiagnosticsMapper, }; use super::{ @@ -444,7 +445,7 @@ impl Workspace for WorkspaceServer { if let Some(pool) = self.get_current_connection() { let path_clone = params.path.clone(); let schema_cache = self.schema_cache.load(pool.clone())?; - let input = doc.iter(AsyncDiagnosticsMapper).collect::>(); + let input = doc.iter(TypecheckDiagnosticsMapper).collect::>(); // sorry for the ugly code :( let async_results = run_async(async move { stream::iter(input) @@ -527,50 +528,49 @@ impl Workspace for WorkspaceServer { filter, }); + let path = params.path.as_path().display().to_string(); + + let schema_cache = self + .get_current_connection() + .and_then(|pool| self.schema_cache.load(pool.clone()).ok()); + + let mut analysable_stmts = vec![]; + for (stmt_root, diagnostic) in doc.iter(AnalyserDiagnosticsMapper) { + if let Some(node) = stmt_root { + analysable_stmts.push(node); + } + if let Some(diag) = diagnostic { + diagnostics.push(SDiagnostic::new( + diag.with_file_path(path.clone()) + .with_severity(Severity::Error), + )); + } + } + diagnostics.extend( - doc.iter(SyncDiagnosticsMapper) - .flat_map(|(range, ast, diag)| { - let mut errors: Vec = vec![]; - - if let Some(diag) = diag { - errors.push(diag.into()); - } - - if let Some(ast) = ast { - errors.extend( - analyser - .run(AnalyserContext { root: &ast }) - .into_iter() - .map(Error::from) - .collect::>(), - ); - } - - errors - .into_iter() - .map(|d| { - let severity = d - .category() - .filter(|category| category.name().starts_with("lint/")) - .map_or_else( - || d.severity(), - |category| { - settings - .get_severity_from_rule_code(category) - .unwrap_or(Severity::Warning) - }, - ); - - // adjust the span of the diagnostics to the statement (if it has one) - let span = d.location().span.map(|s| s + range.start()); - - SDiagnostic::new( - d.with_file_path(params.path.as_path().display().to_string()) - .with_file_span(span.unwrap_or(range)) - .with_severity(severity), - ) + analyser + .run(AnalyserParams { + stmts: analysable_stmts, + schema_cache: schema_cache.as_deref(), + }) + .into_iter() + .map(Error::from) + .map(|d| { + let severity = d + .category() + .map(|category| { + settings + .get_severity_from_rule_code(category) + .unwrap_or(Severity::Warning) }) - .collect::>() + .unwrap(); + + let span = d.location().span; + SDiagnostic::new( + d.with_file_path(path.clone()) + .with_file_span(span) + .with_severity(severity), + ) }), ); @@ -655,3 +655,7 @@ impl Workspace for WorkspaceServer { fn is_dir(path: &Path) -> bool { path.is_dir() || (path.is_symlink() && fs::read_link(path).is_ok_and(|path| path.is_dir())) } + +#[cfg(test)] +#[path = "server.tests.rs"] +mod tests; diff --git a/crates/pgt_workspace/src/workspace/server.tests.rs b/crates/pgt_workspace/src/workspace/server.tests.rs new file mode 100644 index 00000000..94b02cd5 --- /dev/null +++ b/crates/pgt_workspace/src/workspace/server.tests.rs @@ -0,0 +1,99 @@ +use biome_deserialize::Merge; +use pgt_analyse::RuleCategories; +use pgt_configuration::{PartialConfiguration, 
database::PartialDatabaseConfiguration}; +use pgt_diagnostics::Diagnostic; +use pgt_fs::PgTPath; +use pgt_text_size::TextRange; +use sqlx::PgPool; + +use crate::{ + Workspace, WorkspaceError, + workspace::{ + OpenFileParams, RegisterProjectFolderParams, UpdateSettingsParams, server::WorkspaceServer, + }, +}; + +fn get_test_workspace( + partial_config: Option, +) -> Result { + let workspace = WorkspaceServer::new(); + + workspace.register_project_folder(RegisterProjectFolderParams { + path: None, + set_as_current_workspace: true, + })?; + + workspace.update_settings(UpdateSettingsParams { + configuration: partial_config.unwrap_or(PartialConfiguration::init()), + gitignore_matches: vec![], + vcs_base_path: None, + workspace_directory: None, + })?; + + Ok(workspace) +} + +#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] +async fn test_diagnostics(test_db: PgPool) { + let mut conf = PartialConfiguration::init(); + conf.merge_with(PartialConfiguration { + db: Some(PartialDatabaseConfiguration { + database: Some( + test_db + .connect_options() + .get_database() + .unwrap() + .to_string(), + ), + ..Default::default() + }), + ..Default::default() + }); + + let workspace = get_test_workspace(Some(conf)).expect("Unable to create test workspace"); + + let path = PgTPath::new("test.sql"); + let content = r#" + create table users ( + id serial primary key, + name text not null + ); + + drop table non_existing_table; + + select 1; + "#; + + workspace + .open_file(OpenFileParams { + path: path.clone(), + content: content.into(), + version: 1, + }) + .expect("Unable to open test file"); + + let diagnostics = workspace + .pull_diagnostics(crate::workspace::PullDiagnosticsParams { + path: path.clone(), + categories: RuleCategories::all(), + max_diagnostics: 100, + only: vec![], + skip: vec![], + }) + .expect("Unable to pull diagnostics") + .diagnostics; + + assert_eq!(diagnostics.len(), 1, "Expected one diagnostic"); + + let diagnostic = &diagnostics[0]; + + assert_eq!( + diagnostic.category().map(|c| c.name()), + Some("lint/safety/banDropTable") + ); + + assert_eq!( + diagnostic.location().span, + Some(TextRange::new(106.into(), 136.into())) + ); +} diff --git a/crates/pgt_workspace/src/workspace/server/document.rs b/crates/pgt_workspace/src/workspace/server/document.rs index 9d3700df..b5798370 100644 --- a/crates/pgt_workspace/src/workspace/server/document.rs +++ b/crates/pgt_workspace/src/workspace/server/document.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use pgt_analyser::AnalysableStatement; use pgt_diagnostics::{Diagnostic, DiagnosticExt, serde::Diagnostic as SDiagnostic}; use pgt_query_ext::diagnostics::SyntaxDiagnostic; use pgt_suppressions::Suppressions; @@ -208,8 +209,8 @@ impl<'a> StatementMapper<'a> for ExecuteStatementMapper { } } -pub struct AsyncDiagnosticsMapper; -impl<'a> StatementMapper<'a> for AsyncDiagnosticsMapper { +pub struct TypecheckDiagnosticsMapper; +impl<'a> StatementMapper<'a> for TypecheckDiagnosticsMapper { type Output = ( StatementId, TextRange, @@ -240,22 +241,20 @@ impl<'a> StatementMapper<'a> for AsyncDiagnosticsMapper { } } -pub struct SyncDiagnosticsMapper; -impl<'a> StatementMapper<'a> for SyncDiagnosticsMapper { - type Output = ( - TextRange, - Option, - Option, - ); +pub struct AnalyserDiagnosticsMapper; +impl<'a> StatementMapper<'a> for AnalyserDiagnosticsMapper { + type Output = (Option, Option); fn map(&self, parser: &'a Document, id: StatementId, range: TextRange) -> Self::Output { - let ast_result = parser.ast_db.get_or_cache_ast(&id); + let maybe_node = 
parser.ast_db.get_or_cache_ast(&id); - let (ast_option, diagnostics) = match &*ast_result { + let (ast_option, diagnostics) = match &*maybe_node { Ok(node) => { let plpgsql_result = parser.ast_db.get_or_cache_plpgsql_parse(&id); if let Some(Err(diag)) = plpgsql_result { - (Some(node.clone()), Some(diag.clone())) + // offset the pgpsql diagnostic from the parent statement start + let span = diag.location().span.map(|sp| sp + range.start()); + (Some(node.clone()), Some(diag.span(span.unwrap_or(range)))) } else { (Some(node.clone()), None) } @@ -263,7 +262,10 @@ impl<'a> StatementMapper<'a> for SyncDiagnosticsMapper { Err(diag) => (None, Some(diag.clone())), }; - (range, ast_option, diagnostics) + ( + ast_option.map(|root| AnalysableStatement { range, root }), + diagnostics, + ) } } @@ -401,10 +403,10 @@ END; $$;"; let d = Document::new(input.to_string(), 1); - let results = d.iter(SyncDiagnosticsMapper).collect::>(); + let results = d.iter(AnalyserDiagnosticsMapper).collect::>(); assert_eq!(results.len(), 1); - let (_range, ast, diagnostic) = &results[0]; + let (ast, diagnostic) = &results[0]; // Should have parsed the CREATE FUNCTION statement assert!(ast.is_some()); @@ -432,10 +434,10 @@ END; $$;"; let d = Document::new(input.to_string(), 1); - let results = d.iter(SyncDiagnosticsMapper).collect::>(); + let results = d.iter(AnalyserDiagnosticsMapper).collect::>(); assert_eq!(results.len(), 1); - let (_range, ast, diagnostic) = &results[0]; + let (ast, diagnostic) = &results[0]; // Should have parsed the CREATE FUNCTION statement assert!(ast.is_some()); @@ -458,15 +460,15 @@ $$;"; let d = Document::new(input.to_string(), 1); - let results1 = d.iter(SyncDiagnosticsMapper).collect::>(); + let results1 = d.iter(AnalyserDiagnosticsMapper).collect::>(); assert_eq!(results1.len(), 1); - assert!(results1[0].1.is_some()); - assert!(results1[0].2.is_none()); + assert!(results1[0].0.is_some()); + assert!(results1[0].1.is_none()); - let results2 = d.iter(SyncDiagnosticsMapper).collect::>(); + let results2 = d.iter(AnalyserDiagnosticsMapper).collect::>(); assert_eq!(results2.len(), 1); - assert!(results2[0].1.is_some()); - assert!(results2[0].2.is_none()); + assert!(results2[0].0.is_some()); + assert!(results2[0].1.is_none()); } #[test] @@ -513,7 +515,7 @@ END; $$ LANGUAGE plpgsql;"; let d = Document::new(input.to_string(), 1); - let results = d.iter(AsyncDiagnosticsMapper).collect::>(); + let results = d.iter(TypecheckDiagnosticsMapper).collect::>(); assert_eq!(results.len(), 1); let (_id, _range, ast, cst, sql_fn_sig) = &results[0]; @@ -532,7 +534,7 @@ $$ LANGUAGE plpgsql;"; "CREATE FUNCTION add(a int, b int) RETURNS int AS 'SELECT $1 + $2;' LANGUAGE sql;"; let d = Document::new(input.to_string(), 1); - let results = d.iter(AsyncDiagnosticsMapper).collect::>(); + let results = d.iter(TypecheckDiagnosticsMapper).collect::>(); assert_eq!(results.len(), 2); // Check the function body diff --git a/docs/codegen/src/rules_docs.rs b/docs/codegen/src/rules_docs.rs index 68db53db..1d4b86a9 100644 --- a/docs/codegen/src/rules_docs.rs +++ b/docs/codegen/src/rules_docs.rs @@ -1,7 +1,7 @@ use anyhow::{Result, bail}; use biome_string_case::Case; use pgt_analyse::{AnalyserOptions, AnalysisFilter, RuleFilter, RuleMetadata}; -use pgt_analyser::{Analyser, AnalyserConfig}; +use pgt_analyser::{AnalysableStatement, Analyser, AnalyserConfig}; use pgt_console::StdDisplay; use pgt_diagnostics::{Diagnostic, DiagnosticExt, PrintDiagnostic}; use pgt_query_ext::diagnostics::SyntaxDiagnostic; @@ -443,10 +443,16 @@ fn 
print_diagnostics( // split and parse each statement let stmts = pgt_statement_splitter::split(code); - for stmt in stmts.ranges { - match pgt_query_ext::parse(&code[stmt]) { + for stmt_range in stmts.ranges { + match pgt_query_ext::parse(&code[stmt_range]) { Ok(ast) => { - for rule_diag in analyser.run(pgt_analyser::AnalyserContext { root: &ast }) { + for rule_diag in analyser.run(pgt_analyser::AnalyserParams { + schema_cache: None, + stmts: vec![AnalysableStatement { + range: stmt_range, + root: ast, + }], + }) { let diag = pgt_diagnostics::serde::Diagnostic::new(rule_diag); let category = diag.category().expect("linter diagnostic has no code"); diff --git a/docs/rules/ban-drop-column.md b/docs/rules/ban-drop-column.md index 0c46d40a..28b3a4b5 100644 --- a/docs/rules/ban-drop-column.md +++ b/docs/rules/ban-drop-column.md @@ -25,10 +25,14 @@ alter table test drop column id; ``` ```sh -code-block.sql lint/safety/banDropColumn ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +code-block.sql:1:1 lint/safety/banDropColumn ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ ! Dropping a column may break existing clients. + > 1 │ alter table test drop column id; + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + i You can leave the column as nullable or delete the column once queries no longer select or modify the column. diff --git a/docs/rules/ban-drop-not-null.md b/docs/rules/ban-drop-not-null.md index b860c45c..56ec33c7 100644 --- a/docs/rules/ban-drop-not-null.md +++ b/docs/rules/ban-drop-not-null.md @@ -25,10 +25,14 @@ alter table users alter column email drop not null; ``` ```sh -code-block.sql lint/safety/banDropNotNull ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +code-block.sql:1:1 lint/safety/banDropNotNull ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ ! Dropping a NOT NULL constraint may break existing clients. + > 1 │ alter table users alter column email drop not null; + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + i Consider using a marker value that represents NULL. Alternatively, create a new table allowing NULL values, copy the data from the old table, and create a view that filters NULL values. diff --git a/docs/rules/ban-drop-table.md b/docs/rules/ban-drop-table.md index 4b81755f..8aeb6e2c 100644 --- a/docs/rules/ban-drop-table.md +++ b/docs/rules/ban-drop-table.md @@ -26,10 +26,14 @@ drop table some_table; ``` ```sh -code-block.sql lint/safety/banDropTable ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +code-block.sql:1:1 lint/safety/banDropTable ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ ! Dropping a table may break existing clients. + > 1 │ drop table some_table; + │ ^^^^^^^^^^^^^^^^^^^^^^ + 2 │ + i Update your application code to no longer read or write the table, and only then delete the table. Be sure to create a backup. diff --git a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts index 72b77bc1..971f07ec 100644 --- a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts +++ b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts @@ -435,17 +435,10 @@ export interface OpenFileParams { version: number; } export interface ChangeFileParams { - changes: ChangeParams[]; + content: string; path: PgTPath; version: number; } -export interface ChangeParams { - /** - * The range of the file that changed. If `None`, the whole file changed. 
- */ - range?: TextRange; - text: string; -} export interface CloseFileParams { path: PgTPath; } diff --git a/xtask/codegen/src/generate_new_analyser_rule.rs b/xtask/codegen/src/generate_new_analyser_rule.rs index fc225712..343b0673 100644 --- a/xtask/codegen/src/generate_new_analyser_rule.rs +++ b/xtask/codegen/src/generate_new_analyser_rule.rs @@ -41,10 +41,11 @@ fn generate_rule_template( format!( r#"use pgt_analyse::{{ - context::RuleContext, {macro_name}, Rule, RuleDiagnostic + AnalysedFileContext, context::RuleContext, {macro_name}, Rule, RuleDiagnostic, }}; use pgt_console::markup; use pgt_diagnostics::Severity; +use pgt_schema_cache::SchemaCache; {macro_name}! {{ /// Succinct description of the rule. @@ -78,7 +79,11 @@ use pgt_diagnostics::Severity; impl Rule for {rule_name_upper_camel} {{ type Options = (); - fn run(ctx: &RuleContext) -> Vec {{ + fn run( + ctx: &RuleContext + _file_context: &AnalysedFileContext, + _schema_cache: Option<&SchemaCache>, + ) -> Vec {{ Vec::new() }} }} diff --git a/xtask/rules_check/src/lib.rs b/xtask/rules_check/src/lib.rs index da4b4c73..0c57d06f 100644 --- a/xtask/rules_check/src/lib.rs +++ b/xtask/rules_check/src/lib.rs @@ -7,7 +7,7 @@ use pgt_analyse::{ AnalyserOptions, AnalysisFilter, GroupCategory, RegistryVisitor, Rule, RuleCategory, RuleFilter, RuleGroup, RuleMetadata, }; -use pgt_analyser::{Analyser, AnalyserConfig}; +use pgt_analyser::{AnalysableStatement, Analyser, AnalyserConfig}; use pgt_console::{markup, Console}; use pgt_diagnostics::{Diagnostic, DiagnosticExt, PrintDiagnostic}; use pgt_query_ext::diagnostics::SyntaxDiagnostic; @@ -127,10 +127,16 @@ fn assert_lint( }); let result = pgt_statement_splitter::split(code); - for stmt in result.ranges { - match pgt_query_ext::parse(&code[stmt]) { + for stmt_range in result.ranges { + match pgt_query_ext::parse(&code[stmt_range]) { Ok(ast) => { - for rule_diag in analyser.run(pgt_analyser::AnalyserContext { root: &ast }) { + for rule_diag in analyser.run(pgt_analyser::AnalyserParams { + schema_cache: None, + stmts: vec![AnalysableStatement { + range: stmt_range, + root: ast, + }], + }) { let diag = pgt_diagnostics::serde::Diagnostic::new(rule_diag); let category = diag.category().expect("linter diagnostic has no code"); From a34f6a3d37bd2d471a9f991bbed19b992153e698 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Wed, 16 Jul 2025 21:50:45 +0200 Subject: [PATCH 102/114] fix: revoke split (#459) --- crates/pgt_statement_splitter/src/lib.rs | 23 +++++++++++++++++++ .../src/splitter/common.rs | 4 ++++ 2 files changed, 27 insertions(+) diff --git a/crates/pgt_statement_splitter/src/lib.rs b/crates/pgt_statement_splitter/src/lib.rs index de028336..19b2f230 100644 --- a/crates/pgt_statement_splitter/src/lib.rs +++ b/crates/pgt_statement_splitter/src/lib.rs @@ -149,6 +149,29 @@ mod tests { } } + #[test] + fn revoke() { + Tester::from("revoke delete on table \"public\".\"voice_call\" from \"anon\";") + .expect_statements(vec![ + "revoke delete on table \"public\".\"voice_call\" from \"anon\";", + ]); + + Tester::from("revoke select on table \"public\".\"voice_call\" from \"anon\";") + .expect_statements(vec![ + "revoke select on table \"public\".\"voice_call\" from \"anon\";", + ]); + + Tester::from("revoke update on table \"public\".\"voice_call\" from \"anon\";") + .expect_statements(vec![ + "revoke update on table \"public\".\"voice_call\" from \"anon\";", + ]); + + Tester::from("revoke insert on table \"public\".\"voice_call\" from \"anon\";") + .expect_statements(vec![ + 
"revoke insert on table \"public\".\"voice_call\" from \"anon\";", + ]); + } + #[test] fn double_newlines() { Tester::from("select 1 from contact\n\nselect 1\n\nselect 3").expect_statements(vec![ diff --git a/crates/pgt_statement_splitter/src/splitter/common.rs b/crates/pgt_statement_splitter/src/splitter/common.rs index 4f2cd069..9c3dea48 100644 --- a/crates/pgt_statement_splitter/src/splitter/common.rs +++ b/crates/pgt_statement_splitter/src/splitter/common.rs @@ -183,6 +183,8 @@ pub(crate) fn unknown(p: &mut Splitter, exclude: &[SyntaxKind]) { SyntaxKind::EXCEPT_KW, // for grant SyntaxKind::GRANT_KW, + // for revoke + SyntaxKind::REVOKE_KW, SyntaxKind::COMMA, ] .iter() @@ -215,6 +217,8 @@ pub(crate) fn unknown(p: &mut Splitter, exclude: &[SyntaxKind]) { SyntaxKind::INSTEAD_KW, // for grant SyntaxKind::GRANT_KW, + // for revoke + SyntaxKind::REVOKE_KW, SyntaxKind::COMMA, // Do update in INSERT stmt SyntaxKind::DO_KW, From 544b6b08feb17657bf8eeff00d71457658b7826f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Wed, 16 Jul 2025 22:03:20 +0200 Subject: [PATCH 103/114] feat: add support for named params (#458) --- crates/pgt_lexer/src/lexer.rs | 12 ++++ crates/pgt_lexer/src/lib.rs | 30 ++++++++ crates/pgt_lexer_codegen/src/syntax_kind.rs | 2 +- crates/pgt_tokenizer/src/lib.rs | 69 ++++++++++++++++++- .../pgt_tokenizer__tests__named_param_at.snap | 23 +++++++ ...__tests__named_param_colon_identifier.snap | 23 +++++++ ...kenizer__tests__named_param_colon_raw.snap | 23 +++++++ ...izer__tests__named_param_colon_string.snap | 23 +++++++ crates/pgt_tokenizer/src/token.rs | 30 ++++++++ 9 files changed, 231 insertions(+), 4 deletions(-) create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_at.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_colon_identifier.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_colon_raw.snap create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_colon_string.snap diff --git a/crates/pgt_lexer/src/lexer.rs b/crates/pgt_lexer/src/lexer.rs index db4b4ae2..ad6db297 100644 --- a/crates/pgt_lexer/src/lexer.rs +++ b/crates/pgt_lexer/src/lexer.rs @@ -132,6 +132,18 @@ impl<'a> Lexer<'a> { pgt_tokenizer::TokenKind::Eof => SyntaxKind::EOF, pgt_tokenizer::TokenKind::Backtick => SyntaxKind::BACKTICK, pgt_tokenizer::TokenKind::PositionalParam => SyntaxKind::POSITIONAL_PARAM, + pgt_tokenizer::TokenKind::NamedParam { kind } => { + match kind { + pgt_tokenizer::NamedParamKind::ColonIdentifier { terminated: false } => { + err = "Missing trailing \" to terminate the named parameter"; + } + pgt_tokenizer::NamedParamKind::ColonString { terminated: false } => { + err = "Missing trailing ' to terminate the named parameter"; + } + _ => {} + }; + SyntaxKind::POSITIONAL_PARAM + } pgt_tokenizer::TokenKind::QuotedIdent { terminated } => { if !terminated { err = "Missing trailing \" to terminate the quoted identifier" diff --git a/crates/pgt_lexer/src/lib.rs b/crates/pgt_lexer/src/lib.rs index 2d8779a7..45fa6c6b 100644 --- a/crates/pgt_lexer/src/lib.rs +++ b/crates/pgt_lexer/src/lib.rs @@ -50,6 +50,36 @@ mod tests { assert!(!errors[0].message.to_string().is_empty()); } + #[test] + fn test_lexing_string_params_with_errors() { + let input = "SELECT :'unterminated string"; + let lexed = lex(input); + + // Should have tokens + assert!(!lexed.is_empty()); + + // Should have an error for unterminated string + let errors = 
lexed.errors(); + assert!(!errors.is_empty()); + // Check the error message exists + assert!(!errors[0].message.to_string().is_empty()); + } + + #[test] + fn test_lexing_identifier_params_with_errors() { + let input = "SELECT :\"unterminated string"; + let lexed = lex(input); + + // Should have tokens + assert!(!lexed.is_empty()); + + // Should have an error for unterminated string + let errors = lexed.errors(); + assert!(!errors.is_empty()); + // Check the error message exists + assert!(!errors[0].message.to_string().is_empty()); + } + #[test] fn test_token_ranges() { let input = "SELECT id"; diff --git a/crates/pgt_lexer_codegen/src/syntax_kind.rs b/crates/pgt_lexer_codegen/src/syntax_kind.rs index 07b7a419..c671e451 100644 --- a/crates/pgt_lexer_codegen/src/syntax_kind.rs +++ b/crates/pgt_lexer_codegen/src/syntax_kind.rs @@ -43,7 +43,7 @@ const PUNCT: &[(&str, &str)] = &[ ("`", "BACKTICK"), ]; -const EXTRA: &[&str] = &["POSITIONAL_PARAM", "ERROR", "COMMENT", "EOF"]; +const EXTRA: &[&str] = &["POSITIONAL_PARAM", "NAMED_PARAM", "ERROR", "COMMENT", "EOF"]; const LITERALS: &[&str] = &[ "BIT_STRING", diff --git a/crates/pgt_tokenizer/src/lib.rs b/crates/pgt_tokenizer/src/lib.rs index 787adcaa..80b66363 100644 --- a/crates/pgt_tokenizer/src/lib.rs +++ b/crates/pgt_tokenizer/src/lib.rs @@ -1,7 +1,7 @@ mod cursor; mod token; use cursor::{Cursor, EOF_CHAR}; -pub use token::{Base, LiteralKind, Token, TokenKind}; +pub use token::{Base, LiteralKind, NamedParamKind, Token, TokenKind}; // via: https://github.com/postgres/postgres/blob/db0c96cc18aec417101e37e59fcc53d4bf647915/src/backend/parser/scan.l#L346 // ident_start [A-Za-z\200-\377_] @@ -132,6 +132,46 @@ impl Cursor<'_> { } _ => TokenKind::Dot, }, + '@' => { + if is_ident_start(self.first()) { + // Named parameter with @ prefix. + self.eat_while(is_ident_cont); + TokenKind::NamedParam { + kind: NamedParamKind::AtPrefix, + } + } else { + TokenKind::At + } + } + ':' => { + // Named parameters in psql with different substitution styles. + // + // https://www.postgresql.org/docs/current/app-psql.html#APP-PSQL-INTERPOLATION + match self.first() { + '\'' => { + // Named parameter with colon prefix and single quotes. + self.bump(); + let terminated = self.single_quoted_string(); + let kind = NamedParamKind::ColonString { terminated }; + TokenKind::NamedParam { kind } + } + '"' => { + // Named parameter with colon prefix and double quotes. + self.bump(); + let terminated = self.double_quoted_string(); + let kind = NamedParamKind::ColonIdentifier { terminated }; + TokenKind::NamedParam { kind } + } + c if is_ident_start(c) => { + // Named parameter with colon prefix. + self.eat_while(is_ident_cont); + TokenKind::NamedParam { + kind: NamedParamKind::ColonRaw, + } + } + _ => TokenKind::Colon, + } + } // One-symbol tokens. ';' => TokenKind::Semi, '\\' => TokenKind::Backslash, @@ -140,11 +180,9 @@ impl Cursor<'_> { ')' => TokenKind::CloseParen, '[' => TokenKind::OpenBracket, ']' => TokenKind::CloseBracket, - '@' => TokenKind::At, '#' => TokenKind::Pound, '~' => TokenKind::Tilde, '?' 
=> TokenKind::Question, - ':' => TokenKind::Colon, '$' => { // Dollar quoted strings if is_ident_start(self.first()) || self.first() == '$' { @@ -613,6 +651,31 @@ mod tests { } tokens } + + #[test] + fn named_param_at() { + let result = lex("select 1 from c where id = @id;"); + assert_debug_snapshot!(result); + } + + #[test] + fn named_param_colon_raw() { + let result = lex("select 1 from c where id = :id;"); + assert_debug_snapshot!(result); + } + + #[test] + fn named_param_colon_string() { + let result = lex("select 1 from c where id = :'id';"); + assert_debug_snapshot!(result); + } + + #[test] + fn named_param_colon_identifier() { + let result = lex("select 1 from c where id = :\"id\";"); + assert_debug_snapshot!(result); + } + #[test] fn lex_statement() { let result = lex("select 1;"); diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_at.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_at.snap new file mode 100644 index 00000000..30bbe87f --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_at.snap @@ -0,0 +1,23 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: result +snapshot_kind: text +--- +[ + "select" @ Ident, + " " @ Space, + "1" @ Literal { kind: Int { base: Decimal, empty_int: false } }, + " " @ Space, + "from" @ Ident, + " " @ Space, + "c" @ Ident, + " " @ Space, + "where" @ Ident, + " " @ Space, + "id" @ Ident, + " " @ Space, + "=" @ Eq, + " " @ Space, + "@id" @ NamedParam { kind: AtPrefix }, + ";" @ Semi, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_colon_identifier.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_colon_identifier.snap new file mode 100644 index 00000000..6986ab0e --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_colon_identifier.snap @@ -0,0 +1,23 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: result +snapshot_kind: text +--- +[ + "select" @ Ident, + " " @ Space, + "1" @ Literal { kind: Int { base: Decimal, empty_int: false } }, + " " @ Space, + "from" @ Ident, + " " @ Space, + "c" @ Ident, + " " @ Space, + "where" @ Ident, + " " @ Space, + "id" @ Ident, + " " @ Space, + "=" @ Eq, + " " @ Space, + ":\"id\"" @ NamedParam { kind: ColonIdentifier { terminated: true } }, + ";" @ Semi, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_colon_raw.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_colon_raw.snap new file mode 100644 index 00000000..f6db199d --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_colon_raw.snap @@ -0,0 +1,23 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: result +snapshot_kind: text +--- +[ + "select" @ Ident, + " " @ Space, + "1" @ Literal { kind: Int { base: Decimal, empty_int: false } }, + " " @ Space, + "from" @ Ident, + " " @ Space, + "c" @ Ident, + " " @ Space, + "where" @ Ident, + " " @ Space, + "id" @ Ident, + " " @ Space, + "=" @ Eq, + " " @ Space, + ":id" @ NamedParam { kind: ColonRaw }, + ";" @ Semi, +] diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_colon_string.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_colon_string.snap new file mode 100644 index 00000000..d9150083 --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_colon_string.snap @@ -0,0 +1,23 @@ +--- +source: 
crates/pgt_tokenizer/src/lib.rs +expression: result +snapshot_kind: text +--- +[ + "select" @ Ident, + " " @ Space, + "1" @ Literal { kind: Int { base: Decimal, empty_int: false } }, + " " @ Space, + "from" @ Ident, + " " @ Space, + "c" @ Ident, + " " @ Space, + "where" @ Ident, + " " @ Space, + "id" @ Ident, + " " @ Space, + "=" @ Eq, + " " @ Space, + ":'id'" @ NamedParam { kind: ColonString { terminated: true } }, + ";" @ Semi, +] diff --git a/crates/pgt_tokenizer/src/token.rs b/crates/pgt_tokenizer/src/token.rs index 50a7d12a..e3dbaee2 100644 --- a/crates/pgt_tokenizer/src/token.rs +++ b/crates/pgt_tokenizer/src/token.rs @@ -94,6 +94,12 @@ pub enum TokenKind { /// /// see: PositionalParam, + /// Named Parameter, e.g., `@name` + /// + /// This is used in some ORMs and query builders, like sqlc. + NamedParam { + kind: NamedParamKind, + }, /// Quoted Identifier, e.g., `"update"` in `update "my_table" set "a" = 5;` /// /// These are case-sensitive, unlike [`TokenKind::Ident`] @@ -104,6 +110,30 @@ pub enum TokenKind { }, } +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum NamedParamKind { + /// e.g. `@name` + /// + /// Used in: + /// - sqlc: https://docs.sqlc.dev/en/latest/howto/named_parameters.html + AtPrefix, + + /// e.g. `:name` (raw substitution) + /// + /// Used in: psql + ColonRaw, + + /// e.g. `:'name'` (quoted string substitution) + /// + /// Used in: psql + ColonString { terminated: bool }, + + /// e.g. `:"name"` (quoted identifier substitution) + /// + /// Used in: psql + ColonIdentifier { terminated: bool }, +} + /// Parsed token. /// It doesn't contain information about data that has been parsed, /// only the type of the token and its size. From fdfbd2c57b2ebddbe53a4854ac9328de8cf71945 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Thu, 17 Jul 2025 13:35:02 +0200 Subject: [PATCH 104/114] fix: deadlock (#460) after using it for a day, i saw two deadlocks. not exactly sure where they occur, but this is the only place i see potential issues. this must be coming from the dashmap replacement... Mutex requires us to be more careful. 
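The change below is the same double-checked locking shape in both files: look up under a briefly held lock, release it while doing the expensive work, then re-check before inserting so a concurrent writer's result wins. A minimal sketch of that shape, with a plain HashMap standing in for the LRU cache and `compute` standing in for the parse step (both names are illustrative, not from this patch):

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

struct Cache {
    db: Mutex<HashMap<String, Arc<String>>>,
}

impl Cache {
    fn get_or_compute(&self, key: &str, compute: impl FnOnce() -> String) -> Arc<String> {
        // First check: take the lock briefly and return early on a hit.
        {
            let db = self.db.lock().unwrap();
            if let Some(existing) = db.get(key) {
                return existing.clone();
            }
        } // the lock is released here, before the expensive work

        // The expensive work runs without any lock held.
        let value = Arc::new(compute());

        // Second check: another thread may have inserted while we computed.
        let mut db = self.db.lock().unwrap();
        if let Some(existing) = db.get(key) {
            return existing.clone();
        }
        db.insert(key.to_owned(), value.clone());
        value
    }
}

fn main() {
    let cache = Cache { db: Mutex::new(HashMap::new()) };
    let tree = cache.get_or_compute("select 1;", || "parsed tree".to_owned());
    assert_eq!(tree.as_str(), "parsed tree");
}
```

The property that matters is that no thread ever holds the cache lock across the slow path, so two locks are never held at once; the worst case is duplicated work, which the second check then discards.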
--- .../workspace/server/connection_manager.rs | 50 ++++++++----------- .../src/workspace/server/tree_sitter.rs | 30 +++++------ 2 files changed, 36 insertions(+), 44 deletions(-) diff --git a/crates/pgt_workspace/src/workspace/server/connection_manager.rs b/crates/pgt_workspace/src/workspace/server/connection_manager.rs index 8955b378..145b6fa0 100644 --- a/crates/pgt_workspace/src/workspace/server/connection_manager.rs +++ b/crates/pgt_workspace/src/workspace/server/connection_manager.rs @@ -34,23 +34,19 @@ impl ConnectionManager { pub(crate) fn get_pool(&self, settings: &DatabaseSettings) -> Option { let key = ConnectionKey::from(settings); - // Cleanup idle connections first - self.cleanup_idle_pools(&key); - if !settings.enable_connection { tracing::info!("Database connection disabled."); return None; } - // Try read lock first for cache hit - if let Ok(pools) = self.pools.read() { - if let Some(cached_pool) = pools.get(&key) { - // Can't update last_accessed with read lock, but that's okay for occasional misses - return Some(cached_pool.pool.clone()); + { + if let Ok(pools) = self.pools.read() { + if let Some(cached_pool) = pools.get(&key) { + return Some(cached_pool.pool.clone()); + } } } - // Cache miss or need to update timestamp - use write lock let mut pools = self.pools.write().unwrap(); // Double-check after acquiring write lock @@ -59,6 +55,21 @@ impl ConnectionManager { return Some(cached_pool.pool.clone()); } + // Clean up idle connections before creating new ones to avoid unbounded growth + let now = Instant::now(); + pools.retain(|k, cached_pool| { + let idle_duration = now.duration_since(cached_pool.last_accessed); + if idle_duration > cached_pool.idle_timeout && k != &key { + tracing::debug!( + "Removing idle database connection (idle for {:?})", + idle_duration + ); + false + } else { + true + } + }); + // Create a new pool let config = PgConnectOptions::new() .host(&settings.host) @@ -85,25 +96,4 @@ impl ConnectionManager { Some(pool) } - - /// Remove pools that haven't been accessed for longer than the idle timeout - fn cleanup_idle_pools(&self, ignore_key: &ConnectionKey) { - let now = Instant::now(); - - let mut pools = self.pools.write().unwrap(); - - // Use retain to keep only non-idle connections - pools.retain(|key, cached_pool| { - let idle_duration = now.duration_since(cached_pool.last_accessed); - if idle_duration > cached_pool.idle_timeout && key != ignore_key { - tracing::debug!( - "Removing idle database connection (idle for {:?})", - idle_duration - ); - false - } else { - true - } - }); - } } diff --git a/crates/pgt_workspace/src/workspace/server/tree_sitter.rs b/crates/pgt_workspace/src/workspace/server/tree_sitter.rs index b8f62b63..71411d27 100644 --- a/crates/pgt_workspace/src/workspace/server/tree_sitter.rs +++ b/crates/pgt_workspace/src/workspace/server/tree_sitter.rs @@ -28,27 +28,29 @@ impl TreeSitterStore { } pub fn get_or_cache_tree(&self, statement: &StatementId) -> Arc { - let mut cache = self.db.lock().expect("Failed to lock cache"); - - if let Some(existing) = cache.get(statement) { - return existing.clone(); + // First check cache + { + let mut cache = self.db.lock().unwrap(); + if let Some(existing) = cache.get(statement) { + return existing.clone(); + } } - // Cache miss - drop cache lock, parse, then re-acquire to insert - drop(cache); - - let mut parser = self.parser.lock().expect("Failed to lock parser"); + // Cache miss - parse outside of cache lock to avoid deadlock + let mut parser = self.parser.lock().unwrap(); let tree = 
Arc::new(parser.parse(statement.content(), None).unwrap()); drop(parser); - let mut cache = self.db.lock().expect("Failed to lock cache"); - - // Double-check after re-acquiring lock - if let Some(existing) = cache.get(statement) { - return existing.clone(); + // Insert into cache + { + let mut cache = self.db.lock().unwrap(); + // Double-check in case another thread inserted while we were parsing + if let Some(existing) = cache.get(statement) { + return existing.clone(); + } + cache.put(statement.clone(), tree.clone()); } - cache.put(statement.clone(), tree.clone()); tree } } From 36a3c522fc44712752a75ef5a42d01c51064ab95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Fri, 18 Jul 2025 08:11:20 +0200 Subject: [PATCH 105/114] fix: syntax error regression (#461) Syntax errors were not shown anymore due to a missing range. Added a test to catch this regression next time. --- .../src/workspace/server.tests.rs | 55 +++++++++++++++++++ .../src/workspace/server/document.rs | 2 +- 2 files changed, 56 insertions(+), 1 deletion(-) diff --git a/crates/pgt_workspace/src/workspace/server.tests.rs b/crates/pgt_workspace/src/workspace/server.tests.rs index 94b02cd5..c3fbf723 100644 --- a/crates/pgt_workspace/src/workspace/server.tests.rs +++ b/crates/pgt_workspace/src/workspace/server.tests.rs @@ -97,3 +97,58 @@ async fn test_diagnostics(test_db: PgPool) { Some(TextRange::new(106.into(), 136.into())) ); } + +#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] +async fn test_syntax_error(test_db: PgPool) { + let mut conf = PartialConfiguration::init(); + conf.merge_with(PartialConfiguration { + db: Some(PartialDatabaseConfiguration { + database: Some( + test_db + .connect_options() + .get_database() + .unwrap() + .to_string(), + ), + ..Default::default() + }), + ..Default::default() + }); + + let workspace = get_test_workspace(Some(conf)).expect("Unable to create test workspace"); + + let path = PgTPath::new("test.sql"); + let content = r#" + seect 1; + "#; + + workspace + .open_file(OpenFileParams { + path: path.clone(), + content: content.into(), + version: 1, + }) + .expect("Unable to open test file"); + + let diagnostics = workspace + .pull_diagnostics(crate::workspace::PullDiagnosticsParams { + path: path.clone(), + categories: RuleCategories::all(), + max_diagnostics: 100, + only: vec![], + skip: vec![], + }) + .expect("Unable to pull diagnostics") + .diagnostics; + + assert_eq!(diagnostics.len(), 1, "Expected one diagnostic"); + + let diagnostic = &diagnostics[0]; + + assert_eq!(diagnostic.category().map(|c| c.name()), Some("syntax")); + + assert_eq!( + diagnostic.location().span, + Some(TextRange::new(7.into(), 15.into())) + ); +} diff --git a/crates/pgt_workspace/src/workspace/server/document.rs b/crates/pgt_workspace/src/workspace/server/document.rs index b5798370..c9f880ec 100644 --- a/crates/pgt_workspace/src/workspace/server/document.rs +++ b/crates/pgt_workspace/src/workspace/server/document.rs @@ -259,7 +259,7 @@ impl<'a> StatementMapper<'a> for AnalyserDiagnosticsMapper { (Some(node.clone()), None) } } - Err(diag) => (None, Some(diag.clone())), + Err(diag) => (None, Some(diag.clone().span(range))), }; ( From c0567ec08ce42cec8b12c399009718b667684591 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Fri, 18 Jul 2025 23:26:11 +0200 Subject: [PATCH 106/114] fix: splitter crash (#464) Fixes a crash in the splitter when the last char is a closing parenthesis.
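A regression test for this shape of crash only needs an input whose final character is the closing parenthesis. Here is a sketch using the splitter's public API as it appears elsewhere in this series; the fixture string is illustrative, not the exact test from this patch:

```rust
#[test]
fn last_char_is_closing_paren() {
    // The statement ends exactly at `)`, which previously drove the
    // splitter past the end of the input.
    let input = "select (1)";

    let result = pgt_statement_splitter::split(input);

    // Expect a single statement spanning the whole input, and no panic.
    assert_eq!(result.ranges.len(), 1);
    assert_eq!(&input[result.ranges[0]], "select (1)");
}
```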
--- crates/pgt_lsp/src/handlers/text_document.rs | 14 +- crates/pgt_lsp/src/utils.rs | 14 +- crates/pgt_lsp/tests/server.rs | 487 ++++++++++++++++++ crates/pgt_statement_splitter/src/lib.rs | 8 + .../src/splitter/common.rs | 4 +- 5 files changed, 511 insertions(+), 16 deletions(-) diff --git a/crates/pgt_lsp/src/handlers/text_document.rs b/crates/pgt_lsp/src/handlers/text_document.rs index cc2efb4b..1c5a9a11 100644 --- a/crates/pgt_lsp/src/handlers/text_document.rs +++ b/crates/pgt_lsp/src/handlers/text_document.rs @@ -1,12 +1,10 @@ -use crate::{ - diagnostics::LspError, documents::Document, session::Session, utils::apply_document_changes, -}; +use crate::{documents::Document, session::Session, utils::apply_document_changes}; use anyhow::Result; use pgt_workspace::workspace::{ ChangeFileParams, CloseFileParams, GetFileContentParams, OpenFileParams, }; use tower_lsp::lsp_types; -use tracing::error; +use tracing::{error, field}; /// Handler for `textDocument/didOpen` LSP notification #[tracing::instrument(level = "debug", skip(session), err)] @@ -36,12 +34,12 @@ pub(crate) async fn did_open( Ok(()) } -// Handler for `textDocument/didChange` LSP notification -#[tracing::instrument(level = "debug", skip(session), err)] +/// Handler for `textDocument/didChange` LSP notification +#[tracing::instrument(level = "debug", skip_all, fields(url = field::display(¶ms.text_document.uri), version = params.text_document.version), err)] pub(crate) async fn did_change( session: &Session, params: lsp_types::DidChangeTextDocumentParams, -) -> Result<(), LspError> { +) -> Result<()> { let url = params.text_document.uri; let version = params.text_document.version; @@ -56,7 +54,7 @@ pub(crate) async fn did_change( let text = apply_document_changes( session.position_encoding(), old_text, - ¶ms.content_changes, + params.content_changes, ); tracing::trace!("new document: {:?}", text); diff --git a/crates/pgt_lsp/src/utils.rs b/crates/pgt_lsp/src/utils.rs index 92059b66..8361cf08 100644 --- a/crates/pgt_lsp/src/utils.rs +++ b/crates/pgt_lsp/src/utils.rs @@ -1,5 +1,6 @@ +use crate::adapters::from_lsp::text_range; use crate::adapters::line_index::LineIndex; -use crate::adapters::{PositionEncoding, from_lsp, to_lsp}; +use crate::adapters::{PositionEncoding, to_lsp}; use anyhow::{Context, Result, ensure}; use pgt_console::MarkupBuf; use pgt_console::fmt::Termcolor; @@ -10,8 +11,8 @@ use pgt_text_size::{TextRange, TextSize}; use std::any::Any; use std::borrow::Cow; use std::fmt::{Debug, Display}; -use std::io; use std::ops::{Add, Range}; +use std::{io, mem}; use tower_lsp::jsonrpc::Error as LspError; use tower_lsp::lsp_types; use tower_lsp::lsp_types::{self as lsp, CodeDescription, Url}; @@ -183,7 +184,7 @@ pub(crate) fn panic_to_lsp_error(err: Box) -> LspError { pub(crate) fn apply_document_changes( position_encoding: PositionEncoding, current_content: String, - content_changes: &[lsp_types::TextDocumentContentChangeEvent], + mut content_changes: Vec, ) -> String { // Skip to the last full document change, as it invalidates all previous changes anyways. let mut start = content_changes @@ -192,12 +193,12 @@ pub(crate) fn apply_document_changes( .position(|change| change.range.is_none()) .map_or(0, |idx| content_changes.len() - idx - 1); - let mut text: String = match content_changes.get(start) { + let mut text: String = match content_changes.get_mut(start) { // peek at the first content change as an optimization Some(lsp_types::TextDocumentContentChangeEvent { range: None, text, .. 
}) => { - let text = text.clone(); + let text = mem::take(text); start += 1; // The only change is a full document update @@ -225,12 +226,11 @@ pub(crate) fn apply_document_changes( line_index = LineIndex::new(&text); } index_valid = range.start.line; - if let Ok(range) = from_lsp::text_range(&line_index, range, position_encoding) { + if let Ok(range) = text_range(&line_index, range, position_encoding) { text.replace_range(Range::::from(range), &change.text); } } } - text } diff --git a/crates/pgt_lsp/tests/server.rs b/crates/pgt_lsp/tests/server.rs index 176868f5..63953590 100644 --- a/crates/pgt_lsp/tests/server.rs +++ b/crates/pgt_lsp/tests/server.rs @@ -1773,3 +1773,490 @@ $$; Ok(()) } + +#[tokio::test] +async fn test_crash_on_delete_character() -> Result<()> { + let factory = ServerFactory::default(); + let (service, client) = factory.create(None).into_inner(); + let (stream, sink) = client.split(); + let mut server = Server::new(service); + + let (sender, _) = channel(CHANNEL_BUFFER_SIZE); + let reader = tokio::spawn(client_handler(stream, sink, sender)); + + server.initialize().await?; + server.initialized().await?; + + // Open document with initial CREATE INDEX statement - exactly as in log + let initial_content = "\n\n\n\nCREATE INDEX \"idx_analytics_read_ratio\" ON \"public\".\"message\" USING \"btree\" (\"inbox_id\", \"timestamp\") INCLUDE (\"status\") WHERE (\"is_inbound\" = false);\n"; + + server.open_document(initial_content).await?; + + // Add a space after false (position 148 from the log) + server + .change_document( + 3, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 148, + }, + end: Position { + line: 4, + character: 148, + }, + }), + range_length: Some(0), + text: " ".to_string(), + }], + ) + .await?; + + // Follow the exact sequence from the logfile + // Type character by character in exact order + + // Version 4: "a" at 149 + server + .change_document( + 4, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 149, + }, + end: Position { + line: 4, + character: 149, + }, + }), + range_length: Some(0), + text: "a".to_string(), + }], + ) + .await?; + + // Version 5: "n" at 150 + server + .change_document( + 5, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 150, + }, + end: Position { + line: 4, + character: 150, + }, + }), + range_length: Some(0), + text: "n".to_string(), + }], + ) + .await?; + + // Version 6: "d" at 151 + server + .change_document( + 6, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 151, + }, + end: Position { + line: 4, + character: 151, + }, + }), + range_length: Some(0), + text: "d".to_string(), + }], + ) + .await?; + + // Version 7: " " at 152 + server + .change_document( + 7, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 152, + }, + end: Position { + line: 4, + character: 152, + }, + }), + range_length: Some(0), + text: " ".to_string(), + }], + ) + .await?; + + // Version 8: "c" at 153 + server + .change_document( + 8, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 153, + }, + end: Position { + line: 4, + character: 153, + }, + }), + range_length: Some(0), + text: "c".to_string(), + }], + ) + .await?; + + // Version 10: "h" at 154 and "a" at 155 (two changes in one version) + server + .change_document( + 
10, + vec![ + TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 154, + }, + end: Position { + line: 4, + character: 154, + }, + }), + range_length: Some(0), + text: "h".to_string(), + }, + TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 155, + }, + end: Position { + line: 4, + character: 155, + }, + }), + range_length: Some(0), + text: "a".to_string(), + }, + ], + ) + .await?; + + // Version 11: "n" at 156 + server + .change_document( + 11, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 156, + }, + end: Position { + line: 4, + character: 156, + }, + }), + range_length: Some(0), + text: "n".to_string(), + }], + ) + .await?; + + // Version 12: "n" at 157 + server + .change_document( + 12, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 157, + }, + end: Position { + line: 4, + character: 157, + }, + }), + range_length: Some(0), + text: "n".to_string(), + }], + ) + .await?; + + // Version 13: "e" at 158 + server + .change_document( + 13, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 158, + }, + end: Position { + line: 4, + character: 158, + }, + }), + range_length: Some(0), + text: "e".to_string(), + }], + ) + .await?; + + // Version 14: "l" at 159 + server + .change_document( + 14, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 159, + }, + end: Position { + line: 4, + character: 159, + }, + }), + range_length: Some(0), + text: "l".to_string(), + }], + ) + .await?; + + // Version 15: "_" at 160 + server + .change_document( + 15, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 160, + }, + end: Position { + line: 4, + character: 160, + }, + }), + range_length: Some(0), + text: "_".to_string(), + }], + ) + .await?; + + // Version 16: "t" at 161 + server + .change_document( + 16, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 161, + }, + end: Position { + line: 4, + character: 161, + }, + }), + range_length: Some(0), + text: "t".to_string(), + }], + ) + .await?; + + // Version 17: "y" at 162 + server + .change_document( + 17, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 162, + }, + end: Position { + line: 4, + character: 162, + }, + }), + range_length: Some(0), + text: "y".to_string(), + }], + ) + .await?; + + // Version 18: "p" at 163 + server + .change_document( + 18, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 163, + }, + end: Position { + line: 4, + character: 163, + }, + }), + range_length: Some(0), + text: "p".to_string(), + }], + ) + .await?; + + // Version 19: "e" at 164 + server + .change_document( + 19, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 164, + }, + end: Position { + line: 4, + character: 164, + }, + }), + range_length: Some(0), + text: "e".to_string(), + }], + ) + .await?; + + // Version 20: " " at 165 + server + .change_document( + 20, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 165, + }, + end: Position { + line: 4, + character: 165, + }, + }), + range_length: Some(0), + text: " 
".to_string(), + }], + ) + .await?; + + // Now we should have: "WHERE ("is_inbound" = false and channel_type )" + + // Version 21: Paste the problematic text with double single quotes + server + .change_document( + 21, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 166, + }, + end: Position { + line: 4, + character: 166, + }, + }), + range_length: Some(0), + text: "channel_type not in (''postal'', ''sms'')".to_string(), + }], + ) + .await?; + + // Delete "channel_type" + server + .change_document( + 22, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 166, + }, + end: Position { + line: 4, + character: 178, + }, + }), + range_length: Some(12), + text: "".to_string(), + }], + ) + .await?; + + // Delete one more character + server + .change_document( + 23, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 166, + }, + end: Position { + line: 4, + character: 167, + }, + }), + range_length: Some(1), + text: "".to_string(), + }], + ) + .await?; + + // This final delete should trigger the panic + let result = server + .change_document( + 24, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 4, + character: 175, + }, + end: Position { + line: 4, + character: 176, + }, + }), + range_length: Some(1), + text: "".to_string(), + }], + ) + .await; + + assert!(result.is_ok()); + + reader.abort(); + + Ok(()) +} diff --git a/crates/pgt_statement_splitter/src/lib.rs b/crates/pgt_statement_splitter/src/lib.rs index 19b2f230..02ca1b30 100644 --- a/crates/pgt_statement_splitter/src/lib.rs +++ b/crates/pgt_statement_splitter/src/lib.rs @@ -114,6 +114,14 @@ mod tests { ); } + #[test] + fn test_crash_eof() { + Tester::from("CREATE INDEX \"idx_analytics_read_ratio\" ON \"public\".\"message\" USING \"btree\" (\"inbox_id\", \"timestamp\") INCLUDE (\"status\") WHERE (\"is_inbound\" = false and channel_type not in ('postal'', 'sms'));") + .expect_statements(vec![ + "CREATE INDEX \"idx_analytics_read_ratio\" ON \"public\".\"message\" USING \"btree\" (\"inbox_id\", \"timestamp\") INCLUDE (\"status\") WHERE (\"is_inbound\" = false and channel_type not in ('postal'', 'sms'));", + ]); + } + #[test] #[timeout(1000)] fn basic() { diff --git a/crates/pgt_statement_splitter/src/splitter/common.rs b/crates/pgt_statement_splitter/src/splitter/common.rs index 9c3dea48..786c2478 100644 --- a/crates/pgt_statement_splitter/src/splitter/common.rs +++ b/crates/pgt_statement_splitter/src/splitter/common.rs @@ -70,7 +70,9 @@ pub(crate) fn parenthesis(p: &mut Splitter) { depth += 1; } SyntaxKind::R_PAREN | SyntaxKind::EOF => { - p.advance(); + if p.current() == SyntaxKind::R_PAREN { + p.advance(); + } depth -= 1; if depth == 0 { break; From fa18370c590e007f40dac1d7c0b96d377fc24653 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Sat, 19 Jul 2025 09:38:49 +0200 Subject: [PATCH 107/114] fix: `files.ignore` setting should work (#462) --- Cargo.lock | 14 +- Cargo.toml | 3 +- crates/pgt_diagnostics/src/serde.rs | 3 +- crates/pgt_workspace/Cargo.toml | 1 + .../src/features/code_actions.rs | 4 +- .../pgt_workspace/src/features/diagnostics.rs | 2 +- crates/pgt_workspace/src/workspace/server.rs | 15 ++- .../src/workspace/server.tests.rs | 60 ++++++++- crates/pgt_workspace_macros/Cargo.toml | 19 +++ crates/pgt_workspace_macros/src/lib.rs | 123 ++++++++++++++++++ 10 files changed, 231 
insertions(+), 13 deletions(-) create mode 100644 crates/pgt_workspace_macros/Cargo.toml create mode 100644 crates/pgt_workspace_macros/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 16b1de5e..d5c626e1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3108,6 +3108,7 @@ dependencies = [ "pgt_test_utils", "pgt_text_size", "pgt_typecheck", + "pgt_workspace_macros", "rustc-hash 2.1.0", "schemars", "serde", @@ -3122,6 +3123,15 @@ dependencies = [ "tree_sitter_sql", ] +[[package]] +name = "pgt_workspace_macros" +version = "0.0.0" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "pin-project" version = "1.1.7" @@ -3341,9 +3351,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.93" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" dependencies = [ "unicode-ident", ] diff --git a/Cargo.toml b/Cargo.toml index 15c6f02f..4a3454c6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,7 +47,7 @@ convert_case = "0.6.0" prost-reflect = "0.15.3" protox = "0.8.0" sqlx = { version = "0.8.2", features = ["runtime-tokio", "runtime-async-std", "postgres", "json"] } -syn = "1.0.109" +syn = { version = "1.0.109", features = ["full"] } termcolor = "1.4.1" test-log = "0.2.17" tokio = { version = "1.40.0", features = ["full"] } @@ -85,6 +85,7 @@ pgt_tokenizer = { path = "./crates/pgt_tokenizer", version = "0.0.0 pgt_treesitter_queries = { path = "./crates/pgt_treesitter_queries", version = "0.0.0" } pgt_typecheck = { path = "./crates/pgt_typecheck", version = "0.0.0" } pgt_workspace = { path = "./crates/pgt_workspace", version = "0.0.0" } +pgt_workspace_macros = { path = "./crates/pgt_workspace_macros", version = "0.0.0" } pgt_test_macros = { path = "./crates/pgt_test_macros" } pgt_test_utils = { path = "./crates/pgt_test_utils" } diff --git a/crates/pgt_diagnostics/src/serde.rs b/crates/pgt_diagnostics/src/serde.rs index 334bd4e9..57ed3e28 100644 --- a/crates/pgt_diagnostics/src/serde.rs +++ b/crates/pgt_diagnostics/src/serde.rs @@ -164,6 +164,7 @@ impl From> for Location { #[serde(rename_all = "camelCase")] #[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] #[cfg_attr(test, derive(Eq, PartialEq))] + struct Advices { advices: Vec, } @@ -250,7 +251,7 @@ impl super::Advices for Advices { #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] -#[cfg_attr(test, derive(Eq, PartialEq))] +#[cfg_attr(test, derive(PartialEq, Eq))] enum Advice { Log(LogCategory, MarkupBuf), List(Vec), diff --git a/crates/pgt_workspace/Cargo.toml b/crates/pgt_workspace/Cargo.toml index 3ef4936b..e78f4391 100644 --- a/crates/pgt_workspace/Cargo.toml +++ b/crates/pgt_workspace/Cargo.toml @@ -32,6 +32,7 @@ pgt_statement_splitter = { workspace = true } pgt_suppressions = { workspace = true } pgt_text_size.workspace = true pgt_typecheck = { workspace = true } +pgt_workspace_macros = { workspace = true } rustc-hash = { workspace = true } schemars = { workspace = true, optional = true } serde = { workspace = true, features = ["derive"] } diff --git a/crates/pgt_workspace/src/features/code_actions.rs b/crates/pgt_workspace/src/features/code_actions.rs index 22223dd3..cd1706d3 100644 --- a/crates/pgt_workspace/src/features/code_actions.rs +++ b/crates/pgt_workspace/src/features/code_actions.rs @@ -12,7 +12,7 
@@ pub struct CodeActionsParams { pub skip: Vec, } -#[derive(Debug, serde::Serialize, serde::Deserialize)] +#[derive(Debug, serde::Serialize, serde::Deserialize, Default)] #[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] pub struct CodeActionsResult { pub actions: Vec, @@ -57,7 +57,7 @@ pub struct ExecuteStatementParams { pub path: PgTPath, } -#[derive(Debug, serde::Serialize, serde::Deserialize)] +#[derive(Debug, serde::Serialize, serde::Deserialize, Default, PartialEq, Eq)] #[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] pub struct ExecuteStatementResult { pub message: String, diff --git a/crates/pgt_workspace/src/features/diagnostics.rs b/crates/pgt_workspace/src/features/diagnostics.rs index ff60e142..a697641e 100644 --- a/crates/pgt_workspace/src/features/diagnostics.rs +++ b/crates/pgt_workspace/src/features/diagnostics.rs @@ -12,7 +12,7 @@ pub struct PullDiagnosticsParams { pub skip: Vec, } -#[derive(Debug, serde::Serialize, serde::Deserialize)] +#[derive(Debug, serde::Serialize, serde::Deserialize, Default)] #[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] pub struct PullDiagnosticsResult { pub diagnostics: Vec, diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs index e6456afc..c6ed0827 100644 --- a/crates/pgt_workspace/src/workspace/server.rs +++ b/crates/pgt_workspace/src/workspace/server.rs @@ -21,6 +21,7 @@ use pgt_diagnostics::{ }; use pgt_fs::{ConfigName, PgTPath}; use pgt_typecheck::{IdentifierType, TypecheckParams, TypedIdentifier}; +use pgt_workspace_macros::ignored_path; use schema_cache_manager::SchemaCacheManager; use sqlx::{Executor, PgPool}; use tracing::{debug, info}; @@ -30,7 +31,7 @@ use crate::{ configuration::to_analyser_rules, features::{ code_actions::{ - self, CodeAction, CodeActionKind, CodeActionsResult, CommandAction, + CodeAction, CodeActionKind, CodeActionsParams, CodeActionsResult, CommandAction, CommandActionCategory, ExecuteStatementParams, ExecuteStatementResult, }, completions::{CompletionsResult, GetCompletionsParams, get_statement_for_completions}, @@ -262,6 +263,7 @@ impl Workspace for WorkspaceServer { } /// Add a new file to the workspace + #[ignored_path(path=¶ms.path)] #[tracing::instrument(level = "info", skip_all, fields(path = params.path.as_path().as_os_str().to_str()), err)] fn open_file(&self, params: OpenFileParams) -> Result<(), WorkspaceError> { let mut documents = self.documents.write().unwrap(); @@ -277,6 +279,7 @@ impl Workspace for WorkspaceServer { } /// Remove a file from the workspace + #[ignored_path(path=¶ms.path)] fn close_file(&self, params: super::CloseFileParams) -> Result<(), WorkspaceError> { let mut documents = self.documents.write().unwrap(); documents @@ -291,6 +294,7 @@ impl Workspace for WorkspaceServer { path = params.path.as_os_str().to_str(), version = params.version ), err)] + #[ignored_path(path=¶ms.path)] fn change_file(&self, params: super::ChangeFileParams) -> Result<(), WorkspaceError> { let mut documents = self.documents.write().unwrap(); @@ -312,6 +316,7 @@ impl Workspace for WorkspaceServer { None } + #[ignored_path(path=¶ms.path)] fn get_file_content(&self, params: GetFileContentParams) -> Result { let documents = self.documents.read().unwrap(); let document = documents @@ -324,10 +329,11 @@ impl Workspace for WorkspaceServer { Ok(self.is_ignored(params.pgt_path.as_path())) } + #[ignored_path(path=¶ms.path)] fn pull_code_actions( &self, - params: code_actions::CodeActionsParams, - ) -> Result { + params: 
CodeActionsParams, + ) -> Result { let documents = self.documents.read().unwrap(); let parser = documents .get(¶ms.path) @@ -366,6 +372,7 @@ impl Workspace for WorkspaceServer { Ok(CodeActionsResult { actions }) } + #[ignored_path(path=¶ms.path)] fn execute_statement( &self, params: ExecuteStatementParams, @@ -409,6 +416,7 @@ impl Workspace for WorkspaceServer { }) } + #[ignored_path(path=¶ms.path)] fn pull_diagnostics( &self, params: PullDiagnosticsParams, @@ -607,6 +615,7 @@ impl Workspace for WorkspaceServer { }) } + #[ignored_path(path=¶ms.path)] #[tracing::instrument(level = "debug", skip_all, fields( path = params.path.as_os_str().to_str(), position = params.position.to_string() diff --git a/crates/pgt_workspace/src/workspace/server.tests.rs b/crates/pgt_workspace/src/workspace/server.tests.rs index c3fbf723..0578f90f 100644 --- a/crates/pgt_workspace/src/workspace/server.tests.rs +++ b/crates/pgt_workspace/src/workspace/server.tests.rs @@ -1,6 +1,10 @@ -use biome_deserialize::Merge; +use std::sync::Arc; + +use biome_deserialize::{Merge, StringSet}; use pgt_analyse::RuleCategories; -use pgt_configuration::{PartialConfiguration, database::PartialDatabaseConfiguration}; +use pgt_configuration::{ + PartialConfiguration, database::PartialDatabaseConfiguration, files::PartialFilesConfiguration, +}; use pgt_diagnostics::Diagnostic; use pgt_fs::PgTPath; use pgt_text_size::TextRange; @@ -8,8 +12,10 @@ use sqlx::PgPool; use crate::{ Workspace, WorkspaceError, + features::code_actions::ExecuteStatementResult, workspace::{ - OpenFileParams, RegisterProjectFolderParams, UpdateSettingsParams, server::WorkspaceServer, + OpenFileParams, RegisterProjectFolderParams, StatementId, UpdateSettingsParams, + server::WorkspaceServer, }, }; @@ -152,3 +158,51 @@ async fn test_syntax_error(test_db: PgPool) { Some(TextRange::new(7.into(), 15.into())) ); } + +#[tokio::test] +async fn correctly_ignores_files() { + let mut conf = PartialConfiguration::init(); + conf.merge_with(PartialConfiguration { + files: Some(PartialFilesConfiguration { + ignore: Some(StringSet::from_iter(["test.sql".to_string()])), + ..Default::default() + }), + ..Default::default() + }); + + let workspace = get_test_workspace(Some(conf)).expect("Unable to create test workspace"); + + let path = PgTPath::new("test.sql"); + let content = r#" + seect 1; + "#; + + let diagnostics_result = workspace.pull_diagnostics(crate::workspace::PullDiagnosticsParams { + path: path.clone(), + categories: RuleCategories::all(), + max_diagnostics: 100, + only: vec![], + skip: vec![], + }); + + assert!( + diagnostics_result.is_ok_and(|res| res.diagnostics.is_empty() + && res.errors == 0 + && res.skipped_diagnostics == 0) + ); + + let close_file_result = + workspace.close_file(crate::workspace::CloseFileParams { path: path.clone() }); + + assert!(close_file_result.is_ok()); + + let execute_statement_result = + workspace.execute_statement(crate::workspace::ExecuteStatementParams { + path: path.clone(), + statement_id: StatementId::Root { + content: Arc::from(content), + }, + }); + + assert!(execute_statement_result.is_ok_and(|res| res == ExecuteStatementResult::default())); +} diff --git a/crates/pgt_workspace_macros/Cargo.toml b/crates/pgt_workspace_macros/Cargo.toml new file mode 100644 index 00000000..c192db04 --- /dev/null +++ b/crates/pgt_workspace_macros/Cargo.toml @@ -0,0 +1,19 @@ +[package] +authors.workspace = true +categories.workspace = true +description = "" +edition.workspace = true +homepage.workspace = true +keywords.workspace = true 
+license.workspace = true
+name = "pgt_workspace_macros"
+repository.workspace = true
+version = "0.0.0"
+
+[lib]
+proc-macro = true
+
+[dependencies]
+proc-macro2 = { version = "1.0.95" }
+quote = { workspace = true }
+syn = { workspace = true }
diff --git a/crates/pgt_workspace_macros/src/lib.rs b/crates/pgt_workspace_macros/src/lib.rs
new file mode 100644
index 00000000..d46f484d
--- /dev/null
+++ b/crates/pgt_workspace_macros/src/lib.rs
@@ -0,0 +1,123 @@
+use std::ops::Deref;
+
+use proc_macro::TokenStream;
+use quote::quote;
+use syn::{TypePath, TypeTuple, parse_macro_input};
+
+struct IgnoredPath {
+    path: syn::Expr,
+}
+
+impl syn::parse::Parse for IgnoredPath {
+    fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
+        let arg_name: syn::Ident = input.parse()?;
+
+        if arg_name != "path" {
+            return Err(syn::Error::new_spanned(
+                arg_name,
+                "Expected 'path' argument.",
+            ));
+        }
+
+        let _: syn::Token!(=) = input.parse()?;
+        let path: syn::Expr = input.parse()?;
+
+        Ok(Self { path })
+    }
+}
+
+#[proc_macro_attribute]
+/// You can use this on a workspace server function to return a default if the specified path
+/// is ignored by the user's settings.
+///
+/// This will work for any function where &self is in scope and that returns `Result<T, E>`, `Result<(), E>`, or `T`, where `T: Default`.
+/// `path` needs to point at a `&PgTPath`.
+///
+/// ### Usage
+///
+/// ```ignore
+/// impl WorkspaceServer {
+///     #[ignored_path(path=&params.path)]
+///     fn foo(&self, params: FooParams) -> Result<FooResult, WorkspaceError> {
+///         ... codeblock
+///     }
+/// }
+///
+/// // …expands to…
+///
+/// impl WorkspaceServer {
+///     fn foo(&self, params: FooParams) -> Result<FooResult, WorkspaceError> {
+///         if self.is_ignored(&params.path) {
+///             return Ok(FooResult::default());
+///         }
+///         ... codeblock
+///     }
+/// }
+/// ```
+pub fn ignored_path(args: TokenStream, input: TokenStream) -> TokenStream {
+    let ignored_path = parse_macro_input!(args as IgnoredPath);
+    let input_fn = parse_macro_input!(input as syn::ItemFn);
+
+    let macro_specified_path = ignored_path.path;
+
+    let vis = &input_fn.vis;
+    let sig = &input_fn.sig;
+    let block = &input_fn.block;
+    let attrs = &input_fn.attrs;
+
+    // handles cases `fn foo() -> Result<T, E>` and `fn foo() -> Result<(), E>`
+    // T needs to implement default
+    if let syn::ReturnType::Type(_, ty) = &sig.output {
+        if let syn::Type::Path(TypePath { path, .. }) = ty.deref() {
+            if let Some(seg) = path.segments.last() {
+                if seg.ident == "Result" {
+                    if let syn::PathArguments::AngleBracketed(type_args) = &seg.arguments {
+                        if let Some(syn::GenericArgument::Type(t)) = type_args.args.first() {
+                            if let syn::Type::Tuple(TypeTuple { elems, .. }) = t {
+                                // case: Result<(), E>
+                                if elems.is_empty() {
+                                    return TokenStream::from(quote! {
+                                        #(#attrs)*
+                                        #vis #sig {
+                                            if self.is_ignored(#macro_specified_path) {
+                                                return Ok(());
+                                            };
+                                            #block
+                                        }
+                                    });
+                                }
+                            }
+                            if let syn::Type::Path(TypePath { path, .. }) = t {
+                                if let Some(seg) = path.segments.first() {
+                                    let ident = &seg.ident;
+                                    return TokenStream::from(quote! {
+                                        #(#attrs)*
+                                        #vis #sig {
+                                            if self.is_ignored(#macro_specified_path) {
+                                                return Ok(#ident::default());
+                                            };
+                                            #block
+                                        }
+                                    });
+                                }
+                            }
+                        };
+                    };
+                };
+            };
+        };
+    };
+
+    // case fn foo() -> T {}
+    // handles all other T's
+    // T needs to implement Default
+    TokenStream::from(quote!
{ + #(#attrs)* + #vis #sig { + if self.is_ignored(#macro_specified_path) { + return Default::default(); + } + #block + } + }) +} From be2cd02de82b0c1aeaeb8aa9485645b3d7319c54 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Tue, 22 Jul 2025 07:50:56 +0200 Subject: [PATCH 108/114] docs: fix links to rules (#467) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit we'll have to deploy to see if this actually works – using `mike serve` apparently doesn't deploy the local `docs/` directory, but uses a git branch. --- crates/pgt_diagnostics_categories/src/categories.rs | 12 ++++++------ docs/codegen/src/rules_index.rs | 2 +- docs/codegen/src/rules_sources.rs | 4 ++-- docs/rule_sources.md | 12 ++++++------ docs/rules.md | 12 ++++++------ xtask/codegen/src/generate_new_analyser_rule.rs | 2 +- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/crates/pgt_diagnostics_categories/src/categories.rs b/crates/pgt_diagnostics_categories/src/categories.rs index d5ce48d7..b9d29698 100644 --- a/crates/pgt_diagnostics_categories/src/categories.rs +++ b/crates/pgt_diagnostics_categories/src/categories.rs @@ -13,12 +13,12 @@ // must be between `define_categories! {\n` and `\n ;\n`. define_categories! { - "lint/safety/addingRequiredField": "https://pglt.dev/linter/rules/adding-required-field", - "lint/safety/banDropColumn": "https://pglt.dev/linter/rules/ban-drop-column", - "lint/safety/banDropDatabase": "https://pgtools.dev/linter/rules/ban-drop-database", - "lint/safety/banDropNotNull": "https://pglt.dev/linter/rules/ban-drop-not-null", - "lint/safety/banDropTable": "https://pglt.dev/linter/rules/ban-drop-table", - "lint/safety/banTruncateCascade": "https://pgtools.dev/linter/rules/ban-truncate-cascade", + "lint/safety/addingRequiredField": "https://pgtools.dev/latest/rules/adding-required-field", + "lint/safety/banDropColumn": "https://pgtools.dev/latest/rules/ban-drop-column", + "lint/safety/banDropDatabase": "https://pgtools.dev/latest/rules/ban-drop-database", + "lint/safety/banDropNotNull": "https://pgtools.dev/latest/rules/ban-drop-not-null", + "lint/safety/banDropTable": "https://pgtools.dev/latest/rules/ban-drop-table", + "lint/safety/banTruncateCascade": "https://pgtools.dev/latest/rules/ban-truncate-cascade", // end lint rules ; // General categories diff --git a/docs/codegen/src/rules_index.rs b/docs/codegen/src/rules_index.rs index 655e4bdb..99d22580 100644 --- a/docs/codegen/src/rules_index.rs +++ b/docs/codegen/src/rules_index.rs @@ -69,7 +69,7 @@ fn generate_group( write!( content, - "| [{rule_name}](/rules/{dashed_rule}) | {summary} | {properties} |" + "| [{rule_name}](./{dashed_rule}) | {summary} | {properties} |" )?; writeln!(content)?; diff --git a/docs/codegen/src/rules_sources.rs b/docs/codegen/src/rules_sources.rs index b8fac23d..5b21ea93 100644 --- a/docs/codegen/src/rules_sources.rs +++ b/docs/codegen/src/rules_sources.rs @@ -48,12 +48,12 @@ pub fn generate_rule_sources(docs_dir: &Path) -> anyhow::Result<()> { for (rule_name, metadata) in rules { let kebab_rule_name = Case::Kebab.convert(rule_name); if metadata.sources.is_empty() { - exclusive_rules.insert((rule_name.to_string(), format!("./rules/{kebab_rule_name}"))); + exclusive_rules.insert((rule_name.to_string(), format!("../rules/{kebab_rule_name}"))); } else { for source in metadata.sources { let source_set = SourceSet { rule_name: rule_name.to_string(), - link: format!("./rules/{kebab_rule_name}"), + link: 
format!("../rules/{kebab_rule_name}"), source_link: source.to_rule_url(), source_rule_name: source.as_rule_name().to_string(), }; diff --git a/docs/rule_sources.md b/docs/rule_sources.md index 8c0f085d..679448cd 100644 --- a/docs/rule_sources.md +++ b/docs/rule_sources.md @@ -3,9 +3,9 @@ ### Squawk | Squawk Rule Name | Rule Name | | ---- | ---- | -| [adding-required-field](https://squawkhq.com/docs/adding-required-field) |[addingRequiredField](./rules/adding-required-field) | -| [ban-drop-column](https://squawkhq.com/docs/ban-drop-column) |[banDropColumn](./rules/ban-drop-column) | -| [ban-drop-database](https://squawkhq.com/docs/ban-drop-database) |[banDropDatabase](./rules/ban-drop-database) | -| [ban-drop-not-null](https://squawkhq.com/docs/ban-drop-not-null) |[banDropNotNull](./rules/ban-drop-not-null) | -| [ban-drop-table](https://squawkhq.com/docs/ban-drop-table) |[banDropTable](./rules/ban-drop-table) | -| [ban-truncate-cascade](https://squawkhq.com/docs/ban-truncate-cascade) |[banTruncateCascade](./rules/ban-truncate-cascade) | +| [adding-required-field](https://squawkhq.com/docs/adding-required-field) |[addingRequiredField](../rules/adding-required-field) | +| [ban-drop-column](https://squawkhq.com/docs/ban-drop-column) |[banDropColumn](../rules/ban-drop-column) | +| [ban-drop-database](https://squawkhq.com/docs/ban-drop-database) |[banDropDatabase](../rules/ban-drop-database) | +| [ban-drop-not-null](https://squawkhq.com/docs/ban-drop-not-null) |[banDropNotNull](../rules/ban-drop-not-null) | +| [ban-drop-table](https://squawkhq.com/docs/ban-drop-table) |[banDropTable](../rules/ban-drop-table) | +| [ban-truncate-cascade](https://squawkhq.com/docs/ban-truncate-cascade) |[banTruncateCascade](../rules/ban-truncate-cascade) | diff --git a/docs/rules.md b/docs/rules.md index 19e110c6..d74b67e8 100644 --- a/docs/rules.md +++ b/docs/rules.md @@ -12,12 +12,12 @@ Rules that detect potential safety issues in your code. | Rule name | Description | Properties | | --- | --- | --- | -| [addingRequiredField](/rules/adding-required-field) | Adding a new column that is NOT NULL and has no default value to an existing table effectively makes it required. | | -| [banDropColumn](/rules/ban-drop-column) | Dropping a column may break existing clients. | ✅ | -| [banDropDatabase](/rules/ban-drop-database) | Dropping a database may break existing clients (and everything else, really). | | -| [banDropNotNull](/rules/ban-drop-not-null) | Dropping a NOT NULL constraint may break existing clients. | ✅ | -| [banDropTable](/rules/ban-drop-table) | Dropping a table may break existing clients. | ✅ | -| [banTruncateCascade](/rules/ban-truncate-cascade) | Using `TRUNCATE`'s `CASCADE` option will truncate any tables that are also foreign-keyed to the specified tables. | | +| [addingRequiredField](./adding-required-field) | Adding a new column that is NOT NULL and has no default value to an existing table effectively makes it required. | | +| [banDropColumn](./ban-drop-column) | Dropping a column may break existing clients. | ✅ | +| [banDropDatabase](./ban-drop-database) | Dropping a database may break existing clients (and everything else, really). | | +| [banDropNotNull](./ban-drop-not-null) | Dropping a NOT NULL constraint may break existing clients. | ✅ | +| [banDropTable](./ban-drop-table) | Dropping a table may break existing clients. | ✅ | +| [banTruncateCascade](./ban-truncate-cascade) | Using `TRUNCATE`'s `CASCADE` option will truncate any tables that are also foreign-keyed to the specified tables. 
| | [//]: # (END RULES_INDEX) diff --git a/xtask/codegen/src/generate_new_analyser_rule.rs b/xtask/codegen/src/generate_new_analyser_rule.rs index 343b0673..4c4bcc69 100644 --- a/xtask/codegen/src/generate_new_analyser_rule.rs +++ b/xtask/codegen/src/generate_new_analyser_rule.rs @@ -132,7 +132,7 @@ pub fn generate_new_analyser_rule( // We sort rules to reduce conflicts between contributions made in parallel. let rule_line = match category { Category::Lint => format!( - r#" "lint/{group}/{rule_name_camel}": "https://pgtools.dev/linter/rules/{kebab_case_rule}","# + r#" "lint/{group}/{rule_name_camel}": "https://pgtools.dev/latest/rules/{kebab_case_rule}","# ), }; let lint_start = match category { From 51abce1c1a6b73699746090978c0c3f3ac87beb3 Mon Sep 17 00:00:00 2001 From: Julian Domke <68325451+juleswritescode@users.noreply.github.com> Date: Tue, 22 Jul 2025 07:52:18 +0200 Subject: [PATCH 109/114] refactor: extract completions context into `pgt_treesitter` crate (#466) I wanted to extract the treesitter-context that's used in completions, so it can also be used for the on-hover feature. I've also tried to clean up the APIs, detangled completion-context and sanitization, and extracted the general test helpers into the `pgt_test_utils` crate. --- Cargo.lock | 9 +- Cargo.toml | 2 +- crates/pgt_completions/Cargo.toml | 23 +- crates/pgt_completions/src/builder.rs | 7 +- crates/pgt_completions/src/complete.rs | 21 +- crates/pgt_completions/src/lib.rs | 1 - .../pgt_completions/src/providers/columns.rs | 157 +++++--- .../src/providers/functions.rs | 35 +- .../pgt_completions/src/providers/helper.rs | 9 +- .../pgt_completions/src/providers/policies.rs | 26 +- crates/pgt_completions/src/providers/roles.rs | 71 +++- .../pgt_completions/src/providers/schemas.rs | 23 +- .../pgt_completions/src/providers/tables.rs | 146 +++++-- .../pgt_completions/src/providers/triggers.rs | 169 --------- .../src/relevance/filtering.rs | 38 +- .../pgt_completions/src/relevance/scoring.rs | 28 +- crates/pgt_completions/src/sanitization.rs | 8 +- crates/pgt_completions/src/test_helper.rs | 82 +--- crates/pgt_test_utils/src/lib.rs | 84 +++++ .../Cargo.toml | 11 +- .../src/context/base_parser.rs | 0 .../src/context/grant_parser.rs | 27 +- .../src/context/mod.rs | 356 ++++++++++-------- .../src/context/policy_parser.rs | 39 +- .../src/context/revoke_parser.rs | 21 +- crates/pgt_treesitter/src/lib.rs | 5 + .../src/queries/insert_columns.rs | 6 +- .../src/queries/mod.rs} | 92 ++++- .../src/queries/parameters.rs | 4 +- .../src/queries/relations.rs | 7 +- .../src/queries/select_columns.rs | 6 +- .../src/queries/table_aliases.rs | 4 +- .../src/queries/where_columns.rs | 4 +- .../pgt_treesitter_queries/src/queries/mod.rs | 86 ----- crates/pgt_typecheck/Cargo.toml | 20 +- crates/pgt_typecheck/src/typed_identifier.rs | 2 +- .../pgt_workspace/src/features/completions.rs | 34 +- 37 files changed, 905 insertions(+), 758 deletions(-) delete mode 100644 crates/pgt_completions/src/providers/triggers.rs rename crates/{pgt_treesitter_queries => pgt_treesitter}/Cargo.toml (54%) rename crates/{pgt_completions => pgt_treesitter}/src/context/base_parser.rs (100%) rename crates/{pgt_completions => pgt_treesitter}/src/context/grant_parser.rs (94%) rename crates/{pgt_completions => pgt_treesitter}/src/context/mod.rs (78%) rename crates/{pgt_completions => pgt_treesitter}/src/context/policy_parser.rs (95%) rename crates/{pgt_completions => pgt_treesitter}/src/context/revoke_parser.rs (94%) create mode 100644 crates/pgt_treesitter/src/lib.rs rename 
crates/{pgt_treesitter_queries => pgt_treesitter}/src/queries/insert_columns.rs (97%) rename crates/{pgt_treesitter_queries/src/lib.rs => pgt_treesitter/src/queries/mod.rs} (72%) rename crates/{pgt_treesitter_queries => pgt_treesitter}/src/queries/parameters.rs (96%) rename crates/{pgt_treesitter_queries => pgt_treesitter}/src/queries/relations.rs (98%) rename crates/{pgt_treesitter_queries => pgt_treesitter}/src/queries/select_columns.rs (97%) rename crates/{pgt_treesitter_queries => pgt_treesitter}/src/queries/table_aliases.rs (97%) rename crates/{pgt_treesitter_queries => pgt_treesitter}/src/queries/where_columns.rs (97%) delete mode 100644 crates/pgt_treesitter_queries/src/queries/mod.rs diff --git a/Cargo.lock b/Cargo.lock index d5c626e1..1bf796b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2765,7 +2765,7 @@ dependencies = [ "pgt_schema_cache", "pgt_test_utils", "pgt_text_size", - "pgt_treesitter_queries", + "pgt_treesitter", "schemars", "serde", "serde_json", @@ -3047,10 +3047,13 @@ dependencies = [ ] [[package]] -name = "pgt_treesitter_queries" +name = "pgt_treesitter" version = "0.0.0" dependencies = [ "clap 4.5.23", + "pgt_schema_cache", + "pgt_test_utils", + "pgt_text_size", "tree-sitter", "tree_sitter_sql", ] @@ -3074,7 +3077,7 @@ dependencies = [ "pgt_schema_cache", "pgt_test_utils", "pgt_text_size", - "pgt_treesitter_queries", + "pgt_treesitter", "sqlx", "tokio", "tree-sitter", diff --git a/Cargo.toml b/Cargo.toml index 4a3454c6..23e21889 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -82,7 +82,7 @@ pgt_suppressions = { path = "./crates/pgt_suppressions", version = "0. pgt_text_edit = { path = "./crates/pgt_text_edit", version = "0.0.0" } pgt_text_size = { path = "./crates/pgt_text_size", version = "0.0.0" } pgt_tokenizer = { path = "./crates/pgt_tokenizer", version = "0.0.0" } -pgt_treesitter_queries = { path = "./crates/pgt_treesitter_queries", version = "0.0.0" } +pgt_treesitter = { path = "./crates/pgt_treesitter", version = "0.0.0" } pgt_typecheck = { path = "./crates/pgt_typecheck", version = "0.0.0" } pgt_workspace = { path = "./crates/pgt_workspace", version = "0.0.0" } pgt_workspace_macros = { path = "./crates/pgt_workspace_macros", version = "0.0.0" } diff --git a/crates/pgt_completions/Cargo.toml b/crates/pgt_completions/Cargo.toml index 916a0020..0ebb8e56 100644 --- a/crates/pgt_completions/Cargo.toml +++ b/crates/pgt_completions/Cargo.toml @@ -14,18 +14,17 @@ version = "0.0.0" [dependencies] async-std = "1.12.0" -pgt_text_size.workspace = true - - -fuzzy-matcher = "0.3.7" -pgt_schema_cache.workspace = true -pgt_treesitter_queries.workspace = true -schemars = { workspace = true, optional = true } -serde = { workspace = true, features = ["derive"] } -serde_json = { workspace = true } -tracing = { workspace = true } -tree-sitter.workspace = true -tree_sitter_sql.workspace = true +pgt_schema_cache.workspace = true +pgt_text_size.workspace = true +pgt_treesitter.workspace = true + +fuzzy-matcher = "0.3.7" +schemars = { workspace = true, optional = true } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +tracing = { workspace = true } +tree-sitter.workspace = true +tree_sitter_sql.workspace = true sqlx.workspace = true diff --git a/crates/pgt_completions/src/builder.rs b/crates/pgt_completions/src/builder.rs index 96576053..bf8eb66a 100644 --- a/crates/pgt_completions/src/builder.rs +++ b/crates/pgt_completions/src/builder.rs @@ -1,10 +1,11 @@ use crate::{ CompletionItemKind, CompletionText, - context::CompletionContext, 
item::CompletionItem, relevance::{filtering::CompletionFilter, scoring::CompletionScore}, }; +use pgt_treesitter::TreesitterContext; + pub(crate) struct PossibleCompletionItem<'a> { pub label: String, pub description: String, @@ -17,11 +18,11 @@ pub(crate) struct PossibleCompletionItem<'a> { pub(crate) struct CompletionBuilder<'a> { items: Vec>, - ctx: &'a CompletionContext<'a>, + ctx: &'a TreesitterContext<'a>, } impl<'a> CompletionBuilder<'a> { - pub fn new(ctx: &'a CompletionContext) -> Self { + pub fn new(ctx: &'a TreesitterContext) -> Self { CompletionBuilder { items: vec![], ctx } } diff --git a/crates/pgt_completions/src/complete.rs b/crates/pgt_completions/src/complete.rs index bd5efd19..e18589af 100644 --- a/crates/pgt_completions/src/complete.rs +++ b/crates/pgt_completions/src/complete.rs @@ -1,8 +1,9 @@ use pgt_text_size::TextSize; +use pgt_treesitter::{TreeSitterContextParams, context::TreesitterContext}; + use crate::{ builder::CompletionBuilder, - context::CompletionContext, item::CompletionItem, providers::{ complete_columns, complete_functions, complete_policies, complete_roles, complete_schemas, @@ -28,16 +29,20 @@ pub struct CompletionParams<'a> { pub fn complete(params: CompletionParams) -> Vec { let sanitized_params = SanitizedCompletionParams::from(params); - let ctx = CompletionContext::new(&sanitized_params); + let ctx = TreesitterContext::new(TreeSitterContextParams { + position: sanitized_params.position, + text: &sanitized_params.text, + tree: &sanitized_params.tree, + }); let mut builder = CompletionBuilder::new(&ctx); - complete_tables(&ctx, &mut builder); - complete_functions(&ctx, &mut builder); - complete_columns(&ctx, &mut builder); - complete_schemas(&ctx, &mut builder); - complete_policies(&ctx, &mut builder); - complete_roles(&ctx, &mut builder); + complete_tables(&ctx, sanitized_params.schema, &mut builder); + complete_functions(&ctx, sanitized_params.schema, &mut builder); + complete_columns(&ctx, sanitized_params.schema, &mut builder); + complete_schemas(&ctx, sanitized_params.schema, &mut builder); + complete_policies(&ctx, sanitized_params.schema, &mut builder); + complete_roles(&ctx, sanitized_params.schema, &mut builder); builder.finish() } diff --git a/crates/pgt_completions/src/lib.rs b/crates/pgt_completions/src/lib.rs index f8ca1a55..c4e592ee 100644 --- a/crates/pgt_completions/src/lib.rs +++ b/crates/pgt_completions/src/lib.rs @@ -1,6 +1,5 @@ mod builder; mod complete; -mod context; mod item; mod providers; mod relevance; diff --git a/crates/pgt_completions/src/providers/columns.rs b/crates/pgt_completions/src/providers/columns.rs index 04d0af65..ba3b2481 100644 --- a/crates/pgt_completions/src/providers/columns.rs +++ b/crates/pgt_completions/src/providers/columns.rs @@ -1,14 +1,20 @@ +use pgt_schema_cache::SchemaCache; +use pgt_treesitter::{TreesitterContext, WrappingClause}; + use crate::{ CompletionItemKind, builder::{CompletionBuilder, PossibleCompletionItem}, - context::{CompletionContext, WrappingClause}, relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore}, }; use super::helper::{find_matching_alias_for_table, get_completion_text_with_schema_or_alias}; -pub fn complete_columns<'a>(ctx: &CompletionContext<'a>, builder: &mut CompletionBuilder<'a>) { - let available_columns = &ctx.schema_cache.columns; +pub fn complete_columns<'a>( + ctx: &TreesitterContext<'a>, + schema_cache: &'a SchemaCache, + builder: &mut CompletionBuilder<'a>, +) { + let available_columns = &schema_cache.columns; for col in 
available_columns { let relevance = CompletionRelevanceData::Column(col); @@ -49,11 +55,13 @@ mod tests { use crate::{ CompletionItem, CompletionItemKind, complete, test_helper::{ - CURSOR_POS, CompletionAssertion, InputQuery, assert_complete_results, - assert_no_complete_results, get_test_deps, get_test_params, + CompletionAssertion, assert_complete_results, assert_no_complete_results, + get_test_deps, get_test_params, }, }; + use pgt_test_utils::QueryWithCursorPosition; + struct TestCase { query: String, message: &'static str, @@ -62,7 +70,7 @@ mod tests { } impl TestCase { - fn get_input_query(&self) -> InputQuery { + fn get_input_query(&self) -> QueryWithCursorPosition { let strs: Vec<&str> = self.query.split_whitespace().collect(); strs.join(" ").as_str().into() } @@ -94,7 +102,10 @@ mod tests { let queries: Vec = vec![ TestCase { message: "correctly prefers the columns of present tables", - query: format!(r#"select na{} from public.audio_books;"#, CURSOR_POS), + query: format!( + r#"select na{} from public.audio_books;"#, + QueryWithCursorPosition::cursor_marker() + ), label: "narrator", description: "public.audio_books", }, @@ -111,14 +122,17 @@ mod tests { join public.users u on u.id = subquery.id; "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ), label: "narrator_id", description: "private.audio_books", }, TestCase { message: "works without a schema", - query: format!(r#"select na{} from users;"#, CURSOR_POS), + query: format!( + r#"select na{} from users;"#, + QueryWithCursorPosition::cursor_marker() + ), label: "name", description: "public.users", }, @@ -165,7 +179,7 @@ mod tests { pool.execute(setup).await.unwrap(); let case = TestCase { - query: format!(r#"select n{};"#, CURSOR_POS), + query: format!(r#"select n{};"#, QueryWithCursorPosition::cursor_marker()), description: "", label: "", message: "", @@ -220,7 +234,10 @@ mod tests { let test_case = TestCase { message: "suggests user created tables first", - query: format!(r#"select {} from users"#, CURSOR_POS), + query: format!( + r#"select {} from users"#, + QueryWithCursorPosition::cursor_marker() + ), label: "", description: "", }; @@ -270,7 +287,10 @@ mod tests { let test_case = TestCase { message: "suggests user created tables first", - query: format!(r#"select * from private.{}"#, CURSOR_POS), + query: format!( + r#"select * from private.{}"#, + QueryWithCursorPosition::cursor_marker() + ), label: "", description: "", }; @@ -311,7 +331,11 @@ mod tests { pool.execute(setup).await.unwrap(); assert_complete_results( - format!(r#"select {} from users"#, CURSOR_POS).as_str(), + format!( + r#"select {} from users"#, + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![ CompletionAssertion::Label("address2".into()), CompletionAssertion::Label("email2".into()), @@ -324,7 +348,11 @@ mod tests { .await; assert_complete_results( - format!(r#"select {} from private.users"#, CURSOR_POS).as_str(), + format!( + r#"select {} from private.users"#, + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![ CompletionAssertion::Label("address1".into()), CompletionAssertion::Label("email1".into()), @@ -338,7 +366,11 @@ mod tests { // asserts fuzzy finding for "settings" assert_complete_results( - format!(r#"select sett{} from private.users"#, CURSOR_POS).as_str(), + format!( + r#"select sett{} from private.users"#, + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![CompletionAssertion::Label("user_settings".into())], None, &pool, @@ -372,7 +404,7 @@ mod tests { assert_complete_results( format!( 
"select u.id, p.{} from auth.users u join auth.posts p on u.id = p.user_id;", - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), vec![ @@ -394,7 +426,7 @@ mod tests { assert_complete_results( format!( "select u.id, p.content from auth.users u join auth.posts p on u.id = p.{};", - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), vec![ @@ -440,7 +472,7 @@ mod tests { assert_complete_results( format!( "select u.id, p.content from auth.users u join auth.{}", - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), vec![ @@ -479,7 +511,7 @@ mod tests { assert_complete_results( format!( "select u.id, auth.posts.content from auth.users u join auth.posts on u.{}", - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), vec![ @@ -496,7 +528,7 @@ mod tests { assert_complete_results( format!( "select u.id, p.content from auth.users u join auth.posts p on p.user_id = u.{}", - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), vec![ @@ -536,7 +568,7 @@ mod tests { assert_complete_results( format!( "select {} from public.one o join public.two on o.id = t.id;", - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), vec![ @@ -555,7 +587,7 @@ mod tests { assert_complete_results( format!( "select a, {} from public.one o join public.two on o.id = t.id;", - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), vec![ @@ -577,7 +609,7 @@ mod tests { assert_complete_results( format!( "select o.id, a, b, c, d, e, {} from public.one o join public.two on o.id = t.id;", - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), vec![ @@ -593,7 +625,7 @@ mod tests { assert_complete_results( format!( "select id, a, b, c, d, e, {} from public.one o join public.two on o.id = t.id;", - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), vec![CompletionAssertion::Label("z".to_string())], @@ -625,7 +657,11 @@ mod tests { // are lower in the alphabet assert_complete_results( - format!("insert into instruments ({})", CURSOR_POS).as_str(), + format!( + "insert into instruments ({})", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![ CompletionAssertion::Label("id".to_string()), CompletionAssertion::Label("name".to_string()), @@ -637,7 +673,11 @@ mod tests { .await; assert_complete_results( - format!("insert into instruments (id, {})", CURSOR_POS).as_str(), + format!( + "insert into instruments (id, {})", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![ CompletionAssertion::Label("name".to_string()), CompletionAssertion::Label("z".to_string()), @@ -648,7 +688,11 @@ mod tests { .await; assert_complete_results( - format!("insert into instruments (id, {}, name)", CURSOR_POS).as_str(), + format!( + "insert into instruments (id, {}, name)", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![CompletionAssertion::Label("z".to_string())], None, &pool, @@ -659,7 +703,7 @@ mod tests { assert_complete_results( format!( "insert into instruments (name, {}) values ('my_bass');", - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), vec![ @@ -673,7 +717,11 @@ mod tests { // no completions in the values list! 
assert_no_complete_results( - format!("insert into instruments (id, name) values ({})", CURSOR_POS).as_str(), + format!( + "insert into instruments (id, name) values ({})", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), None, &pool, ) @@ -700,7 +748,11 @@ mod tests { pool.execute(setup).await.unwrap(); assert_complete_results( - format!("select name from instruments where {} ", CURSOR_POS).as_str(), + format!( + "select name from instruments where {} ", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![ CompletionAssertion::Label("created_at".into()), CompletionAssertion::Label("id".into()), @@ -715,7 +767,7 @@ mod tests { assert_complete_results( format!( "select name from instruments where z = 'something' and created_at > {}", - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), // simply do not complete columns + schemas; functions etc. are ok @@ -732,7 +784,7 @@ mod tests { assert_complete_results( format!( "select name from instruments where id = 'something' and {}", - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), vec![ @@ -749,7 +801,7 @@ mod tests { assert_complete_results( format!( "select name from instruments i join others o on i.z = o.a where i.{}", - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), vec![ @@ -783,22 +835,37 @@ mod tests { pool.execute(setup).await.unwrap(); let queries = vec![ - format!("alter table instruments drop column {}", CURSOR_POS), + format!( + "alter table instruments drop column {}", + QueryWithCursorPosition::cursor_marker() + ), format!( "alter table instruments drop column if exists {}", - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ), format!( "alter table instruments alter column {} set default", - CURSOR_POS + QueryWithCursorPosition::cursor_marker() + ), + format!( + "alter table instruments alter {} set default", + QueryWithCursorPosition::cursor_marker() + ), + format!( + "alter table public.instruments alter column {}", + QueryWithCursorPosition::cursor_marker() + ), + format!( + "alter table instruments alter {}", + QueryWithCursorPosition::cursor_marker() + ), + format!( + "alter table instruments rename {} to new_col", + QueryWithCursorPosition::cursor_marker() ), - format!("alter table instruments alter {} set default", CURSOR_POS), - format!("alter table public.instruments alter column {}", CURSOR_POS), - format!("alter table instruments alter {}", CURSOR_POS), - format!("alter table instruments rename {} to new_col", CURSOR_POS), format!( "alter table public.instruments rename column {} to new_col", - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ), ]; @@ -834,19 +901,19 @@ mod tests { let col_queries = vec![ format!( r#"create policy "my_pol" on public.instruments for select using ({})"#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ), format!( r#"create policy "my_pol" on public.instruments for insert with check ({})"#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ), format!( r#"create policy "my_pol" on public.instruments for update using (id = 1 and {})"#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ), format!( r#"create policy "my_pol" on public.instruments for insert with check (id = 1 and {})"#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ), ]; diff --git a/crates/pgt_completions/src/providers/functions.rs b/crates/pgt_completions/src/providers/functions.rs index 615e4f95..b2ac2fae 100644 --- a/crates/pgt_completions/src/providers/functions.rs +++ 
b/crates/pgt_completions/src/providers/functions.rs @@ -1,17 +1,21 @@ -use pgt_schema_cache::Function; +use pgt_schema_cache::{Function, SchemaCache}; +use pgt_treesitter::TreesitterContext; use crate::{ CompletionItemKind, CompletionText, builder::{CompletionBuilder, PossibleCompletionItem}, - context::CompletionContext, providers::helper::get_range_to_replace, relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore}, }; use super::helper::get_completion_text_with_schema_or_alias; -pub fn complete_functions<'a>(ctx: &'a CompletionContext, builder: &mut CompletionBuilder<'a>) { - let available_functions = &ctx.schema_cache.functions; +pub fn complete_functions<'a>( + ctx: &'a TreesitterContext, + schema_cache: &'a SchemaCache, + builder: &mut CompletionBuilder<'a>, +) { + let available_functions = &schema_cache.functions; for func in available_functions { let relevance = CompletionRelevanceData::Function(func); @@ -30,7 +34,7 @@ pub fn complete_functions<'a>(ctx: &'a CompletionContext, builder: &mut Completi } } -fn get_completion_text(ctx: &CompletionContext, func: &Function) -> CompletionText { +fn get_completion_text(ctx: &TreesitterContext, func: &Function) -> CompletionText { let range = get_range_to_replace(ctx); let mut text = get_completion_text_with_schema_or_alias(ctx, &func.name, &func.schema) .map(|ct| ct.text) @@ -70,11 +74,12 @@ mod tests { use crate::{ CompletionItem, CompletionItemKind, complete, test_helper::{ - CURSOR_POS, CompletionAssertion, assert_complete_results, get_test_deps, - get_test_params, + CompletionAssertion, assert_complete_results, get_test_deps, get_test_params, }, }; + use pgt_test_utils::QueryWithCursorPosition; + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] async fn completes_fn(pool: PgPool) { let setup = r#" @@ -89,7 +94,7 @@ mod tests { $$; "#; - let query = format!("select coo{}", CURSOR_POS); + let query = format!("select coo{}", QueryWithCursorPosition::cursor_marker()); let (tree, cache) = get_test_deps(Some(setup), query.as_str().into(), &pool).await; let params = get_test_params(&tree, &cache, query.as_str().into()); @@ -122,7 +127,10 @@ mod tests { $$; "#; - let query = format!(r#"select * from coo{}()"#, CURSOR_POS); + let query = format!( + r#"select * from coo{}()"#, + QueryWithCursorPosition::cursor_marker() + ); let (tree, cache) = get_test_deps(Some(setup), query.as_str().into(), &pool).await; let params = get_test_params(&tree, &cache, query.as_str().into()); @@ -156,7 +164,7 @@ mod tests { $$; "#; - let query = format!(r#"select coo{}"#, CURSOR_POS); + let query = format!(r#"select coo{}"#, QueryWithCursorPosition::cursor_marker()); let (tree, cache) = get_test_deps(Some(setup), query.as_str().into(), &pool).await; let params = get_test_params(&tree, &cache, query.as_str().into()); @@ -190,7 +198,10 @@ mod tests { $$; "#; - let query = format!(r#"select * from coo{}()"#, CURSOR_POS); + let query = format!( + r#"select * from coo{}()"#, + QueryWithCursorPosition::cursor_marker() + ); let (tree, cache) = get_test_deps(Some(setup), query.as_str().into(), &pool).await; let params = get_test_params(&tree, &cache, query.as_str().into()); @@ -259,7 +270,7 @@ mod tests { let query = format!( r#"create policy "my_pol" on public.instruments for insert with check (id = {})"#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ); assert_complete_results( diff --git a/crates/pgt_completions/src/providers/helper.rs b/crates/pgt_completions/src/providers/helper.rs index 811125bd..cd1046f1 100644 --- 
a/crates/pgt_completions/src/providers/helper.rs +++ b/crates/pgt_completions/src/providers/helper.rs @@ -1,9 +1,10 @@ use pgt_text_size::{TextRange, TextSize}; +use pgt_treesitter::TreesitterContext; -use crate::{CompletionText, context::CompletionContext, remove_sanitized_token}; +use crate::{CompletionText, remove_sanitized_token}; pub(crate) fn find_matching_alias_for_table( - ctx: &CompletionContext, + ctx: &TreesitterContext, table_name: &str, ) -> Option<String> { for (alias, table) in ctx.mentioned_table_aliases.iter() { @@ -14,7 +15,7 @@ pub(crate) fn find_matching_alias_for_table( None } -pub(crate) fn get_range_to_replace(ctx: &CompletionContext) -> TextRange { +pub(crate) fn get_range_to_replace(ctx: &TreesitterContext) -> TextRange { match ctx.node_under_cursor.as_ref() { Some(node) => { let content = ctx.get_node_under_cursor_content().unwrap_or("".into()); @@ -30,7 +31,7 @@ pub(crate) fn get_range_to_replace(ctx: &CompletionContext) -> TextRange { } pub(crate) fn get_completion_text_with_schema_or_alias( - ctx: &CompletionContext, + ctx: &TreesitterContext, item_name: &str, schema_or_alias_name: &str, ) -> Option<CompletionText> { diff --git a/crates/pgt_completions/src/providers/policies.rs b/crates/pgt_completions/src/providers/policies.rs index 216fcefa..a5ffdb43 100644 --- a/crates/pgt_completions/src/providers/policies.rs +++ b/crates/pgt_completions/src/providers/policies.rs @@ -1,16 +1,21 @@ +use pgt_schema_cache::SchemaCache; use pgt_text_size::{TextRange, TextSize}; +use pgt_treesitter::TreesitterContext; use crate::{ CompletionItemKind, CompletionText, builder::{CompletionBuilder, PossibleCompletionItem}, - context::CompletionContext, relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore}, }; use super::helper::get_range_to_replace; -pub fn complete_policies<'a>(ctx: &CompletionContext<'a>, builder: &mut CompletionBuilder<'a>) { - let available_policies = &ctx.schema_cache.policies; +pub fn complete_policies<'a>( + ctx: &TreesitterContext<'a>, + schema_cache: &'a SchemaCache, + builder: &mut CompletionBuilder<'a>, +) { + let available_policies = &schema_cache.policies; let surrounded_by_quotes = ctx .get_node_under_cursor_content() @@ -61,7 +66,8 @@ pub fn complete_policies<'a>(ctx: &CompletionContext<'a>, builder: &mut Completi mod tests { use sqlx::{Executor, PgPool}; - use crate::test_helper::{CURSOR_POS, CompletionAssertion, assert_complete_results}; + use crate::test_helper::{CompletionAssertion, assert_complete_results}; + use pgt_test_utils::QueryWithCursorPosition; #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] async fn completes_within_quotation_marks(pool: PgPool) { @@ -89,7 +95,11 @@ mod tests { pool.execute(setup).await.unwrap(); assert_complete_results( - format!("alter policy \"{}\" on private.users;", CURSOR_POS).as_str(), + format!( + "alter policy \"{}\" on private.users;", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![ CompletionAssertion::Label("read for public users disallowed".into()), CompletionAssertion::Label("write for public users allowed".into()), @@ -100,7 +110,11 @@ mod tests { .await; assert_complete_results( - format!("alter policy \"w{}\" on private.users;", CURSOR_POS).as_str(), + format!( + "alter policy \"w{}\" on private.users;", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![CompletionAssertion::Label( "write for public users allowed".into(), )], diff --git a/crates/pgt_completions/src/providers/roles.rs b/crates/pgt_completions/src/providers/roles.rs index
01641543..b7664349 100644 --- a/crates/pgt_completions/src/providers/roles.rs +++ b/crates/pgt_completions/src/providers/roles.rs @@ -1,12 +1,17 @@ use crate::{ CompletionItemKind, builder::{CompletionBuilder, PossibleCompletionItem}, - context::CompletionContext, relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore}, }; +use pgt_schema_cache::SchemaCache; +use pgt_treesitter::TreesitterContext; -pub fn complete_roles<'a>(ctx: &CompletionContext<'a>, builder: &mut CompletionBuilder<'a>) { - let available_roles = &ctx.schema_cache.roles; +pub fn complete_roles<'a>( + _ctx: &TreesitterContext<'a>, + schema_cache: &'a SchemaCache, + builder: &mut CompletionBuilder<'a>, +) { + let available_roles = &schema_cache.roles; for role in available_roles { let relevance = CompletionRelevanceData::Role(role); @@ -29,7 +34,9 @@ pub fn complete_roles<'a>(ctx: &CompletionContext<'a>, builder: &mut CompletionB mod tests { use sqlx::{Executor, PgPool}; - use crate::test_helper::{CURSOR_POS, CompletionAssertion, assert_complete_results}; + use crate::test_helper::{CompletionAssertion, assert_complete_results}; + + use pgt_test_utils::QueryWithCursorPosition; const SETUP: &str = r#" create table users ( @@ -42,7 +49,7 @@ mod tests { #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] async fn works_in_drop_role(pool: PgPool) { assert_complete_results( - format!("drop role {}", CURSOR_POS).as_str(), + format!("drop role {}", QueryWithCursorPosition::cursor_marker()).as_str(), vec![ CompletionAssertion::LabelAndKind("owner".into(), crate::CompletionItemKind::Role), CompletionAssertion::LabelAndKind( @@ -63,7 +70,7 @@ mod tests { #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] async fn works_in_alter_role(pool: PgPool) { assert_complete_results( - format!("alter role {}", CURSOR_POS).as_str(), + format!("alter role {}", QueryWithCursorPosition::cursor_marker()).as_str(), vec![ CompletionAssertion::LabelAndKind("owner".into(), crate::CompletionItemKind::Role), CompletionAssertion::LabelAndKind( @@ -86,7 +93,7 @@ mod tests { pool.execute(SETUP).await.unwrap(); assert_complete_results( - format!("set role {}", CURSOR_POS).as_str(), + format!("set role {}", QueryWithCursorPosition::cursor_marker()).as_str(), vec![ CompletionAssertion::LabelAndKind("owner".into(), crate::CompletionItemKind::Role), CompletionAssertion::LabelAndKind( @@ -104,7 +111,11 @@ mod tests { .await; assert_complete_results( - format!("set session authorization {}", CURSOR_POS).as_str(), + format!( + "set session authorization {}", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![ CompletionAssertion::LabelAndKind("owner".into(), crate::CompletionItemKind::Role), CompletionAssertion::LabelAndKind( @@ -133,7 +144,7 @@ mod tests { for all to {} using (true);"#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), vec![ @@ -157,7 +168,7 @@ mod tests { r#"create policy "my cool policy" on public.users for select to {}"#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), vec![ @@ -186,7 +197,7 @@ mod tests { r#"grant select on table public.users to {}"#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), vec![ @@ -211,7 +222,7 @@ mod tests { r#"grant select on table public.users to owner, {}"#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), vec![ @@ -232,7 +243,11 @@ mod tests { .await; assert_complete_results( - format!(r#"grant {} to owner"#, CURSOR_POS).as_str(), + format!( + r#"grant {} to owner"#, + 
QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![ // recognizing already mentioned roles is not supported for now CompletionAssertion::LabelAndKind("owner".into(), crate::CompletionItemKind::Role), @@ -256,12 +271,30 @@ mod tests { pool.execute(SETUP).await.unwrap(); let queries = vec![ - format!("revoke {} from owner", CURSOR_POS), - format!("revoke admin option for {} from owner", CURSOR_POS), - format!("revoke owner from {}", CURSOR_POS), - format!("revoke all on schema public from {} granted by", CURSOR_POS), - format!("revoke all on schema public from owner, {}", CURSOR_POS), - format!("revoke all on table userse from owner, {}", CURSOR_POS), + format!( + "revoke {} from owner", + QueryWithCursorPosition::cursor_marker() + ), + format!( + "revoke admin option for {} from owner", + QueryWithCursorPosition::cursor_marker() + ), + format!( + "revoke owner from {}", + QueryWithCursorPosition::cursor_marker() + ), + format!( + "revoke all on schema public from {} granted by", + QueryWithCursorPosition::cursor_marker() + ), + format!( + "revoke all on schema public from owner, {}", + QueryWithCursorPosition::cursor_marker() + ), + format!( + "revoke all on table userse from owner, {}", + QueryWithCursorPosition::cursor_marker() + ), ]; for query in queries { diff --git a/crates/pgt_completions/src/providers/schemas.rs b/crates/pgt_completions/src/providers/schemas.rs index 561da0f8..43c52387 100644 --- a/crates/pgt_completions/src/providers/schemas.rs +++ b/crates/pgt_completions/src/providers/schemas.rs @@ -1,11 +1,16 @@ use crate::{ builder::{CompletionBuilder, PossibleCompletionItem}, - context::CompletionContext, relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore}, }; +use pgt_schema_cache::SchemaCache; +use pgt_treesitter::TreesitterContext; -pub fn complete_schemas<'a>(ctx: &'a CompletionContext, builder: &mut CompletionBuilder<'a>) { - let available_schemas = &ctx.schema_cache.schemas; +pub fn complete_schemas<'a>( + _ctx: &'a TreesitterContext, + schema_cache: &'a SchemaCache, + builder: &mut CompletionBuilder<'a>, +) { + let available_schemas = &schema_cache.schemas; for schema in available_schemas { let relevance = CompletionRelevanceData::Schema(schema); @@ -31,9 +36,11 @@ mod tests { use crate::{ CompletionItemKind, - test_helper::{CURSOR_POS, CompletionAssertion, assert_complete_results}, + test_helper::{CompletionAssertion, assert_complete_results}, }; + use pgt_test_utils::QueryWithCursorPosition; + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] async fn autocompletes_schemas(pool: PgPool) { let setup = r#" @@ -50,7 +57,7 @@ mod tests { "#; assert_complete_results( - format!("select * from {}", CURSOR_POS).as_str(), + format!("select * from {}", QueryWithCursorPosition::cursor_marker()).as_str(), vec![ CompletionAssertion::LabelAndKind("public".to_string(), CompletionItemKind::Schema), CompletionAssertion::LabelAndKind("auth".to_string(), CompletionItemKind::Schema), @@ -97,7 +104,11 @@ mod tests { "#; assert_complete_results( - format!("select * from u{}", CURSOR_POS).as_str(), + format!( + "select * from u{}", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![ CompletionAssertion::LabelAndKind("users".into(), CompletionItemKind::Table), CompletionAssertion::LabelAndKind("ultimate".into(), CompletionItemKind::Schema), diff --git a/crates/pgt_completions/src/providers/tables.rs b/crates/pgt_completions/src/providers/tables.rs index 3fbee8f1..f78b697c 100644 --- 
a/crates/pgt_completions/src/providers/tables.rs +++ b/crates/pgt_completions/src/providers/tables.rs @@ -1,14 +1,20 @@ +use pgt_schema_cache::SchemaCache; +use pgt_treesitter::TreesitterContext; + use crate::{ builder::{CompletionBuilder, PossibleCompletionItem}, - context::CompletionContext, item::CompletionItemKind, relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore}, }; use super::helper::get_completion_text_with_schema_or_alias; -pub fn complete_tables<'a>(ctx: &'a CompletionContext, builder: &mut CompletionBuilder<'a>) { - let available_tables = &ctx.schema_cache.tables; +pub fn complete_tables<'a>( + ctx: &'a TreesitterContext, + schema_cache: &'a SchemaCache, + builder: &mut CompletionBuilder<'a>, +) { + let available_tables = &schema_cache.tables; for table in available_tables { let relevance = CompletionRelevanceData::Table(table); @@ -47,11 +53,13 @@ mod tests { use crate::{ CompletionItem, CompletionItemKind, complete, test_helper::{ - CURSOR_POS, CompletionAssertion, assert_complete_results, assert_no_complete_results, + CompletionAssertion, assert_complete_results, assert_no_complete_results, get_test_deps, get_test_params, }, }; + use pgt_test_utils::QueryWithCursorPosition; + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] async fn autocompletes_simple_table(pool: PgPool) { let setup = r#" @@ -62,7 +70,10 @@ mod tests { ); "#; - let query = format!("select * from u{}", CURSOR_POS); + let query = format!( + "select * from u{}", + QueryWithCursorPosition::cursor_marker() + ); let (tree, cache) = get_test_deps(Some(setup), query.as_str().into(), &pool).await; let params = get_test_params(&tree, &cache, query.as_str().into()); @@ -98,9 +109,27 @@ mod tests { pool.execute(setup).await.unwrap(); let test_cases = vec![ - (format!("select * from u{}", CURSOR_POS), "users"), - (format!("select * from e{}", CURSOR_POS), "emails"), - (format!("select * from a{}", CURSOR_POS), "addresses"), + ( + format!( + "select * from u{}", + QueryWithCursorPosition::cursor_marker() + ), + "users", + ), + ( + format!( + "select * from e{}", + QueryWithCursorPosition::cursor_marker() + ), + "emails", + ), + ( + format!( + "select * from a{}", + QueryWithCursorPosition::cursor_marker() + ), + "addresses", + ), ]; for (query, expected_label) in test_cases { @@ -142,10 +171,25 @@ mod tests { pool.execute(setup).await.unwrap(); let test_cases = vec![ - (format!("select * from u{}", CURSOR_POS), "user_y"), // user_y is preferred alphanumerically - (format!("select * from private.u{}", CURSOR_POS), "user_z"), ( - format!("select * from customer_support.u{}", CURSOR_POS), + format!( + "select * from u{}", + QueryWithCursorPosition::cursor_marker() + ), + "user_y", + ), // user_y is preferred alphanumerically + ( + format!( + "select * from private.u{}", + QueryWithCursorPosition::cursor_marker() + ), + "user_z", + ), + ( + format!( + "select * from customer_support.u{}", + QueryWithCursorPosition::cursor_marker() + ), "user_y", ), ]; @@ -186,7 +230,10 @@ mod tests { $$; "#; - let query = format!(r#"select * from coo{}"#, CURSOR_POS); + let query = format!( + r#"select * from coo{}"#, + QueryWithCursorPosition::cursor_marker() + ); let (tree, cache) = get_test_deps(Some(setup), query.as_str().into(), &pool).await; let params = get_test_params(&tree, &cache, query.as_str().into()); @@ -213,7 +260,7 @@ mod tests { pool.execute(setup).await.unwrap(); assert_complete_results( - format!("update {}", CURSOR_POS).as_str(), + format!("update {}", 
QueryWithCursorPosition::cursor_marker()).as_str(), vec![CompletionAssertion::LabelAndKind( "public".into(), CompletionItemKind::Schema, @@ -224,7 +271,7 @@ mod tests { .await; assert_complete_results( - format!("update public.{}", CURSOR_POS).as_str(), + format!("update public.{}", QueryWithCursorPosition::cursor_marker()).as_str(), vec![CompletionAssertion::LabelAndKind( "coos".into(), CompletionItemKind::Table, @@ -235,14 +282,22 @@ mod tests { .await; assert_no_complete_results( - format!("update public.coos {}", CURSOR_POS).as_str(), + format!( + "update public.coos {}", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), None, &pool, ) .await; assert_complete_results( - format!("update coos set {}", CURSOR_POS).as_str(), + format!( + "update coos set {}", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![ CompletionAssertion::Label("id".into()), CompletionAssertion::Label("name".into()), @@ -253,7 +308,11 @@ mod tests { .await; assert_complete_results( - format!("update coos set name = 'cool' where {}", CURSOR_POS).as_str(), + format!( + "update coos set name = 'cool' where {}", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![ CompletionAssertion::Label("id".into()), CompletionAssertion::Label("name".into()), @@ -275,10 +334,15 @@ mod tests { pool.execute(setup).await.unwrap(); - assert_no_complete_results(format!("delete {}", CURSOR_POS).as_str(), None, &pool).await; + assert_no_complete_results( + format!("delete {}", QueryWithCursorPosition::cursor_marker()).as_str(), + None, + &pool, + ) + .await; assert_complete_results( - format!("delete from {}", CURSOR_POS).as_str(), + format!("delete from {}", QueryWithCursorPosition::cursor_marker()).as_str(), vec![ CompletionAssertion::LabelAndKind("public".into(), CompletionItemKind::Schema), CompletionAssertion::LabelAndKind("coos".into(), CompletionItemKind::Table), @@ -289,7 +353,11 @@ mod tests { .await; assert_complete_results( - format!("delete from public.{}", CURSOR_POS).as_str(), + format!( + "delete from public.{}", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![CompletionAssertion::Label("coos".into())], None, &pool, @@ -297,7 +365,11 @@ mod tests { .await; assert_complete_results( - format!("delete from public.coos where {}", CURSOR_POS).as_str(), + format!( + "delete from public.coos where {}", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![ CompletionAssertion::Label("id".into()), CompletionAssertion::Label("name".into()), @@ -329,7 +401,11 @@ mod tests { "#; assert_complete_results( - format!("select * from auth.users u join {}", CURSOR_POS).as_str(), + format!( + "select * from auth.users u join {}", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![ CompletionAssertion::LabelAndKind("public".into(), CompletionItemKind::Schema), CompletionAssertion::LabelAndKind("auth".into(), CompletionItemKind::Schema), @@ -365,7 +441,7 @@ mod tests { pool.execute(setup).await.unwrap(); assert_complete_results( - format!("alter table {}", CURSOR_POS).as_str(), + format!("alter table {}", QueryWithCursorPosition::cursor_marker()).as_str(), vec![ CompletionAssertion::LabelAndKind("public".into(), CompletionItemKind::Schema), CompletionAssertion::LabelAndKind("auth".into(), CompletionItemKind::Schema), @@ -378,7 +454,11 @@ mod tests { .await; assert_complete_results( - format!("alter table if exists {}", CURSOR_POS).as_str(), + format!( + "alter table if exists {}", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![ 
CompletionAssertion::LabelAndKind("public".into(), CompletionItemKind::Schema), CompletionAssertion::LabelAndKind("auth".into(), CompletionItemKind::Schema), @@ -391,7 +471,7 @@ mod tests { .await; assert_complete_results( - format!("drop table {}", CURSOR_POS).as_str(), + format!("drop table {}", QueryWithCursorPosition::cursor_marker()).as_str(), vec![ CompletionAssertion::LabelAndKind("public".into(), CompletionItemKind::Schema), CompletionAssertion::LabelAndKind("auth".into(), CompletionItemKind::Schema), @@ -404,7 +484,11 @@ mod tests { .await; assert_complete_results( - format!("drop table if exists {}", CURSOR_POS).as_str(), + format!( + "drop table if exists {}", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![ CompletionAssertion::LabelAndKind("public".into(), CompletionItemKind::Schema), CompletionAssertion::LabelAndKind("auth".into(), CompletionItemKind::Schema), @@ -432,7 +516,7 @@ mod tests { pool.execute(setup).await.unwrap(); assert_complete_results( - format!("insert into {}", CURSOR_POS).as_str(), + format!("insert into {}", QueryWithCursorPosition::cursor_marker()).as_str(), vec![ CompletionAssertion::LabelAndKind("public".into(), CompletionItemKind::Schema), CompletionAssertion::LabelAndKind("auth".into(), CompletionItemKind::Schema), @@ -444,7 +528,11 @@ mod tests { .await; assert_complete_results( - format!("insert into auth.{}", CURSOR_POS).as_str(), + format!( + "insert into auth.{}", + QueryWithCursorPosition::cursor_marker() + ) + .as_str(), vec![CompletionAssertion::LabelAndKind( "users".into(), CompletionItemKind::Table, @@ -458,7 +546,7 @@ mod tests { assert_complete_results( format!( "insert into {} (name, email) values ('jules', 'a@b.com');", - CURSOR_POS + QueryWithCursorPosition::cursor_marker() ) .as_str(), vec![ diff --git a/crates/pgt_completions/src/providers/triggers.rs b/crates/pgt_completions/src/providers/triggers.rs deleted file mode 100644 index 6bc04deb..00000000 --- a/crates/pgt_completions/src/providers/triggers.rs +++ /dev/null @@ -1,169 +0,0 @@ -use crate::{ - CompletionItemKind, - builder::{CompletionBuilder, PossibleCompletionItem}, - context::CompletionContext, - relevance::{CompletionRelevanceData, filtering::CompletionFilter, scoring::CompletionScore}, -}; - -use super::helper::get_completion_text_with_schema_or_alias; - -pub fn complete_functions<'a>(ctx: &'a CompletionContext, builder: &mut CompletionBuilder<'a>) { - let available_functions = &ctx.schema_cache.functions; - - for func in available_functions { - let relevance = CompletionRelevanceData::Function(func); - - let item = PossibleCompletionItem { - label: func.name.clone(), - score: CompletionScore::from(relevance.clone()), - filter: CompletionFilter::from(relevance), - description: format!("Schema: {}", func.schema), - kind: CompletionItemKind::Function, - completion_text: get_completion_text_with_schema_or_alias( - ctx, - &func.name, - &func.schema, - ), - }; - - builder.add_item(item); - } -} - -#[cfg(test)] -mod tests { - use crate::{ - CompletionItem, CompletionItemKind, complete, - test_helper::{CURSOR_POS, get_test_deps, get_test_params}, - }; - - #[tokio::test] - async fn completes_fn() { - let setup = r#" - create or replace function cool() - returns trigger - language plpgsql - security invoker - as $$ - begin - raise exception 'dont matter'; - end; - $$; - "#; - - let query = format!("select coo{}", CURSOR_POS); - - let (tree, cache) = get_test_deps(setup, query.as_str().into()).await; - let params = get_test_params(&tree, &cache, 
query.as_str().into()); - let results = complete(params); - - let CompletionItem { label, .. } = results - .into_iter() - .next() - .expect("Should return at least one completion item"); - - assert_eq!(label, "cool"); - } - - #[tokio::test] - async fn prefers_fn_if_invocation() { - let setup = r#" - create table coos ( - id serial primary key, - name text - ); - - create or replace function cool() - returns trigger - language plpgsql - security invoker - as $$ - begin - raise exception 'dont matter'; - end; - $$; - "#; - - let query = format!(r#"select * from coo{}()"#, CURSOR_POS); - - let (tree, cache) = get_test_deps(setup, query.as_str().into()).await; - let params = get_test_params(&tree, &cache, query.as_str().into()); - let results = complete(params); - - let CompletionItem { label, kind, .. } = results - .into_iter() - .next() - .expect("Should return at least one completion item"); - - assert_eq!(label, "cool"); - assert_eq!(kind, CompletionItemKind::Function); - } - - #[tokio::test] - async fn prefers_fn_in_select_clause() { - let setup = r#" - create table coos ( - id serial primary key, - name text - ); - - create or replace function cool() - returns trigger - language plpgsql - security invoker - as $$ - begin - raise exception 'dont matter'; - end; - $$; - "#; - - let query = format!(r#"select coo{}"#, CURSOR_POS); - - let (tree, cache) = get_test_deps(setup, query.as_str().into()).await; - let params = get_test_params(&tree, &cache, query.as_str().into()); - let results = complete(params); - - let CompletionItem { label, kind, .. } = results - .into_iter() - .next() - .expect("Should return at least one completion item"); - - assert_eq!(label, "cool"); - assert_eq!(kind, CompletionItemKind::Function); - } - - #[tokio::test] - async fn prefers_function_in_from_clause_if_invocation() { - let setup = r#" - create table coos ( - id serial primary key, - name text - ); - - create or replace function cool() - returns trigger - language plpgsql - security invoker - as $$ - begin - raise exception 'dont matter'; - end; - $$; - "#; - - let query = format!(r#"select * from coo{}()"#, CURSOR_POS); - - let (tree, cache) = get_test_deps(setup, query.as_str().into()).await; - let params = get_test_params(&tree, &cache, query.as_str().into()); - let results = complete(params); - - let CompletionItem { label, kind, .. 
} = results - .into_iter() - .next() - .expect("Should return at least one completion item"); - - assert_eq!(label, "cool"); - assert_eq!(kind, CompletionItemKind::Function); - } -} diff --git a/crates/pgt_completions/src/relevance/filtering.rs b/crates/pgt_completions/src/relevance/filtering.rs index beea6ddb..18e3d7ce 100644 --- a/crates/pgt_completions/src/relevance/filtering.rs +++ b/crates/pgt_completions/src/relevance/filtering.rs @@ -1,6 +1,6 @@ use pgt_schema_cache::ProcKind; -use crate::context::{CompletionContext, NodeUnderCursor, WrappingClause, WrappingNode}; +use pgt_treesitter::context::{NodeUnderCursor, TreesitterContext, WrappingClause, WrappingNode}; use super::CompletionRelevanceData; @@ -16,7 +16,7 @@ impl<'a> From<CompletionRelevanceData<'a>> for CompletionFilter<'a> { } impl CompletionFilter<'_> { - pub fn is_relevant(&self, ctx: &CompletionContext) -> Option<()> { + pub fn is_relevant(&self, ctx: &TreesitterContext) -> Option<()> { self.completable_context(ctx)?; self.check_clause(ctx)?; self.check_invocation(ctx)?; @@ -25,7 +25,7 @@ impl CompletionFilter<'_> { Some(()) } - fn completable_context(&self, ctx: &CompletionContext) -> Option<()> { + fn completable_context(&self, ctx: &TreesitterContext) -> Option<()> { if ctx.wrapping_node_kind.is_none() && ctx.wrapping_clause_type.is_none() { return None; } @@ -70,7 +70,7 @@ impl CompletionFilter<'_> { Some(()) } - fn check_clause(&self, ctx: &CompletionContext) -> Option<()> { + fn check_clause(&self, ctx: &TreesitterContext) -> Option<()> { ctx.wrapping_clause_type .as_ref() .map(|clause| { @@ -208,7 +208,7 @@ impl CompletionFilter<'_> { .and_then(|is_ok| if is_ok { Some(()) } else { None }) } - fn check_invocation(&self, ctx: &CompletionContext) -> Option<()> { + fn check_invocation(&self, ctx: &TreesitterContext) -> Option<()> { if !ctx.is_invocation { return Some(()); } @@ -221,7 +221,7 @@ impl CompletionFilter<'_> { Some(()) } - fn check_mentioned_schema_or_alias(&self, ctx: &CompletionContext) -> Option<()> { + fn check_mentioned_schema_or_alias(&self, ctx: &TreesitterContext) -> Option<()> { if ctx.schema_or_alias_name.is_none() { return Some(()); } @@ -255,9 +255,11 @@ mod tests { use sqlx::{Executor, PgPool}; use crate::test_helper::{ - CURSOR_POS, CompletionAssertion, assert_complete_results, assert_no_complete_results, + CompletionAssertion, assert_complete_results, assert_no_complete_results, }; + use pgt_test_utils::QueryWithCursorPosition; + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] async fn completion_after_asterisk(pool: PgPool) { let setup = r#" @@ -270,11 +272,16 @@ mod tests { pool.execute(setup).await.unwrap(); - assert_no_complete_results(format!("select * {}", CURSOR_POS).as_str(), None, &pool).await; + assert_no_complete_results( + format!("select * {}", QueryWithCursorPosition::cursor_marker()).as_str(), + None, + &pool, + ) + .await; // if there's a COMMA after the asterisk, we're good assert_complete_results( - format!("select *, {}", CURSOR_POS).as_str(), + format!("select *, {}", QueryWithCursorPosition::cursor_marker()).as_str(), vec![ CompletionAssertion::Label("address".into()), CompletionAssertion::Label("email".into()), @@ -288,13 +295,20 @@ mod tests { #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] async fn completion_after_create_table(pool: PgPool) { - assert_no_complete_results(format!("create table {}", CURSOR_POS).as_str(), None, &pool) - .await; + assert_no_complete_results( + format!("create table {}", QueryWithCursorPosition::cursor_marker()).as_str(), + None, + &pool, + ) + .await; }
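// A minimal sketch of the marker convention these tests rely on, assuming only
// the QueryWithCursorPosition API added by this patch: the '€' marker is removed
// from the query text, and its byte offset becomes the cursor position handed to
// the completion engine.

use pgt_test_utils::QueryWithCursorPosition;

let query = format!("select * from u{}", QueryWithCursorPosition::cursor_marker());
let (position, sql) = QueryWithCursorPosition::from(query).get_text_and_position();
assert_eq!(sql, "select * from u");
assert_eq!(position, 15); // byte index where the marker sat before removal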
#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] async fn completion_in_column_definitions(pool: PgPool) { - let query = format!(r#"create table instruments ( {} )"#, CURSOR_POS); + let query = format!( + r#"create table instruments ( {} )"#, + QueryWithCursorPosition::cursor_marker() + ); assert_no_complete_results(query.as_str(), None, &pool).await; } } diff --git a/crates/pgt_completions/src/relevance/scoring.rs b/crates/pgt_completions/src/relevance/scoring.rs index a0b5efa5..4bbf325f 100644 --- a/crates/pgt_completions/src/relevance/scoring.rs +++ b/crates/pgt_completions/src/relevance/scoring.rs @@ -1,6 +1,8 @@ use fuzzy_matcher::{FuzzyMatcher, skim::SkimMatcherV2}; -use crate::context::{CompletionContext, WrappingClause, WrappingNode}; +use pgt_treesitter::context::{TreesitterContext, WrappingClause, WrappingNode}; + +use crate::sanitization; use super::CompletionRelevanceData; @@ -24,7 +26,7 @@ impl CompletionScore<'_> { self.score } - pub fn calc_score(&mut self, ctx: &CompletionContext) { + pub fn calc_score(&mut self, ctx: &TreesitterContext) { self.check_is_user_defined(); self.check_matches_schema(ctx); self.check_matches_query_input(ctx); @@ -35,10 +37,10 @@ impl CompletionScore<'_> { self.check_columns_in_stmt(ctx); } - fn check_matches_query_input(&mut self, ctx: &CompletionContext) { + fn check_matches_query_input(&mut self, ctx: &TreesitterContext) { let content = match ctx.get_node_under_cursor_content() { - Some(c) => c.replace('"', ""), - None => return, + Some(c) if !sanitization::is_sanitized_token(c.as_str()) => c.replace('"', ""), + _ => return, }; let name = match self.data { @@ -69,7 +71,7 @@ impl CompletionScore<'_> { } } - fn check_matching_clause_type(&mut self, ctx: &CompletionContext) { + fn check_matching_clause_type(&mut self, ctx: &TreesitterContext) { let clause_type = match ctx.wrapping_clause_type.as_ref() { None => return, Some(ct) => ct, @@ -135,14 +137,16 @@ impl CompletionScore<'_> { } } - fn check_matching_wrapping_node(&mut self, ctx: &CompletionContext) { + fn check_matching_wrapping_node(&mut self, ctx: &TreesitterContext) { let wrapping_node = match ctx.wrapping_node_kind.as_ref() { None => return, Some(wn) => wn, }; let has_mentioned_schema = ctx.schema_or_alias_name.is_some(); - let has_node_text = ctx.get_node_under_cursor_content().is_some(); + let has_node_text = ctx + .get_node_under_cursor_content() + .is_some_and(|txt| !sanitization::is_sanitized_token(txt.as_str())); self.score += match self.data { CompletionRelevanceData::Table(_) => match wrapping_node { @@ -170,7 +174,7 @@ impl CompletionScore<'_> { } } - fn check_is_invocation(&mut self, ctx: &CompletionContext) { + fn check_is_invocation(&mut self, ctx: &TreesitterContext) { self.score += match self.data { CompletionRelevanceData::Function(_) if ctx.is_invocation => 30, CompletionRelevanceData::Function(_) if !ctx.is_invocation => -10, @@ -179,7 +183,7 @@ impl CompletionScore<'_> { }; } - fn check_matches_schema(&mut self, ctx: &CompletionContext) { + fn check_matches_schema(&mut self, ctx: &TreesitterContext) { let schema_name = match ctx.schema_or_alias_name.as_ref() { None => return, Some(n) => n, @@ -228,7 +232,7 @@ impl CompletionScore<'_> { } } - fn check_relations_in_stmt(&mut self, ctx: &CompletionContext) { + fn check_relations_in_stmt(&mut self, ctx: &TreesitterContext) { match self.data { CompletionRelevanceData::Table(_) | CompletionRelevanceData::Function(_) => return, _ => {} @@ -312,7 +316,7 @@ impl CompletionScore<'_> { } } - fn check_columns_in_stmt(&mut 
self, ctx: &CompletionContext) { + fn check_columns_in_stmt(&mut self, ctx: &TreesitterContext) { if let CompletionRelevanceData::Column(column) = self.data { /* * Columns can be mentioned in one of two ways: diff --git a/crates/pgt_completions/src/sanitization.rs b/crates/pgt_completions/src/sanitization.rs index bf4d9816..155256c8 100644 --- a/crates/pgt_completions/src/sanitization.rs +++ b/crates/pgt_completions/src/sanitization.rs @@ -23,6 +23,10 @@ pub(crate) fn remove_sanitized_token(it: &str) -> String { it.replace(SANITIZED_TOKEN, "") } +pub(crate) fn is_sanitized_token(txt: &str) -> bool { + txt == SANITIZED_TOKEN +} + #[derive(PartialEq, Eq, Debug)] pub(crate) enum NodeText { Replaced, @@ -118,10 +122,6 @@ where tree: Cow::Borrowed(params.tree), } } - - pub fn is_sanitized_token(txt: &str) -> bool { - txt == SANITIZED_TOKEN - } } /// Checks if the cursor is positioned inbetween two SQL nodes. diff --git a/crates/pgt_completions/src/test_helper.rs b/crates/pgt_completions/src/test_helper.rs index 1bd5229c..e6c34761 100644 --- a/crates/pgt_completions/src/test_helper.rs +++ b/crates/pgt_completions/src/test_helper.rs @@ -1,40 +1,12 @@ -use std::fmt::Display; - use pgt_schema_cache::SchemaCache; +use pgt_test_utils::QueryWithCursorPosition; use sqlx::{Executor, PgPool}; use crate::{CompletionItem, CompletionItemKind, CompletionParams, complete}; -pub static CURSOR_POS: char = '€'; - -#[derive(Clone)] -pub struct InputQuery { - sql: String, - position: usize, -} - -impl From<&str> for InputQuery { - fn from(value: &str) -> Self { - let position = value - .find(CURSOR_POS) - .expect("Insert Cursor Position into your Query."); - - InputQuery { - sql: value.replace(CURSOR_POS, "").trim().to_string(), - position, - } - } -} - -impl Display for InputQuery { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.sql) - } -} - pub(crate) async fn get_test_deps( setup: Option<&str>, - input: InputQuery, + input: QueryWithCursorPosition, test_db: &PgPool, ) -> (tree_sitter::Tree, pgt_schema_cache::SchemaCache) { if let Some(setup) = setup { @@ -63,7 +35,7 @@ pub(crate) async fn get_test_deps( #[allow(dead_code)] pub(crate) async fn test_against_connection_string( conn_str: &str, - input: InputQuery, + input: QueryWithCursorPosition, ) -> (tree_sitter::Tree, pgt_schema_cache::SchemaCache) { let pool = sqlx::PgPool::connect(conn_str) .await @@ -83,16 +55,12 @@ pub(crate) async fn test_against_connection_string( (tree, schema_cache) } -pub(crate) fn get_text_and_position(q: InputQuery) -> (usize, String) { - (q.position, q.sql) -} - pub(crate) fn get_test_params<'a>( tree: &'a tree_sitter::Tree, schema_cache: &'a pgt_schema_cache::SchemaCache, - sql: InputQuery, + sql: QueryWithCursorPosition, ) -> CompletionParams<'a> { - let (position, text) = get_text_and_position(sql); + let (position, text) = sql.get_text_and_position(); CompletionParams { position: (position as u32).into(), @@ -102,46 +70,6 @@ pub(crate) fn get_test_params<'a>( } } -#[cfg(test)] -mod tests { - use crate::test_helper::CURSOR_POS; - - use super::InputQuery; - - #[test] - fn input_query_should_extract_correct_position() { - struct TestCase { - query: String, - expected_pos: usize, - expected_sql_len: usize, - } - - let cases = vec![ - TestCase { - query: format!("select * from{}", CURSOR_POS), - expected_pos: 13, - expected_sql_len: 13, - }, - TestCase { - query: format!("{}select * from", CURSOR_POS), - expected_pos: 0, - expected_sql_len: 13, - }, - TestCase { - query: format!("select 
{} from", CURSOR_POS), - expected_pos: 7, - expected_sql_len: 12, - }, - ]; - - for case in cases { - let query = InputQuery::from(case.query.as_str()); - assert_eq!(query.position, case.expected_pos); - assert_eq!(query.sql.len(), case.expected_sql_len); - } - } -} - #[derive(Debug, PartialEq, Eq)] pub(crate) enum CompletionAssertion { Label(String), diff --git a/crates/pgt_test_utils/src/lib.rs b/crates/pgt_test_utils/src/lib.rs index e21c6ce4..11bb1aeb 100644 --- a/crates/pgt_test_utils/src/lib.rs +++ b/crates/pgt_test_utils/src/lib.rs @@ -1 +1,85 @@ +use std::fmt::Display; + pub static MIGRATIONS: sqlx::migrate::Migrator = sqlx::migrate!("./testdb_migrations"); + +static CURSOR_POS: char = '€'; + +#[derive(Clone)] +pub struct QueryWithCursorPosition { + sql: String, + position: usize, +} + +impl QueryWithCursorPosition { + pub fn cursor_marker() -> char { + CURSOR_POS + } + + pub fn get_text_and_position(&self) -> (usize, String) { + (self.position, self.sql.clone()) + } +} + +impl From for QueryWithCursorPosition { + fn from(value: String) -> Self { + value.as_str().into() + } +} + +impl From<&str> for QueryWithCursorPosition { + fn from(value: &str) -> Self { + let position = value + .find(CURSOR_POS) + .expect("Use `QueryWithCursorPosition::cursor_marker()` to insert cursor position into your Query."); + + QueryWithCursorPosition { + sql: value.replace(CURSOR_POS, "").trim().to_string(), + position, + } + } +} + +impl Display for QueryWithCursorPosition { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.sql) + } +} + +#[cfg(test)] +mod tests { + + use super::QueryWithCursorPosition; + + #[test] + fn input_query_should_extract_correct_position() { + struct TestCase { + query: String, + expected_pos: usize, + expected_sql_len: usize, + } + + let cases = vec![ + TestCase { + query: format!("select * from{}", QueryWithCursorPosition::cursor_marker()), + expected_pos: 13, + expected_sql_len: 13, + }, + TestCase { + query: format!("{}select * from", QueryWithCursorPosition::cursor_marker()), + expected_pos: 0, + expected_sql_len: 13, + }, + TestCase { + query: format!("select {} from", QueryWithCursorPosition::cursor_marker()), + expected_pos: 7, + expected_sql_len: 12, + }, + ]; + + for case in cases { + let query = QueryWithCursorPosition::from(case.query.as_str()); + assert_eq!(query.position, case.expected_pos); + assert_eq!(query.sql.len(), case.expected_sql_len); + } + } +} diff --git a/crates/pgt_treesitter_queries/Cargo.toml b/crates/pgt_treesitter/Cargo.toml similarity index 54% rename from crates/pgt_treesitter_queries/Cargo.toml rename to crates/pgt_treesitter/Cargo.toml index 5806861f..f2d8b46e 100644 --- a/crates/pgt_treesitter_queries/Cargo.toml +++ b/crates/pgt_treesitter/Cargo.toml @@ -6,17 +6,20 @@ edition.workspace = true homepage.workspace = true keywords.workspace = true license.workspace = true -name = "pgt_treesitter_queries" +name = "pgt_treesitter" repository.workspace = true version = "0.0.0" [dependencies] -clap = { version = "4.5.23", features = ["derive"] } -tree-sitter.workspace = true -tree_sitter_sql.workspace = true +clap = { version = "4.5.23", features = ["derive"] } +pgt_schema_cache.workspace = true +pgt_text_size.workspace = true +tree-sitter.workspace = true +tree_sitter_sql.workspace = true [dev-dependencies] +pgt_test_utils.workspace = true [lib] doctest = false diff --git a/crates/pgt_completions/src/context/base_parser.rs b/crates/pgt_treesitter/src/context/base_parser.rs similarity index 100% rename 
from crates/pgt_completions/src/context/base_parser.rs rename to crates/pgt_treesitter/src/context/base_parser.rs diff --git a/crates/pgt_completions/src/context/grant_parser.rs b/crates/pgt_treesitter/src/context/grant_parser.rs similarity index 94% rename from crates/pgt_completions/src/context/grant_parser.rs rename to crates/pgt_treesitter/src/context/grant_parser.rs index 14ba882a..c9aebc33 100644 --- a/crates/pgt_completions/src/context/grant_parser.rs +++ b/crates/pgt_treesitter/src/context/grant_parser.rs @@ -187,14 +187,15 @@ mod tests { use crate::{ context::base_parser::CompletionStatementParser, context::grant_parser::{GrantContext, GrantParser}, - test_helper::CURSOR_POS, }; + use pgt_test_utils::QueryWithCursorPosition; + fn with_pos(query: String) -> (usize, String) { let mut pos: Option<usize> = None; for (p, c) in query.char_indices() { - if c == CURSOR_POS { + if c == QueryWithCursorPosition::cursor_marker() { pos = Some(p); break; } @@ -202,7 +203,9 @@ ( pos.expect("Please add cursor position!"), - query.replace(CURSOR_POS, "REPLACED_TOKEN").to_string(), + query + .replace(QueryWithCursorPosition::cursor_marker(), "REPLACED_TOKEN") + .to_string(), ) } @@ -212,7 +215,7 @@ r#" grant {} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = GrantParser::get_context(query.as_str(), pos); @@ -235,7 +238,7 @@ r#" grant select on {} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = GrantParser::get_context(query.as_str(), pos); @@ -258,7 +261,7 @@ r#" grant select on table {} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = GrantParser::get_context(query.as_str(), pos); @@ -281,7 +284,7 @@ r#" grant select on public.{} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = GrantParser::get_context(query.as_str(), pos); @@ -304,7 +307,7 @@ r#" grant select on table public.{} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = GrantParser::get_context(query.as_str(), pos); @@ -327,7 +330,7 @@ r#" grant select on public.users to {} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = GrantParser::get_context(query.as_str(), pos); @@ -350,7 +353,7 @@ r#" grant select on public.{} to test_role "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = GrantParser::get_context(query.as_str(), pos); @@ -373,7 +376,7 @@ r#" grant select on "MySchema"."MyTable" to {} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = GrantParser::get_context(query.as_str(), pos); @@ -396,7 +399,7 @@ r#" grant select on public.users to alice, {} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = GrantParser::get_context(query.as_str(), pos); diff --git a/crates/pgt_completions/src/context/mod.rs b/crates/pgt_treesitter/src/context/mod.rs similarity index 78% rename from crates/pgt_completions/src/context/mod.rs rename to crates/pgt_treesitter/src/context/mod.rs index 01e563b0..9cfaadea 100644 --- a/crates/pgt_completions/src/context/mod.rs +++ b/crates/pgt_treesitter/src/context/mod.rs @@ -7,22 +7,14 @@ mod grant_parser; mod policy_parser; mod revoke_parser; -use pgt_schema_cache::SchemaCache; -use pgt_text_size::TextRange; -use pgt_treesitter_queries::{ - TreeSitterQueriesExecutor, - queries::{self, QueryResult}, -}; - -use crate::{ - NodeText, - context::{
base_parser::CompletionStatementParser, - grant_parser::GrantParser, - policy_parser::{PolicyParser, PolicyStmtKind}, - revoke_parser::RevokeParser, - }, - sanitization::SanitizedCompletionParams, +use crate::queries::{self, QueryResult, TreeSitterQueriesExecutor}; +use pgt_text_size::{TextRange, TextSize}; + +use crate::context::{ + base_parser::CompletionStatementParser, + grant_parser::GrantParser, + policy_parser::{PolicyParser, PolicyStmtKind}, + revoke_parser::RevokeParser, }; #[derive(Debug, PartialEq, Eq, Hash, Clone)] @@ -59,9 +51,9 @@ pub enum WrappingClause<'a> { } #[derive(PartialEq, Eq, Hash, Debug, Clone)] -pub(crate) struct MentionedColumn { - pub(crate) column: String, - pub(crate) alias: Option<String>, +pub struct MentionedColumn { + pub column: String, + pub alias: Option<String>, } /// We can map a few nodes, such as the "update" node, to actual SQL clauses. @@ -81,10 +73,10 @@ pub enum WrappingNode { } #[derive(Debug)] -pub(crate) enum NodeUnderCursor<'a> { +pub enum NodeUnderCursor<'a> { TsNode(tree_sitter::Node<'a>), CustomNode { - text: NodeText, + text: String, range: TextRange, kind: String, previous_node_kind: Option<String>, @@ -150,13 +142,18 @@ impl TryFrom<String> for WrappingNode { } } +pub struct TreeSitterContextParams<'a> { + pub position: TextSize, + pub text: &'a str, + pub tree: &'a tree_sitter::Tree, +} + #[derive(Debug)] -pub(crate) struct CompletionContext<'a> { +pub struct TreesitterContext<'a> { pub node_under_cursor: Option<NodeUnderCursor<'a>>, pub tree: &'a tree_sitter::Tree, pub text: &'a str, - pub schema_cache: &'a SchemaCache, pub position: usize, /// If the cursor is on a node that uses dot notation /// to specify an alias or schema, this will hold the schema's or /// alias's name. /// /// Here, `auth` is a schema name: /// ```sql /// select * from auth.users; /// ``` /// /// Here, `u` is an alias name: /// ```sql /// select /// * /// from /// auth.users u /// left join identities i /// on u.id = i.user_id; /// ``` pub schema_or_alias_name: Option<String>, + pub wrapping_clause_type: Option<WrappingClause<'a>>, pub wrapping_node_kind: Option<WrappingNode>, @@ -190,12 +188,11 @@ pub mentioned_columns: HashMap<Option<WrappingClause<'a>>, HashSet<MentionedColumn>>, } -impl<'a> CompletionContext<'a> { - pub fn new(params: &'a SanitizedCompletionParams) -> Self { +impl<'a> TreesitterContext<'a> { + pub fn new(params: TreeSitterContextParams<'a>) -> Self { let mut ctx = Self { - tree: params.tree.as_ref(), - text: &params.text, - schema_cache: params.schema, + tree: params.tree, + text: params.text, position: usize::from(params.position), node_under_cursor: None, schema_or_alias_name: None, @@ -211,11 +208,11 @@ impl<'a> CompletionContext<'a> { // policy handling is important to Supabase, but they are a PostgreSQL specific extension, // so the tree_sitter_sql language does not support it. // We infer the context manually.
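// A sketch of the shared shape of these hand-written statement parsers,
// assuming the looks_like_matching_stmt / get_context API used just below:
// a cheap textual pre-check runs first, and only on a match does the
// position-aware pass build the node under the cursor by hand, e.g. for
// some statement text and cursor position:
if PolicyParser::looks_like_matching_stmt(text) {
    let policy_ctx = PolicyParser::get_context(text, position);
    // policy_ctx.node_text / node_range / node_kind then feed the
    // CustomNode variant of NodeUnderCursor, since tree-sitter produces
    // no usable tree for these statements.
}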
- if PolicyParser::looks_like_matching_stmt(&params.text) { + if PolicyParser::looks_like_matching_stmt(params.text) { ctx.gather_policy_context(); - } else if GrantParser::looks_like_matching_stmt(&params.text) { + } else if GrantParser::looks_like_matching_stmt(params.text) { ctx.gather_grant_context(); - } else if RevokeParser::looks_like_matching_stmt(&params.text) { + } else if RevokeParser::looks_like_matching_stmt(params.text) { ctx.gather_revoke_context(); } else { ctx.gather_tree_context(); @@ -229,7 +226,7 @@ impl<'a> CompletionContext<'a> { let revoke_context = RevokeParser::get_context(self.text, self.position); self.node_under_cursor = Some(NodeUnderCursor::CustomNode { - text: revoke_context.node_text.into(), + text: revoke_context.node_text, range: revoke_context.node_range, kind: revoke_context.node_kind.clone(), previous_node_kind: None, @@ -257,7 +254,7 @@ impl<'a> CompletionContext<'a> { let grant_context = GrantParser::get_context(self.text, self.position); self.node_under_cursor = Some(NodeUnderCursor::CustomNode { - text: grant_context.node_text.into(), + text: grant_context.node_text, range: grant_context.node_range, kind: grant_context.node_kind.clone(), previous_node_kind: None, @@ -285,7 +282,7 @@ impl<'a> CompletionContext<'a> { let policy_context = PolicyParser::get_context(self.text, self.position); self.node_under_cursor = Some(NodeUnderCursor::CustomNode { - text: policy_context.node_text.into(), + text: policy_context.node_text, range: policy_context.node_range, kind: policy_context.node_kind.clone(), previous_node_kind: Some(policy_context.previous_node_kind), @@ -397,29 +394,18 @@ impl<'a> CompletionContext<'a> { } } - fn get_ts_node_content(&self, ts_node: &tree_sitter::Node<'a>) -> Option<NodeText> { + fn get_ts_node_content(&self, ts_node: &tree_sitter::Node<'a>) -> Option<String> { let source = self.text; - ts_node.utf8_text(source.as_bytes()).ok().map(|txt| { - if SanitizedCompletionParams::is_sanitized_token(txt) { - NodeText::Replaced - } else { - NodeText::Original(txt.into()) - } - }) + ts_node .utf8_text(source.as_bytes()) .ok() .map(|txt| txt.into()) } pub fn get_node_under_cursor_content(&self) -> Option<String> { match self.node_under_cursor.as_ref()? { - NodeUnderCursor::TsNode(node) => { - self.get_ts_node_content(node).and_then(|nt| match nt { - NodeText::Replaced => None, - NodeText::Original(c) => Some(c.to_string()), - }) - } - NodeUnderCursor::CustomNode { text, .. } => match text { - NodeText::Replaced => None, - NodeText::Original(c) => Some(c.to_string()), - }, + NodeUnderCursor::TsNode(node) => self.get_ts_node_content(node), + NodeUnderCursor::CustomNode { text, ..
} => Some(text.clone()), } } @@ -501,15 +487,10 @@ impl<'a> CompletionContext<'a> { match current_node_kind { "object_reference" | "field" => { let content = self.get_ts_node_content(¤t_node); - if let Some(node_txt) = content { - match node_txt { - NodeText::Original(txt) => { - let parts: Vec<&str> = txt.split('.').collect(); - if parts.len() == 2 { - self.schema_or_alias_name = Some(parts[0].to_string()); - } - } - NodeText::Replaced => {} + if let Some(txt) = content { + let parts: Vec<&str> = txt.split('.').collect(); + if parts.len() == 2 { + self.schema_or_alias_name = Some(parts[0].to_string()); } } } @@ -638,12 +619,7 @@ impl<'a> CompletionContext<'a> { break; } - if let Some(sibling_content) = - self.get_ts_node_content(&sib).and_then(|txt| match txt { - NodeText::Original(txt) => Some(txt), - NodeText::Replaced => None, - }) - { + if let Some(sibling_content) = self.get_ts_node_content(&sib) { if sibling_content == tokens[idx] { idx += 1; } @@ -674,9 +650,7 @@ impl<'a> CompletionContext<'a> { while let Some(sib) = first_sibling.next_sibling() { match sib.kind() { "object_reference" => { - if let Some(NodeText::Original(txt)) = - self.get_ts_node_content(&sib) - { + if let Some(txt) = self.get_ts_node_content(&sib) { let mut iter = txt.split('.').rev(); let table = iter.next().unwrap().to_string(); let schema = iter.next().map(|s| s.to_string()); @@ -690,9 +664,7 @@ impl<'a> CompletionContext<'a> { } "column" => { - if let Some(NodeText::Original(txt)) = - self.get_ts_node_content(&sib) - { + if let Some(txt) = self.get_ts_node_content(&sib) { let entry = MentionedColumn { column: txt, alias: None, @@ -717,7 +689,7 @@ impl<'a> CompletionContext<'a> { WrappingClause::AlterColumn => { while let Some(sib) = first_sibling.next_sibling() { if sib.kind() == "object_reference" { - if let Some(NodeText::Original(txt)) = self.get_ts_node_content(&sib) { + if let Some(txt) = self.get_ts_node_content(&sib) { let mut iter = txt.split('.').rev(); let table = iter.next().unwrap().to_string(); let schema = iter.next().map(|s| s.to_string()); @@ -777,7 +749,7 @@ impl<'a> CompletionContext<'a> { } } - pub(crate) fn parent_matches_one_of_kind(&self, kinds: &[&'static str]) -> bool { + pub fn parent_matches_one_of_kind(&self, kinds: &[&'static str]) -> bool { self.node_under_cursor .as_ref() .is_some_and(|under_cursor| match under_cursor { @@ -788,7 +760,7 @@ impl<'a> CompletionContext<'a> { NodeUnderCursor::CustomNode { .. 
} => false, }) } - pub(crate) fn before_cursor_matches_kind(&self, kinds: &[&'static str]) -> bool { + pub fn before_cursor_matches_kind(&self, kinds: &[&'static str]) -> bool { self.node_under_cursor.as_ref().is_some_and(|under_cursor| { match under_cursor { NodeUnderCursor::TsNode(node) => { @@ -816,12 +788,9 @@ impl<'a> CompletionContext<'a> { #[cfg(test)] mod tests { - use crate::{ - NodeText, - context::{CompletionContext, WrappingClause}, - sanitization::SanitizedCompletionParams, - test_helper::{CURSOR_POS, get_text_and_position}, - }; + use crate::context::{TreeSitterContextParams, TreesitterContext, WrappingClause}; + + use pgt_test_utils::QueryWithCursorPosition; use super::NodeUnderCursor; @@ -838,56 +807,82 @@ mod tests { fn identifies_clauses() { let test_cases = vec![ ( - format!("Select {}* from users;", CURSOR_POS), + format!( + "Select {}* from users;", + QueryWithCursorPosition::cursor_marker() + ), WrappingClause::Select, ), ( - format!("Select * from u{};", CURSOR_POS), + format!( + "Select * from u{};", + QueryWithCursorPosition::cursor_marker() + ), WrappingClause::From, ), ( - format!("Select {}* from users where n = 1;", CURSOR_POS), + format!( + "Select {}* from users where n = 1;", + QueryWithCursorPosition::cursor_marker() + ), WrappingClause::Select, ), ( - format!("Select * from users where {}n = 1;", CURSOR_POS), + format!( + "Select * from users where {}n = 1;", + QueryWithCursorPosition::cursor_marker() + ), WrappingClause::Where, ), ( - format!("update users set u{} = 1 where n = 2;", CURSOR_POS), + format!( + "update users set u{} = 1 where n = 2;", + QueryWithCursorPosition::cursor_marker() + ), WrappingClause::Update, ), ( - format!("update users set u = 1 where n{} = 2;", CURSOR_POS), + format!( + "update users set u = 1 where n{} = 2;", + QueryWithCursorPosition::cursor_marker() + ), WrappingClause::Where, ), ( - format!("delete{} from users;", CURSOR_POS), + format!( + "delete{} from users;", + QueryWithCursorPosition::cursor_marker() + ), WrappingClause::Delete, ), ( - format!("delete from {}users;", CURSOR_POS), + format!( + "delete from {}users;", + QueryWithCursorPosition::cursor_marker() + ), WrappingClause::From, ), ( - format!("select name, age, location from public.u{}sers", CURSOR_POS), + format!( + "select name, age, location from public.u{}sers", + QueryWithCursorPosition::cursor_marker() + ), WrappingClause::From, ), ]; for (query, expected_clause) in test_cases { - let (position, text) = get_text_and_position(query.as_str().into()); + let (position, text) = QueryWithCursorPosition::from(query).get_text_and_position(); let tree = get_tree(text.as_str()); - let params = SanitizedCompletionParams { + let params = TreeSitterContextParams { position: (position as u32).into(), - text, - tree: std::borrow::Cow::Owned(tree), - schema: &pgt_schema_cache::SchemaCache::default(), + text: &text, + tree: &tree, }; - let ctx = CompletionContext::new(¶ms); + let ctx = TreesitterContext::new(params); assert_eq!(ctx.wrapping_clause_type, Some(expected_clause)); } @@ -897,29 +892,46 @@ mod tests { fn identifies_schema() { let test_cases = vec![ ( - format!("Select * from private.u{}", CURSOR_POS), + format!( + "Select * from private.u{}", + QueryWithCursorPosition::cursor_marker() + ), Some("private"), ), ( - format!("Select * from private.u{}sers()", CURSOR_POS), + format!( + "Select * from private.u{}sers()", + QueryWithCursorPosition::cursor_marker() + ), Some("private"), ), - (format!("Select * from u{}sers", CURSOR_POS), None), - (format!("Select * 
from u{}sers()", CURSOR_POS), None), + ( + format!( + "Select * from u{}sers", + QueryWithCursorPosition::cursor_marker() + ), + None, + ), + ( + format!( + "Select * from u{}sers()", + QueryWithCursorPosition::cursor_marker() + ), + None, + ), ]; for (query, expected_schema) in test_cases { - let (position, text) = get_text_and_position(query.as_str().into()); + let (position, text) = QueryWithCursorPosition::from(query).get_text_and_position(); let tree = get_tree(text.as_str()); - let params = SanitizedCompletionParams { + let params = TreeSitterContextParams { position: (position as u32).into(), - text, - tree: std::borrow::Cow::Owned(tree), - schema: &pgt_schema_cache::SchemaCache::default(), + text: &text, + tree: &tree, }; - let ctx = CompletionContext::new(¶ms); + let ctx = TreesitterContext::new(params); assert_eq!( ctx.schema_or_alias_name, @@ -931,32 +943,55 @@ mod tests { #[test] fn identifies_invocation() { let test_cases = vec![ - (format!("Select * from u{}sers", CURSOR_POS), false), - (format!("Select * from u{}sers()", CURSOR_POS), true), - (format!("Select cool{};", CURSOR_POS), false), - (format!("Select cool{}();", CURSOR_POS), true), ( - format!("Select upp{}ercase as title from users;", CURSOR_POS), + format!( + "Select * from u{}sers", + QueryWithCursorPosition::cursor_marker() + ), + false, + ), + ( + format!( + "Select * from u{}sers()", + QueryWithCursorPosition::cursor_marker() + ), + true, + ), + ( + format!("Select cool{};", QueryWithCursorPosition::cursor_marker()), + false, + ), + ( + format!("Select cool{}();", QueryWithCursorPosition::cursor_marker()), + true, + ), + ( + format!( + "Select upp{}ercase as title from users;", + QueryWithCursorPosition::cursor_marker() + ), false, ), ( - format!("Select upp{}ercase(name) as title from users;", CURSOR_POS), + format!( + "Select upp{}ercase(name) as title from users;", + QueryWithCursorPosition::cursor_marker() + ), true, ), ]; for (query, is_invocation) in test_cases { - let (position, text) = get_text_and_position(query.as_str().into()); + let (position, text) = QueryWithCursorPosition::from(query).get_text_and_position(); let tree = get_tree(text.as_str()); - let params = SanitizedCompletionParams { + let params = TreeSitterContextParams { position: (position as u32).into(), - text, - tree: std::borrow::Cow::Owned(tree), - schema: &pgt_schema_cache::SchemaCache::default(), + text: text.as_str(), + tree: &tree, }; - let ctx = CompletionContext::new(¶ms); + let ctx = TreesitterContext::new(params); assert_eq!(ctx.is_invocation, is_invocation); } @@ -965,32 +1000,34 @@ mod tests { #[test] fn does_not_fail_on_leading_whitespace() { let cases = vec![ - format!("{} select * from", CURSOR_POS), - format!(" {} select * from", CURSOR_POS), + format!( + "{} select * from", + QueryWithCursorPosition::cursor_marker() + ), + format!( + " {} select * from", + QueryWithCursorPosition::cursor_marker() + ), ]; for query in cases { - let (position, text) = get_text_and_position(query.as_str().into()); + let (position, text) = QueryWithCursorPosition::from(query).get_text_and_position(); let tree = get_tree(text.as_str()); - let params = SanitizedCompletionParams { + let params = TreeSitterContextParams { position: (position as u32).into(), - text, - tree: std::borrow::Cow::Owned(tree), - schema: &pgt_schema_cache::SchemaCache::default(), + text: &text, + tree: &tree, }; - let ctx = CompletionContext::new(¶ms); + let ctx = TreesitterContext::new(params); let node = ctx.node_under_cursor.as_ref().unwrap(); match node { 
NodeUnderCursor::TsNode(node) => { - assert_eq!( - ctx.get_ts_node_content(node), - Some(NodeText::Original("select".into())) - ); + assert_eq!(ctx.get_ts_node_content(node), Some("select".into())); assert_eq!( ctx.wrapping_clause_type, @@ -1004,29 +1041,28 @@ mod tests { #[test] fn does_not_fail_on_trailing_whitespace() { - let query = format!("select * from {}", CURSOR_POS); + let query = format!( + "select * from {}", + QueryWithCursorPosition::cursor_marker() + ); - let (position, text) = get_text_and_position(query.as_str().into()); + let (position, text) = QueryWithCursorPosition::from(query).get_text_and_position(); let tree = get_tree(text.as_str()); - let params = SanitizedCompletionParams { + let params = TreeSitterContextParams { position: (position as u32).into(), - text, - tree: std::borrow::Cow::Owned(tree), - schema: &pgt_schema_cache::SchemaCache::default(), + text: &text, + tree: &tree, }; - let ctx = CompletionContext::new(¶ms); + let ctx = TreesitterContext::new(params); let node = ctx.node_under_cursor.as_ref().unwrap(); match node { NodeUnderCursor::TsNode(node) => { - assert_eq!( - ctx.get_ts_node_content(node), - Some(NodeText::Original("from".into())) - ); + assert_eq!(ctx.get_ts_node_content(node), Some("from".into())); } _ => unreachable!(), } @@ -1034,29 +1070,25 @@ mod tests { #[test] fn does_not_fail_with_empty_statements() { - let query = format!("{}", CURSOR_POS); + let query = format!("{}", QueryWithCursorPosition::cursor_marker()); - let (position, text) = get_text_and_position(query.as_str().into()); + let (position, text) = QueryWithCursorPosition::from(query).get_text_and_position(); let tree = get_tree(text.as_str()); - let params = SanitizedCompletionParams { + let params = TreeSitterContextParams { position: (position as u32).into(), - text, - tree: std::borrow::Cow::Owned(tree), - schema: &pgt_schema_cache::SchemaCache::default(), + text: &text, + tree: &tree, }; - let ctx = CompletionContext::new(¶ms); + let ctx = TreesitterContext::new(params); let node = ctx.node_under_cursor.as_ref().unwrap(); match node { NodeUnderCursor::TsNode(node) => { - assert_eq!( - ctx.get_ts_node_content(node), - Some(NodeText::Original("".into())) - ); + assert_eq!(ctx.get_ts_node_content(node), Some("".into())); assert_eq!(ctx.wrapping_clause_type, None); } _ => unreachable!(), @@ -1067,29 +1099,25 @@ mod tests { fn does_not_fail_on_incomplete_keywords() { // Instead of autocompleting "FROM", we'll assume that the user // is selecting a certain column name, such as `frozen_account`. 
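// A quick way to see this, as a sketch (get_tree is the module's test
// helper; the exact s-expression depends on the pinned tree_sitter_sql
// grammar):
let tree = get_tree("select * fro");
println!("{}", tree.root_node().to_sexp());
// The trailing "fro" is parsed as an identifier inside the select clause
// rather than as a misspelled FROM keyword, which is why the assertions
// below expect WrappingClause::Select.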
- let query = format!("select * fro{}", CURSOR_POS); + let query = format!("select * fro{}", QueryWithCursorPosition::cursor_marker()); - let (position, text) = get_text_and_position(query.as_str().into()); + let (position, text) = QueryWithCursorPosition::from(query).get_text_and_position(); let tree = get_tree(text.as_str()); - let params = SanitizedCompletionParams { + let params = TreeSitterContextParams { position: (position as u32).into(), - text, - tree: std::borrow::Cow::Owned(tree), - schema: &pgt_schema_cache::SchemaCache::default(), + text: &text, + tree: &tree, }; - let ctx = CompletionContext::new(¶ms); + let ctx = TreesitterContext::new(params); let node = ctx.node_under_cursor.as_ref().unwrap(); match node { NodeUnderCursor::TsNode(node) => { - assert_eq!( - ctx.get_ts_node_content(node), - Some(NodeText::Original("fro".into())) - ); + assert_eq!(ctx.get_ts_node_content(node), Some("fro".into())); assert_eq!(ctx.wrapping_clause_type, Some(WrappingClause::Select)); } _ => unreachable!(), diff --git a/crates/pgt_completions/src/context/policy_parser.rs b/crates/pgt_treesitter/src/context/policy_parser.rs similarity index 95% rename from crates/pgt_completions/src/context/policy_parser.rs rename to crates/pgt_treesitter/src/context/policy_parser.rs index bcc60499..77664516 100644 --- a/crates/pgt_completions/src/context/policy_parser.rs +++ b/crates/pgt_treesitter/src/context/policy_parser.rs @@ -212,16 +212,17 @@ mod tests { use crate::{ context::base_parser::CompletionStatementParser, context::policy_parser::{PolicyContext, PolicyStmtKind}, - test_helper::CURSOR_POS, }; + use pgt_test_utils::QueryWithCursorPosition; + use super::PolicyParser; fn with_pos(query: String) -> (usize, String) { let mut pos: Option = None; for (p, c) in query.char_indices() { - if c == CURSOR_POS { + if c == QueryWithCursorPosition::cursor_marker() { pos = Some(p); break; } @@ -229,7 +230,9 @@ mod tests { ( pos.expect("Please add cursor position!"), - query.replace(CURSOR_POS, "REPLACED_TOKEN").to_string(), + query + .replace(QueryWithCursorPosition::cursor_marker(), "REPLACED_TOKEN") + .to_string(), ) } @@ -239,7 +242,7 @@ mod tests { r#" create policy {} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = PolicyParser::get_context(query.as_str(), pos); @@ -265,7 +268,7 @@ mod tests { r#" create policy "my cool policy" {} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = PolicyParser::get_context(query.as_str(), pos); @@ -291,7 +294,7 @@ mod tests { r#" create policy "my cool policy" on {} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = PolicyParser::get_context(query.as_str(), pos); @@ -317,7 +320,7 @@ mod tests { r#" create policy "my cool policy" on auth.{} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = PolicyParser::get_context(query.as_str(), pos); @@ -344,7 +347,7 @@ mod tests { create policy "my cool policy" on auth.users as {} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = PolicyParser::get_context(query.as_str(), pos); @@ -372,7 +375,7 @@ mod tests { as permissive {} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = PolicyParser::get_context(query.as_str(), pos); @@ -400,7 +403,7 @@ mod tests { as permissive to {} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = PolicyParser::get_context(query.as_str(), pos); @@ -432,7 +435,7 @@ mod tests { to all using (true); "#, - CURSOR_POS + 
QueryWithCursorPosition::cursor_marker() )); let context = PolicyParser::get_context(query.as_str(), pos); @@ -464,7 +467,7 @@ mod tests { to all using (true); "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = PolicyParser::get_context(query.as_str(), pos); @@ -493,7 +496,7 @@ mod tests { r#" drop policy {} on auth.users; "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = PolicyParser::get_context(query.as_str(), pos); @@ -520,7 +523,7 @@ mod tests { r#" drop policy "{}" on auth.users; "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = PolicyParser::get_context(query.as_str(), pos); @@ -549,7 +552,7 @@ mod tests { r#" drop policy "{} on auth.users; "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = PolicyParser::get_context(query.as_str(), pos); @@ -567,7 +570,7 @@ mod tests { to all using (id = {}) "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = PolicyParser::get_context(query.as_str(), pos); @@ -598,7 +601,7 @@ mod tests { to all using ({} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = PolicyParser::get_context(query.as_str(), pos); @@ -629,7 +632,7 @@ mod tests { to all with check ({} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = PolicyParser::get_context(query.as_str(), pos); diff --git a/crates/pgt_completions/src/context/revoke_parser.rs b/crates/pgt_treesitter/src/context/revoke_parser.rs similarity index 94% rename from crates/pgt_completions/src/context/revoke_parser.rs rename to crates/pgt_treesitter/src/context/revoke_parser.rs index e0c43934..4f5b09ec 100644 --- a/crates/pgt_completions/src/context/revoke_parser.rs +++ b/crates/pgt_treesitter/src/context/revoke_parser.rs @@ -180,14 +180,15 @@ mod tests { use crate::{ context::base_parser::CompletionStatementParser, context::revoke_parser::{RevokeContext, RevokeParser}, - test_helper::CURSOR_POS, }; + use pgt_test_utils::QueryWithCursorPosition; + fn with_pos(query: String) -> (usize, String) { let mut pos: Option = None; for (p, c) in query.char_indices() { - if c == CURSOR_POS { + if c == QueryWithCursorPosition::cursor_marker() { pos = Some(p); break; } @@ -195,7 +196,9 @@ mod tests { ( pos.expect("Please add cursor position!"), - query.replace(CURSOR_POS, "REPLACED_TOKEN").to_string(), + query + .replace(QueryWithCursorPosition::cursor_marker(), "REPLACED_TOKEN") + .to_string(), ) } @@ -205,7 +208,7 @@ mod tests { r#" revoke {} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = RevokeParser::get_context(query.as_str(), pos); @@ -228,7 +231,7 @@ mod tests { r#" revoke select on {} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = RevokeParser::get_context(query.as_str(), pos); @@ -251,7 +254,7 @@ mod tests { r#" revoke select on public.{} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = RevokeParser::get_context(query.as_str(), pos); @@ -274,7 +277,7 @@ mod tests { r#" revoke select on public.users from {} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = RevokeParser::get_context(query.as_str(), pos); @@ -297,7 +300,7 @@ mod tests { r#" revoke select on public.users from alice, {} "#, - CURSOR_POS + QueryWithCursorPosition::cursor_marker() )); let context = RevokeParser::get_context(query.as_str(), pos); @@ -320,7 +323,7 @@ mod tests { r#" revoke select on "MySchema"."MyTable" from {} "#, - CURSOR_POS + 
QueryWithCursorPosition::cursor_marker() )); let context = RevokeParser::get_context(query.as_str(), pos); diff --git a/crates/pgt_treesitter/src/lib.rs b/crates/pgt_treesitter/src/lib.rs new file mode 100644 index 00000000..6b19db53 --- /dev/null +++ b/crates/pgt_treesitter/src/lib.rs @@ -0,0 +1,5 @@ +pub mod context; +pub mod queries; + +pub use context::*; +pub use queries::*; diff --git a/crates/pgt_treesitter_queries/src/queries/insert_columns.rs b/crates/pgt_treesitter/src/queries/insert_columns.rs similarity index 97% rename from crates/pgt_treesitter_queries/src/queries/insert_columns.rs rename to crates/pgt_treesitter/src/queries/insert_columns.rs index 3e88d998..94d67b69 100644 --- a/crates/pgt_treesitter_queries/src/queries/insert_columns.rs +++ b/crates/pgt_treesitter/src/queries/insert_columns.rs @@ -1,6 +1,6 @@ use std::sync::LazyLock; -use crate::{Query, QueryResult}; +use crate::queries::{Query, QueryResult}; use super::QueryTryFrom; @@ -51,7 +51,7 @@ impl<'a> QueryTryFrom<'a> for InsertColumnMatch<'a> { } impl<'a> Query<'a> for InsertColumnMatch<'a> { - fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec> { + fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec> { let mut cursor = tree_sitter::QueryCursor::new(); let matches = cursor.matches(&TS_QUERY, root_node, stmt.as_bytes()); @@ -73,7 +73,7 @@ impl<'a> Query<'a> for InsertColumnMatch<'a> { #[cfg(test)] mod tests { use super::InsertColumnMatch; - use crate::TreeSitterQueriesExecutor; + use crate::queries::TreeSitterQueriesExecutor; #[test] fn finds_all_insert_columns() { diff --git a/crates/pgt_treesitter_queries/src/lib.rs b/crates/pgt_treesitter/src/queries/mod.rs similarity index 72% rename from crates/pgt_treesitter_queries/src/lib.rs rename to crates/pgt_treesitter/src/queries/mod.rs index 4bf71e74..1d24f07a 100644 --- a/crates/pgt_treesitter_queries/src/lib.rs +++ b/crates/pgt_treesitter/src/queries/mod.rs @@ -1,8 +1,91 @@ -pub mod queries; +mod insert_columns; +mod parameters; +mod relations; +mod select_columns; +mod table_aliases; +mod where_columns; use std::slice::Iter; -use queries::{Query, QueryResult}; +pub use insert_columns::*; +pub use parameters::*; +pub use relations::*; +pub use select_columns::*; +pub use table_aliases::*; +pub use where_columns::*; + +#[derive(Debug)] +pub enum QueryResult<'a> { + Relation(RelationMatch<'a>), + Parameter(ParameterMatch<'a>), + TableAliases(TableAliasMatch<'a>), + SelectClauseColumns(SelectColumnMatch<'a>), + InsertClauseColumns(InsertColumnMatch<'a>), + WhereClauseColumns(WhereColumnMatch<'a>), +} + +impl QueryResult<'_> { + pub fn within_range(&self, range: &tree_sitter::Range) -> bool { + match self { + QueryResult::Relation(rm) => { + let start = match rm.schema { + Some(s) => s.start_position(), + None => rm.table.start_position(), + }; + + let end = rm.table.end_position(); + + start >= range.start_point && end <= range.end_point + } + Self::Parameter(pm) => { + let node_range = pm.node.range(); + + node_range.start_point >= range.start_point + && node_range.end_point <= range.end_point + } + QueryResult::TableAliases(m) => { + let start = m.table.start_position(); + let end = m.alias.end_position(); + start >= range.start_point && end <= range.end_point + } + Self::SelectClauseColumns(cm) => { + let start = match cm.alias { + Some(n) => n.start_position(), + None => cm.column.start_position(), + }; + + let end = cm.column.end_position(); + + start >= range.start_point && end <= range.end_point + } + Self::WhereClauseColumns(cm) 
=> { + let start = match cm.alias { + Some(n) => n.start_position(), + None => cm.column.start_position(), + }; + + let end = cm.column.end_position(); + + start >= range.start_point && end <= range.end_point + } + Self::InsertClauseColumns(cm) => { + let start = cm.column.start_position(); + let end = cm.column.end_position(); + start >= range.start_point && end <= range.end_point + } + } + } +} + +// This trait enforces that for any `Self` that implements `Query`, +// its &Self must implement TryFrom<&QueryResult> +pub(crate) trait QueryTryFrom<'a>: Sized { + type Ref: for<'any> TryFrom<&'a QueryResult<'a>, Error = String>; +} + +pub(crate) trait Query<'a>: QueryTryFrom<'a> { + fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec>; +} pub struct TreeSitterQueriesExecutor<'a> { root_node: tree_sitter::Node<'a>, @@ -68,9 +151,8 @@ impl<'a> Iterator for QueryResultIter<'a> { #[cfg(test)] mod tests { - use crate::{ - TreeSitterQueriesExecutor, - queries::{ParameterMatch, RelationMatch, TableAliasMatch}, + use crate::queries::{ + ParameterMatch, RelationMatch, TableAliasMatch, TreeSitterQueriesExecutor, }; #[test] diff --git a/crates/pgt_treesitter_queries/src/queries/parameters.rs b/crates/pgt_treesitter/src/queries/parameters.rs similarity index 96% rename from crates/pgt_treesitter_queries/src/queries/parameters.rs rename to crates/pgt_treesitter/src/queries/parameters.rs index 85ea9ad2..0b7f2e3d 100644 --- a/crates/pgt_treesitter_queries/src/queries/parameters.rs +++ b/crates/pgt_treesitter/src/queries/parameters.rs @@ -1,6 +1,6 @@ use std::sync::LazyLock; -use crate::{Query, QueryResult}; +use crate::queries::{Query, QueryResult}; use super::QueryTryFrom; @@ -59,7 +59,7 @@ impl<'a> QueryTryFrom<'a> for ParameterMatch<'a> { } impl<'a> Query<'a> for ParameterMatch<'a> { - fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec> { + fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec> { let mut cursor = tree_sitter::QueryCursor::new(); let matches = cursor.matches(&TS_QUERY, root_node, stmt.as_bytes()); diff --git a/crates/pgt_treesitter_queries/src/queries/relations.rs b/crates/pgt_treesitter/src/queries/relations.rs similarity index 98% rename from crates/pgt_treesitter_queries/src/queries/relations.rs rename to crates/pgt_treesitter/src/queries/relations.rs index 2d7e4431..cb6a6bea 100644 --- a/crates/pgt_treesitter_queries/src/queries/relations.rs +++ b/crates/pgt_treesitter/src/queries/relations.rs @@ -1,6 +1,6 @@ use std::sync::LazyLock; -use crate::{Query, QueryResult}; +use crate::queries::{Query, QueryResult}; use super::QueryTryFrom; @@ -79,7 +79,7 @@ impl<'a> QueryTryFrom<'a> for RelationMatch<'a> { } impl<'a> Query<'a> for RelationMatch<'a> { - fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec> { + fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec> { let mut cursor = tree_sitter::QueryCursor::new(); let matches = cursor.matches(&TS_QUERY, root_node, stmt.as_bytes()); @@ -112,8 +112,9 @@ impl<'a> Query<'a> for RelationMatch<'a> { #[cfg(test)] mod tests { + use crate::queries::TreeSitterQueriesExecutor; + use super::RelationMatch; - use crate::TreeSitterQueriesExecutor; #[test] fn finds_table_without_schema() { diff --git a/crates/pgt_treesitter_queries/src/queries/select_columns.rs b/crates/pgt_treesitter/src/queries/select_columns.rs similarity index 97% rename from crates/pgt_treesitter_queries/src/queries/select_columns.rs rename to crates/pgt_treesitter/src/queries/select_columns.rs index 
00b6977d..f232abc3 100644 --- a/crates/pgt_treesitter_queries/src/queries/select_columns.rs +++ b/crates/pgt_treesitter/src/queries/select_columns.rs @@ -1,6 +1,6 @@ use std::sync::LazyLock; -use crate::{Query, QueryResult}; +use crate::queries::{Query, QueryResult}; use super::QueryTryFrom; @@ -63,7 +63,7 @@ impl<'a> QueryTryFrom<'a> for SelectColumnMatch<'a> { } impl<'a> Query<'a> for SelectColumnMatch<'a> { - fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec> { + fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec> { let mut cursor = tree_sitter::QueryCursor::new(); let matches = cursor.matches(&TS_QUERY, root_node, stmt.as_bytes()); @@ -96,7 +96,7 @@ impl<'a> Query<'a> for SelectColumnMatch<'a> { #[cfg(test)] mod tests { - use crate::TreeSitterQueriesExecutor; + use crate::queries::TreeSitterQueriesExecutor; use super::SelectColumnMatch; diff --git a/crates/pgt_treesitter_queries/src/queries/table_aliases.rs b/crates/pgt_treesitter/src/queries/table_aliases.rs similarity index 97% rename from crates/pgt_treesitter_queries/src/queries/table_aliases.rs rename to crates/pgt_treesitter/src/queries/table_aliases.rs index 4297a218..70d4d52e 100644 --- a/crates/pgt_treesitter_queries/src/queries/table_aliases.rs +++ b/crates/pgt_treesitter/src/queries/table_aliases.rs @@ -1,6 +1,6 @@ use std::sync::LazyLock; -use crate::{Query, QueryResult}; +use crate::queries::{Query, QueryResult}; use super::QueryTryFrom; @@ -69,7 +69,7 @@ impl<'a> QueryTryFrom<'a> for TableAliasMatch<'a> { } impl<'a> Query<'a> for TableAliasMatch<'a> { - fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec> { + fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec> { let mut cursor = tree_sitter::QueryCursor::new(); let matches = cursor.matches(&TS_QUERY, root_node, stmt.as_bytes()); diff --git a/crates/pgt_treesitter_queries/src/queries/where_columns.rs b/crates/pgt_treesitter/src/queries/where_columns.rs similarity index 97% rename from crates/pgt_treesitter_queries/src/queries/where_columns.rs rename to crates/pgt_treesitter/src/queries/where_columns.rs index 8e19590d..b683300b 100644 --- a/crates/pgt_treesitter_queries/src/queries/where_columns.rs +++ b/crates/pgt_treesitter/src/queries/where_columns.rs @@ -1,6 +1,6 @@ use std::sync::LazyLock; -use crate::{Query, QueryResult}; +use crate::queries::{Query, QueryResult}; use super::QueryTryFrom; @@ -64,7 +64,7 @@ impl<'a> QueryTryFrom<'a> for WhereColumnMatch<'a> { } impl<'a> Query<'a> for WhereColumnMatch<'a> { - fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec> { + fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec> { let mut cursor = tree_sitter::QueryCursor::new(); let matches = cursor.matches(&TS_QUERY, root_node, stmt.as_bytes()); diff --git a/crates/pgt_treesitter_queries/src/queries/mod.rs b/crates/pgt_treesitter_queries/src/queries/mod.rs deleted file mode 100644 index b9f39aed..00000000 --- a/crates/pgt_treesitter_queries/src/queries/mod.rs +++ /dev/null @@ -1,86 +0,0 @@ -mod insert_columns; -mod parameters; -mod relations; -mod select_columns; -mod table_aliases; -mod where_columns; - -pub use insert_columns::*; -pub use parameters::*; -pub use relations::*; -pub use select_columns::*; -pub use table_aliases::*; -pub use where_columns::*; - -#[derive(Debug)] -pub enum QueryResult<'a> { - Relation(RelationMatch<'a>), - Parameter(ParameterMatch<'a>), - TableAliases(TableAliasMatch<'a>), - SelectClauseColumns(SelectColumnMatch<'a>), - 
InsertClauseColumns(InsertColumnMatch<'a>), - WhereClauseColumns(WhereColumnMatch<'a>), -} - -impl QueryResult<'_> { - pub fn within_range(&self, range: &tree_sitter::Range) -> bool { - match self { - QueryResult::Relation(rm) => { - let start = match rm.schema { - Some(s) => s.start_position(), - None => rm.table.start_position(), - }; - - let end = rm.table.end_position(); - - start >= range.start_point && end <= range.end_point - } - Self::Parameter(pm) => { - let node_range = pm.node.range(); - - node_range.start_point >= range.start_point - && node_range.end_point <= range.end_point - } - QueryResult::TableAliases(m) => { - let start = m.table.start_position(); - let end = m.alias.end_position(); - start >= range.start_point && end <= range.end_point - } - Self::SelectClauseColumns(cm) => { - let start = match cm.alias { - Some(n) => n.start_position(), - None => cm.column.start_position(), - }; - - let end = cm.column.end_position(); - - start >= range.start_point && end <= range.end_point - } - Self::WhereClauseColumns(cm) => { - let start = match cm.alias { - Some(n) => n.start_position(), - None => cm.column.start_position(), - }; - - let end = cm.column.end_position(); - - start >= range.start_point && end <= range.end_point - } - Self::InsertClauseColumns(cm) => { - let start = cm.column.start_position(); - let end = cm.column.end_position(); - start >= range.start_point && end <= range.end_point - } - } - } -} - -// This trait enforces that for any `Self` that implements `Query`, -// its &Self must implement TryFrom<&QueryResult> -pub(crate) trait QueryTryFrom<'a>: Sized { - type Ref: for<'any> TryFrom<&'a QueryResult<'a>, Error = String>; -} - -pub(crate) trait Query<'a>: QueryTryFrom<'a> { - fn execute(root_node: tree_sitter::Node<'a>, stmt: &'a str) -> Vec>; -} diff --git a/crates/pgt_typecheck/Cargo.toml b/crates/pgt_typecheck/Cargo.toml index caacc6d1..175ecd59 100644 --- a/crates/pgt_typecheck/Cargo.toml +++ b/crates/pgt_typecheck/Cargo.toml @@ -12,16 +12,16 @@ version = "0.0.0" [dependencies] -pgt_console.workspace = true -pgt_diagnostics.workspace = true -pgt_query_ext.workspace = true -pgt_schema_cache.workspace = true -pgt_text_size.workspace = true -pgt_treesitter_queries.workspace = true -sqlx.workspace = true -tokio.workspace = true -tree-sitter.workspace = true -tree_sitter_sql.workspace = true +pgt_console.workspace = true +pgt_diagnostics.workspace = true +pgt_query_ext.workspace = true +pgt_schema_cache.workspace = true +pgt_text_size.workspace = true +pgt_treesitter.workspace = true +sqlx.workspace = true +tokio.workspace = true +tree-sitter.workspace = true +tree_sitter_sql.workspace = true [dev-dependencies] insta.workspace = true diff --git a/crates/pgt_typecheck/src/typed_identifier.rs b/crates/pgt_typecheck/src/typed_identifier.rs index 710b2fe9..1ee4095d 100644 --- a/crates/pgt_typecheck/src/typed_identifier.rs +++ b/crates/pgt_typecheck/src/typed_identifier.rs @@ -1,5 +1,5 @@ use pgt_schema_cache::PostgresType; -use pgt_treesitter_queries::{TreeSitterQueriesExecutor, queries::ParameterMatch}; +use pgt_treesitter::queries::{ParameterMatch, TreeSitterQueriesExecutor}; /// A typed identifier is a parameter that has a type associated with it. /// It is used to replace parameters within the SQL string. 
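For orientation: the deleted queries/mod.rs above moves verbatim into pgt_treesitter::queries, so downstream crates like pgt_typecheck only swap their import paths. A minimal sketch of how the relocated pieces compose, assuming the executor keeps a new/add_query_results/get_iter surface (those methods live in the unchanged middle of the file and are only implied by the test imports here); within_range and the QueryResult::Relation variant are the parts actually visible in the hunks above:

    use pgt_treesitter::queries::{QueryResult, RelationMatch, TreeSitterQueriesExecutor};

    // Collect the tables a statement references inside a given range.
    fn tables_in_range<'a>(
        sql: &'a str,
        root: tree_sitter::Node<'a>,
        range: &tree_sitter::Range,
    ) -> Vec<&'a str> {
        // Assumed API: `new` and `add_query_results` are inferred from call
        // sites elsewhere in the workspace; they are not shown in this hunk.
        let mut executor = TreeSitterQueriesExecutor::new(root, sql);
        executor.add_query_results::<RelationMatch>();

        executor
            .get_iter(None)
            // `within_range` is the method defined in the relocated mod.rs above.
            .filter(|result| result.within_range(range))
            .filter_map(|result| match result {
                // `rm.table` is a tree_sitter::Node, so its text can be sliced
                // back out of the original statement.
                QueryResult::Relation(rm) => rm.table.utf8_text(sql.as_bytes()).ok(),
                _ => None,
            })
            .collect()
    }
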
diff --git a/crates/pgt_workspace/src/features/completions.rs b/crates/pgt_workspace/src/features/completions.rs index c6f05c6e..a41dd06e 100644 --- a/crates/pgt_workspace/src/features/completions.rs +++ b/crates/pgt_workspace/src/features/completions.rs @@ -82,17 +82,17 @@ mod tests { use super::get_statement_for_completions; - static CURSOR_POSITION: &str = "€"; + use pgt_test_utils::QueryWithCursorPosition; fn get_doc_and_pos(sql: &str) -> (Document, TextSize) { let pos = sql - .find(CURSOR_POSITION) + .find(QueryWithCursorPosition::cursor_marker()) .expect("Please add cursor position to test sql"); let pos: u32 = pos.try_into().unwrap(); ( - Document::new(sql.replace(CURSOR_POSITION, ""), 5), + Document::new(sql.replace(QueryWithCursorPosition::cursor_marker(), ""), 5), TextSize::new(pos), ) } @@ -107,7 +107,7 @@ mod tests { select 1; "#, - CURSOR_POSITION + QueryWithCursorPosition::cursor_marker() ); let (doc, position) = get_doc_and_pos(sql.as_str()); @@ -120,7 +120,7 @@ mod tests { #[test] fn does_not_break_when_no_statements_exist() { - let sql = CURSOR_POSITION.to_string(); + let sql = QueryWithCursorPosition::cursor_marker().to_string(); let (doc, position) = get_doc_and_pos(sql.as_str()); @@ -129,7 +129,10 @@ mod tests { #[test] fn does_not_return_overlapping_statements_if_too_close() { - let sql = format!("select * from {}select 1;", CURSOR_POSITION); + let sql = format!( + "select * from {}select 1;", + QueryWithCursorPosition::cursor_marker() + ); let (doc, position) = get_doc_and_pos(sql.as_str()); @@ -141,7 +144,10 @@ mod tests { #[test] fn is_fine_with_spaces() { - let sql = format!("select * from {} ;", CURSOR_POSITION); + let sql = format!( + "select * from {} ;", + QueryWithCursorPosition::cursor_marker() + ); let (doc, position) = get_doc_and_pos(sql.as_str()); @@ -153,7 +159,7 @@ mod tests { #[test] fn considers_offset() { - let sql = format!("select * from {}", CURSOR_POSITION); + let sql = format!("select * from {}", QueryWithCursorPosition::cursor_marker()); let (doc, position) = get_doc_and_pos(sql.as_str()); @@ -174,7 +180,7 @@ mod tests { select {} from cool; $$; "#, - CURSOR_POSITION + QueryWithCursorPosition::cursor_marker() ); let sql = sql.trim(); @@ -189,7 +195,10 @@ mod tests { #[test] fn does_not_consider_too_far_offset() { - let sql = format!("select * from {}", CURSOR_POSITION); + let sql = format!( + "select * from {}", + QueryWithCursorPosition::cursor_marker() + ); let (doc, position) = get_doc_and_pos(sql.as_str()); @@ -198,7 +207,10 @@ mod tests { #[test] fn does_not_consider_offset_if_statement_terminated_by_semi() { - let sql = format!("select * from users;{}", CURSOR_POSITION); + let sql = format!( + "select * from users;{}", + QueryWithCursorPosition::cursor_marker() + ); let (doc, position) = get_doc_and_pos(sql.as_str()); From e745328cbca0022fdaa25cda4ed8f66472b1f168 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Tue, 22 Jul 2025 19:03:46 +0200 Subject: [PATCH 110/114] chore: add custom libpg_query binding (#465) brings home the custom binding that was experimented in pg_parse. no functional changes, just different imports and a bit of cleanup. also removes the libpg_query submodule - we are now cloning the repo "live" in the build script. 
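the shape of the new crate's surface can be read off the call sites further
down in this patch; a minimal sketch of the parse entry point, assuming
into_root returns an Option as its unwrap/expect call sites below suggest:

    // sketch only: mirrors the usage in pgt_analyser's tests and lint rules below.
    fn is_drop_table(sql: &str) -> bool {
        // pgt_query::parse replaces the old pgt_query_ext::parse.
        let parsed = pgt_query::parse(sql).expect("failed to parse SQL");
        // into_root() turns the parse result into the root NodeEnum, if any.
        match parsed.into_root() {
            Some(pgt_query::NodeEnum::DropStmt(stmt)) => {
                // the protobuf types move along with the crate rename.
                stmt.remove_type() == pgt_query::protobuf::ObjectType::ObjectTable
            }
            _ => false,
        }
    }
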
closes #453 --- .gitmodules | 4 - Cargo.lock | 277 +- Cargo.toml | 5 +- crates/pgt_analyse/Cargo.toml | 2 +- .../pgt_analyse/src/analysed_file_context.rs | 2 +- crates/pgt_analyse/src/context.rs | 6 +- crates/pgt_analyse/src/registry.rs | 2 +- crates/pgt_analyser/Cargo.toml | 2 +- crates/pgt_analyser/src/lib.rs | 9 +- .../src/lint/safety/adding_required_field.rs | 7 +- .../src/lint/safety/ban_drop_column.rs | 6 +- .../src/lint/safety/ban_drop_database.rs | 2 +- .../src/lint/safety/ban_drop_not_null.rs | 6 +- .../src/lint/safety/ban_drop_table.rs | 4 +- .../src/lint/safety/ban_truncate_cascade.rs | 4 +- crates/pgt_analyser/tests/rules_tests.rs | 4 +- crates/pgt_query/Cargo.toml | 36 + crates/pgt_query/build.rs | 260 + crates/pgt_query/examples/api_example.rs | 42 + crates/pgt_query/src/deparse.rs | 93 + crates/pgt_query/src/error.rs | 23 + crates/pgt_query/src/fingerprint.rs | 359 + crates/pgt_query/src/iter_mut.rs | 1 + crates/pgt_query/src/iter_ref.rs | 1 + crates/pgt_query/src/lib.rs | 91 + crates/pgt_query/src/node_enum.rs | 33 + crates/pgt_query/src/node_mut.rs | 26 + crates/pgt_query/src/node_ref.rs | 26 + crates/pgt_query/src/node_structs.rs | 16 + crates/pgt_query/src/normalize.rs | 136 + crates/pgt_query/src/parse.rs | 149 + crates/pgt_query/src/plpgsql.rs | 38 + crates/pgt_query/src/protobuf.rs | 8846 +++++++++++++++++ crates/pgt_query/src/scan.rs | 33 + crates/pgt_query/src/split.rs | 86 + crates/pgt_query_ext/Cargo.toml | 4 +- crates/pgt_query_ext/src/diagnostics.rs | 4 +- crates/pgt_query_ext/src/lib.rs | 61 - .../Cargo.toml | 16 +- crates/pgt_query_macros/build.rs | 59 + .../pgt_query_macros/postgres/17-6.1.0.proto | 4110 ++++++++ crates/pgt_query_macros/src/iter_mut.rs | 142 + crates/pgt_query_macros/src/iter_ref.rs | 105 + crates/pgt_query_macros/src/lib.rs | 106 + crates/pgt_query_macros/src/node_enum.rs | 44 + crates/pgt_query_macros/src/node_mut.rs | 50 + crates/pgt_query_macros/src/node_ref.rs | 46 + crates/pgt_query_macros/src/node_structs.rs | 30 + crates/pgt_query_macros/src/proto_analyser.rs | 252 + crates/pgt_query_proto_parser/src/lib.rs | 9 - .../pgt_query_proto_parser/src/proto_file.rs | 60 - .../src/proto_parser.rs | 179 - crates/pgt_statement_splitter/Cargo.toml | 2 +- crates/pgt_type_resolver/Cargo.toml | 2 +- crates/pgt_type_resolver/src/functions.rs | 4 +- crates/pgt_type_resolver/src/types.rs | 14 +- crates/pgt_type_resolver/src/util.rs | 4 +- crates/pgt_typecheck/Cargo.toml | 2 +- crates/pgt_typecheck/src/lib.rs | 12 +- crates/pgt_typecheck/tests/diagnostics.rs | 5 +- crates/pgt_workspace/Cargo.toml | 1 + .../src/workspace/server/document.rs | 9 +- .../src/workspace/server/function_utils.rs | 14 +- .../src/workspace/server/pg_query.rs | 18 +- .../src/workspace/server/sql_function.rs | 14 +- docs/codegen/Cargo.toml | 1 + docs/codegen/src/rules_docs.rs | 36 +- libpg_query | 1 - xtask/rules_check/Cargo.toml | 1 + xtask/rules_check/src/lib.rs | 36 +- 70 files changed, 15556 insertions(+), 534 deletions(-) create mode 100644 crates/pgt_query/Cargo.toml create mode 100644 crates/pgt_query/build.rs create mode 100644 crates/pgt_query/examples/api_example.rs create mode 100644 crates/pgt_query/src/deparse.rs create mode 100644 crates/pgt_query/src/error.rs create mode 100644 crates/pgt_query/src/fingerprint.rs create mode 100644 crates/pgt_query/src/iter_mut.rs create mode 100644 crates/pgt_query/src/iter_ref.rs create mode 100644 crates/pgt_query/src/lib.rs create mode 100644 crates/pgt_query/src/node_enum.rs create mode 100644 crates/pgt_query/src/node_mut.rs 
create mode 100644 crates/pgt_query/src/node_ref.rs create mode 100644 crates/pgt_query/src/node_structs.rs create mode 100644 crates/pgt_query/src/normalize.rs create mode 100644 crates/pgt_query/src/parse.rs create mode 100644 crates/pgt_query/src/plpgsql.rs create mode 100644 crates/pgt_query/src/protobuf.rs create mode 100644 crates/pgt_query/src/scan.rs create mode 100644 crates/pgt_query/src/split.rs rename crates/{pgt_query_proto_parser => pgt_query_macros}/Cargo.toml (51%) create mode 100644 crates/pgt_query_macros/build.rs create mode 100644 crates/pgt_query_macros/postgres/17-6.1.0.proto create mode 100644 crates/pgt_query_macros/src/iter_mut.rs create mode 100644 crates/pgt_query_macros/src/iter_ref.rs create mode 100644 crates/pgt_query_macros/src/lib.rs create mode 100644 crates/pgt_query_macros/src/node_enum.rs create mode 100644 crates/pgt_query_macros/src/node_mut.rs create mode 100644 crates/pgt_query_macros/src/node_ref.rs create mode 100644 crates/pgt_query_macros/src/node_structs.rs create mode 100644 crates/pgt_query_macros/src/proto_analyser.rs delete mode 100644 crates/pgt_query_proto_parser/src/lib.rs delete mode 100644 crates/pgt_query_proto_parser/src/proto_file.rs delete mode 100644 crates/pgt_query_proto_parser/src/proto_parser.rs delete mode 160000 libpg_query diff --git a/.gitmodules b/.gitmodules index 4b56d748..9b62ce88 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,7 +1,3 @@ -[submodule "libpg_query"] - path = libpg_query - url = https://github.com/pganalyze/libpg_query.git - branch = 17-latest [submodule "crates/tree_sitter_sql/tree-sitter-sql"] path = lib/tree_sitter_sql/tree-sitter-sql url = https://github.com/DerekStride/tree-sitter-sql diff --git a/Cargo.lock b/Cargo.lock index 1bf796b7..d76baca3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -24,7 +24,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", - "getrandom", + "getrandom 0.2.15", "once_cell", "version_check", "zerocopy", @@ -106,6 +106,18 @@ version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" + [[package]] name = "assert_cmd" version = "2.0.16" @@ -333,6 +345,12 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "base64" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + [[package]] name = "base64" version = "0.22.1" @@ -353,25 +371,22 @@ checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" [[package]] name = "bindgen" -version = "0.66.1" +version = "0.72.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b84e06fc203107bfbad243f4aba2af864eb7db3b1cf46ea0a023b0b433d2a7" +checksum = "4f72209734318d0b619a5e0f5129918b848c416e122a3c4ce054e03cb87b726f" dependencies = [ "bitflags 2.6.0", "cexpr", "clang-sys", - "lazy_static", - "lazycell", + "itertools 0.10.5", "log", - "peeking_take_while", 
"prettyplease", "proc-macro2", "quote", "regex", - "rustc-hash 1.1.0", + "rustc-hash 2.1.0", "shlex", "syn 2.0.90", - "which", ] [[package]] @@ -683,6 +698,17 @@ dependencies = [ "serde", ] +[[package]] +name = "blake2b_simd" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" +dependencies = [ + "arrayref", + "arrayvec", + "constant_time_eq", +] + [[package]] name = "block-buffer" version = "0.10.4" @@ -884,6 +910,15 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +[[package]] +name = "clippy" +version = "0.0.302" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d911ee15579a3f50880d8c1d59ef6e79f9533127a3bd342462f5d584f5e8c294" +dependencies = [ + "term", +] + [[package]] name = "colorchoice" version = "1.0.3" @@ -917,6 +952,12 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" + [[package]] name = "convert_case" version = "0.6.0" @@ -1204,6 +1245,17 @@ dependencies = [ "dirs-sys", ] +[[package]] +name = "dirs" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fd78930633bd1c6e35c4b42b1df7b0cbc6bc191146e512bb3bedf243fcc3901" +dependencies = [ + "libc", + "redox_users 0.3.5", + "winapi", +] + [[package]] name = "dirs-sys" version = "0.4.1" @@ -1212,7 +1264,7 @@ checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" dependencies = [ "libc", "option-ext", - "redox_users", + "redox_users 0.4.6", "windows-sys 0.48.0", ] @@ -1247,6 +1299,7 @@ dependencies = [ "pgt_console", "pgt_diagnostics", "pgt_flags", + "pgt_query", "pgt_query_ext", "pgt_statement_splitter", "pgt_workspace", @@ -1287,6 +1340,12 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" +[[package]] +name = "easy-parallel" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2afbb9b0aef60e4f0d2b18129b6c0dff035a6f7dbbd17c2f38c1432102ee223c" + [[package]] name = "either" version = "1.13.0" @@ -1641,6 +1700,17 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "getrandom" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + [[package]] name = "getrandom" version = "0.2.15" @@ -1649,7 +1719,7 @@ checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] @@ -2128,12 +2198,6 @@ dependencies = [ "spin", ] -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - [[package]] name = "libc" version = "0.2.168" @@ -2373,7 +2437,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", ] @@ -2605,7 +2669,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.5.7", "smallvec", "windows-targets 0.52.6", ] @@ -2634,12 +2698,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "pem-rfc7468" version = "0.7.0" @@ -2665,24 +2723,6 @@ dependencies = [ "indexmap 2.7.0", ] -[[package]] -name = "pg_query" -version = "6.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f71c7c56dfe299ec6f98aa210aa23458be3b0610c485be60a5873c2f3627c40e" -dependencies = [ - "bindgen", - "cc", - "fs_extra", - "glob", - "itertools 0.10.5", - "prost", - "prost-build", - "serde", - "serde_json", - "thiserror 1.0.69", -] - [[package]] name = "pgt_analyse" version = "0.0.0" @@ -2692,7 +2732,7 @@ dependencies = [ "enumflags2", "pgt_console", "pgt_diagnostics", - "pgt_query_ext", + "pgt_query", "pgt_schema_cache", "pgt_text_size", "rustc-hash 2.1.0", @@ -2708,7 +2748,7 @@ dependencies = [ "pgt_analyse", "pgt_console", "pgt_diagnostics", - "pgt_query_ext", + "pgt_query", "pgt_schema_cache", "pgt_test_macros", "pgt_text_size", @@ -2936,23 +2976,42 @@ dependencies = [ "quote", ] +[[package]] +name = "pgt_query" +version = "0.0.0" +dependencies = [ + "bindgen", + "cc", + "clippy", + "easy-parallel", + "fs_extra", + "glob", + "pgt_query_macros", + "prost", + "prost-build", + "thiserror 1.0.69", + "which", +] + [[package]] name = "pgt_query_ext" version = "0.0.0" dependencies = [ - "petgraph", - "pg_query", "pgt_diagnostics", + "pgt_query", "pgt_text_size", ] [[package]] -name = "pgt_query_proto_parser" +name = "pgt_query_macros" version = "0.0.0" dependencies = [ "convert_case", - "protobuf", - "protobuf-parse", + "proc-macro2", + "prost-reflect", + "protox", + "quote", + "ureq", ] [[package]] @@ -2980,7 +3039,7 @@ dependencies = [ "ntest", "pgt_diagnostics", "pgt_lexer", - "pgt_query_ext", + "pgt_query", "pgt_text_size", "regex", ] @@ -3062,7 +3121,7 @@ dependencies = [ name = "pgt_type_resolver" version = "0.0.0" dependencies = [ - "pgt_query_ext", + "pgt_query", "pgt_schema_cache", ] @@ -3073,7 +3132,7 @@ dependencies = [ "insta", "pgt_console", "pgt_diagnostics", - "pgt_query_ext", + "pgt_query", "pgt_schema_cache", "pgt_test_utils", "pgt_text_size", @@ -3104,6 +3163,7 @@ dependencies = [ "pgt_diagnostics", "pgt_fs", "pgt_lexer", + "pgt_query", "pgt_query_ext", "pgt_schema_cache", "pgt_statement_splitter", @@ -3425,42 +3485,6 @@ dependencies = [ "prost", ] -[[package]] -name = "protobuf" -version = "3.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3a7c64d9bf75b1b8d981124c14c179074e8caa7dfe7b6a12e6222ddcd0c8f72" -dependencies = [ - "once_cell", - "protobuf-support", - "thiserror 1.0.69", -] - -[[package]] -name = "protobuf-parse" -version = "3.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322330e133eab455718444b4e033ebfac7c6528972c784fcde28d2cc783c6257" -dependencies = [ - "anyhow", - "indexmap 2.7.0", - "log", - "protobuf", - "protobuf-support", - "tempfile", - "thiserror 1.0.69", - "which", -] - -[[package]] -name = 
"protobuf-support" -version = "3.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b088fd20b938a875ea00843b6faf48579462630015c3788d397ad6a786663252" -dependencies = [ - "thiserror 1.0.69", -] - [[package]] name = "protox" version = "0.8.0" @@ -3567,7 +3591,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", ] [[package]] @@ -3590,6 +3614,12 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "redox_syscall" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" + [[package]] name = "redox_syscall" version = "0.5.7" @@ -3599,13 +3629,24 @@ dependencies = [ "bitflags 2.6.0", ] +[[package]] +name = "redox_users" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" +dependencies = [ + "getrandom 0.1.16", + "redox_syscall 0.1.57", + "rust-argon2", +] + [[package]] name = "redox_users" version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom", + "getrandom 0.2.15", "libredox", "thiserror 1.0.69", ] @@ -3662,7 +3703,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom", + "getrandom 0.2.15", "libc", "untrusted", "windows-sys 0.52.0", @@ -3697,12 +3738,25 @@ dependencies = [ "pgt_analyser", "pgt_console", "pgt_diagnostics", + "pgt_query", "pgt_query_ext", "pgt_statement_splitter", "pgt_workspace", "pulldown-cmark", ] +[[package]] +name = "rust-argon2" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" +dependencies = [ + "base64 0.13.1", + "blake2b_simd", + "constant_time_eq", + "crossbeam-utils", +] + [[package]] name = "rustc-demangle" version = "0.1.24" @@ -4197,7 +4251,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64bb4714269afa44aef2755150a0fc19d756fb580a67db8885608cf02f47d06a" dependencies = [ "atoi", - "base64", + "base64 0.22.1", "bitflags 2.6.0", "byteorder", "bytes", @@ -4239,7 +4293,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fa91a732d854c5d7726349bb4bb879bb9478993ceb764247660aee25f67c2f8" dependencies = [ "atoi", - "base64", + "base64 0.22.1", "bitflags 2.6.0", "byteorder", "crc", @@ -4415,12 +4469,23 @@ checksum = "9a8a559c81686f576e8cd0290cd2a24a2a9ad80c98b3478856500fcbd7acd704" dependencies = [ "cfg-if", "fastrand 2.3.0", - "getrandom", + "getrandom 0.2.15", "once_cell", "rustix 0.38.42", "windows-sys 0.59.0", ] +[[package]] +name = "term" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edd106a334b7657c10b7c540a0106114feadeb4dc314513e97df481d5d966f42" +dependencies = [ + "byteorder", + "dirs", + "winapi", +] + [[package]] name = "termcolor" version = "1.4.1" @@ -4991,7 +5056,7 @@ version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02d1a66277ed75f640d608235660df48c8e3c19f3b4edb6a263315626cc3c01d" dependencies = [ - "base64", + "base64 0.22.1", "flate2", "log", "once_cell", @@ 
-5037,7 +5102,7 @@ version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" dependencies = [ - "getrandom", + "getrandom 0.2.15", ] [[package]] @@ -5109,6 +5174,12 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -5218,14 +5289,14 @@ dependencies = [ [[package]] name = "which" -version = "4.4.2" +version = "6.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +checksum = "b4ee928febd44d98f2f459a4a79bd4d928591333a494a10a868418ac1b39cf1f" dependencies = [ "either", "home", - "once_cell", "rustix 0.38.42", + "winsafe", ] [[package]] @@ -5234,7 +5305,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ - "redox_syscall", + "redox_syscall 0.5.7", "wasite", ] @@ -5426,6 +5497,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "winsafe" +version = "0.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904" + [[package]] name = "write-json" version = "0.1.4" diff --git a/Cargo.toml b/Cargo.toml index 23e21889..e243ab3e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -44,12 +44,14 @@ smallvec = { version = "1.13.2", features = ["union", "const_new strum = { version = "0.27.1", features = ["derive"] } # this will use tokio if available, otherwise async-std convert_case = "0.6.0" +prost = "0.13.5" prost-reflect = "0.15.3" protox = "0.8.0" sqlx = { version = "0.8.2", features = ["runtime-tokio", "runtime-async-std", "postgres", "json"] } syn = { version = "1.0.109", features = ["full"] } termcolor = "1.4.1" test-log = "0.2.17" +thiserror = "1.0.31" tokio = { version = "1.40.0", features = ["full"] } tracing = { version = "0.1.40", default-features = false, features = ["std"] } tracing-bunyan-formatter = { version = "0.3.10 " } @@ -74,8 +76,9 @@ pgt_lexer = { path = "./crates/pgt_lexer", version = "0.0.0" } pgt_lexer_codegen = { path = "./crates/pgt_lexer_codegen", version = "0.0.0" } pgt_lsp = { path = "./crates/pgt_lsp", version = "0.0.0" } pgt_markup = { path = "./crates/pgt_markup", version = "0.0.0" } +pgt_query = { path = "./crates/pgt_query", version = "0.0.0" } pgt_query_ext = { path = "./crates/pgt_query_ext", version = "0.0.0" } -pgt_query_proto_parser = { path = "./crates/pgt_query_proto_parser", version = "0.0.0" } +pgt_query_macros = { path = "./crates/pgt_query_macros", version = "0.0.0" } pgt_schema_cache = { path = "./crates/pgt_schema_cache", version = "0.0.0" } pgt_statement_splitter = { path = "./crates/pgt_statement_splitter", version = "0.0.0" } pgt_suppressions = { path = "./crates/pgt_suppressions", version = "0.0.0" } diff --git a/crates/pgt_analyse/Cargo.toml b/crates/pgt_analyse/Cargo.toml index 4d30784c..3da60034 100644 --- a/crates/pgt_analyse/Cargo.toml +++ b/crates/pgt_analyse/Cargo.toml @@ -15,7 +15,7 @@ version = "0.0.0" [dependencies] pgt_console.workspace = true pgt_diagnostics.workspace = true -pgt_query_ext.workspace = true +pgt_query.workspace = true 
pgt_schema_cache.workspace = true rustc-hash = { workspace = true } diff --git a/crates/pgt_analyse/src/analysed_file_context.rs b/crates/pgt_analyse/src/analysed_file_context.rs index cba53eeb..82dc4071 100644 --- a/crates/pgt_analyse/src/analysed_file_context.rs +++ b/crates/pgt_analyse/src/analysed_file_context.rs @@ -3,5 +3,5 @@ pub struct AnalysedFileContext {} impl AnalysedFileContext { #[allow(unused)] - pub fn update_from(&mut self, stmt_root: &pgt_query_ext::NodeEnum) {} + pub fn update_from(&mut self, stmt_root: &pgt_query::NodeEnum) {} } diff --git a/crates/pgt_analyse/src/context.rs b/crates/pgt_analyse/src/context.rs index 7447c0bb..ddd5d28d 100644 --- a/crates/pgt_analyse/src/context.rs +++ b/crates/pgt_analyse/src/context.rs @@ -7,7 +7,7 @@ use crate::{ }; pub struct RuleContext<'a, R: Rule> { - stmt: &'a pgt_query_ext::NodeEnum, + stmt: &'a pgt_query::NodeEnum, options: &'a R::Options, schema_cache: Option<&'a SchemaCache>, file_context: &'a AnalysedFileContext, @@ -19,7 +19,7 @@ where { #[allow(clippy::too_many_arguments)] pub fn new( - stmt: &'a pgt_query_ext::NodeEnum, + stmt: &'a pgt_query::NodeEnum, options: &'a R::Options, schema_cache: Option<&'a SchemaCache>, file_context: &'a AnalysedFileContext, @@ -43,7 +43,7 @@ where } /// Returns the AST root - pub fn stmt(&self) -> &pgt_query_ext::NodeEnum { + pub fn stmt(&self) -> &pgt_query::NodeEnum { self.stmt } diff --git a/crates/pgt_analyse/src/registry.rs b/crates/pgt_analyse/src/registry.rs index d43d7711..45d2c202 100644 --- a/crates/pgt_analyse/src/registry.rs +++ b/crates/pgt_analyse/src/registry.rs @@ -157,7 +157,7 @@ impl RuleRegistry { } pub struct RegistryRuleParams<'a> { - pub root: &'a pgt_query_ext::NodeEnum, + pub root: &'a pgt_query::NodeEnum, pub options: &'a AnalyserOptions, pub analysed_file_context: &'a AnalysedFileContext, pub schema_cache: Option<&'a pgt_schema_cache::SchemaCache>, diff --git a/crates/pgt_analyser/Cargo.toml b/crates/pgt_analyser/Cargo.toml index 5f65b978..0cf7a334 100644 --- a/crates/pgt_analyser/Cargo.toml +++ b/crates/pgt_analyser/Cargo.toml @@ -15,7 +15,7 @@ version = "0.0.0" pgt_analyse = { workspace = true } pgt_console = { workspace = true } pgt_diagnostics = { workspace = true } -pgt_query_ext = { workspace = true } +pgt_query = { workspace = true } pgt_schema_cache = { workspace = true } pgt_text_size = { workspace = true } serde = { workspace = true } diff --git a/crates/pgt_analyser/src/lib.rs b/crates/pgt_analyser/src/lib.rs index f96b6f6d..ccdc0420 100644 --- a/crates/pgt_analyser/src/lib.rs +++ b/crates/pgt_analyser/src/lib.rs @@ -32,7 +32,7 @@ pub struct Analyser<'a> { #[derive(Debug)] pub struct AnalysableStatement { - pub root: pgt_query_ext::NodeEnum, + pub root: pgt_query::NodeEnum, pub range: pgt_text_size::TextRange, } @@ -123,7 +123,7 @@ mod tests { ..Default::default() }; - let ast = pgt_query_ext::parse(SQL).expect("failed to parse SQL"); + let ast = pgt_query::parse(SQL).expect("failed to parse SQL"); let range = TextRange::new(0.into(), u32::try_from(SQL.len()).unwrap().into()); let options = AnalyserOptions::default(); @@ -134,7 +134,10 @@ mod tests { }); let results = analyser.run(crate::AnalyserParams { - stmts: vec![AnalysableStatement { root: ast, range }], + stmts: vec![AnalysableStatement { + root: ast.into_root().unwrap(), + range, + }], schema_cache: None, }); diff --git a/crates/pgt_analyser/src/lint/safety/adding_required_field.rs b/crates/pgt_analyser/src/lint/safety/adding_required_field.rs index 06901952..d853d30a 100644 --- 
a/crates/pgt_analyser/src/lint/safety/adding_required_field.rs +++ b/crates/pgt_analyser/src/lint/safety/adding_required_field.rs @@ -30,7 +30,7 @@ impl Rule for AddingRequiredField { fn run(ctx: &RuleContext) -> Vec { let mut diagnostics = vec![]; - if let pgt_query_ext::NodeEnum::AlterTableStmt(stmt) = ctx.stmt() { + if let pgt_query::NodeEnum::AlterTableStmt(stmt) = ctx.stmt() { // We are currently lacking a way to check if a `AtAddColumn` subtype sets a // not null constraint – so we'll need to check the plain SQL. let plain_sql = ctx.stmt().to_ref().deparse().unwrap().to_ascii_lowercase(); @@ -41,9 +41,8 @@ impl Rule for AddingRequiredField { } for cmd in &stmt.cmds { - if let Some(pgt_query_ext::NodeEnum::AlterTableCmd(alter_table_cmd)) = &cmd.node { - if alter_table_cmd.subtype() - == pgt_query_ext::protobuf::AlterTableType::AtAddColumn + if let Some(pgt_query::NodeEnum::AlterTableCmd(alter_table_cmd)) = &cmd.node { + if alter_table_cmd.subtype() == pgt_query::protobuf::AlterTableType::AtAddColumn { diagnostics.push( RuleDiagnostic::new( diff --git a/crates/pgt_analyser/src/lint/safety/ban_drop_column.rs b/crates/pgt_analyser/src/lint/safety/ban_drop_column.rs index 165d4230..d73b39d2 100644 --- a/crates/pgt_analyser/src/lint/safety/ban_drop_column.rs +++ b/crates/pgt_analyser/src/lint/safety/ban_drop_column.rs @@ -32,10 +32,10 @@ impl Rule for BanDropColumn { fn run(ctx: &RuleContext) -> Vec { let mut diagnostics = Vec::new(); - if let pgt_query_ext::NodeEnum::AlterTableStmt(stmt) = &ctx.stmt() { + if let pgt_query::NodeEnum::AlterTableStmt(stmt) = &ctx.stmt() { for cmd in &stmt.cmds { - if let Some(pgt_query_ext::NodeEnum::AlterTableCmd(cmd)) = &cmd.node { - if cmd.subtype() == pgt_query_ext::protobuf::AlterTableType::AtDropColumn { + if let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node { + if cmd.subtype() == pgt_query::protobuf::AlterTableType::AtDropColumn { diagnostics.push(RuleDiagnostic::new( rule_category!(), None, diff --git a/crates/pgt_analyser/src/lint/safety/ban_drop_database.rs b/crates/pgt_analyser/src/lint/safety/ban_drop_database.rs index 11d07da9..3011cf88 100644 --- a/crates/pgt_analyser/src/lint/safety/ban_drop_database.rs +++ b/crates/pgt_analyser/src/lint/safety/ban_drop_database.rs @@ -21,7 +21,7 @@ impl Rule for BanDropDatabase { fn run(ctx: &RuleContext) -> Vec { let mut diagnostics = vec![]; - if let pgt_query_ext::NodeEnum::DropdbStmt(_) = &ctx.stmt() { + if let pgt_query::NodeEnum::DropdbStmt(_) = &ctx.stmt() { diagnostics.push( RuleDiagnostic::new( rule_category!(), diff --git a/crates/pgt_analyser/src/lint/safety/ban_drop_not_null.rs b/crates/pgt_analyser/src/lint/safety/ban_drop_not_null.rs index fa4c9011..c1e69461 100644 --- a/crates/pgt_analyser/src/lint/safety/ban_drop_not_null.rs +++ b/crates/pgt_analyser/src/lint/safety/ban_drop_not_null.rs @@ -32,10 +32,10 @@ impl Rule for BanDropNotNull { fn run(ctx: &RuleContext) -> Vec { let mut diagnostics = Vec::new(); - if let pgt_query_ext::NodeEnum::AlterTableStmt(stmt) = &ctx.stmt() { + if let pgt_query::NodeEnum::AlterTableStmt(stmt) = &ctx.stmt() { for cmd in &stmt.cmds { - if let Some(pgt_query_ext::NodeEnum::AlterTableCmd(cmd)) = &cmd.node { - if cmd.subtype() == pgt_query_ext::protobuf::AlterTableType::AtDropNotNull { + if let Some(pgt_query::NodeEnum::AlterTableCmd(cmd)) = &cmd.node { + if cmd.subtype() == pgt_query::protobuf::AlterTableType::AtDropNotNull { diagnostics.push(RuleDiagnostic::new( rule_category!(), None, diff --git a/crates/pgt_analyser/src/lint/safety/ban_drop_table.rs 
b/crates/pgt_analyser/src/lint/safety/ban_drop_table.rs index 90c08514..bcf78453 100644 --- a/crates/pgt_analyser/src/lint/safety/ban_drop_table.rs +++ b/crates/pgt_analyser/src/lint/safety/ban_drop_table.rs @@ -31,8 +31,8 @@ impl Rule for BanDropTable { fn run(ctx: &RuleContext) -> Vec { let mut diagnostics = vec![]; - if let pgt_query_ext::NodeEnum::DropStmt(stmt) = &ctx.stmt() { - if stmt.remove_type() == pgt_query_ext::protobuf::ObjectType::ObjectTable { + if let pgt_query::NodeEnum::DropStmt(stmt) = &ctx.stmt() { + if stmt.remove_type() == pgt_query::protobuf::ObjectType::ObjectTable { diagnostics.push( RuleDiagnostic::new( rule_category!(), diff --git a/crates/pgt_analyser/src/lint/safety/ban_truncate_cascade.rs b/crates/pgt_analyser/src/lint/safety/ban_truncate_cascade.rs index cef5cd47..1bc42d49 100644 --- a/crates/pgt_analyser/src/lint/safety/ban_truncate_cascade.rs +++ b/crates/pgt_analyser/src/lint/safety/ban_truncate_cascade.rs @@ -1,7 +1,7 @@ use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule}; use pgt_console::markup; use pgt_diagnostics::Severity; -use pgt_query_ext::protobuf::DropBehavior; +use pgt_query::protobuf::DropBehavior; declare_lint_rule! { /// Using `TRUNCATE`'s `CASCADE` option will truncate any tables that are also foreign-keyed to the specified tables. @@ -34,7 +34,7 @@ impl Rule for BanTruncateCascade { fn run(ctx: &RuleContext) -> Vec { let mut diagnostics = Vec::new(); - if let pgt_query_ext::NodeEnum::TruncateStmt(stmt) = &ctx.stmt() { + if let pgt_query::NodeEnum::TruncateStmt(stmt) = &ctx.stmt() { if stmt.behavior() == DropBehavior::DropCascade { diagnostics.push(RuleDiagnostic::new( rule_category!(), diff --git a/crates/pgt_analyser/tests/rules_tests.rs b/crates/pgt_analyser/tests/rules_tests.rs index 0a6b47ec..d8e5b0ef 100644 --- a/crates/pgt_analyser/tests/rules_tests.rs +++ b/crates/pgt_analyser/tests/rules_tests.rs @@ -25,7 +25,7 @@ fn rule_test(full_path: &'static str, _: &str, _: &str) { let query = read_to_string(full_path).unwrap_or_else(|_| panic!("Failed to read file: {} ", full_path)); - let ast = pgt_query_ext::parse(&query).expect("failed to parse SQL"); + let ast = pgt_query::parse(&query).expect("failed to parse SQL"); let options = AnalyserOptions::default(); let analyser = Analyser::new(AnalyserConfig { options: &options, @@ -33,7 +33,7 @@ fn rule_test(full_path: &'static str, _: &str, _: &str) { }); let stmt = AnalysableStatement { - root: ast, + root: ast.into_root().expect("Failed to convert AST to root node"), range: pgt_text_size::TextRange::new(0.into(), u32::try_from(query.len()).unwrap().into()), }; diff --git a/crates/pgt_query/Cargo.toml b/crates/pgt_query/Cargo.toml new file mode 100644 index 00000000..881b1b80 --- /dev/null +++ b/crates/pgt_query/Cargo.toml @@ -0,0 +1,36 @@ +[package] +authors.workspace = true +categories.workspace = true +description = "" +edition.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +name = "pgt_query" +repository.workspace = true +version = "0.0.0" + +[dependencies] +prost = { workspace = true } +thiserror = { workspace = true } + +pgt_query_macros = { workspace = true } + + +[features] +default = ["postgres-17"] +postgres-15 = [] +postgres-16 = [] +postgres-17 = [] + +[build-dependencies] +bindgen = "0.72.0" +cc = "1.0.83" +clippy = { version = "0.0.302", optional = true } +fs_extra = "1.2.0" +glob = "0.3.1" +prost-build = "0.13.5" +which = "6.0.0" + +[dev-dependencies] +easy-parallel = "3.2.0" diff --git 
a/crates/pgt_query/build.rs b/crates/pgt_query/build.rs new file mode 100644 index 00000000..292b3af2 --- /dev/null +++ b/crates/pgt_query/build.rs @@ -0,0 +1,260 @@ +#![cfg_attr(feature = "clippy", feature(plugin))] +#![cfg_attr(feature = "clippy", plugin(clippy))] + +use fs_extra::dir::CopyOptions; +use glob::glob; +use std::env; +use std::path::PathBuf; +use std::process::Command; + +static LIBRARY_NAME: &str = "pg_query"; +static LIBPG_QUERY_REPO: &str = "https://github.com/pganalyze/libpg_query.git"; +fn get_libpg_query_tag() -> &'static str { + #[cfg(feature = "postgres-15")] + return "15-5.3.0"; + #[cfg(feature = "postgres-16")] + return "16-6.1.0"; + #[cfg(feature = "postgres-17")] + return "17-6.1.0"; +} + +fn main() -> Result<(), Box> { + let libpg_query_tag = get_libpg_query_tag(); + let out_dir = PathBuf::from(env::var("OUT_DIR")?); + let vendor_dir = out_dir.join("vendor"); + let libpg_query_dir = vendor_dir.join("libpg_query").join(libpg_query_tag); + let stamp_file = libpg_query_dir.join(".stamp"); + + let src_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR")?).join("src"); + let target = env::var("TARGET").unwrap(); + let is_emscripten = target.contains("emscripten"); + + // Configure cargo through stdout + println!("cargo:rustc-link-search=native={}", out_dir.display()); + println!("cargo:rustc-link-lib=static={LIBRARY_NAME}"); + + // Clone libpg_query if not already present + if !stamp_file.exists() { + println!("cargo:warning=Cloning libpg_query {}", libpg_query_tag); + + // Create vendor directory + std::fs::create_dir_all(&vendor_dir)?; + + // Clone the repository with partial clone for faster download + let status = Command::new("git") + .args([ + "clone", + "--filter=blob:none", + "--depth", + "1", + "--branch", + libpg_query_tag, + LIBPG_QUERY_REPO, + libpg_query_dir.to_str().unwrap(), + ]) + .status()?; + + if !status.success() { + return Err("Failed to clone libpg_query".into()); + } + + // Create stamp file + std::fs::File::create(&stamp_file)?; + } + + // Tell cargo to rerun if the stamp file is deleted + println!("cargo:rerun-if-changed={}", stamp_file.display()); + + // Copy necessary files to OUT_DIR for compilation + let out_header_path = out_dir.join(LIBRARY_NAME).with_extension("h"); + let out_protobuf_path = out_dir.join("protobuf"); + + let source_paths = vec![ + libpg_query_dir.join(LIBRARY_NAME).with_extension("h"), + libpg_query_dir.join("Makefile"), + libpg_query_dir.join("src"), + libpg_query_dir.join("protobuf"), + libpg_query_dir.join("vendor"), + ]; + + let copy_options = CopyOptions { + overwrite: true, + ..CopyOptions::default() + }; + + fs_extra::copy_items(&source_paths, &out_dir, ©_options)?; + + // Compile the C library. 
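+    // cc drives the C toolchain directly: the vendored Postgres parser
+    // sources, the bundled protobuf-c runtime, and xxhash are all compiled
+    // into the static library named by LIBRARY_NAME, matching the
+    // rustc-link-lib directive emitted above.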
+ let mut build = cc::Build::new(); + + // Configure for Emscripten if needed + if is_emscripten { + // Use emcc as the compiler instead of gcc/clang + build.compiler("emcc"); + // Use emar as the archiver instead of ar + build.archiver("emar"); + // Note: We don't add WASM-specific flags here as this creates a static library + // The final linking flags should be added when building the final WASM module + } + + build + .files( + glob(out_dir.join("src/*.c").to_str().unwrap()) + .unwrap() + .map(|p| p.unwrap()), + ) + .files( + glob(out_dir.join("src/postgres/*.c").to_str().unwrap()) + .unwrap() + .map(|p| p.unwrap()), + ) + .file(out_dir.join("vendor/protobuf-c/protobuf-c.c")) + .file(out_dir.join("vendor/xxhash/xxhash.c")) + .file(out_dir.join("protobuf/pg_query.pb-c.c")) + .include(out_dir.join(".")) + .include(out_dir.join("./vendor")) + .include(out_dir.join("./src/postgres/include")) + .include(out_dir.join("./src/include")) + .warnings(false); // Avoid unnecessary warnings, as they are already considered as part of libpg_query development + if env::var("PROFILE").unwrap() == "debug" || env::var("DEBUG").unwrap() == "1" { + build.define("USE_ASSERT_CHECKING", None); + } + if target.contains("windows") && !is_emscripten { + build.include(out_dir.join("./src/postgres/include/port/win32")); + if target.contains("msvc") { + build.include(out_dir.join("./src/postgres/include/port/win32_msvc")); + } + } + build.compile(LIBRARY_NAME); + + // Generate bindings for Rust + let mut bindgen_builder = bindgen::Builder::default() + .header(out_header_path.to_str().ok_or("Invalid header path")?) + // Allowlist only the functions we need + .allowlist_function("pg_query_parse_protobuf") + .allowlist_function("pg_query_scan") + .allowlist_function("pg_query_deparse_protobuf") + .allowlist_function("pg_query_normalize") + .allowlist_function("pg_query_fingerprint") + .allowlist_function("pg_query_split_with_parser") + .allowlist_function("pg_query_split_with_scanner") + .allowlist_function("pg_query_parse_plpgsql") + .allowlist_function("pg_query_free_protobuf_parse_result") + .allowlist_function("pg_query_free_scan_result") + .allowlist_function("pg_query_free_deparse_result") + .allowlist_function("pg_query_free_normalize_result") + .allowlist_function("pg_query_free_fingerprint_result") + .allowlist_function("pg_query_free_split_result") + .allowlist_function("pg_query_free_plpgsql_parse_result") + // Allowlist the types used by these functions + .allowlist_type("PgQueryProtobufParseResult") + .allowlist_type("PgQueryScanResult") + .allowlist_type("PgQueryError") + .allowlist_type("PgQueryProtobuf") + .allowlist_type("PgQueryDeparseResult") + .allowlist_type("PgQueryNormalizeResult") + .allowlist_type("PgQueryFingerprintResult") + .allowlist_type("PgQuerySplitResult") + .allowlist_type("PgQuerySplitStmt") + // Also generate bindings for size_t since it's used in PgQueryProtobuf + .allowlist_type("size_t") + .allowlist_var("PG_VERSION_NUM"); + + // Configure bindgen for Emscripten target + if is_emscripten { + // Tell bindgen to generate bindings for the wasm32 target + bindgen_builder = bindgen_builder.clang_arg("--target=wasm32-unknown-emscripten"); + + // Add emscripten sysroot includes + // First try to use EMSDK environment variable (set in CI and when sourcing emsdk_env.sh) + if let Ok(emsdk) = env::var("EMSDK") { + bindgen_builder = bindgen_builder.clang_arg(format!( + "-I{}/upstream/emscripten/cache/sysroot/include", + emsdk + )); + } else { + // Fallback to the default path if EMSDK is not 
set + bindgen_builder = + bindgen_builder.clang_arg("-I/emsdk/upstream/emscripten/cache/sysroot/include"); + } + + // Ensure we have the basic C standard library headers + bindgen_builder = bindgen_builder.clang_arg("-D__EMSCRIPTEN__"); + + // Use environment variable if set (from our justfile) + if let Ok(extra_args) = env::var("BINDGEN_EXTRA_CLANG_ARGS") { + for arg in extra_args.split_whitespace() { + bindgen_builder = bindgen_builder.clang_arg(arg); + } + } + } + + let bindings = bindgen_builder + .generate() + .map_err(|_| "Unable to generate bindings")?; + + let bindings_path = out_dir.join("bindings.rs"); + bindings.write_to_file(&bindings_path)?; + + // For WASM/emscripten builds, manually add the function declarations + // since bindgen sometimes misses them due to preprocessor conditions + if is_emscripten { + let mut bindings_content = std::fs::read_to_string(&bindings_path)?; + + // Check if we need to add the extern "C" block + if !bindings_content.contains("extern \"C\"") { + bindings_content.push_str("\nextern \"C\" {\n"); + bindings_content.push_str(" pub fn pg_query_scan(input: *const ::std::os::raw::c_char) -> PgQueryScanResult;\n"); + bindings_content.push_str(" pub fn pg_query_parse_protobuf(input: *const ::std::os::raw::c_char) -> PgQueryProtobufParseResult;\n"); + bindings_content.push_str(" pub fn pg_query_parse_plpgsql(input: *const ::std::os::raw::c_char) -> PgQueryPlpgsqlParseResult;\n"); + bindings_content.push_str(" pub fn pg_query_deparse_protobuf(protobuf: PgQueryProtobuf) -> PgQueryDeparseResult;\n"); + bindings_content.push_str(" pub fn pg_query_normalize(input: *const ::std::os::raw::c_char) -> PgQueryNormalizeResult;\n"); + bindings_content.push_str(" pub fn pg_query_fingerprint(input: *const ::std::os::raw::c_char) -> PgQueryFingerprintResult;\n"); + bindings_content.push_str(" pub fn pg_query_split_with_parser(input: *const ::std::os::raw::c_char) -> PgQuerySplitResult;\n"); + bindings_content.push_str(" pub fn pg_query_split_with_scanner(input: *const ::std::os::raw::c_char) -> PgQuerySplitResult;\n"); + bindings_content + .push_str(" pub fn pg_query_free_scan_result(result: PgQueryScanResult);\n"); + bindings_content.push_str(" pub fn pg_query_free_protobuf_parse_result(result: PgQueryProtobufParseResult);\n"); + bindings_content.push_str(" pub fn pg_query_free_plpgsql_parse_result(result: PgQueryPlpgsqlParseResult);\n"); + bindings_content.push_str( + " pub fn pg_query_free_deparse_result(result: PgQueryDeparseResult);\n", + ); + bindings_content.push_str( + " pub fn pg_query_free_normalize_result(result: PgQueryNormalizeResult);\n", + ); + bindings_content.push_str( + " pub fn pg_query_free_fingerprint_result(result: PgQueryFingerprintResult);\n", + ); + bindings_content + .push_str(" pub fn pg_query_free_split_result(result: PgQuerySplitResult);\n"); + bindings_content.push_str("}\n"); + + std::fs::write(&bindings_path, bindings_content)?; + } + } + + let protoc_exists = Command::new("protoc").arg("--version").status().is_ok(); + if protoc_exists { + println!("generating protobuf bindings"); + // HACK: Set OUT_DIR to src/ so that the generated protobuf file is copied to src/protobuf.rs + unsafe { + env::set_var("OUT_DIR", &src_dir); + } + + prost_build::compile_protos( + &[&out_protobuf_path.join(LIBRARY_NAME).with_extension("proto")], + &[&out_protobuf_path], + )?; + + std::fs::rename(src_dir.join("pg_query.rs"), src_dir.join("protobuf.rs"))?; + + // Reset OUT_DIR to the original value + unsafe { + env::set_var("OUT_DIR", &out_dir); + } + } else { 
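+        // protoc is unavailable: fall back to the pre-generated
+        // src/protobuf.rs that is checked in alongside this build script.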
+ println!("skipping protobuf generation"); + } + + Ok(()) +} diff --git a/crates/pgt_query/examples/api_example.rs b/crates/pgt_query/examples/api_example.rs new file mode 100644 index 00000000..d71b1c0f --- /dev/null +++ b/crates/pgt_query/examples/api_example.rs @@ -0,0 +1,42 @@ +use pgt_query::{NodeRef, parse}; + +fn main() { + let mut result = parse("SELECT * FROM users WHERE id IN (SELECT id FROM admins)").unwrap(); + + // Immutable access + { + let stmts = result.stmts(); + let stmt = stmts.first().unwrap(); + + // nodes() returns a Vec + let all_nodes = stmt.nodes(); + println!("Total nodes in AST: {}", all_nodes.len()); + + // Can still iterate with iter() + let select_count = stmt + .iter() + .filter(|n| matches!(n, NodeRef::SelectStmt(_))) + .count(); + println!("Number of SELECT statements: {}", select_count); + } + + // Mutable access - no cloning needed! + { + let mut stmts = result.stmts_mut(); + if let Some(stmt) = stmts.first_mut() { + // Now we can iterate mutably without cloning + for mut_node in stmt.iter_mut() { + // Modify nodes here if needed + if let pgt_query::NodeMut::SelectStmt(_select) = mut_node { + println!("Found a SELECT statement to modify"); + // You can modify _select here + } + } + } + } + + // Alternative: using root_mut() for single statement queries + if let Some(root) = result.root_mut() { + println!("Root node type: {:?}", std::mem::discriminant(root)); + } +} diff --git a/crates/pgt_query/src/deparse.rs b/crates/pgt_query/src/deparse.rs new file mode 100644 index 00000000..91f3d450 --- /dev/null +++ b/crates/pgt_query/src/deparse.rs @@ -0,0 +1,93 @@ +use std::ffi::CStr; +use std::os::raw::c_char; + +use crate::bindings::*; +use crate::error::*; +use crate::protobuf; + +use prost::Message; + +/// Converts a parsed tree back into a string. 
+///
+/// # Example
+///
+/// ```rust
+/// use pgt_query::{parse, NodeEnum, NodeRef};
+///
+/// let result = parse("INSERT INTO other (name) SELECT name FROM contacts");
+/// let result = result.unwrap();
+/// let stmts = result.stmts();
+/// let insert = stmts.first().unwrap();
+/// assert!(matches!(insert, NodeEnum::InsertStmt(_)));
+/// let select = insert.iter().find(|n| matches!(n, NodeRef::SelectStmt(_))).unwrap();
+///
+/// // The entire parse result can be deparsed:
+/// assert_eq!(result.deparse().unwrap(), "INSERT INTO other (name) SELECT name FROM contacts");
+/// // Or an individual node can be deparsed:
+/// assert_eq!(insert.deparse().unwrap(), "INSERT INTO other (name) SELECT name FROM contacts");
+/// assert_eq!(select.deparse().unwrap(), "SELECT name FROM contacts");
+/// ```
+///
+/// Note that this function will panic if called on a node not defined in `deparseStmt`
+pub fn deparse(protobuf: &protobuf::ParseResult) -> Result<String> {
+    let buffer = protobuf.encode_to_vec();
+    let len = buffer.len();
+    let data = buffer.as_ptr() as *const c_char as *mut c_char;
+    let protobuf = PgQueryProtobuf { data, len };
+    let result = unsafe { pg_query_deparse_protobuf(protobuf) };
+
+    let deparse_result = if !result.error.is_null() {
+        let message = unsafe { CStr::from_ptr((*result.error).message) }
+            .to_string_lossy()
+            .to_string();
+        Err(Error::Parse(message))
+    } else {
+        let query = unsafe { CStr::from_ptr(result.query) }
+            .to_string_lossy()
+            .to_string();
+        Ok(query)
+    };
+
+    unsafe { pg_query_free_deparse_result(result) };
+    deparse_result
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::parse;
+
+    fn assert_deparse(input: &str, output: &str) {
+        let result = parse(input).unwrap();
+        assert_eq!(result.deparse().unwrap(), output);
+    }
+
+    #[test]
+    fn it_deparses_select() {
+        let query = "SELECT a AS b FROM x WHERE y = 5 AND z = y";
+        assert_deparse(query, query);
+    }
+
+    #[test]
+    fn it_deparses_select_with_empty_target_list() {
+        let query = "SELECT FROM x WHERE y = 5 AND z = y";
+        assert_deparse(query, query);
+    }
+
+    #[test]
+    fn it_deparses_select_with_schema() {
+        let query = "SELECT a AS b FROM public.x WHERE y = 5 AND z = y";
+        assert_deparse(query, query);
+    }
+
+    #[test]
+    fn it_deparses_select_with_distinct() {
+        let query = "SELECT DISTINCT a, b, * FROM c WHERE d = e";
+        assert_deparse(query, query);
+    }
+
+    #[test]
+    fn it_deparses_select_with_distinct_on() {
+        let query = "SELECT DISTINCT ON (a) a, b FROM c";
+        assert_deparse(query, query);
+    }
+}
diff --git a/crates/pgt_query/src/error.rs b/crates/pgt_query/src/error.rs
new file mode 100644
index 00000000..50845b44
--- /dev/null
+++ b/crates/pgt_query/src/error.rs
@@ -0,0 +1,23 @@
+use thiserror::Error;
+
+/// Error structure representing the basic error scenarios for `pgt_query`.
+#[derive(Debug, Error, Eq, PartialEq)]
+pub enum Error {
+    #[error("Invalid statement format: {0}")]
+    Conversion(#[from] std::ffi::NulError),
+    #[error("Error decoding result: {0}")]
+    Decode(#[from] prost::DecodeError),
+    #[error("Invalid statement: {0}")]
+    Parse(String),
+    #[error("Error parsing JSON: {0}")]
+    InvalidJson(String),
+    #[error("Invalid pointer")]
+    InvalidPointer,
+    #[error("Error scanning: {0}")]
+    Scan(String),
+    #[error("Error splitting: {0}")]
+    Split(String),
+}
+
+/// Convenient Result alias for returning `pgt_query::Error`.
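+///
+/// A hypothetical helper built on the alias (sketch, not part of the API):
+///
+/// ```rust
+/// fn fingerprint_hex(sql: &str) -> pgt_query::Result<String> {
+///     Ok(pgt_query::fingerprint(sql)?.hex)
+/// }
+/// # assert!(fingerprint_hex("SELECT 1").is_ok());
+/// ```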
+pub type Result<T> = core::result::Result<T, Error>;
diff --git a/crates/pgt_query/src/fingerprint.rs b/crates/pgt_query/src/fingerprint.rs
new file mode 100644
index 00000000..127b6ca6
--- /dev/null
+++ b/crates/pgt_query/src/fingerprint.rs
@@ -0,0 +1,359 @@
+use std::ffi::{CStr, CString};
+
+use crate::bindings::*;
+use crate::error::*;
+
+/// Represents the resulting fingerprint containing both the raw integer form as well as the
+/// corresponding 16 character hex value.
+pub struct Fingerprint {
+    pub value: u64,
+    pub hex: String,
+}
+
+/// Fingerprints the given SQL statement. Useful for comparing parse trees across different implementations
+/// of `libpg_query`.
+///
+/// # Example
+///
+/// ```rust
+/// let result = pgt_query::fingerprint("SELECT * FROM contacts WHERE name='Paul'");
+/// assert!(result.is_ok());
+/// let result = result.unwrap();
+/// assert_eq!(result.hex, "0e2581a461ece536");
+/// ```
+pub fn fingerprint(statement: &str) -> Result<Fingerprint> {
+    let input = CString::new(statement)?;
+    let result = unsafe { pg_query_fingerprint(input.as_ptr()) };
+    let fingerprint = if !result.error.is_null() {
+        let message = unsafe { CStr::from_ptr((*result.error).message) }
+            .to_string_lossy()
+            .to_string();
+        Err(Error::Parse(message))
+    } else {
+        let hex = unsafe { CStr::from_ptr(result.fingerprint_str) };
+        Ok(Fingerprint {
+            value: result.fingerprint,
+            hex: hex.to_string_lossy().to_string(),
+        })
+    };
+    unsafe { pg_query_free_fingerprint_result(result) };
+    fingerprint
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::{Error, fingerprint};
+
+    #[test]
+    fn it_can_fingerprint_a_simple_statement() {
+        let result =
+            fingerprint("SELECT * FROM contacts.person WHERE id IN (1, 2, 3, 4);").unwrap();
+        assert_eq!(result.hex, "643d2a3c294ab8a7");
+    }
+
+    #[test]
+    fn it_will_error_on_invalid_input() {
+        let error = fingerprint("CREATE RANDOM ix_test ON contacts.person;")
+            .err()
+            .unwrap();
+        assert_eq!(
+            error,
+            Error::Parse("syntax error at or near \"RANDOM\"".into())
+        );
+    }
+
+    #[test]
+    fn it_works_for_multi_statement_queries() {
+        let q1 = "SET x=$1; SELECT A";
+        let q2 = "SET x=$1; SELECT a";
+        assert_eq!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex);
+
+        let q1 = "SET x=$1; SELECT A";
+        let q2 = "SELECT a";
+        assert_ne!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex);
+    }
+
+    #[test]
+    fn it_ignores_aliases() {
+        let q1 = "SELECT a AS b";
+        let q2 = "SELECT a AS c";
+        assert_eq!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex);
+
+        let q1 = "SELECT a";
+        let q2 = "SELECT a AS c";
+        assert_eq!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex);
+
+        let q1 = "SELECT * FROM a AS b";
+        let q2 = "SELECT * FROM a AS c";
+        assert_eq!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex);
+
+        let q1 = "SELECT * FROM a";
+        let q2 = "SELECT * FROM a AS c";
+        assert_eq!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex);
+
+        let q1 = "SELECT * FROM (SELECT * FROM x AS y) AS a";
+        let q2 = "SELECT * FROM (SELECT * FROM x AS z) AS b";
+        assert_eq!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex);
+
+        let q1 = "SELECT a AS b UNION SELECT x AS y";
+        let q2 = "SELECT a AS c UNION SELECT x AS z";
+        assert_eq!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex);
+    }
+
+    #[test]
+    fn it_ignores_param_references() {
+        let q1 = "SELECT $1";
+        let q2 = "SELECT $2";
+        assert_eq!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex);
+    }
+
+    #[test]
+    fn it_ignores_select_target_list_ordering() {
+        let q1 = "SELECT a, b FROM x";
+ let q2 = "SELECT b, a FROM x"; + assert_eq!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex); + let q1 = "SELECT $1, b FROM x"; + let q2 = "SELECT b, $1 FROM x"; + assert_eq!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex); + let q1 = "SELECT $1, $2, b FROM x"; + let q2 = "SELECT $1, b, $2 FROM x"; + assert_eq!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex); + + // Testing uniqueness + let q1 = "SELECT a, c FROM x"; + let q2 = "SELECT b, a FROM x"; + assert_ne!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex); + let q1 = "SELECT b FROM x"; + let q2 = "SELECT b, a FROM x"; + assert_ne!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex); + } + + #[test] + fn it_ignores_insert_col_ordering() { + let q1 = "INSERT INTO test (a, b) VALUES ($1, $2)"; + let q2 = "INSERT INTO test (b, a) VALUES ($1, $2)"; + assert_eq!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex); + + // Testing uniqueness + let q1 = "INSERT INTO test (a, c) VALUES ($1, $2)"; + let q2 = "INSERT INTO test (b, a) VALUES ($1, $2)"; + assert_ne!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex); + let q1 = "INSERT INTO test (b) VALUES ($1, $2)"; + let q2 = "INSERT INTO test (b, a) VALUES ($1, $2)"; + assert_ne!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex); + } + + #[test] + fn it_ignores_in_list_size() { + let q1 = "SELECT * FROM x WHERE y IN ($1, $2, $3)"; + let q2 = "SELECT * FROM x WHERE y IN ($1)"; + assert_eq!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex); + + let q1 = "SELECT * FROM x WHERE y IN ( $1::uuid, $2::uuid, $3::uuid )"; + let q2 = "SELECT * FROM x WHERE y IN ( $1::uuid )"; + assert_eq!(fingerprint(q1).unwrap().hex, fingerprint(q2).unwrap().hex); + } + + #[test] + fn it_works() { + let result = fingerprint("SELECT 1").unwrap(); + assert_eq!(result.hex, "50fde20626009aba"); + + let result = fingerprint("SELECT 2").unwrap(); + assert_eq!(result.hex, "50fde20626009aba"); + + let result = fingerprint("SELECT $1").unwrap(); + assert_eq!(result.hex, "50fde20626009aba"); + + let result = fingerprint("SELECT 1; SELECT a FROM b").unwrap(); + assert_eq!(result.hex, "3efa3b10d558d06d"); + + let result = fingerprint("SELECT COUNT(DISTINCT id), * FROM targets WHERE something IS NOT NULL AND elsewhere::interval < now()").unwrap(); + assert_eq!(result.hex, "26b6553101185d22"); + + let result = fingerprint("INSERT INTO test (a, b) VALUES ($1, $2)").unwrap(); + assert_eq!(result.hex, "51e63b8083b48bdd"); + + let result = fingerprint("INSERT INTO test (b, a) VALUES ($1, $2)").unwrap(); + assert_eq!(result.hex, "51e63b8083b48bdd"); + + let result = fingerprint( + "INSERT INTO test (a, b) VALUES (ARRAY[$1, $2, $3, $4], $5::timestamptz), (ARRAY[$6, $7, $8, $9], $10::timestamptz), ($11, $12::timestamptz)", + ) + .unwrap(); + assert_eq!(result.hex, "4dfdd5260cac5acf"); + + let result = fingerprint("SELECT b AS x, a AS y FROM z").unwrap(); + assert_eq!(result.hex, "1a8bf5d7614de3a5"); + + let result = fingerprint("SELECT * FROM x WHERE y = $1").unwrap(); + assert_eq!(result.hex, "4ff39426bd074231"); + + let result = fingerprint("SELECT * FROM x WHERE y = ANY ($1)").unwrap(); + assert_eq!(result.hex, "4ff39426bd074231"); + + let result = fingerprint("SELECT * FROM x WHERE y IN ($1)").unwrap(); + assert_eq!(result.hex, "4ff39426bd074231"); + + let result = fingerprint("SELECT * FROM x WHERE y IN ($1, $2, $3)").unwrap(); + assert_eq!(result.hex, "4ff39426bd074231"); + + let result = fingerprint("SELECT * FROM x WHERE y IN ( $1::uuid 
)").unwrap(); + assert_eq!(result.hex, "4ff39426bd074231"); + + let result = + fingerprint("SELECT * FROM x WHERE y IN ( $1::uuid, $2::uuid, $3::uuid )").unwrap(); + assert_eq!(result.hex, "4ff39426bd074231"); + + let result = fingerprint("PREPARE a123 AS SELECT a").unwrap(); + assert_eq!(result.hex, "9b5e6ead8be993e8"); + + let result = fingerprint("EXECUTE a123").unwrap(); + assert_eq!(result.hex, "44ef1d2beabd53e8"); + + let result = fingerprint("DEALLOCATE a123").unwrap(); + assert_eq!(result.hex, "d8a65a814fbc5f95"); + + let result = fingerprint("DEALLOCATE ALL").unwrap(); + assert_eq!(result.hex, "2debfb8745df64a7"); + + let result = fingerprint("EXPLAIN ANALYZE SELECT a").unwrap(); + assert_eq!(result.hex, "82845c1b5c6102e5"); + + let result = + fingerprint("WITH a AS (SELECT * FROM x WHERE x.y = $1 AND x.z = 1) SELECT * FROM a") + .unwrap(); + assert_eq!(result.hex, "6831e38bbb3dd18c"); + + let result = + fingerprint("CREATE TABLE types (a float(2), b float(49), c NUMERIC(2, 3), d character(4), e char(5), f varchar(6), g character varying(7))") + .unwrap(); + assert_eq!(result.hex, "008d6ba4aa0f4c6e"); + + let result = + fingerprint("CREATE VIEW view_a (a, b) AS WITH RECURSIVE view_a (a, b) AS (SELECT * FROM a(1)) SELECT \"a\", \"b\" FROM \"view_a\"").unwrap(); + assert_eq!(result.hex, "c6ef6b9f498feda4"); + + let result = fingerprint("VACUUM FULL my_table").unwrap(); + assert_eq!(result.hex, "fdf2f4127644f4d8"); + + let result = fingerprint("SELECT * FROM x AS a, y AS b").unwrap(); + assert_eq!(result.hex, "4e9acae841dae228"); + + let result = fingerprint("SELECT * FROM y AS a, x AS b").unwrap(); + assert_eq!(result.hex, "4e9acae841dae228"); + + let result = fingerprint("SELECT x AS a, y AS b FROM x").unwrap(); + assert_eq!(result.hex, "65dff5f5e9a643ad"); + + let result = fingerprint("SELECT y AS a, x AS b FROM x").unwrap(); + assert_eq!(result.hex, "65dff5f5e9a643ad"); + + let result = fingerprint("SELECT x, y FROM z").unwrap(); + assert_eq!(result.hex, "330267237da5535f"); + + let result = fingerprint("SELECT y, x FROM z").unwrap(); + assert_eq!(result.hex, "330267237da5535f"); + + let result = fingerprint("INSERT INTO films (code, title, did) VALUES ('UA502', 'Bananas', 105), ('T_601', 'Yojimbo', DEFAULT)").unwrap(); + assert_eq!(result.hex, "459fdc70778b841e"); + + let result = + fingerprint("INSERT INTO films (code, title, did) VALUES ($1, $2, $3)").unwrap(); + assert_eq!(result.hex, "459fdc70778b841e"); + + let result = fingerprint("SELECT * FROM a").unwrap(); + assert_eq!(result.hex, "fcf44da7b597ef43"); + + let result = fingerprint("SELECT * FROM a AS b").unwrap(); + assert_eq!(result.hex, "fcf44da7b597ef43"); + + let result = + fingerprint("UPDATE users SET one_thing = $1, second_thing = $2 WHERE users.id = $1") + .unwrap(); + assert_eq!(result.hex, "a0ea386c1cfd1e69"); + + let result = + fingerprint("UPDATE users SET something_else = $1 WHERE users.id = $1").unwrap(); + assert_eq!(result.hex, "3172bc3e0d631d55"); + + let result = fingerprint("UPDATE users SET something_else = (SELECT a FROM x WHERE uid = users.id LIMIT 1) WHERE users.id = $1").unwrap(); + assert_eq!(result.hex, "f1127a8b91fbecbf"); + + let result = fingerprint("SAVEPOINT some_id").unwrap(); + assert_eq!(result.hex, "8ebd566ea1bf947b"); + + let result = fingerprint("RELEASE some_id").unwrap(); + assert_eq!(result.hex, "60d618658252d2af"); + + let result = fingerprint("PREPARE TRANSACTION 'some_id'").unwrap(); + assert_eq!(result.hex, "d993959a33d627d4"); + + let result = fingerprint("START TRANSACTION 
READ WRITE").unwrap(); + assert_eq!(result.hex, "4ca25828c835d55a"); + + let result = + fingerprint("DECLARE cursor_123 CURSOR FOR SELECT * FROM test WHERE id = 123").unwrap(); + assert_eq!(result.hex, "d2bec62d2a7ec7cb"); + + let result = fingerprint("FETCH 1000 FROM cursor_123").unwrap(); + assert_eq!(result.hex, "37f4d2f6a957ae48"); + + let result = fingerprint("CLOSE cursor_123").unwrap(); + assert_eq!(result.hex, "2c7963684fc2bad9"); + + let result = fingerprint("-- nothing").unwrap(); + assert_eq!(result.hex, "d8d13f8b2da6c9ad"); + + let result = fingerprint("CREATE FOREIGN TABLE ft1 () SERVER no_server").unwrap(); + assert_eq!(result.hex, "74481c4af7c76be1"); + + let result = fingerprint("UPDATE x SET a = 1, b = 2, c = 3").unwrap(); + assert_eq!(result.hex, "fd5c248c0e642ce4"); + + let result = fingerprint("UPDATE x SET z = now()").unwrap(); + assert_eq!(result.hex, "a222eaabaa1e7cb1"); + + let result = fingerprint( + "CREATE TEMPORARY TABLE my_temp_table (test_id integer NOT NULL) ON COMMIT DROP", + ) + .unwrap(); + assert_eq!(result.hex, "1407ed5c5bb00967"); + + let result = fingerprint("CREATE TEMPORARY TABLE my_temp_table AS SELECT 1").unwrap(); + assert_eq!(result.hex, "695ebe73a3abc45c"); + + let result = fingerprint("SELECT INTERVAL (0) $2").unwrap(); + assert_eq!(result.hex, "50fde20626009aba"); + + let result = fingerprint("SELECT INTERVAL (2) $2").unwrap(); + assert_eq!(result.hex, "50fde20626009aba"); + + let result = fingerprint("SELECT * FROM t WHERE t.a IN (1, 2) AND t.b = 3").unwrap(); + assert_eq!(result.hex, "346aea01be9173b6"); + + let result = fingerprint("SELECT * FROM t WHERE t.b = 3 AND t.a IN (1, 2)").unwrap(); + assert_eq!(result.hex, "346aea01be9173b6"); + + let result = fingerprint("SELECT * FROM t WHERE a && '[1,2]'").unwrap(); + assert_eq!(result.hex, "673f199f13dfe665"); + + let result = fingerprint("SELECT * FROM t WHERE a && '[1,2]'::int4range").unwrap(); + assert_eq!(result.hex, "673f199f13dfe665"); + + let result = fingerprint("SELECT * FROM t_20210301_x").unwrap(); + assert_eq!(result.hex, "6f8169980cd70a25"); + + let result = fingerprint("SELECT * FROM t_20210302_x").unwrap(); + assert_eq!(result.hex, "6f8169980cd70a25"); + + let result = fingerprint("SELECT * FROM t_20210302_y").unwrap(); + assert_eq!(result.hex, "d357dac4a24fcf1b"); + + let result = fingerprint("SELECT * FROM t_1").unwrap(); + assert_eq!(result.hex, "018bd9230646143e"); + + let result = fingerprint("SELECT * FROM t_2").unwrap(); + assert_eq!(result.hex, "3f1444da570c1a66"); + } +} diff --git a/crates/pgt_query/src/iter_mut.rs b/crates/pgt_query/src/iter_mut.rs new file mode 100644 index 00000000..fe5e8806 --- /dev/null +++ b/crates/pgt_query/src/iter_mut.rs @@ -0,0 +1 @@ +pgt_query_macros::iter_mut_codegen!(); diff --git a/crates/pgt_query/src/iter_ref.rs b/crates/pgt_query/src/iter_ref.rs new file mode 100644 index 00000000..6ac4f220 --- /dev/null +++ b/crates/pgt_query/src/iter_ref.rs @@ -0,0 +1 @@ +pgt_query_macros::iter_ref_codegen!(); diff --git a/crates/pgt_query/src/lib.rs b/crates/pgt_query/src/lib.rs new file mode 100644 index 00000000..e8981719 --- /dev/null +++ b/crates/pgt_query/src/lib.rs @@ -0,0 +1,91 @@ +mod deparse; +mod error; +mod fingerprint; +mod iter_mut; +mod iter_ref; +mod node_enum; +mod node_mut; +mod node_ref; +mod node_structs; +mod normalize; +mod parse; +mod plpgsql; +mod scan; +mod split; + +pub use deparse::*; +pub use error::*; +pub use fingerprint::*; +pub use iter_mut::*; +pub use iter_ref::*; +pub use node_enum::*; +pub use node_mut::*; +pub use 
node_ref::*;
+pub use normalize::*;
+pub use parse::*;
+pub use plpgsql::*;
+pub use scan::*;
+pub use split::*;
+
+pub use protobuf::Node;
+
+// Include the generated bindings with 2024 edition compatibility
+#[allow(non_upper_case_globals)]
+#[allow(non_camel_case_types)]
+#[allow(non_snake_case)]
+#[allow(dead_code)]
+#[allow(improper_ctypes)]
+#[allow(unsafe_op_in_unsafe_fn)]
+mod bindings {
+    include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
+}
+
+// Include the generated protobuf code
+#[allow(clippy::all)]
+pub mod protobuf {
+    include!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/protobuf.rs"));
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn it_does_not_error_when_run_in_parallel() {
+        use easy_parallel::Parallel;
+
+        let mut queries = vec![];
+        for _ in 0..100 {
+            queries.push(
+                r#"
+            SELECT * FROM "t0"
+            JOIN "t1" ON (1) JOIN "t2" ON (1) JOIN "t3" ON (1) JOIN "t4" ON (1) JOIN "t5" ON (1)
+            JOIN "t6" ON (1) JOIN "t7" ON (1) JOIN "t8" ON (1) JOIN "t9" ON (1) JOIN "t10" ON (1)
+            JOIN "t11" ON (1) JOIN "t12" ON (1) JOIN "t13" ON (1) JOIN "t14" ON (1) JOIN "t15" ON (1)
+            JOIN "t16" ON (1) JOIN "t17" ON (1) JOIN "t18" ON (1) JOIN "t19" ON (1) JOIN "t20" ON (1)
+            JOIN "t21" ON (1) JOIN "t22" ON (1) JOIN "t23" ON (1) JOIN "t24" ON (1) JOIN "t25" ON (1)
+            JOIN "t26" ON (1) JOIN "t27" ON (1) JOIN "t28" ON (1) JOIN "t29" ON (1)
+            "#,
+            );
+            queries.push(
+                "
+            SELECT memory_total_bytes, memory_free_bytes, memory_pagecache_bytes, memory_buffers_bytes, memory_applications_bytes,
+            (memory_swap_total_bytes - memory_swap_free_bytes) AS swap, date_part($0, s.collected_at) AS collected_at
+            FROM snapshots s JOIN system_snapshots ON (snapshot_id = s.id)
+            WHERE s.database_id = $0 AND s.collected_at BETWEEN $0 AND $0
+            ORDER BY collected_at
+            ",
+            );
+        }
+
+        Parallel::new()
+            .each(queries, |query| {
+                for _ in 0..100 {
+                    let _result = parse(query).unwrap();
+                    fingerprint(query).unwrap();
+                    normalize(query).unwrap();
+                }
+            })
+            .run();
+    }
+}
diff --git a/crates/pgt_query/src/node_enum.rs b/crates/pgt_query/src/node_enum.rs
new file mode 100644
index 00000000..5d5b6bf7
--- /dev/null
+++ b/crates/pgt_query/src/node_enum.rs
@@ -0,0 +1,33 @@
+use crate::*;
+
+use protobuf::Node;
+pub use protobuf::node::Node as NodeEnum;
+
+pgt_query_macros::node_enum_codegen!();
+
+impl NodeEnum {
+    pub fn deparse(&self) -> Result<String> {
+        crate::deparse(&protobuf::ParseResult {
+            version: crate::bindings::PG_VERSION_NUM as i32,
+            stmts: vec![protobuf::RawStmt {
+                stmt: Some(Box::new(Node {
+                    node: Some(self.clone()),
+                })),
+                stmt_location: 0,
+                stmt_len: 0,
+            }],
+        })
+    }
+
+    pub fn nodes(&self) -> Vec<NodeRef<'_>> {
+        self.iter().collect()
+    }
+
+    pub fn iter(&self) -> NodeRefIterator<'_> {
+        NodeRefIterator::new(self.to_ref())
+    }
+
+    pub fn iter_mut(&mut self) -> NodeMutIterator {
+        NodeMutIterator::new(self.to_mut())
+    }
+}
diff --git a/crates/pgt_query/src/node_mut.rs b/crates/pgt_query/src/node_mut.rs
new file mode 100644
index 00000000..f2da254b
--- /dev/null
+++ b/crates/pgt_query/src/node_mut.rs
@@ -0,0 +1,26 @@
+use protobuf::Node;
+
+pgt_query_macros::node_mut_codegen!();
+
+impl NodeMut {
+    pub fn deparse(&self) -> Result<String> {
+        crate::deparse(&protobuf::ParseResult {
+            version: crate::bindings::PG_VERSION_NUM as i32,
+            stmts: vec![protobuf::RawStmt {
+                stmt: Some(Box::new(Node {
+                    node: Some(self.to_enum()?),
+                })),
+                stmt_location: 0,
+                stmt_len: 0,
+            }],
+        })
+    }
+
+    pub fn nodes_mut(&self) -> Vec<NodeMut> {
+        self.iter_mut().collect()
+    }
+
+    pub fn iter_mut(&self) -> NodeMutIterator {
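+        // NodeMut is a plain `Copy` handle, so the iterator can be seeded
+        // with a copy of it rather than a borrow.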
+        NodeMutIterator::new(*self)
+    }
+}
diff --git a/crates/pgt_query/src/node_ref.rs b/crates/pgt_query/src/node_ref.rs
new file mode 100644
index 00000000..603913cb
--- /dev/null
+++ b/crates/pgt_query/src/node_ref.rs
@@ -0,0 +1,26 @@
+use protobuf::Node;
+
+pgt_query_macros::node_ref_codegen!();
+
+impl<'a> NodeRef<'a> {
+    pub fn deparse(&self) -> Result<String> {
+        crate::deparse(&protobuf::ParseResult {
+            version: crate::bindings::PG_VERSION_NUM as i32,
+            stmts: vec![protobuf::RawStmt {
+                stmt: Some(Box::new(Node {
+                    node: Some(self.to_enum()),
+                })),
+                stmt_location: 0,
+                stmt_len: 0,
+            }],
+        })
+    }
+
+    pub fn nodes(&self) -> Vec<NodeRef<'a>> {
+        self.iter().collect()
+    }
+
+    pub fn iter(&self) -> NodeRefIterator<'a> {
+        NodeRefIterator::new(*self)
+    }
+}
diff --git a/crates/pgt_query/src/node_structs.rs b/crates/pgt_query/src/node_structs.rs
new file mode 100644
index 00000000..8b81c98e
--- /dev/null
+++ b/crates/pgt_query/src/node_structs.rs
@@ -0,0 +1,16 @@
+use protobuf::Node;
+
+pgt_query_macros::node_structs_codegen!();
+
+impl Node {
+    pub fn deparse(&self) -> Result<String> {
+        crate::deparse(&protobuf::ParseResult {
+            version: crate::bindings::PG_VERSION_NUM as i32,
+            stmts: vec![protobuf::RawStmt {
+                stmt: Some(Box::new(self.clone())),
+                stmt_location: 0,
+                stmt_len: 0,
+            }],
+        })
+    }
+}
diff --git a/crates/pgt_query/src/normalize.rs b/crates/pgt_query/src/normalize.rs
new file mode 100644
index 00000000..71ff683c
--- /dev/null
+++ b/crates/pgt_query/src/normalize.rs
@@ -0,0 +1,136 @@
+use std::ffi::{CStr, CString};
+
+use crate::bindings::*;
+use crate::error::*;
+
+/// Normalizes the given SQL statement, returning a parameterized version.
+///
+/// # Example
+///
+/// ```rust
+/// let result = pgt_query::normalize("SELECT * FROM contacts WHERE name='Paul'");
+/// assert!(result.is_ok());
+/// let result = result.unwrap();
+/// assert_eq!(result, "SELECT * FROM contacts WHERE name=$1");
+/// ```
+pub fn normalize(statement: &str) -> Result<String> {
+    let input = CString::new(statement)?;
+    let result = unsafe { pg_query_normalize(input.as_ptr()) };
+    let normalized_query = if !result.error.is_null() {
+        let message = unsafe { CStr::from_ptr((*result.error).message) }
+            .to_string_lossy()
+            .to_string();
+        Err(Error::Parse(message))
+    } else {
+        let n = unsafe { CStr::from_ptr(result.normalized_query) };
+        Ok(n.to_string_lossy().to_string())
+    };
+    unsafe { pg_query_free_normalize_result(result) };
+    normalized_query
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::{Error, normalize};
+
+    #[test]
+    fn it_normalizes_simple_query() {
+        let result = normalize("SELECT 1").unwrap();
+        assert_eq!(result, "SELECT $1");
+    }
+
+    #[test]
+    fn it_normalizes_in() {
+        let result =
+            normalize("SELECT 1 FROM x WHERE y = 12561 AND z = '124' AND b IN (1, 2, 3)").unwrap();
+        assert_eq!(
+            result,
+            "SELECT $1 FROM x WHERE y = $2 AND z = $3 AND b IN ($4, $5, $6)"
+        );
+    }
+
+    #[test]
+    fn it_errors_on_invalid_input() {
+        let error = normalize("CREATE RANDOM ix_test ON contacts.person;")
+            .err()
+            .unwrap();
+        assert_eq!(
+            error,
+            Error::Parse("syntax error at or near \"RANDOM\"".into())
+        );
+    }
+
+    #[test]
+    fn it_normalizes_subselects() {
+        let result =
+            normalize("SELECT 1 FROM x WHERE y = (SELECT 123 FROM a WHERE z = 'bla')").unwrap();
+        assert_eq!(
+            result,
+            "SELECT $1 FROM x WHERE y = (SELECT $2 FROM a WHERE z = $3)"
+        );
+    }
+
+    #[test]
+    fn it_normalizes_any() {
+        let result = normalize("SELECT * FROM x WHERE y = ANY(array[1, 2])").unwrap();
+        assert_eq!(result, "SELECT * FROM x WHERE y = ANY(array[$1, $2])");
+
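+        // With a subselect, only the literal inside it becomes a parameter: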
let result = normalize("SELECT * FROM x WHERE y = ANY(SELECT 1)").unwrap(); + assert_eq!(result, "SELECT * FROM x WHERE y = ANY(SELECT $1)"); + } + + #[test] + fn it_normalizes_complicated_strings() { + let result = normalize("SELECT U&'d\\0061t\\+000061' FROM x").unwrap(); + assert_eq!(result, "SELECT $1 FROM x"); + + let result = normalize("SELECT u&'d\\0061t\\+000061' FROM x").unwrap(); + assert_eq!(result, "SELECT $1 FROM x"); + + let result = normalize("SELECT * FROM x WHERE z NOT LIKE E'abc'AND TRUE").unwrap(); + assert_eq!(result, "SELECT * FROM x WHERE z NOT LIKE $1AND $2"); + + let result = normalize("SELECT U&'d\\0061t\\+000061'-- comment\nFROM x").unwrap(); + assert_eq!(result, "SELECT $1-- comment\nFROM x"); + } + + #[test] + fn it_normalizes_copy() { + let result = normalize("COPY (SELECT * FROM t WHERE id IN ('1', '2')) TO STDOUT").unwrap(); + assert_eq!( + result, + "COPY (SELECT * FROM t WHERE id IN ($1, $2)) TO STDOUT" + ); + } + + #[test] + fn it_normalizes_set() { + let result = normalize("SET test=123").unwrap(); + assert_eq!(result, "SET test=$1"); + + let result = normalize("SET CLIENT_ENCODING = UTF8").unwrap(); + assert_eq!(result, "SET CLIENT_ENCODING = $1"); + } + + #[test] + fn it_does_not_error_on_deallocate() { + let result = normalize("DEALLOCATE bla; SELECT 1").unwrap(); + assert_eq!(result, "DEALLOCATE bla; SELECT $1"); + } + + #[test] + fn it_normalizes_explain() { + let result = normalize("EXPLAIN SELECT x FROM y WHERE z = 1").unwrap(); + assert_eq!(result, "EXPLAIN SELECT x FROM y WHERE z = $1"); + } + + #[test] + fn it_normalizes_declare_curson() { + let result = + normalize("DECLARE cursor_b CURSOR FOR SELECT * FROM databases WHERE id = 23").unwrap(); + assert_eq!( + result, + "DECLARE cursor_b CURSOR FOR SELECT * FROM databases WHERE id = $1" + ); + } +} diff --git a/crates/pgt_query/src/parse.rs b/crates/pgt_query/src/parse.rs new file mode 100644 index 00000000..5853dfbc --- /dev/null +++ b/crates/pgt_query/src/parse.rs @@ -0,0 +1,149 @@ +use std::ffi::{CStr, CString}; + +use crate::NodeEnum; +use crate::bindings::*; +use crate::error::*; +use crate::protobuf; + +use prost::Message; + +/// Parses the given SQL statement into the given abstract syntax tree. 
+///
+/// # Example
+///
+/// ```rust
+/// use pgt_query::parse;
+///
+/// let result = parse("SELECT * FROM contacts");
+/// assert!(result.is_ok());
+/// let result = result.unwrap();
+/// assert_eq!(result.protobuf.stmts.len(), 1);
+/// ```
+pub fn parse(statement: &str) -> Result<ParseResult> {
+    let input = CString::new(statement)?;
+    let result = unsafe { pg_query_parse_protobuf(input.as_ptr()) };
+    let parse_result = if !result.error.is_null() {
+        let message = unsafe { CStr::from_ptr((*result.error).message) }
+            .to_string_lossy()
+            .to_string();
+        Err(Error::Parse(message))
+    } else {
+        let data = unsafe {
+            std::slice::from_raw_parts(
+                result.parse_tree.data as *const u8,
+                result.parse_tree.len as usize,
+            )
+        };
+        let stderr = unsafe { CStr::from_ptr(result.stderr_buffer) }
+            .to_string_lossy()
+            .to_string();
+        protobuf::ParseResult::decode(data)
+            .map_err(Error::Decode)
+            .map(|result| ParseResult::new(result, stderr))
+    };
+    unsafe { pg_query_free_protobuf_parse_result(result) };
+    parse_result
+}
+
+/// The result of parsing a SQL query
+#[derive(Debug)]
+pub struct ParseResult {
+    /// The parsed protobuf result
+    pub protobuf: protobuf::ParseResult,
+    /// Warnings captured during parsing
+    pub warnings: Vec<String>,
+}
+
+impl ParseResult {
+    /// Create a new ParseResult
+    pub fn new(protobuf: protobuf::ParseResult, stderr: String) -> Self {
+        let warnings = stderr
+            .lines()
+            .filter_map(|l| {
+                if l.starts_with("WARNING") {
+                    Some(l.trim().into())
+                } else {
+                    None
+                }
+            })
+            .collect();
+
+        Self { protobuf, warnings }
+    }
+
+    pub fn deparse(&self) -> Result<String> {
+        crate::deparse(&self.protobuf)
+    }
+
+    pub fn stmts(&self) -> Vec<&NodeEnum> {
+        self.protobuf
+            .stmts
+            .iter()
+            .filter_map(|s| s.stmt.as_ref().and_then(|s| s.node.as_ref()))
+            .collect()
+    }
+
+    pub fn stmts_mut(&mut self) -> Vec<&mut NodeEnum> {
+        self.protobuf
+            .stmts
+            .iter_mut()
+            .filter_map(|s| s.stmt.as_mut().and_then(|s| s.node.as_mut()))
+            .collect()
+    }
+
+    /// Returns a reference to the root node of the parse tree.
+    ///
+    /// Returns None if there is not exactly one statement in the parse result.
+    pub fn root(&self) -> Option<&NodeEnum> {
+        if self.protobuf.stmts.len() != 1 {
+            return None;
+        }
+
+        // Get the first (and only) statement
+        let raw_stmt = &self.protobuf.stmts[0];
+
+        // Navigate: RawStmt -> Node -> NodeEnum
+        raw_stmt.stmt.as_ref().and_then(|stmt| stmt.node.as_ref())
+    }
+
+    /// Consumes the ParseResult and returns the root node of the parse tree.
+    ///
+    /// Returns None if there is not exactly one statement in the parse result.
+    /// This method avoids cloning by taking ownership of the ParseResult.
+    pub fn into_root(self) -> Option<NodeEnum> {
+        if self.protobuf.stmts.len() != 1 {
+            return None;
+        }
+
+        // Extract the first (and only) statement by taking ownership
+        let raw_stmt = self.protobuf.stmts.into_iter().next()?;
+
+        // Navigate: RawStmt -> Node -> NodeEnum
+        raw_stmt.stmt.and_then(|stmt| stmt.node)
+    }
+
+    /// Returns a mutable reference to the root node of the parse tree.
+    ///
+    /// Returns None if there is not exactly one statement in the parse result.
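+    ///
+    /// For example, a single-statement query always has a root node:
+    ///
+    /// ```rust
+    /// let mut result = pgt_query::parse("SELECT 1").unwrap();
+    /// assert!(result.root_mut().is_some());
+    /// ```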
+ pub fn root_mut(&mut self) -> Option<&mut NodeEnum> { + if self.protobuf.stmts.len() != 1 { + return None; + } + + // Get the first (and only) statement + let raw_stmt = &mut self.protobuf.stmts[0]; + + // Navigate: RawStmt -> Node -> NodeEnum + raw_stmt.stmt.as_mut().and_then(|stmt| stmt.node.as_mut()) + } +} + +#[cfg(test)] +mod tests { + use crate::parse; + + #[test] + fn it_parses_parameter_queries() { + assert!(parse("select $0 + $1 + $2 + $3 + $4 + $5").is_ok()); + } +} diff --git a/crates/pgt_query/src/plpgsql.rs b/crates/pgt_query/src/plpgsql.rs new file mode 100644 index 00000000..fbaa9694 --- /dev/null +++ b/crates/pgt_query/src/plpgsql.rs @@ -0,0 +1,38 @@ +use std::ffi::{CStr, CString}; + +use crate::bindings::*; +use crate::error::*; + +/// An experimental API which parses a PLPGSQL function. This currently drops the returned +/// structure and returns only a Result<()>. +/// +/// # Example +/// +/// ```rust +/// let result = pgt_query::parse_plpgsql(" +/// CREATE OR REPLACE FUNCTION cs_fmt_browser_version(v_name varchar, v_version varchar) +/// RETURNS varchar AS $$ +/// BEGIN +/// IF v_version IS NULL THEN +/// RETURN v_name; +/// END IF; +/// RETURN v_name || '/' || v_version; +/// END; +/// $$ LANGUAGE plpgsql; +/// "); +/// assert!(result.is_ok()); +/// ``` +pub fn parse_plpgsql(stmt: &str) -> Result<()> { + let input = CString::new(stmt)?; + let result = unsafe { pg_query_parse_plpgsql(input.as_ptr()) }; + let structure = if !result.error.is_null() { + let message = unsafe { CStr::from_ptr((*result.error).message) } + .to_string_lossy() + .to_string(); + Err(Error::Parse(message)) + } else { + Ok(()) + }; + unsafe { pg_query_free_plpgsql_parse_result(result) }; + structure +} diff --git a/crates/pgt_query/src/protobuf.rs b/crates/pgt_query/src/protobuf.rs new file mode 100644 index 00000000..c47bfe52 --- /dev/null +++ b/crates/pgt_query/src/protobuf.rs @@ -0,0 +1,8846 @@ +// This file is @generated by prost-build. 
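+// It is checked in so that builds work without protoc; when protoc is
+// available, build.rs regenerates it from libpg_query's pg_query.proto.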
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ParseResult {
+    #[prost(int32, tag = "1")]
+    pub version: i32,
+    #[prost(message, repeated, tag = "2")]
+    pub stmts: ::prost::alloc::vec::Vec<RawStmt>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ScanResult {
+    #[prost(int32, tag = "1")]
+    pub version: i32,
+    #[prost(message, repeated, tag = "2")]
+    pub tokens: ::prost::alloc::vec::Vec<ScanToken>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Node {
+    #[prost(
+        oneof = "node::Node",
+        tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268"
+    )]
+    pub node: ::core::option::Option<node::Node>,
+}
+/// Nested message and enum types in `Node`.
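+///
+/// `Node` is the oneof wrapper: one variant per PostgreSQL AST node type,
+/// with boxed variants for the recursive node types.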
+pub mod node {
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Node {
+        #[prost(message, tag = "1")]
+        Alias(super::Alias),
+        #[prost(message, tag = "2")]
+        RangeVar(super::RangeVar),
+        #[prost(message, tag = "3")]
+        TableFunc(::prost::alloc::boxed::Box<super::TableFunc>),
+        #[prost(message, tag = "4")]
+        IntoClause(::prost::alloc::boxed::Box<super::IntoClause>),
+        #[prost(message, tag = "5")]
+        Var(::prost::alloc::boxed::Box<super::Var>),
+        #[prost(message, tag = "6")]
+        Param(::prost::alloc::boxed::Box<super::Param>),
+        #[prost(message, tag = "7")]
+        Aggref(::prost::alloc::boxed::Box<super::Aggref>),
+        #[prost(message, tag = "8")]
+        GroupingFunc(::prost::alloc::boxed::Box<super::GroupingFunc>),
+        #[prost(message, tag = "9")]
+        WindowFunc(::prost::alloc::boxed::Box<super::WindowFunc>),
+        #[prost(message, tag = "10")]
+        WindowFuncRunCondition(
+            ::prost::alloc::boxed::Box<super::WindowFuncRunCondition>,
+        ),
+        #[prost(message, tag = "11")]
+        MergeSupportFunc(::prost::alloc::boxed::Box<super::MergeSupportFunc>),
+        #[prost(message, tag = "12")]
+        SubscriptingRef(::prost::alloc::boxed::Box<super::SubscriptingRef>),
+        #[prost(message, tag = "13")]
+        FuncExpr(::prost::alloc::boxed::Box<super::FuncExpr>),
+        #[prost(message, tag = "14")]
+        NamedArgExpr(::prost::alloc::boxed::Box<super::NamedArgExpr>),
+        #[prost(message, tag = "15")]
+        OpExpr(::prost::alloc::boxed::Box<super::OpExpr>),
+        #[prost(message, tag = "16")]
+        DistinctExpr(::prost::alloc::boxed::Box<super::DistinctExpr>),
+        #[prost(message, tag = "17")]
+        NullIfExpr(::prost::alloc::boxed::Box<super::NullIfExpr>),
+        #[prost(message, tag = "18")]
+        ScalarArrayOpExpr(::prost::alloc::boxed::Box<super::ScalarArrayOpExpr>),
+        #[prost(message, tag = "19")]
+        BoolExpr(::prost::alloc::boxed::Box<super::BoolExpr>),
+        #[prost(message, tag = "20")]
+        SubLink(::prost::alloc::boxed::Box<super::SubLink>),
+        #[prost(message, tag = "21")]
+        SubPlan(::prost::alloc::boxed::Box<super::SubPlan>),
+        #[prost(message, tag = "22")]
+        AlternativeSubPlan(::prost::alloc::boxed::Box<super::AlternativeSubPlan>),
+        #[prost(message, tag = "23")]
+        FieldSelect(::prost::alloc::boxed::Box<super::FieldSelect>),
+        #[prost(message, tag = "24")]
+        FieldStore(::prost::alloc::boxed::Box<super::FieldStore>),
+        #[prost(message, tag = "25")]
+        RelabelType(::prost::alloc::boxed::Box<super::RelabelType>),
+        #[prost(message, tag = "26")]
+        CoerceViaIo(::prost::alloc::boxed::Box<super::CoerceViaIo>),
+        #[prost(message, tag = "27")]
+        ArrayCoerceExpr(::prost::alloc::boxed::Box<super::ArrayCoerceExpr>),
+        #[prost(message, tag = "28")]
+        ConvertRowtypeExpr(::prost::alloc::boxed::Box<super::ConvertRowtypeExpr>),
+        #[prost(message, tag = "29")]
+        CollateExpr(::prost::alloc::boxed::Box<super::CollateExpr>),
+        #[prost(message, tag = "30")]
+        CaseExpr(::prost::alloc::boxed::Box<super::CaseExpr>),
+        #[prost(message, tag = "31")]
+        CaseWhen(::prost::alloc::boxed::Box<super::CaseWhen>),
+        #[prost(message, tag = "32")]
+        CaseTestExpr(::prost::alloc::boxed::Box<super::CaseTestExpr>),
+        #[prost(message, tag = "33")]
+        ArrayExpr(::prost::alloc::boxed::Box<super::ArrayExpr>),
+        #[prost(message, tag = "34")]
+        RowExpr(::prost::alloc::boxed::Box<super::RowExpr>),
+        #[prost(message, tag = "35")]
+        RowCompareExpr(::prost::alloc::boxed::Box<super::RowCompareExpr>),
+        #[prost(message, tag = "36")]
+        CoalesceExpr(::prost::alloc::boxed::Box<super::CoalesceExpr>),
+        #[prost(message, tag = "37")]
+        MinMaxExpr(::prost::alloc::boxed::Box<super::MinMaxExpr>),
+        #[prost(message, tag = "38")]
+        SqlvalueFunction(::prost::alloc::boxed::Box<super::SqlValueFunction>),
+        #[prost(message, tag = "39")]
+        XmlExpr(::prost::alloc::boxed::Box<super::XmlExpr>),
+        #[prost(message, tag = "40")]
+        JsonFormat(super::JsonFormat),
+        #[prost(message, tag = "41")]
+        JsonReturning(super::JsonReturning),
+        #[prost(message, tag = "42")]
+        JsonValueExpr(::prost::alloc::boxed::Box<super::JsonValueExpr>),
+        #[prost(message, tag = "43")]
+        JsonConstructorExpr(::prost::alloc::boxed::Box<super::JsonConstructorExpr>),
+        #[prost(message, tag = "44")]
+        JsonIsPredicate(::prost::alloc::boxed::Box<super::JsonIsPredicate>),
+        #[prost(message, tag = "45")]
+        JsonBehavior(::prost::alloc::boxed::Box<super::JsonBehavior>),
+        #[prost(message, tag = "46")]
+        JsonExpr(::prost::alloc::boxed::Box<super::JsonExpr>),
+        #[prost(message, tag = "47")]
+        JsonTablePath(super::JsonTablePath),
+        #[prost(message, tag = "48")]
+        JsonTablePathScan(::prost::alloc::boxed::Box<super::JsonTablePathScan>),
+        #[prost(message, tag = "49")]
+        JsonTableSiblingJoin(::prost::alloc::boxed::Box<super::JsonTableSiblingJoin>),
+        #[prost(message, tag = "50")]
+        NullTest(::prost::alloc::boxed::Box<super::NullTest>),
+        #[prost(message, tag = "51")]
+        BooleanTest(::prost::alloc::boxed::Box<super::BooleanTest>),
+        #[prost(message, tag = "52")]
+        MergeAction(::prost::alloc::boxed::Box<super::MergeAction>),
+        #[prost(message, tag = "53")]
+        CoerceToDomain(::prost::alloc::boxed::Box<super::CoerceToDomain>),
+        #[prost(message, tag = "54")]
+        CoerceToDomainValue(::prost::alloc::boxed::Box<super::CoerceToDomainValue>),
+        #[prost(message, tag = "55")]
+        SetToDefault(::prost::alloc::boxed::Box<super::SetToDefault>),
+        #[prost(message, tag = "56")]
+        CurrentOfExpr(::prost::alloc::boxed::Box<super::CurrentOfExpr>),
+        #[prost(message, tag = "57")]
+        NextValueExpr(::prost::alloc::boxed::Box<super::NextValueExpr>),
+        #[prost(message, tag = "58")]
+        InferenceElem(::prost::alloc::boxed::Box<super::InferenceElem>),
+        #[prost(message, tag = "59")]
+        TargetEntry(::prost::alloc::boxed::Box<super::TargetEntry>),
+        #[prost(message, tag = "60")]
+        RangeTblRef(super::RangeTblRef),
+        #[prost(message, tag = "61")]
+        JoinExpr(::prost::alloc::boxed::Box<super::JoinExpr>),
+        #[prost(message, tag = "62")]
+        FromExpr(::prost::alloc::boxed::Box<super::FromExpr>),
+        #[prost(message, tag = "63")]
+        OnConflictExpr(::prost::alloc::boxed::Box<super::OnConflictExpr>),
+        #[prost(message, tag = "64")]
+        Query(::prost::alloc::boxed::Box<super::Query>),
+        #[prost(message, tag = "65")]
+        TypeName(super::TypeName),
+        #[prost(message, tag = "66")]
+        ColumnRef(super::ColumnRef),
+        #[prost(message, tag = "67")]
+        ParamRef(super::ParamRef),
+        #[prost(message, tag = "68")]
+        AExpr(::prost::alloc::boxed::Box<super::AExpr>),
+        #[prost(message, tag = "69")]
+        TypeCast(::prost::alloc::boxed::Box<super::TypeCast>),
+        #[prost(message, tag = "70")]
+        CollateClause(::prost::alloc::boxed::Box<super::CollateClause>),
+        #[prost(message, tag = "71")]
+        RoleSpec(super::RoleSpec),
+        #[prost(message, tag = "72")]
+        FuncCall(::prost::alloc::boxed::Box<super::FuncCall>),
+        #[prost(message, tag = "73")]
+        AStar(super::AStar),
+        #[prost(message, tag = "74")]
+        AIndices(::prost::alloc::boxed::Box<super::AIndices>),
+        #[prost(message, tag = "75")]
+        AIndirection(::prost::alloc::boxed::Box<super::AIndirection>),
+        #[prost(message, tag = "76")]
+        AArrayExpr(super::AArrayExpr),
+        #[prost(message, tag = "77")]
+        ResTarget(::prost::alloc::boxed::Box<super::ResTarget>),
+        #[prost(message, tag = "78")]
+        MultiAssignRef(::prost::alloc::boxed::Box<super::MultiAssignRef>),
+        #[prost(message, tag = "79")]
+        SortBy(::prost::alloc::boxed::Box<super::SortBy>),
+        #[prost(message, tag = "80")]
+        WindowDef(::prost::alloc::boxed::Box<super::WindowDef>),
+        #[prost(message, tag = "81")]
+        RangeSubselect(::prost::alloc::boxed::Box<super::RangeSubselect>),
+        #[prost(message, tag = "82")]
+        RangeFunction(super::RangeFunction),
+        #[prost(message, tag = "83")]
+        RangeTableFunc(::prost::alloc::boxed::Box<super::RangeTableFunc>),
+        #[prost(message, tag = "84")]
+        RangeTableFuncCol(::prost::alloc::boxed::Box<super::RangeTableFuncCol>),
+        #[prost(message, tag = "85")]
+        RangeTableSample(::prost::alloc::boxed::Box<super::RangeTableSample>),
+        #[prost(message, tag = "86")]
+        ColumnDef(::prost::alloc::boxed::Box<super::ColumnDef>),
+        #[prost(message, tag = "87")]
+        TableLikeClause(super::TableLikeClause),
+        #[prost(message, tag = "88")]
+        IndexElem(::prost::alloc::boxed::Box<super::IndexElem>),
+        #[prost(message, tag = "89")]
+        DefElem(::prost::alloc::boxed::Box<super::DefElem>),
+        #[prost(message, tag = "90")]
+        LockingClause(super::LockingClause),
+        #[prost(message, tag = "91")]
+        XmlSerialize(::prost::alloc::boxed::Box<super::XmlSerialize>),
+        #[prost(message, tag = "92")]
+        PartitionElem(::prost::alloc::boxed::Box<super::PartitionElem>),
+        #[prost(message, tag = "93")]
+        PartitionSpec(super::PartitionSpec),
+        #[prost(message, tag = "94")]
+        PartitionBoundSpec(super::PartitionBoundSpec),
+        #[prost(message, tag = "95")]
+        PartitionRangeDatum(::prost::alloc::boxed::Box<super::PartitionRangeDatum>),
+        #[prost(message, tag = "96")]
+        SinglePartitionSpec(super::SinglePartitionSpec),
+        #[prost(message, tag = "97")]
+        PartitionCmd(super::PartitionCmd),
+        #[prost(message, tag = "98")]
+        RangeTblEntry(::prost::alloc::boxed::Box<super::RangeTblEntry>),
+        #[prost(message, tag = "99")]
+        RtepermissionInfo(super::RtePermissionInfo),
+        #[prost(message, tag = "100")]
+        RangeTblFunction(::prost::alloc::boxed::Box<super::RangeTblFunction>),
+        #[prost(message, tag = "101")]
+        TableSampleClause(::prost::alloc::boxed::Box<super::TableSampleClause>),
+        #[prost(message, tag = "102")]
+        WithCheckOption(::prost::alloc::boxed::Box<super::WithCheckOption>),
+        #[prost(message, tag = "103")]
+        SortGroupClause(super::SortGroupClause),
+        #[prost(message, tag = "104")]
+        GroupingSet(super::GroupingSet),
+        #[prost(message, tag = "105")]
+        WindowClause(::prost::alloc::boxed::Box<super::WindowClause>),
+        #[prost(message, tag = "106")]
+        RowMarkClause(super::RowMarkClause),
+        #[prost(message, tag = "107")]
+        WithClause(super::WithClause),
+        #[prost(message, tag = "108")]
+        InferClause(::prost::alloc::boxed::Box<super::InferClause>),
+        #[prost(message, tag = "109")]
+        OnConflictClause(::prost::alloc::boxed::Box<super::OnConflictClause>),
+        #[prost(message, tag = "110")]
+        CtesearchClause(super::CteSearchClause),
+        #[prost(message, tag = "111")]
+        CtecycleClause(::prost::alloc::boxed::Box<super::CteCycleClause>),
+        #[prost(message, tag = "112")]
+        CommonTableExpr(::prost::alloc::boxed::Box<super::CommonTableExpr>),
+        #[prost(message, tag = "113")]
+        MergeWhenClause(::prost::alloc::boxed::Box<super::MergeWhenClause>),
+        #[prost(message, tag = "114")]
+        TriggerTransition(super::TriggerTransition),
+        #[prost(message, tag = "115")]
+        JsonOutput(super::JsonOutput),
+        #[prost(message, tag = "116")]
+        JsonArgument(::prost::alloc::boxed::Box<super::JsonArgument>),
+        #[prost(message, tag = "117")]
+        JsonFuncExpr(::prost::alloc::boxed::Box<super::JsonFuncExpr>),
+        #[prost(message, tag = "118")]
+        JsonTablePathSpec(::prost::alloc::boxed::Box<super::JsonTablePathSpec>),
+        #[prost(message, tag = "119")]
+        JsonTable(::prost::alloc::boxed::Box<super::JsonTable>),
+        #[prost(message, tag = "120")]
+        JsonTableColumn(::prost::alloc::boxed::Box<super::JsonTableColumn>),
+        #[prost(message, tag = "121")]
+        JsonKeyValue(::prost::alloc::boxed::Box<super::JsonKeyValue>),
+        #[prost(message, tag = "122")]
+        JsonParseExpr(::prost::alloc::boxed::Box<super::JsonParseExpr>),
+        #[prost(message, tag = "123")]
+        JsonScalarExpr(::prost::alloc::boxed::Box<super::JsonScalarExpr>),
+        #[prost(message, tag = "124")]
+        JsonSerializeExpr(::prost::alloc::boxed::Box<super::JsonSerializeExpr>),
+        #[prost(message, tag = "125")]
+        JsonObjectConstructor(super::JsonObjectConstructor),
+        #[prost(message, tag = "126")]
+        JsonArrayConstructor(super::JsonArrayConstructor),
+        #[prost(message, tag = "127")]
+        JsonArrayQueryConstructor(
+            ::prost::alloc::boxed::Box<super::JsonArrayQueryConstructor>,
+        ),
+        #[prost(message, tag = "128")]
+        JsonAggConstructor(::prost::alloc::boxed::Box<super::JsonAggConstructor>),
+        #[prost(message, tag = "129")]
+        JsonObjectAgg(::prost::alloc::boxed::Box<super::JsonObjectAgg>),
+        #[prost(message, tag = "130")]
+        JsonArrayAgg(::prost::alloc::boxed::Box<super::JsonArrayAgg>),
+        #[prost(message, tag = "131")]
+        RawStmt(::prost::alloc::boxed::Box<super::RawStmt>),
+        #[prost(message, tag = "132")]
+        InsertStmt(::prost::alloc::boxed::Box<super::InsertStmt>),
+        #[prost(message, tag = "133")]
+        DeleteStmt(::prost::alloc::boxed::Box<super::DeleteStmt>),
+        #[prost(message, tag = "134")]
+        UpdateStmt(::prost::alloc::boxed::Box<super::UpdateStmt>),
+        #[prost(message, tag = "135")]
+        MergeStmt(::prost::alloc::boxed::Box<super::MergeStmt>),
+        #[prost(message, tag = "136")]
+        SelectStmt(::prost::alloc::boxed::Box<super::SelectStmt>),
+        #[prost(message, tag = "137")]
+        SetOperationStmt(::prost::alloc::boxed::Box<super::SetOperationStmt>),
+        #[prost(message, tag = "138")]
+        ReturnStmt(::prost::alloc::boxed::Box<super::ReturnStmt>),
+        #[prost(message, tag = "139")]
+        PlassignStmt(::prost::alloc::boxed::Box<super::PlAssignStmt>),
+        #[prost(message, tag = "140")]
+        CreateSchemaStmt(super::CreateSchemaStmt),
#[prost(message, tag = "141")] + AlterTableStmt(super::AlterTableStmt), + #[prost(message, tag = "142")] + ReplicaIdentityStmt(super::ReplicaIdentityStmt), + #[prost(message, tag = "143")] + AlterTableCmd(::prost::alloc::boxed::Box), + #[prost(message, tag = "144")] + AlterCollationStmt(super::AlterCollationStmt), + #[prost(message, tag = "145")] + AlterDomainStmt(::prost::alloc::boxed::Box), + #[prost(message, tag = "146")] + GrantStmt(super::GrantStmt), + #[prost(message, tag = "147")] + ObjectWithArgs(super::ObjectWithArgs), + #[prost(message, tag = "148")] + AccessPriv(super::AccessPriv), + #[prost(message, tag = "149")] + GrantRoleStmt(super::GrantRoleStmt), + #[prost(message, tag = "150")] + AlterDefaultPrivilegesStmt(super::AlterDefaultPrivilegesStmt), + #[prost(message, tag = "151")] + CopyStmt(::prost::alloc::boxed::Box), + #[prost(message, tag = "152")] + VariableSetStmt(super::VariableSetStmt), + #[prost(message, tag = "153")] + VariableShowStmt(super::VariableShowStmt), + #[prost(message, tag = "154")] + CreateStmt(super::CreateStmt), + #[prost(message, tag = "155")] + Constraint(::prost::alloc::boxed::Box), + #[prost(message, tag = "156")] + CreateTableSpaceStmt(super::CreateTableSpaceStmt), + #[prost(message, tag = "157")] + DropTableSpaceStmt(super::DropTableSpaceStmt), + #[prost(message, tag = "158")] + AlterTableSpaceOptionsStmt(super::AlterTableSpaceOptionsStmt), + #[prost(message, tag = "159")] + AlterTableMoveAllStmt(super::AlterTableMoveAllStmt), + #[prost(message, tag = "160")] + CreateExtensionStmt(super::CreateExtensionStmt), + #[prost(message, tag = "161")] + AlterExtensionStmt(super::AlterExtensionStmt), + #[prost(message, tag = "162")] + AlterExtensionContentsStmt( + ::prost::alloc::boxed::Box, + ), + #[prost(message, tag = "163")] + CreateFdwStmt(super::CreateFdwStmt), + #[prost(message, tag = "164")] + AlterFdwStmt(super::AlterFdwStmt), + #[prost(message, tag = "165")] + CreateForeignServerStmt(super::CreateForeignServerStmt), + #[prost(message, tag = "166")] + AlterForeignServerStmt(super::AlterForeignServerStmt), + #[prost(message, tag = "167")] + CreateForeignTableStmt(super::CreateForeignTableStmt), + #[prost(message, tag = "168")] + CreateUserMappingStmt(super::CreateUserMappingStmt), + #[prost(message, tag = "169")] + AlterUserMappingStmt(super::AlterUserMappingStmt), + #[prost(message, tag = "170")] + DropUserMappingStmt(super::DropUserMappingStmt), + #[prost(message, tag = "171")] + ImportForeignSchemaStmt(super::ImportForeignSchemaStmt), + #[prost(message, tag = "172")] + CreatePolicyStmt(::prost::alloc::boxed::Box), + #[prost(message, tag = "173")] + AlterPolicyStmt(::prost::alloc::boxed::Box), + #[prost(message, tag = "174")] + CreateAmStmt(super::CreateAmStmt), + #[prost(message, tag = "175")] + CreateTrigStmt(::prost::alloc::boxed::Box), + #[prost(message, tag = "176")] + CreateEventTrigStmt(super::CreateEventTrigStmt), + #[prost(message, tag = "177")] + AlterEventTrigStmt(super::AlterEventTrigStmt), + #[prost(message, tag = "178")] + CreatePlangStmt(super::CreatePLangStmt), + #[prost(message, tag = "179")] + CreateRoleStmt(super::CreateRoleStmt), + #[prost(message, tag = "180")] + AlterRoleStmt(super::AlterRoleStmt), + #[prost(message, tag = "181")] + AlterRoleSetStmt(super::AlterRoleSetStmt), + #[prost(message, tag = "182")] + DropRoleStmt(super::DropRoleStmt), + #[prost(message, tag = "183")] + CreateSeqStmt(super::CreateSeqStmt), + #[prost(message, tag = "184")] + AlterSeqStmt(super::AlterSeqStmt), + #[prost(message, tag = "185")] + 
+        DefineStmt(super::DefineStmt),
+        #[prost(message, tag = "186")]
+        CreateDomainStmt(::prost::alloc::boxed::Box<super::CreateDomainStmt>),
+        #[prost(message, tag = "187")]
+        CreateOpClassStmt(super::CreateOpClassStmt),
+        #[prost(message, tag = "188")]
+        CreateOpClassItem(super::CreateOpClassItem),
+        #[prost(message, tag = "189")]
+        CreateOpFamilyStmt(super::CreateOpFamilyStmt),
+        #[prost(message, tag = "190")]
+        AlterOpFamilyStmt(super::AlterOpFamilyStmt),
+        #[prost(message, tag = "191")]
+        DropStmt(super::DropStmt),
+        #[prost(message, tag = "192")]
+        TruncateStmt(super::TruncateStmt),
+        #[prost(message, tag = "193")]
+        CommentStmt(::prost::alloc::boxed::Box<super::CommentStmt>),
+        #[prost(message, tag = "194")]
+        SecLabelStmt(::prost::alloc::boxed::Box<super::SecLabelStmt>),
+        #[prost(message, tag = "195")]
+        DeclareCursorStmt(::prost::alloc::boxed::Box<super::DeclareCursorStmt>),
+        #[prost(message, tag = "196")]
+        ClosePortalStmt(super::ClosePortalStmt),
+        #[prost(message, tag = "197")]
+        FetchStmt(super::FetchStmt),
+        #[prost(message, tag = "198")]
+        IndexStmt(::prost::alloc::boxed::Box<super::IndexStmt>),
+        #[prost(message, tag = "199")]
+        CreateStatsStmt(super::CreateStatsStmt),
+        #[prost(message, tag = "200")]
+        StatsElem(::prost::alloc::boxed::Box<super::StatsElem>),
+        #[prost(message, tag = "201")]
+        AlterStatsStmt(::prost::alloc::boxed::Box<super::AlterStatsStmt>),
+        #[prost(message, tag = "202")]
+        CreateFunctionStmt(::prost::alloc::boxed::Box<super::CreateFunctionStmt>),
+        #[prost(message, tag = "203")]
+        FunctionParameter(::prost::alloc::boxed::Box<super::FunctionParameter>),
+        #[prost(message, tag = "204")]
+        AlterFunctionStmt(super::AlterFunctionStmt),
+        #[prost(message, tag = "205")]
+        DoStmt(super::DoStmt),
+        #[prost(message, tag = "206")]
+        InlineCodeBlock(super::InlineCodeBlock),
+        #[prost(message, tag = "207")]
+        CallStmt(::prost::alloc::boxed::Box<super::CallStmt>),
+        #[prost(message, tag = "208")]
+        CallContext(super::CallContext),
+        #[prost(message, tag = "209")]
+        RenameStmt(::prost::alloc::boxed::Box<super::RenameStmt>),
+        #[prost(message, tag = "210")]
+        AlterObjectDependsStmt(
+            ::prost::alloc::boxed::Box<super::AlterObjectDependsStmt>,
+        ),
+        #[prost(message, tag = "211")]
+        AlterObjectSchemaStmt(::prost::alloc::boxed::Box<super::AlterObjectSchemaStmt>),
+        #[prost(message, tag = "212")]
+        AlterOwnerStmt(::prost::alloc::boxed::Box<super::AlterOwnerStmt>),
+        #[prost(message, tag = "213")]
+        AlterOperatorStmt(super::AlterOperatorStmt),
+        #[prost(message, tag = "214")]
+        AlterTypeStmt(super::AlterTypeStmt),
+        #[prost(message, tag = "215")]
+        RuleStmt(::prost::alloc::boxed::Box<super::RuleStmt>),
+        #[prost(message, tag = "216")]
+        NotifyStmt(super::NotifyStmt),
+        #[prost(message, tag = "217")]
+        ListenStmt(super::ListenStmt),
+        #[prost(message, tag = "218")]
+        UnlistenStmt(super::UnlistenStmt),
+        #[prost(message, tag = "219")]
+        TransactionStmt(super::TransactionStmt),
+        #[prost(message, tag = "220")]
+        CompositeTypeStmt(super::CompositeTypeStmt),
+        #[prost(message, tag = "221")]
+        CreateEnumStmt(super::CreateEnumStmt),
+        #[prost(message, tag = "222")]
+        CreateRangeStmt(super::CreateRangeStmt),
+        #[prost(message, tag = "223")]
+        AlterEnumStmt(super::AlterEnumStmt),
+        #[prost(message, tag = "224")]
+        ViewStmt(::prost::alloc::boxed::Box<super::ViewStmt>),
+        #[prost(message, tag = "225")]
+        LoadStmt(super::LoadStmt),
+        #[prost(message, tag = "226")]
+        CreatedbStmt(super::CreatedbStmt),
+        #[prost(message, tag = "227")]
+        AlterDatabaseStmt(super::AlterDatabaseStmt),
+        #[prost(message, tag = "228")]
+        AlterDatabaseRefreshCollStmt(super::AlterDatabaseRefreshCollStmt),
+        #[prost(message, tag = "229")]
+        AlterDatabaseSetStmt(super::AlterDatabaseSetStmt),
+        #[prost(message, tag = "230")]
+        DropdbStmt(super::DropdbStmt),
+        #[prost(message, tag = "231")]
+        AlterSystemStmt(super::AlterSystemStmt),
+        #[prost(message, tag = "232")]
+        ClusterStmt(super::ClusterStmt),
+        #[prost(message, tag = "233")]
+        VacuumStmt(super::VacuumStmt),
+        #[prost(message, tag = "234")]
+        VacuumRelation(super::VacuumRelation),
+        #[prost(message, tag = "235")]
+        ExplainStmt(::prost::alloc::boxed::Box<super::ExplainStmt>),
+        #[prost(message, tag = "236")]
+        CreateTableAsStmt(::prost::alloc::boxed::Box<super::CreateTableAsStmt>),
+        #[prost(message, tag = "237")]
+        RefreshMatViewStmt(super::RefreshMatViewStmt),
+        #[prost(message, tag = "238")]
+        CheckPointStmt(super::CheckPointStmt),
+        #[prost(message, tag = "239")]
+        DiscardStmt(super::DiscardStmt),
+        #[prost(message, tag = "240")]
+        LockStmt(super::LockStmt),
+        #[prost(message, tag = "241")]
+        ConstraintsSetStmt(super::ConstraintsSetStmt),
+        #[prost(message, tag = "242")]
+        ReindexStmt(super::ReindexStmt),
+        #[prost(message, tag = "243")]
+        CreateConversionStmt(super::CreateConversionStmt),
+        #[prost(message, tag = "244")]
+        CreateCastStmt(super::CreateCastStmt),
+        #[prost(message, tag = "245")]
+        CreateTransformStmt(super::CreateTransformStmt),
+        #[prost(message, tag = "246")]
+        PrepareStmt(::prost::alloc::boxed::Box<super::PrepareStmt>),
+        #[prost(message, tag = "247")]
+        ExecuteStmt(super::ExecuteStmt),
+        #[prost(message, tag = "248")]
+        DeallocateStmt(super::DeallocateStmt),
+        #[prost(message, tag = "249")]
+        DropOwnedStmt(super::DropOwnedStmt),
+        #[prost(message, tag = "250")]
+        ReassignOwnedStmt(super::ReassignOwnedStmt),
+        #[prost(message, tag = "251")]
+        AlterTsdictionaryStmt(super::AlterTsDictionaryStmt),
+        #[prost(message, tag = "252")]
+        AlterTsconfigurationStmt(super::AlterTsConfigurationStmt),
+        #[prost(message, tag = "253")]
+        PublicationTable(::prost::alloc::boxed::Box<super::PublicationTable>),
+        #[prost(message, tag = "254")]
+        PublicationObjSpec(::prost::alloc::boxed::Box<super::PublicationObjSpec>),
+        #[prost(message, tag = "255")]
+        CreatePublicationStmt(super::CreatePublicationStmt),
+        #[prost(message, tag = "256")]
+        AlterPublicationStmt(super::AlterPublicationStmt),
+        #[prost(message, tag = "257")]
+        CreateSubscriptionStmt(super::CreateSubscriptionStmt),
+        #[prost(message, tag = "258")]
+        AlterSubscriptionStmt(super::AlterSubscriptionStmt),
+        #[prost(message, tag = "259")]
+        DropSubscriptionStmt(super::DropSubscriptionStmt),
+        #[prost(message, tag = "260")]
+        Integer(super::Integer),
+        #[prost(message, tag = "261")]
+        Float(super::Float),
+        #[prost(message, tag = "262")]
+        Boolean(super::Boolean),
+        #[prost(message, tag = "263")]
+        String(super::String),
+        #[prost(message, tag = "264")]
+        BitString(super::BitString),
+        #[prost(message, tag = "265")]
+        List(super::List),
+        #[prost(message, tag = "266")]
+        IntList(super::IntList),
+        #[prost(message, tag = "267")]
+        OidList(super::OidList),
+        #[prost(message, tag = "268")]
+        AConst(super::AConst),
+    }
+}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct Integer {
+    /// machine integer
+    #[prost(int32, tag = "1")]
+    pub ival: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Float {
+    /// string
+    #[prost(string, tag = "1")]
+    pub fval: ::prost::alloc::string::String,
+}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct Boolean {
+    #[prost(bool, tag = "1")]
+    pub boolval: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct String {
+    /// string
+    #[prost(string, tag = "1")]
+    pub sval: ::prost::alloc::string::String,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BitString {
+    /// string
+    #[prost(string, tag = "1")]
+    pub bsval: ::prost::alloc::string::String,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct List {
+    #[prost(message, repeated, tag = "1")]
"1")] + pub items: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OidList { + #[prost(message, repeated, tag = "1")] + pub items: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct IntList { + #[prost(message, repeated, tag = "1")] + pub items: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AConst { + #[prost(bool, tag = "10")] + pub isnull: bool, + #[prost(int32, tag = "11")] + pub location: i32, + #[prost(oneof = "a_const::Val", tags = "1, 2, 3, 4, 5")] + pub val: ::core::option::Option, +} +/// Nested message and enum types in `A_Const`. +pub mod a_const { + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Val { + #[prost(message, tag = "1")] + Ival(super::Integer), + #[prost(message, tag = "2")] + Fval(super::Float), + #[prost(message, tag = "3")] + Boolval(super::Boolean), + #[prost(message, tag = "4")] + Sval(super::String), + #[prost(message, tag = "5")] + Bsval(super::BitString), + } +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Alias { + #[prost(string, tag = "1")] + pub aliasname: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "2")] + pub colnames: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RangeVar { + #[prost(string, tag = "1")] + pub catalogname: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub schemaname: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub relname: ::prost::alloc::string::String, + #[prost(bool, tag = "4")] + pub inh: bool, + #[prost(string, tag = "5")] + pub relpersistence: ::prost::alloc::string::String, + #[prost(message, optional, tag = "6")] + pub alias: ::core::option::Option, + #[prost(int32, tag = "7")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TableFunc { + #[prost(enumeration = "TableFuncType", tag = "1")] + pub functype: i32, + #[prost(message, repeated, tag = "2")] + pub ns_uris: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "3")] + pub ns_names: ::prost::alloc::vec::Vec, + #[prost(message, optional, boxed, tag = "4")] + pub docexpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "5")] + pub rowexpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, repeated, tag = "6")] + pub colnames: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "7")] + pub coltypes: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "8")] + pub coltypmods: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "9")] + pub colcollations: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "10")] + pub colexprs: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "11")] + pub coldefexprs: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "12")] + pub colvalexprs: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "13")] + pub passingvalexprs: ::prost::alloc::vec::Vec, + #[prost(uint64, repeated, tag = "14")] + pub notnulls: ::prost::alloc::vec::Vec, + #[prost(message, optional, boxed, tag = "15")] + pub plan: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(int32, tag = "16")] + pub ordinalitycol: i32, + #[prost(int32, tag = "17")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct IntoClause { + #[prost(message, optional, tag = "1")] + pub rel: ::core::option::Option, + #[prost(message, repeated, tag = 
"2")] + pub col_names: ::prost::alloc::vec::Vec, + #[prost(string, tag = "3")] + pub access_method: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "4")] + pub options: ::prost::alloc::vec::Vec, + #[prost(enumeration = "OnCommitAction", tag = "5")] + pub on_commit: i32, + #[prost(string, tag = "6")] + pub table_space_name: ::prost::alloc::string::String, + #[prost(message, optional, boxed, tag = "7")] + pub view_query: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(bool, tag = "8")] + pub skip_data: bool, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Var { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(int32, tag = "2")] + pub varno: i32, + #[prost(int32, tag = "3")] + pub varattno: i32, + #[prost(uint32, tag = "4")] + pub vartype: u32, + #[prost(int32, tag = "5")] + pub vartypmod: i32, + #[prost(uint32, tag = "6")] + pub varcollid: u32, + #[prost(uint64, repeated, tag = "7")] + pub varnullingrels: ::prost::alloc::vec::Vec, + #[prost(uint32, tag = "8")] + pub varlevelsup: u32, + #[prost(int32, tag = "9")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Param { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(enumeration = "ParamKind", tag = "2")] + pub paramkind: i32, + #[prost(int32, tag = "3")] + pub paramid: i32, + #[prost(uint32, tag = "4")] + pub paramtype: u32, + #[prost(int32, tag = "5")] + pub paramtypmod: i32, + #[prost(uint32, tag = "6")] + pub paramcollid: u32, + #[prost(int32, tag = "7")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Aggref { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(uint32, tag = "2")] + pub aggfnoid: u32, + #[prost(uint32, tag = "3")] + pub aggtype: u32, + #[prost(uint32, tag = "4")] + pub aggcollid: u32, + #[prost(uint32, tag = "5")] + pub inputcollid: u32, + #[prost(message, repeated, tag = "6")] + pub aggargtypes: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "7")] + pub aggdirectargs: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "8")] + pub args: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "9")] + pub aggorder: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "10")] + pub aggdistinct: ::prost::alloc::vec::Vec, + #[prost(message, optional, boxed, tag = "11")] + pub aggfilter: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(bool, tag = "12")] + pub aggstar: bool, + #[prost(bool, tag = "13")] + pub aggvariadic: bool, + #[prost(string, tag = "14")] + pub aggkind: ::prost::alloc::string::String, + #[prost(uint32, tag = "15")] + pub agglevelsup: u32, + #[prost(enumeration = "AggSplit", tag = "16")] + pub aggsplit: i32, + #[prost(int32, tag = "17")] + pub aggno: i32, + #[prost(int32, tag = "18")] + pub aggtransno: i32, + #[prost(int32, tag = "19")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GroupingFunc { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, repeated, tag = "2")] + pub args: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "3")] + pub refs: ::prost::alloc::vec::Vec, + #[prost(uint32, tag = "4")] + pub agglevelsup: u32, + #[prost(int32, tag = "5")] + pub location: i32, +} +#[derive(Clone, PartialEq, 
+pub struct WindowFunc {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(uint32, tag = "2")]
+    pub winfnoid: u32,
+    #[prost(uint32, tag = "3")]
+    pub wintype: u32,
+    #[prost(uint32, tag = "4")]
+    pub wincollid: u32,
+    #[prost(uint32, tag = "5")]
+    pub inputcollid: u32,
+    #[prost(message, repeated, tag = "6")]
+    pub args: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "7")]
+    pub aggfilter: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "8")]
+    pub run_condition: ::prost::alloc::vec::Vec<Node>,
+    #[prost(uint32, tag = "9")]
+    pub winref: u32,
+    #[prost(bool, tag = "10")]
+    pub winstar: bool,
+    #[prost(bool, tag = "11")]
+    pub winagg: bool,
+    #[prost(int32, tag = "12")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WindowFuncRunCondition {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(uint32, tag = "2")]
+    pub opno: u32,
+    #[prost(uint32, tag = "3")]
+    pub inputcollid: u32,
+    #[prost(bool, tag = "4")]
+    pub wfunc_left: bool,
+    #[prost(message, optional, boxed, tag = "5")]
+    pub arg: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct MergeSupportFunc {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(uint32, tag = "2")]
+    pub msftype: u32,
+    #[prost(uint32, tag = "3")]
+    pub msfcollid: u32,
+    #[prost(int32, tag = "4")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SubscriptingRef {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(uint32, tag = "2")]
+    pub refcontainertype: u32,
+    #[prost(uint32, tag = "3")]
+    pub refelemtype: u32,
+    #[prost(uint32, tag = "4")]
+    pub refrestype: u32,
+    #[prost(int32, tag = "5")]
+    pub reftypmod: i32,
+    #[prost(uint32, tag = "6")]
+    pub refcollid: u32,
+    #[prost(message, repeated, tag = "7")]
+    pub refupperindexpr: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "8")]
+    pub reflowerindexpr: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "9")]
+    pub refexpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "10")]
+    pub refassgnexpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct FuncExpr {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(uint32, tag = "2")]
+    pub funcid: u32,
+    #[prost(uint32, tag = "3")]
+    pub funcresulttype: u32,
+    #[prost(bool, tag = "4")]
+    pub funcretset: bool,
+    #[prost(bool, tag = "5")]
+    pub funcvariadic: bool,
+    #[prost(enumeration = "CoercionForm", tag = "6")]
+    pub funcformat: i32,
+    #[prost(uint32, tag = "7")]
+    pub funccollid: u32,
+    #[prost(uint32, tag = "8")]
+    pub inputcollid: u32,
+    #[prost(message, repeated, tag = "9")]
+    pub args: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "10")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NamedArgExpr {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub arg: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(string, tag = "3")]
tag = "3")] + pub name: ::prost::alloc::string::String, + #[prost(int32, tag = "4")] + pub argnumber: i32, + #[prost(int32, tag = "5")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OpExpr { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(uint32, tag = "2")] + pub opno: u32, + #[prost(uint32, tag = "3")] + pub opresulttype: u32, + #[prost(bool, tag = "4")] + pub opretset: bool, + #[prost(uint32, tag = "5")] + pub opcollid: u32, + #[prost(uint32, tag = "6")] + pub inputcollid: u32, + #[prost(message, repeated, tag = "7")] + pub args: ::prost::alloc::vec::Vec, + #[prost(int32, tag = "8")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DistinctExpr { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(uint32, tag = "2")] + pub opno: u32, + #[prost(uint32, tag = "3")] + pub opresulttype: u32, + #[prost(bool, tag = "4")] + pub opretset: bool, + #[prost(uint32, tag = "5")] + pub opcollid: u32, + #[prost(uint32, tag = "6")] + pub inputcollid: u32, + #[prost(message, repeated, tag = "7")] + pub args: ::prost::alloc::vec::Vec, + #[prost(int32, tag = "8")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NullIfExpr { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(uint32, tag = "2")] + pub opno: u32, + #[prost(uint32, tag = "3")] + pub opresulttype: u32, + #[prost(bool, tag = "4")] + pub opretset: bool, + #[prost(uint32, tag = "5")] + pub opcollid: u32, + #[prost(uint32, tag = "6")] + pub inputcollid: u32, + #[prost(message, repeated, tag = "7")] + pub args: ::prost::alloc::vec::Vec, + #[prost(int32, tag = "8")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ScalarArrayOpExpr { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(uint32, tag = "2")] + pub opno: u32, + #[prost(bool, tag = "3")] + pub use_or: bool, + #[prost(uint32, tag = "4")] + pub inputcollid: u32, + #[prost(message, repeated, tag = "5")] + pub args: ::prost::alloc::vec::Vec, + #[prost(int32, tag = "6")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BoolExpr { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(enumeration = "BoolExprType", tag = "2")] + pub boolop: i32, + #[prost(message, repeated, tag = "3")] + pub args: ::prost::alloc::vec::Vec, + #[prost(int32, tag = "4")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SubLink { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(enumeration = "SubLinkType", tag = "2")] + pub sub_link_type: i32, + #[prost(int32, tag = "3")] + pub sub_link_id: i32, + #[prost(message, optional, boxed, tag = "4")] + pub testexpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, repeated, tag = "5")] + pub oper_name: ::prost::alloc::vec::Vec, + #[prost(message, optional, boxed, tag = "6")] + pub subselect: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(int32, tag = "7")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SubPlan { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: 
+    #[prost(enumeration = "SubLinkType", tag = "2")]
+    pub sub_link_type: i32,
+    #[prost(message, optional, boxed, tag = "3")]
+    pub testexpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "4")]
+    pub param_ids: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "5")]
+    pub plan_id: i32,
+    #[prost(string, tag = "6")]
+    pub plan_name: ::prost::alloc::string::String,
+    #[prost(uint32, tag = "7")]
+    pub first_col_type: u32,
+    #[prost(int32, tag = "8")]
+    pub first_col_typmod: i32,
+    #[prost(uint32, tag = "9")]
+    pub first_col_collation: u32,
+    #[prost(bool, tag = "10")]
+    pub use_hash_table: bool,
+    #[prost(bool, tag = "11")]
+    pub unknown_eq_false: bool,
+    #[prost(bool, tag = "12")]
+    pub parallel_safe: bool,
+    #[prost(message, repeated, tag = "13")]
+    pub set_param: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "14")]
+    pub par_param: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "15")]
+    pub args: ::prost::alloc::vec::Vec<Node>,
+    #[prost(double, tag = "16")]
+    pub startup_cost: f64,
+    #[prost(double, tag = "17")]
+    pub per_call_cost: f64,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlternativeSubPlan {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "2")]
+    pub subplans: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct FieldSelect {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub arg: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(int32, tag = "3")]
+    pub fieldnum: i32,
+    #[prost(uint32, tag = "4")]
+    pub resulttype: u32,
+    #[prost(int32, tag = "5")]
+    pub resulttypmod: i32,
+    #[prost(uint32, tag = "6")]
+    pub resultcollid: u32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct FieldStore {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub arg: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "3")]
+    pub newvals: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "4")]
+    pub fieldnums: ::prost::alloc::vec::Vec<Node>,
+    #[prost(uint32, tag = "5")]
+    pub resulttype: u32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RelabelType {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub arg: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(uint32, tag = "3")]
+    pub resulttype: u32,
+    #[prost(int32, tag = "4")]
+    pub resulttypmod: i32,
+    #[prost(uint32, tag = "5")]
+    pub resultcollid: u32,
+    #[prost(enumeration = "CoercionForm", tag = "6")]
+    pub relabelformat: i32,
+    #[prost(int32, tag = "7")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CoerceViaIo {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub arg: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(uint32, tag = "3")]
+    pub resulttype: u32,
+    #[prost(uint32, tag = "4")]
+    pub resultcollid: u32,
+    #[prost(enumeration = "CoercionForm", tag = "5")]
+    pub coerceformat: i32,
#[prost(int32, tag = "6")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ArrayCoerceExpr { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "2")] + pub arg: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "3")] + pub elemexpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(uint32, tag = "4")] + pub resulttype: u32, + #[prost(int32, tag = "5")] + pub resulttypmod: i32, + #[prost(uint32, tag = "6")] + pub resultcollid: u32, + #[prost(enumeration = "CoercionForm", tag = "7")] + pub coerceformat: i32, + #[prost(int32, tag = "8")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ConvertRowtypeExpr { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "2")] + pub arg: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(uint32, tag = "3")] + pub resulttype: u32, + #[prost(enumeration = "CoercionForm", tag = "4")] + pub convertformat: i32, + #[prost(int32, tag = "5")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CollateExpr { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "2")] + pub arg: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(uint32, tag = "3")] + pub coll_oid: u32, + #[prost(int32, tag = "4")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CaseExpr { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(uint32, tag = "2")] + pub casetype: u32, + #[prost(uint32, tag = "3")] + pub casecollid: u32, + #[prost(message, optional, boxed, tag = "4")] + pub arg: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, repeated, tag = "5")] + pub args: ::prost::alloc::vec::Vec, + #[prost(message, optional, boxed, tag = "6")] + pub defresult: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(int32, tag = "7")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CaseWhen { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "2")] + pub expr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "3")] + pub result: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(int32, tag = "4")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CaseTestExpr { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(uint32, tag = "2")] + pub type_id: u32, + #[prost(int32, tag = "3")] + pub type_mod: i32, + #[prost(uint32, tag = "4")] + pub collation: u32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ArrayExpr { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(uint32, tag = "2")] + pub array_typeid: u32, + #[prost(uint32, tag = "3")] + pub array_collid: u32, + #[prost(uint32, tag = "4")] + pub element_typeid: u32, + #[prost(message, repeated, tag = "5")] + pub elements: ::prost::alloc::vec::Vec, + #[prost(bool, tag = "6")] + pub 
+    pub multidims: bool,
+    #[prost(int32, tag = "7")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RowExpr {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "2")]
+    pub args: ::prost::alloc::vec::Vec<Node>,
+    #[prost(uint32, tag = "3")]
+    pub row_typeid: u32,
+    #[prost(enumeration = "CoercionForm", tag = "4")]
+    pub row_format: i32,
+    #[prost(message, repeated, tag = "5")]
+    pub colnames: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "6")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RowCompareExpr {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(enumeration = "RowCompareType", tag = "2")]
+    pub rctype: i32,
+    #[prost(message, repeated, tag = "3")]
+    pub opnos: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "4")]
+    pub opfamilies: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "5")]
+    pub inputcollids: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "6")]
+    pub largs: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "7")]
+    pub rargs: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CoalesceExpr {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(uint32, tag = "2")]
+    pub coalescetype: u32,
+    #[prost(uint32, tag = "3")]
+    pub coalescecollid: u32,
+    #[prost(message, repeated, tag = "4")]
+    pub args: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "5")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct MinMaxExpr {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(uint32, tag = "2")]
+    pub minmaxtype: u32,
+    #[prost(uint32, tag = "3")]
+    pub minmaxcollid: u32,
+    #[prost(uint32, tag = "4")]
+    pub inputcollid: u32,
+    #[prost(enumeration = "MinMaxOp", tag = "5")]
+    pub op: i32,
+    #[prost(message, repeated, tag = "6")]
+    pub args: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "7")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SqlValueFunction {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(enumeration = "SqlValueFunctionOp", tag = "2")]
+    pub op: i32,
+    #[prost(uint32, tag = "3")]
+    pub r#type: u32,
+    #[prost(int32, tag = "4")]
+    pub typmod: i32,
+    #[prost(int32, tag = "5")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct XmlExpr {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(enumeration = "XmlExprOp", tag = "2")]
+    pub op: i32,
+    #[prost(string, tag = "3")]
+    pub name: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "4")]
+    pub named_args: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "5")]
+    pub arg_names: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "6")]
+    pub args: ::prost::alloc::vec::Vec<Node>,
+    #[prost(enumeration = "XmlOptionType", tag = "7")]
+    pub xmloption: i32,
+    #[prost(bool, tag = "8")]
+    pub indent: bool,
+    #[prost(uint32, tag = "9")]
+    pub r#type: u32,
+    #[prost(int32, tag = "10")]
+    pub typmod: i32,
+    #[prost(int32, tag = "11")]
+    pub location: i32,
+}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct JsonFormat {
+    #[prost(enumeration = "JsonFormatType", tag = "1")]
+    pub format_type: i32,
+    #[prost(enumeration = "JsonEncoding", tag = "2")]
+    pub encoding: i32,
+    #[prost(int32, tag = "3")]
+    pub location: i32,
+}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct JsonReturning {
+    #[prost(message, optional, tag = "1")]
+    pub format: ::core::option::Option<JsonFormat>,
+    #[prost(uint32, tag = "2")]
+    pub typid: u32,
+    #[prost(int32, tag = "3")]
+    pub typmod: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct JsonValueExpr {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub raw_expr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub formatted_expr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, tag = "3")]
+    pub format: ::core::option::Option<JsonFormat>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct JsonConstructorExpr {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(enumeration = "JsonConstructorType", tag = "2")]
+    pub r#type: i32,
+    #[prost(message, repeated, tag = "3")]
+    pub args: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub func: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "5")]
+    pub coercion: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, tag = "6")]
+    pub returning: ::core::option::Option<JsonReturning>,
+    #[prost(bool, tag = "7")]
+    pub absent_on_null: bool,
+    #[prost(bool, tag = "8")]
+    pub unique: bool,
+    #[prost(int32, tag = "9")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct JsonIsPredicate {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub expr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, tag = "2")]
+    pub format: ::core::option::Option<JsonFormat>,
+    #[prost(enumeration = "JsonValueType", tag = "3")]
+    pub item_type: i32,
+    #[prost(bool, tag = "4")]
+    pub unique_keys: bool,
+    #[prost(int32, tag = "5")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct JsonBehavior {
+    #[prost(enumeration = "JsonBehaviorType", tag = "1")]
+    pub btype: i32,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub expr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(bool, tag = "3")]
+    pub coerce: bool,
+    #[prost(int32, tag = "4")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct JsonExpr {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(enumeration = "JsonExprOp", tag = "2")]
+    pub op: i32,
+    #[prost(string, tag = "3")]
+    pub column_name: ::prost::alloc::string::String,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub formatted_expr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, tag = "5")]
+    pub format: ::core::option::Option<JsonFormat>,
+    #[prost(message, optional, boxed, tag = "6")]
+    pub path_spec: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, tag = "7")]
+    pub returning: ::core::option::Option<JsonReturning>,
+    #[prost(message, repeated, tag = "8")]
+    pub passing_names: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "9")]
+    pub passing_values: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "10")]
+    pub on_empty: ::core::option::Option<::prost::alloc::boxed::Box<JsonBehavior>>,
+    #[prost(message, optional, boxed, tag = "11")]
tag = "11")] + pub on_error: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(bool, tag = "12")] + pub use_io_coercion: bool, + #[prost(bool, tag = "13")] + pub use_json_coercion: bool, + #[prost(enumeration = "JsonWrapper", tag = "14")] + pub wrapper: i32, + #[prost(bool, tag = "15")] + pub omit_quotes: bool, + #[prost(uint32, tag = "16")] + pub collation: u32, + #[prost(int32, tag = "17")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JsonTablePath { + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JsonTablePathScan { + #[prost(message, optional, boxed, tag = "1")] + pub plan: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, tag = "2")] + pub path: ::core::option::Option, + #[prost(bool, tag = "3")] + pub error_on_error: bool, + #[prost(message, optional, boxed, tag = "4")] + pub child: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(int32, tag = "5")] + pub col_min: i32, + #[prost(int32, tag = "6")] + pub col_max: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JsonTableSiblingJoin { + #[prost(message, optional, boxed, tag = "1")] + pub plan: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "2")] + pub lplan: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "3")] + pub rplan: ::core::option::Option<::prost::alloc::boxed::Box>, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NullTest { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "2")] + pub arg: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(enumeration = "NullTestType", tag = "3")] + pub nulltesttype: i32, + #[prost(bool, tag = "4")] + pub argisrow: bool, + #[prost(int32, tag = "5")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BooleanTest { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "2")] + pub arg: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(enumeration = "BoolTestType", tag = "3")] + pub booltesttype: i32, + #[prost(int32, tag = "4")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MergeAction { + #[prost(enumeration = "MergeMatchKind", tag = "1")] + pub match_kind: i32, + #[prost(enumeration = "CmdType", tag = "2")] + pub command_type: i32, + #[prost(enumeration = "OverridingKind", tag = "3")] + pub r#override: i32, + #[prost(message, optional, boxed, tag = "4")] + pub qual: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, repeated, tag = "5")] + pub target_list: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "6")] + pub update_colnos: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CoerceToDomain { + #[prost(message, optional, boxed, tag = "1")] + pub xpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "2")] + pub arg: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(uint32, tag = "3")] + pub resulttype: u32, + #[prost(int32, tag = "4")] + pub resulttypmod: i32, + #[prost(uint32, tag = "5")] + pub resultcollid: u32, + #[prost(enumeration = "CoercionForm", tag = "6")] + pub 
+    pub coercionformat: i32,
+    #[prost(int32, tag = "7")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CoerceToDomainValue {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(uint32, tag = "2")]
+    pub type_id: u32,
+    #[prost(int32, tag = "3")]
+    pub type_mod: i32,
+    #[prost(uint32, tag = "4")]
+    pub collation: u32,
+    #[prost(int32, tag = "5")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SetToDefault {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(uint32, tag = "2")]
+    pub type_id: u32,
+    #[prost(int32, tag = "3")]
+    pub type_mod: i32,
+    #[prost(uint32, tag = "4")]
+    pub collation: u32,
+    #[prost(int32, tag = "5")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CurrentOfExpr {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(uint32, tag = "2")]
+    pub cvarno: u32,
+    #[prost(string, tag = "3")]
+    pub cursor_name: ::prost::alloc::string::String,
+    #[prost(int32, tag = "4")]
+    pub cursor_param: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NextValueExpr {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(uint32, tag = "2")]
+    pub seqid: u32,
+    #[prost(uint32, tag = "3")]
+    pub type_id: u32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct InferenceElem {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub expr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(uint32, tag = "3")]
+    pub infercollid: u32,
+    #[prost(uint32, tag = "4")]
+    pub inferopclass: u32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TargetEntry {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub xpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub expr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(int32, tag = "3")]
+    pub resno: i32,
+    #[prost(string, tag = "4")]
+    pub resname: ::prost::alloc::string::String,
+    #[prost(uint32, tag = "5")]
+    pub ressortgroupref: u32,
+    #[prost(uint32, tag = "6")]
+    pub resorigtbl: u32,
+    #[prost(int32, tag = "7")]
+    pub resorigcol: i32,
+    #[prost(bool, tag = "8")]
+    pub resjunk: bool,
+}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct RangeTblRef {
+    #[prost(int32, tag = "1")]
+    pub rtindex: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct JoinExpr {
+    #[prost(enumeration = "JoinType", tag = "1")]
+    pub jointype: i32,
+    #[prost(bool, tag = "2")]
+    pub is_natural: bool,
+    #[prost(message, optional, boxed, tag = "3")]
+    pub larg: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub rarg: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "5")]
+    pub using_clause: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, tag = "6")]
+    pub join_using_alias: ::core::option::Option<Alias>,
+    #[prost(message, optional, boxed, tag = "7")]
+    pub quals: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, tag = "8")]
+    pub alias: ::core::option::Option<Alias>,
+    #[prost(int32, tag = "9")]
+    pub rtindex: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct FromExpr {
+    #[prost(message, repeated, tag = "1")]
+    pub fromlist: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub quals: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct OnConflictExpr {
+    #[prost(enumeration = "OnConflictAction", tag = "1")]
+    pub action: i32,
+    #[prost(message, repeated, tag = "2")]
+    pub arbiter_elems: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "3")]
+    pub arbiter_where: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(uint32, tag = "4")]
+    pub constraint: u32,
+    #[prost(message, repeated, tag = "5")]
+    pub on_conflict_set: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "6")]
+    pub on_conflict_where: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(int32, tag = "7")]
+    pub excl_rel_index: i32,
+    #[prost(message, repeated, tag = "8")]
+    pub excl_rel_tlist: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Query {
+    #[prost(enumeration = "CmdType", tag = "1")]
+    pub command_type: i32,
+    #[prost(enumeration = "QuerySource", tag = "2")]
+    pub query_source: i32,
+    #[prost(bool, tag = "3")]
+    pub can_set_tag: bool,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub utility_stmt: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(int32, tag = "5")]
+    pub result_relation: i32,
+    #[prost(bool, tag = "6")]
+    pub has_aggs: bool,
+    #[prost(bool, tag = "7")]
+    pub has_window_funcs: bool,
+    #[prost(bool, tag = "8")]
+    pub has_target_srfs: bool,
+    #[prost(bool, tag = "9")]
+    pub has_sub_links: bool,
+    #[prost(bool, tag = "10")]
+    pub has_distinct_on: bool,
+    #[prost(bool, tag = "11")]
+    pub has_recursive: bool,
+    #[prost(bool, tag = "12")]
+    pub has_modifying_cte: bool,
+    #[prost(bool, tag = "13")]
+    pub has_for_update: bool,
+    #[prost(bool, tag = "14")]
+    pub has_row_security: bool,
+    #[prost(bool, tag = "15")]
+    pub is_return: bool,
+    #[prost(message, repeated, tag = "16")]
+    pub cte_list: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "17")]
+    pub rtable: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "18")]
+    pub rteperminfos: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "19")]
+    pub jointree: ::core::option::Option<::prost::alloc::boxed::Box<FromExpr>>,
+    #[prost(message, repeated, tag = "20")]
+    pub merge_action_list: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "21")]
+    pub merge_target_relation: i32,
+    #[prost(message, optional, boxed, tag = "22")]
+    pub merge_join_condition: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "23")]
+    pub target_list: ::prost::alloc::vec::Vec<Node>,
+    #[prost(enumeration = "OverridingKind", tag = "24")]
+    pub r#override: i32,
+    #[prost(message, optional, boxed, tag = "25")]
+    pub on_conflict: ::core::option::Option<::prost::alloc::boxed::Box<OnConflictExpr>>,
+    #[prost(message, repeated, tag = "26")]
+    pub returning_list: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "27")]
+    pub group_clause: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "28")]
+    pub group_distinct: bool,
+    #[prost(message, repeated, tag = "29")]
+    pub grouping_sets: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "30")]
+    pub having_qual: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "31")]
+    pub window_clause: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "32")]
+    pub distinct_clause: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "33")]
+    pub sort_clause: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "34")]
+    pub limit_offset: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "35")]
+    pub limit_count: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(enumeration = "LimitOption", tag = "36")]
+    pub limit_option: i32,
+    #[prost(message, repeated, tag = "37")]
+    pub row_marks: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "38")]
+    pub set_operations: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "39")]
+    pub constraint_deps: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "40")]
+    pub with_check_options: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "41")]
+    pub stmt_location: i32,
+    #[prost(int32, tag = "42")]
+    pub stmt_len: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TypeName {
+    #[prost(message, repeated, tag = "1")]
+    pub names: ::prost::alloc::vec::Vec<Node>,
+    #[prost(uint32, tag = "2")]
+    pub type_oid: u32,
+    #[prost(bool, tag = "3")]
+    pub setof: bool,
+    #[prost(bool, tag = "4")]
+    pub pct_type: bool,
+    #[prost(message, repeated, tag = "5")]
+    pub typmods: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "6")]
+    pub typemod: i32,
+    #[prost(message, repeated, tag = "7")]
+    pub array_bounds: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "8")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ColumnRef {
+    #[prost(message, repeated, tag = "1")]
+    pub fields: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "2")]
+    pub location: i32,
+}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct ParamRef {
+    #[prost(int32, tag = "1")]
+    pub number: i32,
+    #[prost(int32, tag = "2")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AExpr {
+    #[prost(enumeration = "AExprKind", tag = "1")]
+    pub kind: i32,
+    #[prost(message, repeated, tag = "2")]
+    pub name: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "3")]
+    pub lexpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub rexpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(int32, tag = "5")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TypeCast {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub arg: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, tag = "2")]
+    pub type_name: ::core::option::Option<TypeName>,
+    #[prost(int32, tag = "3")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CollateClause {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub arg: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "2")]
+    pub collname: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "3")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RoleSpec {
+    #[prost(enumeration = "RoleSpecType", tag = "1")]
+    pub roletype: i32,
+    #[prost(string, tag = "2")]
+    pub rolename: ::prost::alloc::string::String,
+    #[prost(int32, tag = "3")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct FuncCall {
+    #[prost(message, repeated, tag = "1")]
+    pub funcname: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "2")]
+    pub args: ::prost::alloc::vec::Vec<Node>,
#[prost(message, repeated, tag = "3")] + pub agg_order: ::prost::alloc::vec::Vec, + #[prost(message, optional, boxed, tag = "4")] + pub agg_filter: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "5")] + pub over: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(bool, tag = "6")] + pub agg_within_group: bool, + #[prost(bool, tag = "7")] + pub agg_star: bool, + #[prost(bool, tag = "8")] + pub agg_distinct: bool, + #[prost(bool, tag = "9")] + pub func_variadic: bool, + #[prost(enumeration = "CoercionForm", tag = "10")] + pub funcformat: i32, + #[prost(int32, tag = "11")] + pub location: i32, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct AStar {} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AIndices { + #[prost(bool, tag = "1")] + pub is_slice: bool, + #[prost(message, optional, boxed, tag = "2")] + pub lidx: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "3")] + pub uidx: ::core::option::Option<::prost::alloc::boxed::Box>, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AIndirection { + #[prost(message, optional, boxed, tag = "1")] + pub arg: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, repeated, tag = "2")] + pub indirection: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AArrayExpr { + #[prost(message, repeated, tag = "1")] + pub elements: ::prost::alloc::vec::Vec, + #[prost(int32, tag = "2")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResTarget { + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "2")] + pub indirection: ::prost::alloc::vec::Vec, + #[prost(message, optional, boxed, tag = "3")] + pub val: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(int32, tag = "4")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MultiAssignRef { + #[prost(message, optional, boxed, tag = "1")] + pub source: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(int32, tag = "2")] + pub colno: i32, + #[prost(int32, tag = "3")] + pub ncolumns: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SortBy { + #[prost(message, optional, boxed, tag = "1")] + pub node: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(enumeration = "SortByDir", tag = "2")] + pub sortby_dir: i32, + #[prost(enumeration = "SortByNulls", tag = "3")] + pub sortby_nulls: i32, + #[prost(message, repeated, tag = "4")] + pub use_op: ::prost::alloc::vec::Vec, + #[prost(int32, tag = "5")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct WindowDef { + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub refname: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "3")] + pub partition_clause: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "4")] + pub order_clause: ::prost::alloc::vec::Vec, + #[prost(int32, tag = "5")] + pub frame_options: i32, + #[prost(message, optional, boxed, tag = "6")] + pub start_offset: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "7")] + pub end_offset: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(int32, tag = "8")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RangeSubselect { + #[prost(bool, tag = 
"1")] + pub lateral: bool, + #[prost(message, optional, boxed, tag = "2")] + pub subquery: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, tag = "3")] + pub alias: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RangeFunction { + #[prost(bool, tag = "1")] + pub lateral: bool, + #[prost(bool, tag = "2")] + pub ordinality: bool, + #[prost(bool, tag = "3")] + pub is_rowsfrom: bool, + #[prost(message, repeated, tag = "4")] + pub functions: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "5")] + pub alias: ::core::option::Option, + #[prost(message, repeated, tag = "6")] + pub coldeflist: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RangeTableFunc { + #[prost(bool, tag = "1")] + pub lateral: bool, + #[prost(message, optional, boxed, tag = "2")] + pub docexpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "3")] + pub rowexpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, repeated, tag = "4")] + pub namespaces: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "5")] + pub columns: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "6")] + pub alias: ::core::option::Option, + #[prost(int32, tag = "7")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RangeTableFuncCol { + #[prost(string, tag = "1")] + pub colname: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub type_name: ::core::option::Option, + #[prost(bool, tag = "3")] + pub for_ordinality: bool, + #[prost(bool, tag = "4")] + pub is_not_null: bool, + #[prost(message, optional, boxed, tag = "5")] + pub colexpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "6")] + pub coldefexpr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(int32, tag = "7")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RangeTableSample { + #[prost(message, optional, boxed, tag = "1")] + pub relation: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, repeated, tag = "2")] + pub method: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "3")] + pub args: ::prost::alloc::vec::Vec, + #[prost(message, optional, boxed, tag = "4")] + pub repeatable: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(int32, tag = "5")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ColumnDef { + #[prost(string, tag = "1")] + pub colname: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub type_name: ::core::option::Option, + #[prost(string, tag = "3")] + pub compression: ::prost::alloc::string::String, + #[prost(int32, tag = "4")] + pub inhcount: i32, + #[prost(bool, tag = "5")] + pub is_local: bool, + #[prost(bool, tag = "6")] + pub is_not_null: bool, + #[prost(bool, tag = "7")] + pub is_from_type: bool, + #[prost(string, tag = "8")] + pub storage: ::prost::alloc::string::String, + #[prost(string, tag = "9")] + pub storage_name: ::prost::alloc::string::String, + #[prost(message, optional, boxed, tag = "10")] + pub raw_default: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "11")] + pub cooked_default: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(string, tag = "12")] + pub identity: ::prost::alloc::string::String, + #[prost(message, optional, tag = "13")] 
+    pub identity_sequence: ::core::option::Option<RangeVar>,
+    #[prost(string, tag = "14")]
+    pub generated: ::prost::alloc::string::String,
+    #[prost(message, optional, boxed, tag = "15")]
+    pub coll_clause: ::core::option::Option<::prost::alloc::boxed::Box<CollateClause>>,
+    #[prost(uint32, tag = "16")]
+    pub coll_oid: u32,
+    #[prost(message, repeated, tag = "17")]
+    pub constraints: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "18")]
+    pub fdwoptions: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "19")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TableLikeClause {
+    #[prost(message, optional, tag = "1")]
+    pub relation: ::core::option::Option<RangeVar>,
+    #[prost(uint32, tag = "2")]
+    pub options: u32,
+    #[prost(uint32, tag = "3")]
+    pub relation_oid: u32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct IndexElem {
+    #[prost(string, tag = "1")]
+    pub name: ::prost::alloc::string::String,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub expr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(string, tag = "3")]
+    pub indexcolname: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "4")]
+    pub collation: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "5")]
+    pub opclass: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "6")]
+    pub opclassopts: ::prost::alloc::vec::Vec<Node>,
+    #[prost(enumeration = "SortByDir", tag = "7")]
+    pub ordering: i32,
+    #[prost(enumeration = "SortByNulls", tag = "8")]
+    pub nulls_ordering: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DefElem {
+    #[prost(string, tag = "1")]
+    pub defnamespace: ::prost::alloc::string::String,
+    #[prost(string, tag = "2")]
+    pub defname: ::prost::alloc::string::String,
+    #[prost(message, optional, boxed, tag = "3")]
+    pub arg: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(enumeration = "DefElemAction", tag = "4")]
+    pub defaction: i32,
+    #[prost(int32, tag = "5")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LockingClause {
+    #[prost(message, repeated, tag = "1")]
+    pub locked_rels: ::prost::alloc::vec::Vec<Node>,
+    #[prost(enumeration = "LockClauseStrength", tag = "2")]
+    pub strength: i32,
+    #[prost(enumeration = "LockWaitPolicy", tag = "3")]
+    pub wait_policy: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct XmlSerialize {
+    #[prost(enumeration = "XmlOptionType", tag = "1")]
+    pub xmloption: i32,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub expr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, tag = "3")]
+    pub type_name: ::core::option::Option<TypeName>,
+    #[prost(bool, tag = "4")]
+    pub indent: bool,
+    #[prost(int32, tag = "5")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct PartitionElem {
+    #[prost(string, tag = "1")]
+    pub name: ::prost::alloc::string::String,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub expr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "3")]
+    pub collation: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "4")]
+    pub opclass: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "5")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct PartitionSpec {
+    #[prost(enumeration = "PartitionStrategy", tag = "1")]
+    pub strategy: i32,
+    #[prost(message, repeated, tag = "2")]
+    pub part_params: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "3")]
+    pub location: i32,
+}
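The generated code above follows one uniform prost mapping: each protobuf message becomes a plain struct, each oneof becomes a nested enum (see node::Node and a_const::Val), list fields become Vec<Node>, recursive children become Option<Box<...>>, and protobuf enums are carried as raw i32 values. A minimal sketch of how these types nest (illustrative only, not part of the patch; it assumes the Node wrapper struct with its single `node` oneof field that prost generates earlier in this file):

    // Integer -> a_const::Val oneof -> AConst -> node::Node oneof -> Node
    let forty_two = Node {
        node: Some(node::Node::AConst(AConst {
            isnull: false,
            location: -1, // -1 is the usual "position unknown" sentinel in these nodes
            val: Some(a_const::Val::Ival(Integer { ival: 42 })),
        })),
    };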
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct PartitionBoundSpec {
+    #[prost(string, tag = "1")]
+    pub strategy: ::prost::alloc::string::String,
+    #[prost(bool, tag = "2")]
+    pub is_default: bool,
+    #[prost(int32, tag = "3")]
+    pub modulus: i32,
+    #[prost(int32, tag = "4")]
+    pub remainder: i32,
+    #[prost(message, repeated, tag = "5")]
+    pub listdatums: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "6")]
+    pub lowerdatums: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "7")]
+    pub upperdatums: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "8")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct PartitionRangeDatum {
+    #[prost(enumeration = "PartitionRangeDatumKind", tag = "1")]
+    pub kind: i32,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub value: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(int32, tag = "3")]
+    pub location: i32,
+}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct SinglePartitionSpec {}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct PartitionCmd {
+    #[prost(message, optional, tag = "1")]
+    pub name: ::core::option::Option<RangeVar>,
+    #[prost(message, optional, tag = "2")]
+    pub bound: ::core::option::Option<PartitionBoundSpec>,
+    #[prost(bool, tag = "3")]
+    pub concurrent: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RangeTblEntry {
+    #[prost(message, optional, tag = "1")]
+    pub alias: ::core::option::Option<Alias>,
+    #[prost(message, optional, tag = "2")]
+    pub eref: ::core::option::Option<Alias>,
+    #[prost(enumeration = "RteKind", tag = "3")]
+    pub rtekind: i32,
+    #[prost(uint32, tag = "4")]
+    pub relid: u32,
+    #[prost(bool, tag = "5")]
+    pub inh: bool,
+    #[prost(string, tag = "6")]
+    pub relkind: ::prost::alloc::string::String,
+    #[prost(int32, tag = "7")]
+    pub rellockmode: i32,
+    #[prost(uint32, tag = "8")]
+    pub perminfoindex: u32,
+    #[prost(message, optional, boxed, tag = "9")]
+    pub tablesample: ::core::option::Option<
+        ::prost::alloc::boxed::Box<TableSampleClause>,
+    >,
+    #[prost(message, optional, boxed, tag = "10")]
+    pub subquery: ::core::option::Option<::prost::alloc::boxed::Box<Query>>,
+    #[prost(bool, tag = "11")]
+    pub security_barrier: bool,
+    #[prost(enumeration = "JoinType", tag = "12")]
+    pub jointype: i32,
+    #[prost(int32, tag = "13")]
+    pub joinmergedcols: i32,
+    #[prost(message, repeated, tag = "14")]
+    pub joinaliasvars: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "15")]
+    pub joinleftcols: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "16")]
+    pub joinrightcols: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, tag = "17")]
+    pub join_using_alias: ::core::option::Option<Alias>,
+    #[prost(message, repeated, tag = "18")]
+    pub functions: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "19")]
+    pub funcordinality: bool,
+    #[prost(message, optional, boxed, tag = "20")]
+    pub tablefunc: ::core::option::Option<::prost::alloc::boxed::Box<TableFunc>>,
+    #[prost(message, repeated, tag = "21")]
+    pub values_lists: ::prost::alloc::vec::Vec<Node>,
+    #[prost(string, tag = "22")]
+    pub ctename: ::prost::alloc::string::String,
+    #[prost(uint32, tag = "23")]
+    pub ctelevelsup: u32,
+    #[prost(bool, tag = "24")]
+    pub self_reference: bool,
+    #[prost(message, repeated, tag = "25")]
+    pub coltypes: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "26")]
+    pub coltypmods: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "27")]
+    pub colcollations: ::prost::alloc::vec::Vec<Node>,
+    #[prost(string, tag = "28")]
+    pub enrname: ::prost::alloc::string::String,
+    #[prost(double, tag = "29")]
+    pub enrtuples: f64,
+    #[prost(bool, tag = "30")]
+    pub lateral: bool,
+    #[prost(bool, tag = "31")]
+    pub in_from_cl: bool,
+    #[prost(message, repeated, tag = "32")]
+    pub security_quals: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RtePermissionInfo {
+    #[prost(uint32, tag = "1")]
+    pub relid: u32,
+    #[prost(bool, tag = "2")]
+    pub inh: bool,
+    #[prost(uint64, tag = "3")]
+    pub required_perms: u64,
+    #[prost(uint32, tag = "4")]
+    pub check_as_user: u32,
+    #[prost(uint64, repeated, tag = "5")]
+    pub selected_cols: ::prost::alloc::vec::Vec<u64>,
+    #[prost(uint64, repeated, tag = "6")]
+    pub inserted_cols: ::prost::alloc::vec::Vec<u64>,
+    #[prost(uint64, repeated, tag = "7")]
+    pub updated_cols: ::prost::alloc::vec::Vec<u64>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RangeTblFunction {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub funcexpr: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(int32, tag = "2")]
+    pub funccolcount: i32,
+    #[prost(message, repeated, tag = "3")]
+    pub funccolnames: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "4")]
+    pub funccoltypes: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "5")]
+    pub funccoltypmods: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "6")]
+    pub funccolcollations: ::prost::alloc::vec::Vec<Node>,
+    #[prost(uint64, repeated, tag = "7")]
+    pub funcparams: ::prost::alloc::vec::Vec<u64>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TableSampleClause {
+    #[prost(uint32, tag = "1")]
+    pub tsmhandler: u32,
+    #[prost(message, repeated, tag = "2")]
+    pub args: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "3")]
+    pub repeatable: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WithCheckOption {
+    #[prost(enumeration = "WcoKind", tag = "1")]
+    pub kind: i32,
+    #[prost(string, tag = "2")]
+    pub relname: ::prost::alloc::string::String,
+    #[prost(string, tag = "3")]
+    pub polname: ::prost::alloc::string::String,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub qual: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(bool, tag = "5")]
+    pub cascaded: bool,
+}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct SortGroupClause {
+    #[prost(uint32, tag = "1")]
+    pub tle_sort_group_ref: u32,
+    #[prost(uint32, tag = "2")]
+    pub eqop: u32,
+    #[prost(uint32, tag = "3")]
+    pub sortop: u32,
+    #[prost(bool, tag = "4")]
+    pub nulls_first: bool,
+    #[prost(bool, tag = "5")]
+    pub hashable: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GroupingSet {
+    #[prost(enumeration = "GroupingSetKind", tag = "1")]
+    pub kind: i32,
+    #[prost(message, repeated, tag = "2")]
+    pub content: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "3")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WindowClause {
+    #[prost(string, tag = "1")]
+    pub name: ::prost::alloc::string::String,
+    #[prost(string, tag = "2")]
+    pub refname: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "3")]
+    pub partition_clause: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "4")]
+    pub order_clause: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "5")]
+    pub frame_options: i32,
+    #[prost(message, optional, boxed, tag = "6")]
+    pub start_offset: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "7")]
+    pub end_offset: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(uint32, tag = "8")]
+    pub start_in_range_func: u32,
+    #[prost(uint32, tag = "9")]
+    pub end_in_range_func: u32,
+    #[prost(uint32, tag = "10")]
+    pub in_range_coll: u32,
+    #[prost(bool, tag = "11")]
+    pub in_range_asc: bool,
+    #[prost(bool, tag = "12")]
+    pub in_range_nulls_first: bool,
+    #[prost(uint32, tag = "13")]
+    pub winref: u32,
+    #[prost(bool, tag = "14")]
+    pub copied_order: bool,
+}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct RowMarkClause {
+    #[prost(uint32, tag = "1")]
+    pub rti: u32,
+    #[prost(enumeration = "LockClauseStrength", tag = "2")]
+    pub strength: i32,
+    #[prost(enumeration = "LockWaitPolicy", tag = "3")]
+    pub wait_policy: i32,
+    #[prost(bool, tag = "4")]
+    pub pushed_down: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WithClause {
+    #[prost(message, repeated, tag = "1")]
+    pub ctes: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "2")]
+    pub recursive: bool,
+    #[prost(int32, tag = "3")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct InferClause {
+    #[prost(message, repeated, tag = "1")]
+    pub index_elems: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub where_clause: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(string, tag = "3")]
+    pub conname: ::prost::alloc::string::String,
+    #[prost(int32, tag = "4")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct OnConflictClause {
+    #[prost(enumeration = "OnConflictAction", tag = "1")]
+    pub action: i32,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub infer: ::core::option::Option<::prost::alloc::boxed::Box<InferClause>>,
+    #[prost(message, repeated, tag = "3")]
+    pub target_list: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub where_clause: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(int32, tag = "5")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CteSearchClause {
+    #[prost(message, repeated, tag = "1")]
+    pub search_col_list: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "2")]
+    pub search_breadth_first: bool,
+    #[prost(string, tag = "3")]
+    pub search_seq_column: ::prost::alloc::string::String,
+    #[prost(int32, tag = "4")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CteCycleClause {
+    #[prost(message, repeated, tag = "1")]
+    pub cycle_col_list: ::prost::alloc::vec::Vec<Node>,
+    #[prost(string, tag = "2")]
+    pub cycle_mark_column: ::prost::alloc::string::String,
+    #[prost(message, optional, boxed, tag = "3")]
+    pub cycle_mark_value: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub cycle_mark_default: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(string, tag = "5")]
+    pub cycle_path_column: ::prost::alloc::string::String,
+    #[prost(int32, tag = "6")]
+    pub location: i32,
+    #[prost(uint32, tag = "7")]
+    pub cycle_mark_type: u32,
+    #[prost(int32, tag = "8")]
+    pub cycle_mark_typmod: i32,
+    #[prost(uint32, tag = "9")]
+    pub cycle_mark_collation: u32,
+    #[prost(uint32, tag = "10")]
+    pub cycle_mark_neop: u32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CommonTableExpr {
+    #[prost(string, tag = "1")]
+    pub ctename: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "2")]
+    pub aliascolnames: ::prost::alloc::vec::Vec<Node>,
+    #[prost(enumeration = "CteMaterialize", tag = "3")]
+    pub ctematerialized: i32,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub ctequery: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, tag = "5")]
+    pub search_clause: ::core::option::Option<CteSearchClause>,
+    #[prost(message, optional, boxed, tag = "6")]
+    pub cycle_clause: ::core::option::Option<::prost::alloc::boxed::Box<CteCycleClause>>,
+    #[prost(int32, tag = "7")]
+    pub location: i32,
+    #[prost(bool, tag = "8")]
+    pub cterecursive: bool,
+    #[prost(int32, tag = "9")]
+    pub cterefcount: i32,
+    #[prost(message, repeated, tag = "10")]
+    pub ctecolnames: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "11")]
+    pub ctecoltypes: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "12")]
+    pub ctecoltypmods: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "13")]
+    pub ctecolcollations: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct MergeWhenClause {
+    #[prost(enumeration = "MergeMatchKind", tag = "1")]
+    pub match_kind: i32,
+    #[prost(enumeration = "CmdType", tag = "2")]
+    pub command_type: i32,
+    #[prost(enumeration = "OverridingKind", tag = "3")]
+    pub r#override: i32,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub condition: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "5")]
+    pub target_list: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "6")]
+    pub values: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TriggerTransition {
+    #[prost(string, tag = "1")]
+    pub name: ::prost::alloc::string::String,
+    #[prost(bool, tag = "2")]
+    pub is_new: bool,
+    #[prost(bool, tag = "3")]
+    pub is_table: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct JsonOutput {
+    #[prost(message, optional, tag = "1")]
+    pub type_name: ::core::option::Option<TypeName>,
+    #[prost(message, optional, tag = "2")]
+    pub returning: ::core::option::Option<JsonReturning>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct JsonArgument {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub val: ::core::option::Option<::prost::alloc::boxed::Box<JsonValueExpr>>,
+    #[prost(string, tag = "2")]
+    pub name: ::prost::alloc::string::String,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct JsonFuncExpr {
+    #[prost(enumeration = "JsonExprOp", tag = "1")]
+    pub op: i32,
+    #[prost(string, tag = "2")]
+    pub column_name: ::prost::alloc::string::String,
+    #[prost(message, optional, boxed, tag = "3")]
+    pub context_item: ::core::option::Option<::prost::alloc::boxed::Box<JsonValueExpr>>,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub pathspec: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "5")]
+    pub passing: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, tag = "6")]
+    pub output: ::core::option::Option<JsonOutput>,
+    #[prost(message, optional, boxed, tag = "7")]
+    pub on_empty: ::core::option::Option<::prost::alloc::boxed::Box<JsonBehavior>>,
+    #[prost(message, optional, boxed, tag = "8")]
+    pub on_error: ::core::option::Option<::prost::alloc::boxed::Box<JsonBehavior>>,
+    #[prost(enumeration = "JsonWrapper", tag = "9")]
+    pub wrapper: i32,
+    #[prost(enumeration = "JsonQuotes", tag = "10")]
+    pub quotes: i32,
+    #[prost(int32, tag = "11")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct JsonTablePathSpec {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub string: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(string, tag = "2")]
+    pub name: ::prost::alloc::string::String,
+    #[prost(int32, tag = "3")]
+    pub name_location: i32,
#[prost(int32, tag = "4")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JsonTable { + #[prost(message, optional, boxed, tag = "1")] + pub context_item: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "2")] + pub pathspec: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, repeated, tag = "3")] + pub passing: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "4")] + pub columns: ::prost::alloc::vec::Vec, + #[prost(message, optional, boxed, tag = "5")] + pub on_error: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, tag = "6")] + pub alias: ::core::option::Option, + #[prost(bool, tag = "7")] + pub lateral: bool, + #[prost(int32, tag = "8")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JsonTableColumn { + #[prost(enumeration = "JsonTableColumnType", tag = "1")] + pub coltype: i32, + #[prost(string, tag = "2")] + pub name: ::prost::alloc::string::String, + #[prost(message, optional, tag = "3")] + pub type_name: ::core::option::Option, + #[prost(message, optional, boxed, tag = "4")] + pub pathspec: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, tag = "5")] + pub format: ::core::option::Option, + #[prost(enumeration = "JsonWrapper", tag = "6")] + pub wrapper: i32, + #[prost(enumeration = "JsonQuotes", tag = "7")] + pub quotes: i32, + #[prost(message, repeated, tag = "8")] + pub columns: ::prost::alloc::vec::Vec, + #[prost(message, optional, boxed, tag = "9")] + pub on_empty: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "10")] + pub on_error: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(int32, tag = "11")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JsonKeyValue { + #[prost(message, optional, boxed, tag = "1")] + pub key: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "2")] + pub value: ::core::option::Option<::prost::alloc::boxed::Box>, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JsonParseExpr { + #[prost(message, optional, boxed, tag = "1")] + pub expr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, tag = "2")] + pub output: ::core::option::Option, + #[prost(bool, tag = "3")] + pub unique_keys: bool, + #[prost(int32, tag = "4")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JsonScalarExpr { + #[prost(message, optional, boxed, tag = "1")] + pub expr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, tag = "2")] + pub output: ::core::option::Option, + #[prost(int32, tag = "3")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JsonSerializeExpr { + #[prost(message, optional, boxed, tag = "1")] + pub expr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, tag = "2")] + pub output: ::core::option::Option, + #[prost(int32, tag = "3")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JsonObjectConstructor { + #[prost(message, repeated, tag = "1")] + pub exprs: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "2")] + pub output: ::core::option::Option, + #[prost(bool, tag = "3")] + pub absent_on_null: bool, + #[prost(bool, tag = "4")] + pub unique: bool, + #[prost(int32, tag = "5")] + pub location: i32, +} 
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct JsonArrayConstructor {
+    #[prost(message, repeated, tag = "1")]
+    pub exprs: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, tag = "2")]
+    pub output: ::core::option::Option<JsonOutput>,
+    #[prost(bool, tag = "3")]
+    pub absent_on_null: bool,
+    #[prost(int32, tag = "4")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct JsonArrayQueryConstructor {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub query: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, tag = "2")]
+    pub output: ::core::option::Option<JsonOutput>,
+    #[prost(message, optional, tag = "3")]
+    pub format: ::core::option::Option<JsonFormat>,
+    #[prost(bool, tag = "4")]
+    pub absent_on_null: bool,
+    #[prost(int32, tag = "5")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct JsonAggConstructor {
+    #[prost(message, optional, tag = "1")]
+    pub output: ::core::option::Option<JsonOutput>,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub agg_filter: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "3")]
+    pub agg_order: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub over: ::core::option::Option<::prost::alloc::boxed::Box<WindowDef>>,
+    #[prost(int32, tag = "5")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct JsonObjectAgg {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub constructor: ::core::option::Option<
+        ::prost::alloc::boxed::Box<JsonAggConstructor>,
+    >,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub arg: ::core::option::Option<::prost::alloc::boxed::Box<JsonKeyValue>>,
+    #[prost(bool, tag = "3")]
+    pub absent_on_null: bool,
+    #[prost(bool, tag = "4")]
+    pub unique: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct JsonArrayAgg {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub constructor: ::core::option::Option<
+        ::prost::alloc::boxed::Box<JsonAggConstructor>,
+    >,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub arg: ::core::option::Option<::prost::alloc::boxed::Box<JsonValueExpr>>,
+    #[prost(bool, tag = "3")]
+    pub absent_on_null: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RawStmt {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub stmt: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(int32, tag = "2")]
+    pub stmt_location: i32,
+    #[prost(int32, tag = "3")]
+    pub stmt_len: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct InsertStmt {
+    #[prost(message, optional, tag = "1")]
+    pub relation: ::core::option::Option<RangeVar>,
+    #[prost(message, repeated, tag = "2")]
+    pub cols: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "3")]
+    pub select_stmt: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub on_conflict_clause: ::core::option::Option<
+        ::prost::alloc::boxed::Box<OnConflictClause>,
+    >,
+    #[prost(message, repeated, tag = "5")]
+    pub returning_list: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, tag = "6")]
+    pub with_clause: ::core::option::Option<WithClause>,
+    #[prost(enumeration = "OverridingKind", tag = "7")]
+    pub r#override: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DeleteStmt {
+    #[prost(message, optional, tag = "1")]
+    pub relation: ::core::option::Option<RangeVar>,
+    #[prost(message, repeated, tag = "2")]
+    pub using_clause: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "3")]
+    pub where_clause: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "4")]
"4")] + pub returning_list: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "5")] + pub with_clause: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpdateStmt { + #[prost(message, optional, tag = "1")] + pub relation: ::core::option::Option, + #[prost(message, repeated, tag = "2")] + pub target_list: ::prost::alloc::vec::Vec, + #[prost(message, optional, boxed, tag = "3")] + pub where_clause: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, repeated, tag = "4")] + pub from_clause: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "5")] + pub returning_list: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "6")] + pub with_clause: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MergeStmt { + #[prost(message, optional, tag = "1")] + pub relation: ::core::option::Option, + #[prost(message, optional, boxed, tag = "2")] + pub source_relation: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "3")] + pub join_condition: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, repeated, tag = "4")] + pub merge_when_clauses: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "5")] + pub returning_list: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "6")] + pub with_clause: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SelectStmt { + #[prost(message, repeated, tag = "1")] + pub distinct_clause: ::prost::alloc::vec::Vec, + #[prost(message, optional, boxed, tag = "2")] + pub into_clause: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, repeated, tag = "3")] + pub target_list: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "4")] + pub from_clause: ::prost::alloc::vec::Vec, + #[prost(message, optional, boxed, tag = "5")] + pub where_clause: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, repeated, tag = "6")] + pub group_clause: ::prost::alloc::vec::Vec, + #[prost(bool, tag = "7")] + pub group_distinct: bool, + #[prost(message, optional, boxed, tag = "8")] + pub having_clause: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, repeated, tag = "9")] + pub window_clause: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "10")] + pub values_lists: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "11")] + pub sort_clause: ::prost::alloc::vec::Vec, + #[prost(message, optional, boxed, tag = "12")] + pub limit_offset: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "13")] + pub limit_count: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(enumeration = "LimitOption", tag = "14")] + pub limit_option: i32, + #[prost(message, repeated, tag = "15")] + pub locking_clause: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "16")] + pub with_clause: ::core::option::Option, + #[prost(enumeration = "SetOperation", tag = "17")] + pub op: i32, + #[prost(bool, tag = "18")] + pub all: bool, + #[prost(message, optional, boxed, tag = "19")] + pub larg: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "20")] + pub rarg: ::core::option::Option<::prost::alloc::boxed::Box>, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SetOperationStmt { + #[prost(enumeration = "SetOperation", tag = "1")] + pub op: i32, + #[prost(bool, tag = "2")] + pub all: bool, 
+    #[prost(message, optional, boxed, tag = "3")]
+    pub larg: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub rarg: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "5")]
+    pub col_types: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "6")]
+    pub col_typmods: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "7")]
+    pub col_collations: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "8")]
+    pub group_clauses: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ReturnStmt {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub returnval: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct PlAssignStmt {
+    #[prost(string, tag = "1")]
+    pub name: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "2")]
+    pub indirection: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "3")]
+    pub nnames: i32,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub val: ::core::option::Option<::prost::alloc::boxed::Box<SelectStmt>>,
+    #[prost(int32, tag = "5")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateSchemaStmt {
+    #[prost(string, tag = "1")]
+    pub schemaname: ::prost::alloc::string::String,
+    #[prost(message, optional, tag = "2")]
+    pub authrole: ::core::option::Option<RoleSpec>,
+    #[prost(message, repeated, tag = "3")]
+    pub schema_elts: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "4")]
+    pub if_not_exists: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterTableStmt {
+    #[prost(message, optional, tag = "1")]
+    pub relation: ::core::option::Option<RangeVar>,
+    #[prost(message, repeated, tag = "2")]
+    pub cmds: ::prost::alloc::vec::Vec<Node>,
+    #[prost(enumeration = "ObjectType", tag = "3")]
+    pub objtype: i32,
+    #[prost(bool, tag = "4")]
+    pub missing_ok: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ReplicaIdentityStmt {
+    #[prost(string, tag = "1")]
+    pub identity_type: ::prost::alloc::string::String,
+    #[prost(string, tag = "2")]
+    pub name: ::prost::alloc::string::String,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterTableCmd {
+    #[prost(enumeration = "AlterTableType", tag = "1")]
+    pub subtype: i32,
+    #[prost(string, tag = "2")]
+    pub name: ::prost::alloc::string::String,
+    #[prost(int32, tag = "3")]
+    pub num: i32,
+    #[prost(message, optional, tag = "4")]
+    pub newowner: ::core::option::Option<RoleSpec>,
+    #[prost(message, optional, boxed, tag = "5")]
+    pub def: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(enumeration = "DropBehavior", tag = "6")]
+    pub behavior: i32,
+    #[prost(bool, tag = "7")]
+    pub missing_ok: bool,
+    #[prost(bool, tag = "8")]
+    pub recurse: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterCollationStmt {
+    #[prost(message, repeated, tag = "1")]
+    pub collname: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterDomainStmt {
+    #[prost(string, tag = "1")]
+    pub subtype: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "2")]
+    pub type_name: ::prost::alloc::vec::Vec<Node>,
+    #[prost(string, tag = "3")]
+    pub name: ::prost::alloc::string::String,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub def: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(enumeration = "DropBehavior", tag = "5")]
+    pub behavior: i32,
+    #[prost(bool, tag = "6")]
+    pub missing_ok: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GrantStmt {
+    #[prost(bool, tag = "1")]
+    pub is_grant: bool,
+    #[prost(enumeration = "GrantTargetType", tag = "2")]
+    pub targtype: i32,
+    #[prost(enumeration = "ObjectType", tag = "3")]
+    pub objtype: i32,
+    #[prost(message, repeated, tag = "4")]
+    pub objects: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "5")]
+    pub privileges: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "6")]
+    pub grantees: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "7")]
+    pub grant_option: bool,
+    #[prost(message, optional, tag = "8")]
+    pub grantor: ::core::option::Option<RoleSpec>,
+    #[prost(enumeration = "DropBehavior", tag = "9")]
+    pub behavior: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ObjectWithArgs {
+    #[prost(message, repeated, tag = "1")]
+    pub objname: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "2")]
+    pub objargs: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "3")]
+    pub objfuncargs: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "4")]
+    pub args_unspecified: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AccessPriv {
+    #[prost(string, tag = "1")]
+    pub priv_name: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "2")]
+    pub cols: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GrantRoleStmt {
+    #[prost(message, repeated, tag = "1")]
+    pub granted_roles: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "2")]
+    pub grantee_roles: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "3")]
+    pub is_grant: bool,
+    #[prost(message, repeated, tag = "4")]
+    pub opt: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, tag = "5")]
+    pub grantor: ::core::option::Option<RoleSpec>,
+    #[prost(enumeration = "DropBehavior", tag = "6")]
+    pub behavior: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterDefaultPrivilegesStmt {
+    #[prost(message, repeated, tag = "1")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, tag = "2")]
+    pub action: ::core::option::Option<GrantStmt>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CopyStmt {
+    #[prost(message, optional, tag = "1")]
+    pub relation: ::core::option::Option<RangeVar>,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub query: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "3")]
+    pub attlist: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "4")]
+    pub is_from: bool,
+    #[prost(bool, tag = "5")]
+    pub is_program: bool,
+    #[prost(string, tag = "6")]
+    pub filename: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "7")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "8")]
+    pub where_clause: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct VariableSetStmt {
+    #[prost(enumeration = "VariableSetKind", tag = "1")]
+    pub kind: i32,
+    #[prost(string, tag = "2")]
+    pub name: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "3")]
+    pub args: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "4")]
+    pub is_local: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct VariableShowStmt {
+    #[prost(string, tag = "1")]
+    pub name: ::prost::alloc::string::String,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateStmt {
+    #[prost(message, optional, tag = "1")]
+    pub relation: ::core::option::Option<RangeVar>,
+    #[prost(message, repeated, tag = "2")]
repeated, tag = "2")] + pub table_elts: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "3")] + pub inh_relations: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "4")] + pub partbound: ::core::option::Option, + #[prost(message, optional, tag = "5")] + pub partspec: ::core::option::Option, + #[prost(message, optional, tag = "6")] + pub of_typename: ::core::option::Option, + #[prost(message, repeated, tag = "7")] + pub constraints: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "8")] + pub options: ::prost::alloc::vec::Vec, + #[prost(enumeration = "OnCommitAction", tag = "9")] + pub oncommit: i32, + #[prost(string, tag = "10")] + pub tablespacename: ::prost::alloc::string::String, + #[prost(string, tag = "11")] + pub access_method: ::prost::alloc::string::String, + #[prost(bool, tag = "12")] + pub if_not_exists: bool, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Constraint { + #[prost(enumeration = "ConstrType", tag = "1")] + pub contype: i32, + #[prost(string, tag = "2")] + pub conname: ::prost::alloc::string::String, + #[prost(bool, tag = "3")] + pub deferrable: bool, + #[prost(bool, tag = "4")] + pub initdeferred: bool, + #[prost(bool, tag = "5")] + pub skip_validation: bool, + #[prost(bool, tag = "6")] + pub initially_valid: bool, + #[prost(bool, tag = "7")] + pub is_no_inherit: bool, + #[prost(message, optional, boxed, tag = "8")] + pub raw_expr: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(string, tag = "9")] + pub cooked_expr: ::prost::alloc::string::String, + #[prost(string, tag = "10")] + pub generated_when: ::prost::alloc::string::String, + #[prost(int32, tag = "11")] + pub inhcount: i32, + #[prost(bool, tag = "12")] + pub nulls_not_distinct: bool, + #[prost(message, repeated, tag = "13")] + pub keys: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "14")] + pub including: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "15")] + pub exclusions: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "16")] + pub options: ::prost::alloc::vec::Vec, + #[prost(string, tag = "17")] + pub indexname: ::prost::alloc::string::String, + #[prost(string, tag = "18")] + pub indexspace: ::prost::alloc::string::String, + #[prost(bool, tag = "19")] + pub reset_default_tblspc: bool, + #[prost(string, tag = "20")] + pub access_method: ::prost::alloc::string::String, + #[prost(message, optional, boxed, tag = "21")] + pub where_clause: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, tag = "22")] + pub pktable: ::core::option::Option, + #[prost(message, repeated, tag = "23")] + pub fk_attrs: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "24")] + pub pk_attrs: ::prost::alloc::vec::Vec, + #[prost(string, tag = "25")] + pub fk_matchtype: ::prost::alloc::string::String, + #[prost(string, tag = "26")] + pub fk_upd_action: ::prost::alloc::string::String, + #[prost(string, tag = "27")] + pub fk_del_action: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "28")] + pub fk_del_set_cols: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "29")] + pub old_conpfeqop: ::prost::alloc::vec::Vec, + #[prost(uint32, tag = "30")] + pub old_pktable_oid: u32, + #[prost(int32, tag = "31")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateTableSpaceStmt { + #[prost(string, tag = "1")] + pub tablespacename: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub owner: 
+    #[prost(string, tag = "3")]
+    pub location: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "4")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DropTableSpaceStmt {
+    #[prost(string, tag = "1")]
+    pub tablespacename: ::prost::alloc::string::String,
+    #[prost(bool, tag = "2")]
+    pub missing_ok: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterTableSpaceOptionsStmt {
+    #[prost(string, tag = "1")]
+    pub tablespacename: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "2")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "3")]
+    pub is_reset: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterTableMoveAllStmt {
+    #[prost(string, tag = "1")]
+    pub orig_tablespacename: ::prost::alloc::string::String,
+    #[prost(enumeration = "ObjectType", tag = "2")]
+    pub objtype: i32,
+    #[prost(message, repeated, tag = "3")]
+    pub roles: ::prost::alloc::vec::Vec<Node>,
+    #[prost(string, tag = "4")]
+    pub new_tablespacename: ::prost::alloc::string::String,
+    #[prost(bool, tag = "5")]
+    pub nowait: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateExtensionStmt {
+    #[prost(string, tag = "1")]
+    pub extname: ::prost::alloc::string::String,
+    #[prost(bool, tag = "2")]
+    pub if_not_exists: bool,
+    #[prost(message, repeated, tag = "3")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterExtensionStmt {
+    #[prost(string, tag = "1")]
+    pub extname: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "2")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterExtensionContentsStmt {
+    #[prost(string, tag = "1")]
+    pub extname: ::prost::alloc::string::String,
+    #[prost(int32, tag = "2")]
+    pub action: i32,
+    #[prost(enumeration = "ObjectType", tag = "3")]
+    pub objtype: i32,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub object: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateFdwStmt {
+    #[prost(string, tag = "1")]
+    pub fdwname: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "2")]
+    pub func_options: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "3")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterFdwStmt {
+    #[prost(string, tag = "1")]
+    pub fdwname: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "2")]
+    pub func_options: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "3")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateForeignServerStmt {
+    #[prost(string, tag = "1")]
+    pub servername: ::prost::alloc::string::String,
+    #[prost(string, tag = "2")]
+    pub servertype: ::prost::alloc::string::String,
+    #[prost(string, tag = "3")]
+    pub version: ::prost::alloc::string::String,
+    #[prost(string, tag = "4")]
+    pub fdwname: ::prost::alloc::string::String,
+    #[prost(bool, tag = "5")]
+    pub if_not_exists: bool,
+    #[prost(message, repeated, tag = "6")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterForeignServerStmt {
+    #[prost(string, tag = "1")]
+    pub servername: ::prost::alloc::string::String,
+    #[prost(string, tag = "2")]
+    pub version: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "3")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "4")]
+    pub has_version: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateForeignTableStmt {
+    #[prost(message, optional, tag = "1")]
+    pub base_stmt: ::core::option::Option<CreateStmt>,
+    #[prost(string, tag = "2")]
+    pub servername: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "3")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateUserMappingStmt {
+    #[prost(message, optional, tag = "1")]
+    pub user: ::core::option::Option<RoleSpec>,
+    #[prost(string, tag = "2")]
+    pub servername: ::prost::alloc::string::String,
+    #[prost(bool, tag = "3")]
+    pub if_not_exists: bool,
+    #[prost(message, repeated, tag = "4")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterUserMappingStmt {
+    #[prost(message, optional, tag = "1")]
+    pub user: ::core::option::Option<RoleSpec>,
+    #[prost(string, tag = "2")]
+    pub servername: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "3")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DropUserMappingStmt {
+    #[prost(message, optional, tag = "1")]
+    pub user: ::core::option::Option<RoleSpec>,
+    #[prost(string, tag = "2")]
+    pub servername: ::prost::alloc::string::String,
+    #[prost(bool, tag = "3")]
+    pub missing_ok: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ImportForeignSchemaStmt {
+    #[prost(string, tag = "1")]
+    pub server_name: ::prost::alloc::string::String,
+    #[prost(string, tag = "2")]
+    pub remote_schema: ::prost::alloc::string::String,
+    #[prost(string, tag = "3")]
+    pub local_schema: ::prost::alloc::string::String,
+    #[prost(enumeration = "ImportForeignSchemaType", tag = "4")]
+    pub list_type: i32,
+    #[prost(message, repeated, tag = "5")]
+    pub table_list: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "6")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreatePolicyStmt {
+    #[prost(string, tag = "1")]
+    pub policy_name: ::prost::alloc::string::String,
+    #[prost(message, optional, tag = "2")]
+    pub table: ::core::option::Option<RangeVar>,
+    #[prost(string, tag = "3")]
+    pub cmd_name: ::prost::alloc::string::String,
+    #[prost(bool, tag = "4")]
+    pub permissive: bool,
+    #[prost(message, repeated, tag = "5")]
+    pub roles: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "6")]
+    pub qual: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "7")]
+    pub with_check: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterPolicyStmt {
+    #[prost(string, tag = "1")]
+    pub policy_name: ::prost::alloc::string::String,
+    #[prost(message, optional, tag = "2")]
+    pub table: ::core::option::Option<RangeVar>,
+    #[prost(message, repeated, tag = "3")]
+    pub roles: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub qual: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "5")]
+    pub with_check: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateAmStmt {
+    #[prost(string, tag = "1")]
+    pub amname: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "2")]
+    pub handler_name: ::prost::alloc::vec::Vec<Node>,
+    #[prost(string, tag = "3")]
+    pub amtype: ::prost::alloc::string::String,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateTrigStmt {
+    #[prost(bool, tag = "1")]
+    pub replace: bool,
+    #[prost(bool, tag = "2")]
+    pub isconstraint: bool,
+    #[prost(string, tag = "3")]
+    pub trigname: ::prost::alloc::string::String,
+    #[prost(message, optional, tag = "4")]
+    pub relation: ::core::option::Option<RangeVar>,
+    #[prost(message, repeated, tag = "5")]
+    pub funcname: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "6")]
+    pub args: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "7")]
+    pub row: bool,
+    #[prost(int32, tag = "8")]
+    pub timing: i32,
+    #[prost(int32, tag = "9")]
+    pub events: i32,
+    #[prost(message, repeated, tag = "10")]
+    pub columns: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "11")]
+    pub when_clause: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "12")]
+    pub transition_rels: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "13")]
+    pub deferrable: bool,
+    #[prost(bool, tag = "14")]
+    pub initdeferred: bool,
+    #[prost(message, optional, tag = "15")]
+    pub constrrel: ::core::option::Option<RangeVar>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateEventTrigStmt {
+    #[prost(string, tag = "1")]
+    pub trigname: ::prost::alloc::string::String,
+    #[prost(string, tag = "2")]
+    pub eventname: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "3")]
+    pub whenclause: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "4")]
+    pub funcname: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterEventTrigStmt {
+    #[prost(string, tag = "1")]
+    pub trigname: ::prost::alloc::string::String,
+    #[prost(string, tag = "2")]
+    pub tgenabled: ::prost::alloc::string::String,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreatePLangStmt {
+    #[prost(bool, tag = "1")]
+    pub replace: bool,
+    #[prost(string, tag = "2")]
+    pub plname: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "3")]
+    pub plhandler: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "4")]
+    pub plinline: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "5")]
+    pub plvalidator: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "6")]
+    pub pltrusted: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateRoleStmt {
+    #[prost(enumeration = "RoleStmtType", tag = "1")]
+    pub stmt_type: i32,
+    #[prost(string, tag = "2")]
+    pub role: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "3")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterRoleStmt {
+    #[prost(message, optional, tag = "1")]
+    pub role: ::core::option::Option<RoleSpec>,
+    #[prost(message, repeated, tag = "2")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "3")]
+    pub action: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterRoleSetStmt {
+    #[prost(message, optional, tag = "1")]
+    pub role: ::core::option::Option<RoleSpec>,
+    #[prost(string, tag = "2")]
+    pub database: ::prost::alloc::string::String,
+    #[prost(message, optional, tag = "3")]
+    pub setstmt: ::core::option::Option<VariableSetStmt>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DropRoleStmt {
+    #[prost(message, repeated, tag = "1")]
+    pub roles: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "2")]
+    pub missing_ok: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateSeqStmt {
+    #[prost(message, optional, tag = "1")]
tag = "1")] + pub sequence: ::core::option::Option, + #[prost(message, repeated, tag = "2")] + pub options: ::prost::alloc::vec::Vec, + #[prost(uint32, tag = "3")] + pub owner_id: u32, + #[prost(bool, tag = "4")] + pub for_identity: bool, + #[prost(bool, tag = "5")] + pub if_not_exists: bool, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AlterSeqStmt { + #[prost(message, optional, tag = "1")] + pub sequence: ::core::option::Option, + #[prost(message, repeated, tag = "2")] + pub options: ::prost::alloc::vec::Vec, + #[prost(bool, tag = "3")] + pub for_identity: bool, + #[prost(bool, tag = "4")] + pub missing_ok: bool, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DefineStmt { + #[prost(enumeration = "ObjectType", tag = "1")] + pub kind: i32, + #[prost(bool, tag = "2")] + pub oldstyle: bool, + #[prost(message, repeated, tag = "3")] + pub defnames: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "4")] + pub args: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "5")] + pub definition: ::prost::alloc::vec::Vec, + #[prost(bool, tag = "6")] + pub if_not_exists: bool, + #[prost(bool, tag = "7")] + pub replace: bool, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateDomainStmt { + #[prost(message, repeated, tag = "1")] + pub domainname: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "2")] + pub type_name: ::core::option::Option, + #[prost(message, optional, boxed, tag = "3")] + pub coll_clause: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, repeated, tag = "4")] + pub constraints: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateOpClassStmt { + #[prost(message, repeated, tag = "1")] + pub opclassname: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "2")] + pub opfamilyname: ::prost::alloc::vec::Vec, + #[prost(string, tag = "3")] + pub amname: ::prost::alloc::string::String, + #[prost(message, optional, tag = "4")] + pub datatype: ::core::option::Option, + #[prost(message, repeated, tag = "5")] + pub items: ::prost::alloc::vec::Vec, + #[prost(bool, tag = "6")] + pub is_default: bool, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateOpClassItem { + #[prost(int32, tag = "1")] + pub itemtype: i32, + #[prost(message, optional, tag = "2")] + pub name: ::core::option::Option, + #[prost(int32, tag = "3")] + pub number: i32, + #[prost(message, repeated, tag = "4")] + pub order_family: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "5")] + pub class_args: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "6")] + pub storedtype: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateOpFamilyStmt { + #[prost(message, repeated, tag = "1")] + pub opfamilyname: ::prost::alloc::vec::Vec, + #[prost(string, tag = "2")] + pub amname: ::prost::alloc::string::String, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AlterOpFamilyStmt { + #[prost(message, repeated, tag = "1")] + pub opfamilyname: ::prost::alloc::vec::Vec, + #[prost(string, tag = "2")] + pub amname: ::prost::alloc::string::String, + #[prost(bool, tag = "3")] + pub is_drop: bool, + #[prost(message, repeated, tag = "4")] + pub items: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DropStmt { + #[prost(message, repeated, tag = "1")] + pub objects: ::prost::alloc::vec::Vec, + #[prost(enumeration = "ObjectType", tag = "2")] + pub remove_type: i32, + 
#[prost(enumeration = "DropBehavior", tag = "3")] + pub behavior: i32, + #[prost(bool, tag = "4")] + pub missing_ok: bool, + #[prost(bool, tag = "5")] + pub concurrent: bool, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TruncateStmt { + #[prost(message, repeated, tag = "1")] + pub relations: ::prost::alloc::vec::Vec, + #[prost(bool, tag = "2")] + pub restart_seqs: bool, + #[prost(enumeration = "DropBehavior", tag = "3")] + pub behavior: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CommentStmt { + #[prost(enumeration = "ObjectType", tag = "1")] + pub objtype: i32, + #[prost(message, optional, boxed, tag = "2")] + pub object: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(string, tag = "3")] + pub comment: ::prost::alloc::string::String, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SecLabelStmt { + #[prost(enumeration = "ObjectType", tag = "1")] + pub objtype: i32, + #[prost(message, optional, boxed, tag = "2")] + pub object: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(string, tag = "3")] + pub provider: ::prost::alloc::string::String, + #[prost(string, tag = "4")] + pub label: ::prost::alloc::string::String, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeclareCursorStmt { + #[prost(string, tag = "1")] + pub portalname: ::prost::alloc::string::String, + #[prost(int32, tag = "2")] + pub options: i32, + #[prost(message, optional, boxed, tag = "3")] + pub query: ::core::option::Option<::prost::alloc::boxed::Box>, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ClosePortalStmt { + #[prost(string, tag = "1")] + pub portalname: ::prost::alloc::string::String, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FetchStmt { + #[prost(enumeration = "FetchDirection", tag = "1")] + pub direction: i32, + #[prost(int64, tag = "2")] + pub how_many: i64, + #[prost(string, tag = "3")] + pub portalname: ::prost::alloc::string::String, + #[prost(bool, tag = "4")] + pub ismove: bool, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct IndexStmt { + #[prost(string, tag = "1")] + pub idxname: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub relation: ::core::option::Option, + #[prost(string, tag = "3")] + pub access_method: ::prost::alloc::string::String, + #[prost(string, tag = "4")] + pub table_space: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "5")] + pub index_params: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "6")] + pub index_including_params: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "7")] + pub options: ::prost::alloc::vec::Vec, + #[prost(message, optional, boxed, tag = "8")] + pub where_clause: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, repeated, tag = "9")] + pub exclude_op_names: ::prost::alloc::vec::Vec, + #[prost(string, tag = "10")] + pub idxcomment: ::prost::alloc::string::String, + #[prost(uint32, tag = "11")] + pub index_oid: u32, + #[prost(uint32, tag = "12")] + pub old_number: u32, + #[prost(uint32, tag = "13")] + pub old_create_subid: u32, + #[prost(uint32, tag = "14")] + pub old_first_relfilelocator_subid: u32, + #[prost(bool, tag = "15")] + pub unique: bool, + #[prost(bool, tag = "16")] + pub nulls_not_distinct: bool, + #[prost(bool, tag = "17")] + pub primary: bool, + #[prost(bool, tag = "18")] + pub isconstraint: bool, + #[prost(bool, tag = "19")] + pub deferrable: bool, + #[prost(bool, tag = "20")] + pub initdeferred: bool, + 
#[prost(bool, tag = "21")] + pub transformed: bool, + #[prost(bool, tag = "22")] + pub concurrent: bool, + #[prost(bool, tag = "23")] + pub if_not_exists: bool, + #[prost(bool, tag = "24")] + pub reset_default_tblspc: bool, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateStatsStmt { + #[prost(message, repeated, tag = "1")] + pub defnames: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "2")] + pub stat_types: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "3")] + pub exprs: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "4")] + pub relations: ::prost::alloc::vec::Vec, + #[prost(string, tag = "5")] + pub stxcomment: ::prost::alloc::string::String, + #[prost(bool, tag = "6")] + pub transformed: bool, + #[prost(bool, tag = "7")] + pub if_not_exists: bool, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StatsElem { + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + #[prost(message, optional, boxed, tag = "2")] + pub expr: ::core::option::Option<::prost::alloc::boxed::Box>, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AlterStatsStmt { + #[prost(message, repeated, tag = "1")] + pub defnames: ::prost::alloc::vec::Vec, + #[prost(message, optional, boxed, tag = "2")] + pub stxstattarget: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(bool, tag = "3")] + pub missing_ok: bool, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateFunctionStmt { + #[prost(bool, tag = "1")] + pub is_procedure: bool, + #[prost(bool, tag = "2")] + pub replace: bool, + #[prost(message, repeated, tag = "3")] + pub funcname: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "4")] + pub parameters: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "5")] + pub return_type: ::core::option::Option, + #[prost(message, repeated, tag = "6")] + pub options: ::prost::alloc::vec::Vec, + #[prost(message, optional, boxed, tag = "7")] + pub sql_body: ::core::option::Option<::prost::alloc::boxed::Box>, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FunctionParameter { + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub arg_type: ::core::option::Option, + #[prost(enumeration = "FunctionParameterMode", tag = "3")] + pub mode: i32, + #[prost(message, optional, boxed, tag = "4")] + pub defexpr: ::core::option::Option<::prost::alloc::boxed::Box>, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AlterFunctionStmt { + #[prost(enumeration = "ObjectType", tag = "1")] + pub objtype: i32, + #[prost(message, optional, tag = "2")] + pub func: ::core::option::Option, + #[prost(message, repeated, tag = "3")] + pub actions: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DoStmt { + #[prost(message, repeated, tag = "1")] + pub args: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InlineCodeBlock { + #[prost(string, tag = "1")] + pub source_text: ::prost::alloc::string::String, + #[prost(uint32, tag = "2")] + pub lang_oid: u32, + #[prost(bool, tag = "3")] + pub lang_is_trusted: bool, + #[prost(bool, tag = "4")] + pub atomic: bool, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CallStmt { + #[prost(message, optional, boxed, tag = "1")] + pub funccall: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, boxed, tag = "2")] + pub funcexpr: 
+    #[prost(message, repeated, tag = "3")]
+    pub outargs: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct CallContext {
+    #[prost(bool, tag = "1")]
+    pub atomic: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RenameStmt {
+    #[prost(enumeration = "ObjectType", tag = "1")]
+    pub rename_type: i32,
+    #[prost(enumeration = "ObjectType", tag = "2")]
+    pub relation_type: i32,
+    #[prost(message, optional, tag = "3")]
+    pub relation: ::core::option::Option<RangeVar>,
+    #[prost(message, optional, boxed, tag = "4")]
+    pub object: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(string, tag = "5")]
+    pub subname: ::prost::alloc::string::String,
+    #[prost(string, tag = "6")]
+    pub newname: ::prost::alloc::string::String,
+    #[prost(enumeration = "DropBehavior", tag = "7")]
+    pub behavior: i32,
+    #[prost(bool, tag = "8")]
+    pub missing_ok: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterObjectDependsStmt {
+    #[prost(enumeration = "ObjectType", tag = "1")]
+    pub object_type: i32,
+    #[prost(message, optional, tag = "2")]
+    pub relation: ::core::option::Option<RangeVar>,
+    #[prost(message, optional, boxed, tag = "3")]
+    pub object: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, tag = "4")]
+    pub extname: ::core::option::Option<String>,
+    #[prost(bool, tag = "5")]
+    pub remove: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterObjectSchemaStmt {
+    #[prost(enumeration = "ObjectType", tag = "1")]
+    pub object_type: i32,
+    #[prost(message, optional, tag = "2")]
+    pub relation: ::core::option::Option<RangeVar>,
+    #[prost(message, optional, boxed, tag = "3")]
+    pub object: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(string, tag = "4")]
+    pub newschema: ::prost::alloc::string::String,
+    #[prost(bool, tag = "5")]
+    pub missing_ok: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterOwnerStmt {
+    #[prost(enumeration = "ObjectType", tag = "1")]
+    pub object_type: i32,
+    #[prost(message, optional, tag = "2")]
+    pub relation: ::core::option::Option<RangeVar>,
+    #[prost(message, optional, boxed, tag = "3")]
+    pub object: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, tag = "4")]
+    pub newowner: ::core::option::Option<RoleSpec>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterOperatorStmt {
+    #[prost(message, optional, tag = "1")]
+    pub opername: ::core::option::Option<ObjectWithArgs>,
+    #[prost(message, repeated, tag = "2")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterTypeStmt {
+    #[prost(message, repeated, tag = "1")]
+    pub type_name: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "2")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RuleStmt {
+    #[prost(message, optional, tag = "1")]
+    pub relation: ::core::option::Option<RangeVar>,
+    #[prost(string, tag = "2")]
+    pub rulename: ::prost::alloc::string::String,
+    #[prost(message, optional, boxed, tag = "3")]
+    pub where_clause: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(enumeration = "CmdType", tag = "4")]
+    pub event: i32,
+    #[prost(bool, tag = "5")]
+    pub instead: bool,
+    #[prost(message, repeated, tag = "6")]
+    pub actions: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "7")]
+    pub replace: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NotifyStmt {
+    #[prost(string, tag = "1")]
+    pub conditionname: ::prost::alloc::string::String,
+    #[prost(string, tag = "2")]
+    pub payload: ::prost::alloc::string::String,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ListenStmt {
+    #[prost(string, tag = "1")]
+    pub conditionname: ::prost::alloc::string::String,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct UnlistenStmt {
+    #[prost(string, tag = "1")]
+    pub conditionname: ::prost::alloc::string::String,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TransactionStmt {
+    #[prost(enumeration = "TransactionStmtKind", tag = "1")]
+    pub kind: i32,
+    #[prost(message, repeated, tag = "2")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+    #[prost(string, tag = "3")]
+    pub savepoint_name: ::prost::alloc::string::String,
+    #[prost(string, tag = "4")]
+    pub gid: ::prost::alloc::string::String,
+    #[prost(bool, tag = "5")]
+    pub chain: bool,
+    #[prost(int32, tag = "6")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CompositeTypeStmt {
+    #[prost(message, optional, tag = "1")]
+    pub typevar: ::core::option::Option<RangeVar>,
+    #[prost(message, repeated, tag = "2")]
+    pub coldeflist: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateEnumStmt {
+    #[prost(message, repeated, tag = "1")]
+    pub type_name: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "2")]
+    pub vals: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateRangeStmt {
+    #[prost(message, repeated, tag = "1")]
+    pub type_name: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "2")]
+    pub params: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterEnumStmt {
+    #[prost(message, repeated, tag = "1")]
+    pub type_name: ::prost::alloc::vec::Vec<Node>,
+    #[prost(string, tag = "2")]
+    pub old_val: ::prost::alloc::string::String,
+    #[prost(string, tag = "3")]
+    pub new_val: ::prost::alloc::string::String,
+    #[prost(string, tag = "4")]
+    pub new_val_neighbor: ::prost::alloc::string::String,
+    #[prost(bool, tag = "5")]
+    pub new_val_is_after: bool,
+    #[prost(bool, tag = "6")]
+    pub skip_if_new_val_exists: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ViewStmt {
+    #[prost(message, optional, tag = "1")]
+    pub view: ::core::option::Option<RangeVar>,
+    #[prost(message, repeated, tag = "2")]
+    pub aliases: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "3")]
+    pub query: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(bool, tag = "4")]
+    pub replace: bool,
+    #[prost(message, repeated, tag = "5")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+    #[prost(enumeration = "ViewCheckOption", tag = "6")]
+    pub with_check_option: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LoadStmt {
+    #[prost(string, tag = "1")]
+    pub filename: ::prost::alloc::string::String,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreatedbStmt {
+    #[prost(string, tag = "1")]
+    pub dbname: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "2")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterDatabaseStmt {
+    #[prost(string, tag = "1")]
+    pub dbname: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "2")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterDatabaseRefreshCollStmt {
+    #[prost(string, tag = "1")]
+    pub dbname: ::prost::alloc::string::String,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterDatabaseSetStmt {
+    #[prost(string, tag = "1")]
+    pub dbname: ::prost::alloc::string::String,
+    #[prost(message, optional, tag = "2")]
+    pub setstmt: ::core::option::Option<VariableSetStmt>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DropdbStmt {
+    #[prost(string, tag = "1")]
+    pub dbname: ::prost::alloc::string::String,
+    #[prost(bool, tag = "2")]
+    pub missing_ok: bool,
+    #[prost(message, repeated, tag = "3")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterSystemStmt {
+    #[prost(message, optional, tag = "1")]
+    pub setstmt: ::core::option::Option<VariableSetStmt>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ClusterStmt {
+    #[prost(message, optional, tag = "1")]
+    pub relation: ::core::option::Option<RangeVar>,
+    #[prost(string, tag = "2")]
+    pub indexname: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "3")]
+    pub params: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct VacuumStmt {
+    #[prost(message, repeated, tag = "1")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "2")]
+    pub rels: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "3")]
+    pub is_vacuumcmd: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct VacuumRelation {
+    #[prost(message, optional, tag = "1")]
+    pub relation: ::core::option::Option<RangeVar>,
+    #[prost(uint32, tag = "2")]
+    pub oid: u32,
+    #[prost(message, repeated, tag = "3")]
+    pub va_cols: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExplainStmt {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub query: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, repeated, tag = "2")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateTableAsStmt {
+    #[prost(message, optional, boxed, tag = "1")]
+    pub query: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub into: ::core::option::Option<::prost::alloc::boxed::Box<IntoClause>>,
+    #[prost(enumeration = "ObjectType", tag = "3")]
+    pub objtype: i32,
+    #[prost(bool, tag = "4")]
+    pub is_select_into: bool,
+    #[prost(bool, tag = "5")]
+    pub if_not_exists: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RefreshMatViewStmt {
+    #[prost(bool, tag = "1")]
+    pub concurrent: bool,
+    #[prost(bool, tag = "2")]
+    pub skip_data: bool,
+    #[prost(message, optional, tag = "3")]
+    pub relation: ::core::option::Option<RangeVar>,
+}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct CheckPointStmt {}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct DiscardStmt {
+    #[prost(enumeration = "DiscardMode", tag = "1")]
+    pub target: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LockStmt {
+    #[prost(message, repeated, tag = "1")]
+    pub relations: ::prost::alloc::vec::Vec<Node>,
+    #[prost(int32, tag = "2")]
+    pub mode: i32,
+    #[prost(bool, tag = "3")]
+    pub nowait: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ConstraintsSetStmt {
+    #[prost(message, repeated, tag = "1")]
+    pub constraints: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "2")]
+    pub deferred: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ReindexStmt {
+    #[prost(enumeration = "ReindexObjectType", tag = "1")]
+    pub kind: i32,
+    #[prost(message, optional, tag = "2")]
+    pub relation: ::core::option::Option<RangeVar>,
+    #[prost(string, tag = "3")]
+    pub name: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "4")]
+    pub params: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateConversionStmt {
+    #[prost(message, repeated, tag = "1")]
+    pub conversion_name: ::prost::alloc::vec::Vec<Node>,
+    #[prost(string, tag = "2")]
+    pub for_encoding_name: ::prost::alloc::string::String,
+    #[prost(string, tag = "3")]
+    pub to_encoding_name: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "4")]
+    pub func_name: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "5")]
+    pub def: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateCastStmt {
+    #[prost(message, optional, tag = "1")]
+    pub sourcetype: ::core::option::Option<TypeName>,
+    #[prost(message, optional, tag = "2")]
+    pub targettype: ::core::option::Option<TypeName>,
+    #[prost(message, optional, tag = "3")]
+    pub func: ::core::option::Option<ObjectWithArgs>,
+    #[prost(enumeration = "CoercionContext", tag = "4")]
+    pub context: i32,
+    #[prost(bool, tag = "5")]
+    pub inout: bool,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateTransformStmt {
+    #[prost(bool, tag = "1")]
+    pub replace: bool,
+    #[prost(message, optional, tag = "2")]
+    pub type_name: ::core::option::Option<TypeName>,
+    #[prost(string, tag = "3")]
+    pub lang: ::prost::alloc::string::String,
+    #[prost(message, optional, tag = "4")]
+    pub fromsql: ::core::option::Option<ObjectWithArgs>,
+    #[prost(message, optional, tag = "5")]
+    pub tosql: ::core::option::Option<ObjectWithArgs>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct PrepareStmt {
+    #[prost(string, tag = "1")]
+    pub name: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "2")]
+    pub argtypes: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, boxed, tag = "3")]
+    pub query: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecuteStmt {
+    #[prost(string, tag = "1")]
+    pub name: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "2")]
+    pub params: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DeallocateStmt {
+    #[prost(string, tag = "1")]
+    pub name: ::prost::alloc::string::String,
+    #[prost(bool, tag = "2")]
+    pub isall: bool,
+    #[prost(int32, tag = "3")]
+    pub location: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DropOwnedStmt {
+    #[prost(message, repeated, tag = "1")]
+    pub roles: ::prost::alloc::vec::Vec<Node>,
+    #[prost(enumeration = "DropBehavior", tag = "2")]
+    pub behavior: i32,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ReassignOwnedStmt {
+    #[prost(message, repeated, tag = "1")]
+    pub roles: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, optional, tag = "2")]
+    pub newrole: ::core::option::Option<RoleSpec>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterTsDictionaryStmt {
+    #[prost(message, repeated, tag = "1")]
+    pub dictname: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "2")]
+    pub options: ::prost::alloc::vec::Vec<Node>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AlterTsConfigurationStmt {
+    #[prost(enumeration = "AlterTsConfigType", tag = "1")]
+    pub kind: i32,
+    #[prost(message, repeated, tag = "2")]
+    pub cfgname: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "3")]
+    pub tokentype: ::prost::alloc::vec::Vec<Node>,
+    #[prost(message, repeated, tag = "4")]
+    pub dicts: ::prost::alloc::vec::Vec<Node>,
+    #[prost(bool, tag = "5")]
+    pub r#override: bool,
+    #[prost(bool, tag = "6")]
"6")] + pub replace: bool, + #[prost(bool, tag = "7")] + pub missing_ok: bool, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PublicationTable { + #[prost(message, optional, tag = "1")] + pub relation: ::core::option::Option, + #[prost(message, optional, boxed, tag = "2")] + pub where_clause: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, repeated, tag = "3")] + pub columns: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PublicationObjSpec { + #[prost(enumeration = "PublicationObjSpecType", tag = "1")] + pub pubobjtype: i32, + #[prost(string, tag = "2")] + pub name: ::prost::alloc::string::String, + #[prost(message, optional, boxed, tag = "3")] + pub pubtable: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(int32, tag = "4")] + pub location: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreatePublicationStmt { + #[prost(string, tag = "1")] + pub pubname: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "2")] + pub options: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "3")] + pub pubobjects: ::prost::alloc::vec::Vec, + #[prost(bool, tag = "4")] + pub for_all_tables: bool, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AlterPublicationStmt { + #[prost(string, tag = "1")] + pub pubname: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "2")] + pub options: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "3")] + pub pubobjects: ::prost::alloc::vec::Vec, + #[prost(bool, tag = "4")] + pub for_all_tables: bool, + #[prost(enumeration = "AlterPublicationAction", tag = "5")] + pub action: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateSubscriptionStmt { + #[prost(string, tag = "1")] + pub subname: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub conninfo: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "3")] + pub publication: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "4")] + pub options: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AlterSubscriptionStmt { + #[prost(enumeration = "AlterSubscriptionType", tag = "1")] + pub kind: i32, + #[prost(string, tag = "2")] + pub subname: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub conninfo: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "4")] + pub publication: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "5")] + pub options: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DropSubscriptionStmt { + #[prost(string, tag = "1")] + pub subname: ::prost::alloc::string::String, + #[prost(bool, tag = "2")] + pub missing_ok: bool, + #[prost(enumeration = "DropBehavior", tag = "3")] + pub behavior: i32, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct ScanToken { + #[prost(int32, tag = "1")] + pub start: i32, + #[prost(int32, tag = "2")] + pub end: i32, + #[prost(enumeration = "Token", tag = "4")] + pub token: i32, + #[prost(enumeration = "KeywordKind", tag = "5")] + pub keyword_kind: i32, +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum QuerySource { + Undefined = 0, + QsrcOriginal = 1, + QsrcParser = 2, + QsrcInsteadRule = 3, + QsrcQualInsteadRule = 4, + QsrcNonInsteadRule = 5, +} +impl QuerySource { + /// String value of the enum field names used in the ProtoBuf 
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "QUERY_SOURCE_UNDEFINED",
+            Self::QsrcOriginal => "QSRC_ORIGINAL",
+            Self::QsrcParser => "QSRC_PARSER",
+            Self::QsrcInsteadRule => "QSRC_INSTEAD_RULE",
+            Self::QsrcQualInsteadRule => "QSRC_QUAL_INSTEAD_RULE",
+            Self::QsrcNonInsteadRule => "QSRC_NON_INSTEAD_RULE",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "QUERY_SOURCE_UNDEFINED" => Some(Self::Undefined),
+            "QSRC_ORIGINAL" => Some(Self::QsrcOriginal),
+            "QSRC_PARSER" => Some(Self::QsrcParser),
+            "QSRC_INSTEAD_RULE" => Some(Self::QsrcInsteadRule),
+            "QSRC_QUAL_INSTEAD_RULE" => Some(Self::QsrcQualInsteadRule),
+            "QSRC_NON_INSTEAD_RULE" => Some(Self::QsrcNonInsteadRule),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum SortByDir {
+    Undefined = 0,
+    SortbyDefault = 1,
+    SortbyAsc = 2,
+    SortbyDesc = 3,
+    SortbyUsing = 4,
+}
+impl SortByDir {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "SORT_BY_DIR_UNDEFINED",
+            Self::SortbyDefault => "SORTBY_DEFAULT",
+            Self::SortbyAsc => "SORTBY_ASC",
+            Self::SortbyDesc => "SORTBY_DESC",
+            Self::SortbyUsing => "SORTBY_USING",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "SORT_BY_DIR_UNDEFINED" => Some(Self::Undefined),
+            "SORTBY_DEFAULT" => Some(Self::SortbyDefault),
+            "SORTBY_ASC" => Some(Self::SortbyAsc),
+            "SORTBY_DESC" => Some(Self::SortbyDesc),
+            "SORTBY_USING" => Some(Self::SortbyUsing),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum SortByNulls {
+    Undefined = 0,
+    SortbyNullsDefault = 1,
+    SortbyNullsFirst = 2,
+    SortbyNullsLast = 3,
+}
+impl SortByNulls {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "SORT_BY_NULLS_UNDEFINED",
+            Self::SortbyNullsDefault => "SORTBY_NULLS_DEFAULT",
+            Self::SortbyNullsFirst => "SORTBY_NULLS_FIRST",
+            Self::SortbyNullsLast => "SORTBY_NULLS_LAST",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "SORT_BY_NULLS_UNDEFINED" => Some(Self::Undefined),
+            "SORTBY_NULLS_DEFAULT" => Some(Self::SortbyNullsDefault),
+            "SORTBY_NULLS_FIRST" => Some(Self::SortbyNullsFirst),
+            "SORTBY_NULLS_LAST" => Some(Self::SortbyNullsLast),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum SetQuantifier {
+    Undefined = 0,
+    Default = 1,
+    All = 2,
+    Distinct = 3,
+}
+impl SetQuantifier {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "SET_QUANTIFIER_UNDEFINED",
+            Self::Default => "SET_QUANTIFIER_DEFAULT",
+            Self::All => "SET_QUANTIFIER_ALL",
+            Self::Distinct => "SET_QUANTIFIER_DISTINCT",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "SET_QUANTIFIER_UNDEFINED" => Some(Self::Undefined),
+            "SET_QUANTIFIER_DEFAULT" => Some(Self::Default),
+            "SET_QUANTIFIER_ALL" => Some(Self::All),
+            "SET_QUANTIFIER_DISTINCT" => Some(Self::Distinct),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum AExprKind {
+    Undefined = 0,
+    AexprOp = 1,
+    AexprOpAny = 2,
+    AexprOpAll = 3,
+    AexprDistinct = 4,
+    AexprNotDistinct = 5,
+    AexprNullif = 6,
+    AexprIn = 7,
+    AexprLike = 8,
+    AexprIlike = 9,
+    AexprSimilar = 10,
+    AexprBetween = 11,
+    AexprNotBetween = 12,
+    AexprBetweenSym = 13,
+    AexprNotBetweenSym = 14,
+}
+impl AExprKind {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "A_EXPR_KIND_UNDEFINED",
+            Self::AexprOp => "AEXPR_OP",
+            Self::AexprOpAny => "AEXPR_OP_ANY",
+            Self::AexprOpAll => "AEXPR_OP_ALL",
+            Self::AexprDistinct => "AEXPR_DISTINCT",
+            Self::AexprNotDistinct => "AEXPR_NOT_DISTINCT",
+            Self::AexprNullif => "AEXPR_NULLIF",
+            Self::AexprIn => "AEXPR_IN",
+            Self::AexprLike => "AEXPR_LIKE",
+            Self::AexprIlike => "AEXPR_ILIKE",
+            Self::AexprSimilar => "AEXPR_SIMILAR",
+            Self::AexprBetween => "AEXPR_BETWEEN",
+            Self::AexprNotBetween => "AEXPR_NOT_BETWEEN",
+            Self::AexprBetweenSym => "AEXPR_BETWEEN_SYM",
+            Self::AexprNotBetweenSym => "AEXPR_NOT_BETWEEN_SYM",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "A_EXPR_KIND_UNDEFINED" => Some(Self::Undefined),
+            "AEXPR_OP" => Some(Self::AexprOp),
+            "AEXPR_OP_ANY" => Some(Self::AexprOpAny),
+            "AEXPR_OP_ALL" => Some(Self::AexprOpAll),
+            "AEXPR_DISTINCT" => Some(Self::AexprDistinct),
+            "AEXPR_NOT_DISTINCT" => Some(Self::AexprNotDistinct),
+            "AEXPR_NULLIF" => Some(Self::AexprNullif),
+            "AEXPR_IN" => Some(Self::AexprIn),
+            "AEXPR_LIKE" => Some(Self::AexprLike),
+            "AEXPR_ILIKE" => Some(Self::AexprIlike),
+            "AEXPR_SIMILAR" => Some(Self::AexprSimilar),
+            "AEXPR_BETWEEN" => Some(Self::AexprBetween),
+            "AEXPR_NOT_BETWEEN" => Some(Self::AexprNotBetween),
+            "AEXPR_BETWEEN_SYM" => Some(Self::AexprBetweenSym),
+            "AEXPR_NOT_BETWEEN_SYM" => Some(Self::AexprNotBetweenSym),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum RoleSpecType {
+    Undefined = 0,
+    RolespecCstring = 1,
+    RolespecCurrentRole = 2,
+    RolespecCurrentUser = 3,
+    RolespecSessionUser = 4,
+    RolespecPublic = 5,
+}
+impl RoleSpecType {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "ROLE_SPEC_TYPE_UNDEFINED",
+            Self::RolespecCstring => "ROLESPEC_CSTRING",
+            Self::RolespecCurrentRole => "ROLESPEC_CURRENT_ROLE",
+            Self::RolespecCurrentUser => "ROLESPEC_CURRENT_USER",
+            Self::RolespecSessionUser => "ROLESPEC_SESSION_USER",
+            Self::RolespecPublic => "ROLESPEC_PUBLIC",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "ROLE_SPEC_TYPE_UNDEFINED" => Some(Self::Undefined),
+            "ROLESPEC_CSTRING" => Some(Self::RolespecCstring),
+            "ROLESPEC_CURRENT_ROLE" => Some(Self::RolespecCurrentRole),
+            "ROLESPEC_CURRENT_USER" => Some(Self::RolespecCurrentUser),
+            "ROLESPEC_SESSION_USER" => Some(Self::RolespecSessionUser),
+            "ROLESPEC_PUBLIC" => Some(Self::RolespecPublic),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum TableLikeOption {
+    Undefined = 0,
+    CreateTableLikeComments = 1,
+    CreateTableLikeCompression = 2,
+    CreateTableLikeConstraints = 3,
+    CreateTableLikeDefaults = 4,
+    CreateTableLikeGenerated = 5,
+    CreateTableLikeIdentity = 6,
+    CreateTableLikeIndexes = 7,
+    CreateTableLikeStatistics = 8,
+    CreateTableLikeStorage = 9,
+    CreateTableLikeAll = 10,
+}
+impl TableLikeOption {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "TABLE_LIKE_OPTION_UNDEFINED",
+            Self::CreateTableLikeComments => "CREATE_TABLE_LIKE_COMMENTS",
+            Self::CreateTableLikeCompression => "CREATE_TABLE_LIKE_COMPRESSION",
+            Self::CreateTableLikeConstraints => "CREATE_TABLE_LIKE_CONSTRAINTS",
+            Self::CreateTableLikeDefaults => "CREATE_TABLE_LIKE_DEFAULTS",
+            Self::CreateTableLikeGenerated => "CREATE_TABLE_LIKE_GENERATED",
+            Self::CreateTableLikeIdentity => "CREATE_TABLE_LIKE_IDENTITY",
+            Self::CreateTableLikeIndexes => "CREATE_TABLE_LIKE_INDEXES",
+            Self::CreateTableLikeStatistics => "CREATE_TABLE_LIKE_STATISTICS",
+            Self::CreateTableLikeStorage => "CREATE_TABLE_LIKE_STORAGE",
+            Self::CreateTableLikeAll => "CREATE_TABLE_LIKE_ALL",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "TABLE_LIKE_OPTION_UNDEFINED" => Some(Self::Undefined),
+            "CREATE_TABLE_LIKE_COMMENTS" => Some(Self::CreateTableLikeComments),
+            "CREATE_TABLE_LIKE_COMPRESSION" => Some(Self::CreateTableLikeCompression),
+            "CREATE_TABLE_LIKE_CONSTRAINTS" => Some(Self::CreateTableLikeConstraints),
+            "CREATE_TABLE_LIKE_DEFAULTS" => Some(Self::CreateTableLikeDefaults),
+            "CREATE_TABLE_LIKE_GENERATED" => Some(Self::CreateTableLikeGenerated),
+            "CREATE_TABLE_LIKE_IDENTITY" => Some(Self::CreateTableLikeIdentity),
+            "CREATE_TABLE_LIKE_INDEXES" => Some(Self::CreateTableLikeIndexes),
+            "CREATE_TABLE_LIKE_STATISTICS" => Some(Self::CreateTableLikeStatistics),
+            "CREATE_TABLE_LIKE_STORAGE" => Some(Self::CreateTableLikeStorage),
+            "CREATE_TABLE_LIKE_ALL" => Some(Self::CreateTableLikeAll),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum DefElemAction {
+    Undefined = 0,
+    DefelemUnspec = 1,
+    DefelemSet = 2,
+    DefelemAdd = 3,
+    DefelemDrop = 4,
+}
+impl DefElemAction {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "DEF_ELEM_ACTION_UNDEFINED",
+            Self::DefelemUnspec => "DEFELEM_UNSPEC",
+            Self::DefelemSet => "DEFELEM_SET",
+            Self::DefelemAdd => "DEFELEM_ADD",
+            Self::DefelemDrop => "DEFELEM_DROP",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "DEF_ELEM_ACTION_UNDEFINED" => Some(Self::Undefined),
+            "DEFELEM_UNSPEC" => Some(Self::DefelemUnspec),
+            "DEFELEM_SET" => Some(Self::DefelemSet),
+            "DEFELEM_ADD" => Some(Self::DefelemAdd),
+            "DEFELEM_DROP" => Some(Self::DefelemDrop),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum PartitionStrategy {
+    Undefined = 0,
+    List = 1,
+    Range = 2,
+    Hash = 3,
+}
+impl PartitionStrategy {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "PARTITION_STRATEGY_UNDEFINED",
+            Self::List => "PARTITION_STRATEGY_LIST",
+            Self::Range => "PARTITION_STRATEGY_RANGE",
+            Self::Hash => "PARTITION_STRATEGY_HASH",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "PARTITION_STRATEGY_UNDEFINED" => Some(Self::Undefined),
+            "PARTITION_STRATEGY_LIST" => Some(Self::List),
+            "PARTITION_STRATEGY_RANGE" => Some(Self::Range),
+            "PARTITION_STRATEGY_HASH" => Some(Self::Hash),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum PartitionRangeDatumKind {
+    Undefined = 0,
+    PartitionRangeDatumMinvalue = 1,
+    PartitionRangeDatumValue = 2,
+    PartitionRangeDatumMaxvalue = 3,
+}
+impl PartitionRangeDatumKind {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "PARTITION_RANGE_DATUM_KIND_UNDEFINED",
+            Self::PartitionRangeDatumMinvalue => "PARTITION_RANGE_DATUM_MINVALUE",
+            Self::PartitionRangeDatumValue => "PARTITION_RANGE_DATUM_VALUE",
+            Self::PartitionRangeDatumMaxvalue => "PARTITION_RANGE_DATUM_MAXVALUE",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "PARTITION_RANGE_DATUM_KIND_UNDEFINED" => Some(Self::Undefined),
+            "PARTITION_RANGE_DATUM_MINVALUE" => Some(Self::PartitionRangeDatumMinvalue),
+            "PARTITION_RANGE_DATUM_VALUE" => Some(Self::PartitionRangeDatumValue),
+            "PARTITION_RANGE_DATUM_MAXVALUE" => Some(Self::PartitionRangeDatumMaxvalue),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum RteKind {
+    RtekindUndefined = 0,
+    RteRelation = 1,
+    RteSubquery = 2,
+    RteJoin = 3,
+    RteFunction = 4,
+    RteTablefunc = 5,
+    RteValues = 6,
+    RteCte = 7,
+    RteNamedtuplestore = 8,
+    RteResult = 9,
+}
+impl RteKind {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::RtekindUndefined => "RTEKIND_UNDEFINED",
+            Self::RteRelation => "RTE_RELATION",
+            Self::RteSubquery => "RTE_SUBQUERY",
+            Self::RteJoin => "RTE_JOIN",
+            Self::RteFunction => "RTE_FUNCTION",
+            Self::RteTablefunc => "RTE_TABLEFUNC",
+            Self::RteValues => "RTE_VALUES",
+            Self::RteCte => "RTE_CTE",
+            Self::RteNamedtuplestore => "RTE_NAMEDTUPLESTORE",
+            Self::RteResult => "RTE_RESULT",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "RTEKIND_UNDEFINED" => Some(Self::RtekindUndefined),
+            "RTE_RELATION" => Some(Self::RteRelation),
+            "RTE_SUBQUERY" => Some(Self::RteSubquery),
+            "RTE_JOIN" => Some(Self::RteJoin),
+            "RTE_FUNCTION" => Some(Self::RteFunction),
+            "RTE_TABLEFUNC" => Some(Self::RteTablefunc),
+            "RTE_VALUES" => Some(Self::RteValues),
+            "RTE_CTE" => Some(Self::RteCte),
+            "RTE_NAMEDTUPLESTORE" => Some(Self::RteNamedtuplestore),
+            "RTE_RESULT" => Some(Self::RteResult),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum WcoKind {
+    WcokindUndefined = 0,
+    WcoViewCheck = 1,
+    WcoRlsInsertCheck = 2,
+    WcoRlsUpdateCheck = 3,
+    WcoRlsConflictCheck = 4,
+    WcoRlsMergeUpdateCheck = 5,
+    WcoRlsMergeDeleteCheck = 6,
+}
+impl WcoKind {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::WcokindUndefined => "WCOKIND_UNDEFINED",
+            Self::WcoViewCheck => "WCO_VIEW_CHECK",
+            Self::WcoRlsInsertCheck => "WCO_RLS_INSERT_CHECK",
+            Self::WcoRlsUpdateCheck => "WCO_RLS_UPDATE_CHECK",
+            Self::WcoRlsConflictCheck => "WCO_RLS_CONFLICT_CHECK",
+            Self::WcoRlsMergeUpdateCheck => "WCO_RLS_MERGE_UPDATE_CHECK",
+            Self::WcoRlsMergeDeleteCheck => "WCO_RLS_MERGE_DELETE_CHECK",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "WCOKIND_UNDEFINED" => Some(Self::WcokindUndefined),
+            "WCO_VIEW_CHECK" => Some(Self::WcoViewCheck),
+            "WCO_RLS_INSERT_CHECK" => Some(Self::WcoRlsInsertCheck),
+            "WCO_RLS_UPDATE_CHECK" => Some(Self::WcoRlsUpdateCheck),
+            "WCO_RLS_CONFLICT_CHECK" => Some(Self::WcoRlsConflictCheck),
+            "WCO_RLS_MERGE_UPDATE_CHECK" => Some(Self::WcoRlsMergeUpdateCheck),
+            "WCO_RLS_MERGE_DELETE_CHECK" => Some(Self::WcoRlsMergeDeleteCheck),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum GroupingSetKind {
+    Undefined = 0,
+    GroupingSetEmpty = 1,
+    GroupingSetSimple = 2,
+    GroupingSetRollup = 3,
+    GroupingSetCube = 4,
+    GroupingSetSets = 5,
+}
+impl GroupingSetKind {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "GROUPING_SET_KIND_UNDEFINED",
+            Self::GroupingSetEmpty => "GROUPING_SET_EMPTY",
+            Self::GroupingSetSimple => "GROUPING_SET_SIMPLE",
+            Self::GroupingSetRollup => "GROUPING_SET_ROLLUP",
+            Self::GroupingSetCube => "GROUPING_SET_CUBE",
+            Self::GroupingSetSets => "GROUPING_SET_SETS",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "GROUPING_SET_KIND_UNDEFINED" => Some(Self::Undefined),
+            "GROUPING_SET_EMPTY" => Some(Self::GroupingSetEmpty),
+            "GROUPING_SET_SIMPLE" => Some(Self::GroupingSetSimple),
+            "GROUPING_SET_ROLLUP" => Some(Self::GroupingSetRollup),
+            "GROUPING_SET_CUBE" => Some(Self::GroupingSetCube),
+            "GROUPING_SET_SETS" => Some(Self::GroupingSetSets),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum CteMaterialize {
+    CtematerializeUndefined = 0,
+    Default = 1,
+    Always = 2,
+    Never = 3,
+}
+impl CteMaterialize {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::CtematerializeUndefined => "CTEMATERIALIZE_UNDEFINED",
+            Self::Default => "CTEMaterializeDefault",
+            Self::Always => "CTEMaterializeAlways",
+            Self::Never => "CTEMaterializeNever",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "CTEMATERIALIZE_UNDEFINED" => Some(Self::CtematerializeUndefined),
+            "CTEMaterializeDefault" => Some(Self::Default),
+            "CTEMaterializeAlways" => Some(Self::Always),
+            "CTEMaterializeNever" => Some(Self::Never),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum JsonQuotes {
+    Undefined = 0,
+    JsQuotesUnspec = 1,
+    JsQuotesKeep = 2,
+    JsQuotesOmit = 3,
+}
+impl JsonQuotes {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "JSON_QUOTES_UNDEFINED",
+            Self::JsQuotesUnspec => "JS_QUOTES_UNSPEC",
+            Self::JsQuotesKeep => "JS_QUOTES_KEEP",
+            Self::JsQuotesOmit => "JS_QUOTES_OMIT",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "JSON_QUOTES_UNDEFINED" => Some(Self::Undefined),
+            "JS_QUOTES_UNSPEC" => Some(Self::JsQuotesUnspec),
+            "JS_QUOTES_KEEP" => Some(Self::JsQuotesKeep),
+            "JS_QUOTES_OMIT" => Some(Self::JsQuotesOmit),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum JsonTableColumnType {
+    Undefined = 0,
+    JtcForOrdinality = 1,
+    JtcRegular = 2,
+    JtcExists = 3,
+    JtcFormatted = 4,
+    JtcNested = 5,
+}
+impl JsonTableColumnType {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "JSON_TABLE_COLUMN_TYPE_UNDEFINED",
+            Self::JtcForOrdinality => "JTC_FOR_ORDINALITY",
+            Self::JtcRegular => "JTC_REGULAR",
+            Self::JtcExists => "JTC_EXISTS",
+            Self::JtcFormatted => "JTC_FORMATTED",
+            Self::JtcNested => "JTC_NESTED",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "JSON_TABLE_COLUMN_TYPE_UNDEFINED" => Some(Self::Undefined),
+            "JTC_FOR_ORDINALITY" => Some(Self::JtcForOrdinality),
+            "JTC_REGULAR" => Some(Self::JtcRegular),
+            "JTC_EXISTS" => Some(Self::JtcExists),
+            "JTC_FORMATTED" => Some(Self::JtcFormatted),
+            "JTC_NESTED" => Some(Self::JtcNested),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum SetOperation {
+    Undefined = 0,
+    SetopNone = 1,
+    SetopUnion = 2,
+    SetopIntersect = 3,
+    SetopExcept = 4,
+}
+impl SetOperation {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "SET_OPERATION_UNDEFINED",
+            Self::SetopNone => "SETOP_NONE",
+            Self::SetopUnion => "SETOP_UNION",
+            Self::SetopIntersect => "SETOP_INTERSECT",
+            Self::SetopExcept => "SETOP_EXCEPT",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "SET_OPERATION_UNDEFINED" => Some(Self::Undefined),
+            "SETOP_NONE" => Some(Self::SetopNone),
+            "SETOP_UNION" => Some(Self::SetopUnion),
+            "SETOP_INTERSECT" => Some(Self::SetopIntersect),
+            "SETOP_EXCEPT" => Some(Self::SetopExcept),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum ObjectType {
+    Undefined = 0,
+    ObjectAccessMethod = 1,
+    ObjectAggregate = 2,
+    ObjectAmop = 3,
+    ObjectAmproc = 4,
+    ObjectAttribute = 5,
+    ObjectCast = 6,
+    ObjectColumn = 7,
+    ObjectCollation = 8,
+    ObjectConversion = 9,
+    ObjectDatabase = 10,
+    ObjectDefault = 11,
+    ObjectDefacl = 12,
+    ObjectDomain = 13,
+    ObjectDomconstraint = 14,
+    ObjectEventTrigger = 15,
+    ObjectExtension = 16,
+    ObjectFdw = 17,
+    ObjectForeignServer = 18,
+    ObjectForeignTable = 19,
+    ObjectFunction = 20,
+    ObjectIndex = 21,
+    ObjectLanguage = 22,
+    ObjectLargeobject = 23,
+    ObjectMatview = 24,
+    ObjectOpclass = 25,
+    ObjectOperator = 26,
+    ObjectOpfamily = 27,
+    ObjectParameterAcl = 28,
+    ObjectPolicy = 29,
+    ObjectProcedure = 30,
+    ObjectPublication = 31,
+    ObjectPublicationNamespace = 32,
+    ObjectPublicationRel = 33,
+    ObjectRole = 34,
+    ObjectRoutine = 35,
+    ObjectRule = 36,
+    ObjectSchema = 37,
+    ObjectSequence = 38,
+    ObjectSubscription = 39,
+    ObjectStatisticExt = 40,
+    ObjectTabconstraint = 41,
+    ObjectTable = 42,
+    ObjectTablespace = 43,
+    ObjectTransform = 44,
+    ObjectTrigger = 45,
+    ObjectTsconfiguration = 46,
+    ObjectTsdictionary = 47,
+    ObjectTsparser = 48,
+    ObjectTstemplate = 49,
+    ObjectType = 50,
+    ObjectUserMapping = 51,
+    ObjectView = 52,
+}
+impl ObjectType {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "OBJECT_TYPE_UNDEFINED",
+            Self::ObjectAccessMethod => "OBJECT_ACCESS_METHOD",
+            Self::ObjectAggregate => "OBJECT_AGGREGATE",
+            Self::ObjectAmop => "OBJECT_AMOP",
+            Self::ObjectAmproc => "OBJECT_AMPROC",
+            Self::ObjectAttribute => "OBJECT_ATTRIBUTE",
+            Self::ObjectCast => "OBJECT_CAST",
+            Self::ObjectColumn => "OBJECT_COLUMN",
+            Self::ObjectCollation => "OBJECT_COLLATION",
+            Self::ObjectConversion => "OBJECT_CONVERSION",
+            Self::ObjectDatabase => "OBJECT_DATABASE",
+            Self::ObjectDefault => "OBJECT_DEFAULT",
+            Self::ObjectDefacl => "OBJECT_DEFACL",
+            Self::ObjectDomain => "OBJECT_DOMAIN",
+            Self::ObjectDomconstraint => "OBJECT_DOMCONSTRAINT",
+            Self::ObjectEventTrigger => "OBJECT_EVENT_TRIGGER",
+            Self::ObjectExtension => "OBJECT_EXTENSION",
+            Self::ObjectFdw => "OBJECT_FDW",
+            Self::ObjectForeignServer => "OBJECT_FOREIGN_SERVER",
+            Self::ObjectForeignTable => "OBJECT_FOREIGN_TABLE",
+            Self::ObjectFunction => "OBJECT_FUNCTION",
+            Self::ObjectIndex => "OBJECT_INDEX",
+            Self::ObjectLanguage => "OBJECT_LANGUAGE",
+            Self::ObjectLargeobject => "OBJECT_LARGEOBJECT",
+            Self::ObjectMatview => "OBJECT_MATVIEW",
+            Self::ObjectOpclass => "OBJECT_OPCLASS",
+            Self::ObjectOperator => "OBJECT_OPERATOR",
+            Self::ObjectOpfamily => "OBJECT_OPFAMILY",
+            Self::ObjectParameterAcl => "OBJECT_PARAMETER_ACL",
+            Self::ObjectPolicy => "OBJECT_POLICY",
+            Self::ObjectProcedure => "OBJECT_PROCEDURE",
+            Self::ObjectPublication => "OBJECT_PUBLICATION",
+            Self::ObjectPublicationNamespace => "OBJECT_PUBLICATION_NAMESPACE",
+            Self::ObjectPublicationRel => "OBJECT_PUBLICATION_REL",
+            Self::ObjectRole => "OBJECT_ROLE",
+            Self::ObjectRoutine => "OBJECT_ROUTINE",
+            Self::ObjectRule => "OBJECT_RULE",
+            Self::ObjectSchema => "OBJECT_SCHEMA",
+            Self::ObjectSequence => "OBJECT_SEQUENCE",
+            Self::ObjectSubscription => "OBJECT_SUBSCRIPTION",
+            Self::ObjectStatisticExt => "OBJECT_STATISTIC_EXT",
+            Self::ObjectTabconstraint => "OBJECT_TABCONSTRAINT",
+            Self::ObjectTable => "OBJECT_TABLE",
+            Self::ObjectTablespace => "OBJECT_TABLESPACE",
+            Self::ObjectTransform => "OBJECT_TRANSFORM",
+            Self::ObjectTrigger => "OBJECT_TRIGGER",
+            Self::ObjectTsconfiguration => "OBJECT_TSCONFIGURATION",
+            Self::ObjectTsdictionary => "OBJECT_TSDICTIONARY",
+            Self::ObjectTsparser => "OBJECT_TSPARSER",
+            Self::ObjectTstemplate => "OBJECT_TSTEMPLATE",
+            Self::ObjectType => "OBJECT_TYPE",
+            Self::ObjectUserMapping => "OBJECT_USER_MAPPING",
+            Self::ObjectView => "OBJECT_VIEW",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "OBJECT_TYPE_UNDEFINED" => Some(Self::Undefined),
+            "OBJECT_ACCESS_METHOD" => Some(Self::ObjectAccessMethod),
+            "OBJECT_AGGREGATE" => Some(Self::ObjectAggregate),
+            "OBJECT_AMOP" => Some(Self::ObjectAmop),
+            "OBJECT_AMPROC" => Some(Self::ObjectAmproc),
+            "OBJECT_ATTRIBUTE" => Some(Self::ObjectAttribute),
+            "OBJECT_CAST" => Some(Self::ObjectCast),
+            "OBJECT_COLUMN" => Some(Self::ObjectColumn),
+            "OBJECT_COLLATION" => Some(Self::ObjectCollation),
+            "OBJECT_CONVERSION" => Some(Self::ObjectConversion),
+            "OBJECT_DATABASE" => Some(Self::ObjectDatabase),
+            "OBJECT_DEFAULT" => Some(Self::ObjectDefault),
+            "OBJECT_DEFACL" => Some(Self::ObjectDefacl),
+            "OBJECT_DOMAIN" => Some(Self::ObjectDomain),
+            "OBJECT_DOMCONSTRAINT" => Some(Self::ObjectDomconstraint),
+            "OBJECT_EVENT_TRIGGER" => Some(Self::ObjectEventTrigger),
+            "OBJECT_EXTENSION" => Some(Self::ObjectExtension),
+            "OBJECT_FDW" => Some(Self::ObjectFdw),
+            "OBJECT_FOREIGN_SERVER" => Some(Self::ObjectForeignServer),
+            "OBJECT_FOREIGN_TABLE" => Some(Self::ObjectForeignTable),
+            "OBJECT_FUNCTION" => Some(Self::ObjectFunction),
+            "OBJECT_INDEX" => Some(Self::ObjectIndex),
+            "OBJECT_LANGUAGE" => Some(Self::ObjectLanguage),
+            "OBJECT_LARGEOBJECT" => Some(Self::ObjectLargeobject),
+            "OBJECT_MATVIEW" => Some(Self::ObjectMatview),
+            "OBJECT_OPCLASS" => Some(Self::ObjectOpclass),
+            "OBJECT_OPERATOR" => Some(Self::ObjectOperator),
+            "OBJECT_OPFAMILY" => Some(Self::ObjectOpfamily),
+            "OBJECT_PARAMETER_ACL" => Some(Self::ObjectParameterAcl),
+            "OBJECT_POLICY" => Some(Self::ObjectPolicy),
+            "OBJECT_PROCEDURE" => Some(Self::ObjectProcedure),
+            "OBJECT_PUBLICATION" => Some(Self::ObjectPublication),
+            "OBJECT_PUBLICATION_NAMESPACE" => Some(Self::ObjectPublicationNamespace),
+            "OBJECT_PUBLICATION_REL" => Some(Self::ObjectPublicationRel),
+            "OBJECT_ROLE" => Some(Self::ObjectRole),
+            "OBJECT_ROUTINE" => Some(Self::ObjectRoutine),
+            "OBJECT_RULE" => Some(Self::ObjectRule),
+            "OBJECT_SCHEMA" => Some(Self::ObjectSchema),
+            "OBJECT_SEQUENCE" => Some(Self::ObjectSequence),
+            "OBJECT_SUBSCRIPTION" => Some(Self::ObjectSubscription),
+            "OBJECT_STATISTIC_EXT" => Some(Self::ObjectStatisticExt),
+            "OBJECT_TABCONSTRAINT" => Some(Self::ObjectTabconstraint),
+            "OBJECT_TABLE" => Some(Self::ObjectTable),
+            "OBJECT_TABLESPACE" => Some(Self::ObjectTablespace),
+            "OBJECT_TRANSFORM" => Some(Self::ObjectTransform),
+            "OBJECT_TRIGGER" => Some(Self::ObjectTrigger),
+            "OBJECT_TSCONFIGURATION" => Some(Self::ObjectTsconfiguration),
+            "OBJECT_TSDICTIONARY" => Some(Self::ObjectTsdictionary),
+            "OBJECT_TSPARSER" => Some(Self::ObjectTsparser),
+            "OBJECT_TSTEMPLATE" => Some(Self::ObjectTstemplate),
+            "OBJECT_TYPE" => Some(Self::ObjectType),
+            "OBJECT_USER_MAPPING" => Some(Self::ObjectUserMapping),
+            "OBJECT_VIEW" => Some(Self::ObjectView),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum DropBehavior {
+    Undefined = 0,
+    DropRestrict = 1,
+    DropCascade = 2,
+}
+impl DropBehavior {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "DROP_BEHAVIOR_UNDEFINED",
+            Self::DropRestrict => "DROP_RESTRICT",
+            Self::DropCascade => "DROP_CASCADE",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "DROP_BEHAVIOR_UNDEFINED" => Some(Self::Undefined),
+            "DROP_RESTRICT" => Some(Self::DropRestrict),
+            "DROP_CASCADE" => Some(Self::DropCascade),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum AlterTableType {
+    Undefined = 0,
+    AtAddColumn = 1,
+    AtAddColumnToView = 2,
+    AtColumnDefault = 3,
+    AtCookedColumnDefault = 4,
+    AtDropNotNull = 5,
+    AtSetNotNull = 6,
+    AtSetExpression = 7,
+    AtDropExpression = 8,
+    AtCheckNotNull = 9,
+    AtSetStatistics = 10,
+    AtSetOptions = 11,
+    AtResetOptions = 12,
+    AtSetStorage = 13,
+    AtSetCompression = 14,
+    AtDropColumn = 15,
+    AtAddIndex = 16,
+    AtReAddIndex = 17,
+    AtAddConstraint = 18,
+    AtReAddConstraint = 19,
+    AtReAddDomainConstraint = 20,
+    AtAlterConstraint = 21,
+    AtValidateConstraint = 22,
+    AtAddIndexConstraint = 23,
+    AtDropConstraint = 24,
+    AtReAddComment = 25,
+    AtAlterColumnType = 26,
+    AtAlterColumnGenericOptions = 27,
+    AtChangeOwner = 28,
+    AtClusterOn = 29,
+    AtDropCluster = 30,
+    AtSetLogged = 31,
+    AtSetUnLogged = 32,
+    AtDropOids = 33,
+    AtSetAccessMethod = 34,
+    AtSetTableSpace = 35,
+    AtSetRelOptions = 36,
+    AtResetRelOptions = 37,
+    AtReplaceRelOptions = 38,
+    AtEnableTrig = 39,
+    AtEnableAlwaysTrig = 40,
+    AtEnableReplicaTrig = 41,
+    AtDisableTrig = 42,
+    AtEnableTrigAll = 43,
+    AtDisableTrigAll = 44,
+    AtEnableTrigUser = 45,
+    AtDisableTrigUser = 46,
+    AtEnableRule = 47,
+    AtEnableAlwaysRule = 48,
+    AtEnableReplicaRule = 49,
+    AtDisableRule = 50,
+    AtAddInherit = 51,
+    AtDropInherit = 52,
+    AtAddOf = 53,
+    AtDropOf = 54,
+    AtReplicaIdentity = 55,
+    AtEnableRowSecurity = 56,
+    AtDisableRowSecurity = 57,
+    AtForceRowSecurity = 58,
+    AtNoForceRowSecurity = 59,
+    AtGenericOptions = 60,
+    AtAttachPartition = 61,
+    AtDetachPartition = 62,
+    AtDetachPartitionFinalize = 63,
+    AtAddIdentity = 64,
+    AtSetIdentity = 65,
+    AtDropIdentity = 66,
+    AtReAddStatistics = 67,
+}
+impl AlterTableType {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "ALTER_TABLE_TYPE_UNDEFINED",
+            Self::AtAddColumn => "AT_AddColumn",
+            Self::AtAddColumnToView => "AT_AddColumnToView",
+            Self::AtColumnDefault => "AT_ColumnDefault",
+            Self::AtCookedColumnDefault => "AT_CookedColumnDefault",
+            Self::AtDropNotNull => "AT_DropNotNull",
+            Self::AtSetNotNull => "AT_SetNotNull",
+            Self::AtSetExpression => "AT_SetExpression",
+            Self::AtDropExpression => "AT_DropExpression",
+            Self::AtCheckNotNull => "AT_CheckNotNull",
+            Self::AtSetStatistics => "AT_SetStatistics",
+            Self::AtSetOptions => "AT_SetOptions",
+            Self::AtResetOptions => "AT_ResetOptions",
+            Self::AtSetStorage => "AT_SetStorage",
+            Self::AtSetCompression => "AT_SetCompression",
+            Self::AtDropColumn => "AT_DropColumn",
+            Self::AtAddIndex => "AT_AddIndex",
+            Self::AtReAddIndex => "AT_ReAddIndex",
+            Self::AtAddConstraint => "AT_AddConstraint",
+            Self::AtReAddConstraint => "AT_ReAddConstraint",
+            Self::AtReAddDomainConstraint => "AT_ReAddDomainConstraint",
+            Self::AtAlterConstraint => "AT_AlterConstraint",
+            Self::AtValidateConstraint => "AT_ValidateConstraint",
+            Self::AtAddIndexConstraint => "AT_AddIndexConstraint",
+            Self::AtDropConstraint => "AT_DropConstraint",
+            Self::AtReAddComment => "AT_ReAddComment",
+            Self::AtAlterColumnType => "AT_AlterColumnType",
+            Self::AtAlterColumnGenericOptions => "AT_AlterColumnGenericOptions",
+            Self::AtChangeOwner => "AT_ChangeOwner",
+            Self::AtClusterOn => "AT_ClusterOn",
+            Self::AtDropCluster => "AT_DropCluster",
+            Self::AtSetLogged => "AT_SetLogged",
+            Self::AtSetUnLogged => "AT_SetUnLogged",
+            Self::AtDropOids => "AT_DropOids",
+            Self::AtSetAccessMethod => "AT_SetAccessMethod",
+            Self::AtSetTableSpace => "AT_SetTableSpace",
+            Self::AtSetRelOptions => "AT_SetRelOptions",
+            Self::AtResetRelOptions => "AT_ResetRelOptions",
+            Self::AtReplaceRelOptions => "AT_ReplaceRelOptions",
+            Self::AtEnableTrig => "AT_EnableTrig",
+            Self::AtEnableAlwaysTrig => "AT_EnableAlwaysTrig",
+            Self::AtEnableReplicaTrig => "AT_EnableReplicaTrig",
+            Self::AtDisableTrig => "AT_DisableTrig",
+            Self::AtEnableTrigAll => "AT_EnableTrigAll",
+            Self::AtDisableTrigAll => "AT_DisableTrigAll",
+            Self::AtEnableTrigUser => "AT_EnableTrigUser",
+            Self::AtDisableTrigUser => "AT_DisableTrigUser",
+            Self::AtEnableRule => "AT_EnableRule",
+            Self::AtEnableAlwaysRule => "AT_EnableAlwaysRule",
+            Self::AtEnableReplicaRule => "AT_EnableReplicaRule",
+            Self::AtDisableRule => "AT_DisableRule",
+            Self::AtAddInherit => "AT_AddInherit",
+            Self::AtDropInherit => "AT_DropInherit",
+            Self::AtAddOf => "AT_AddOf",
+            Self::AtDropOf => "AT_DropOf",
+            Self::AtReplicaIdentity => "AT_ReplicaIdentity",
+            Self::AtEnableRowSecurity => "AT_EnableRowSecurity",
+            Self::AtDisableRowSecurity => "AT_DisableRowSecurity",
+            Self::AtForceRowSecurity => "AT_ForceRowSecurity",
+            Self::AtNoForceRowSecurity => "AT_NoForceRowSecurity",
+            Self::AtGenericOptions => "AT_GenericOptions",
+            Self::AtAttachPartition => "AT_AttachPartition",
+            Self::AtDetachPartition => "AT_DetachPartition",
+            Self::AtDetachPartitionFinalize => "AT_DetachPartitionFinalize",
+            Self::AtAddIdentity => "AT_AddIdentity",
+            Self::AtSetIdentity => "AT_SetIdentity",
+            Self::AtDropIdentity => "AT_DropIdentity",
+            Self::AtReAddStatistics => "AT_ReAddStatistics",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "ALTER_TABLE_TYPE_UNDEFINED" => Some(Self::Undefined),
+            "AT_AddColumn" => Some(Self::AtAddColumn),
+            "AT_AddColumnToView" => Some(Self::AtAddColumnToView),
+            "AT_ColumnDefault" => Some(Self::AtColumnDefault),
+            "AT_CookedColumnDefault" => Some(Self::AtCookedColumnDefault),
+            "AT_DropNotNull" => Some(Self::AtDropNotNull),
+            "AT_SetNotNull" => Some(Self::AtSetNotNull),
+            "AT_SetExpression" => Some(Self::AtSetExpression),
+            "AT_DropExpression" => Some(Self::AtDropExpression),
+            "AT_CheckNotNull" => Some(Self::AtCheckNotNull),
+            "AT_SetStatistics" => Some(Self::AtSetStatistics),
+            "AT_SetOptions" => Some(Self::AtSetOptions),
+            "AT_ResetOptions" => Some(Self::AtResetOptions),
+            "AT_SetStorage" => Some(Self::AtSetStorage),
+            "AT_SetCompression" => Some(Self::AtSetCompression),
+            "AT_DropColumn" => Some(Self::AtDropColumn),
+            "AT_AddIndex" => Some(Self::AtAddIndex),
+            "AT_ReAddIndex" => Some(Self::AtReAddIndex),
+            "AT_AddConstraint" => Some(Self::AtAddConstraint),
+            "AT_ReAddConstraint" => Some(Self::AtReAddConstraint),
+            "AT_ReAddDomainConstraint" => Some(Self::AtReAddDomainConstraint),
+            "AT_AlterConstraint" => Some(Self::AtAlterConstraint),
+            "AT_ValidateConstraint" => Some(Self::AtValidateConstraint),
+            "AT_AddIndexConstraint" => Some(Self::AtAddIndexConstraint),
+            "AT_DropConstraint" => Some(Self::AtDropConstraint),
+            "AT_ReAddComment" => Some(Self::AtReAddComment),
+            "AT_AlterColumnType" => Some(Self::AtAlterColumnType),
+            "AT_AlterColumnGenericOptions" => Some(Self::AtAlterColumnGenericOptions),
+            "AT_ChangeOwner" => Some(Self::AtChangeOwner),
+            "AT_ClusterOn" => Some(Self::AtClusterOn),
+            "AT_DropCluster" => Some(Self::AtDropCluster),
+            "AT_SetLogged" => Some(Self::AtSetLogged),
+            "AT_SetUnLogged" => Some(Self::AtSetUnLogged),
+            "AT_DropOids" => Some(Self::AtDropOids),
+            "AT_SetAccessMethod" => Some(Self::AtSetAccessMethod),
+            "AT_SetTableSpace" => Some(Self::AtSetTableSpace),
+            "AT_SetRelOptions" => Some(Self::AtSetRelOptions),
+            "AT_ResetRelOptions" => Some(Self::AtResetRelOptions),
+            "AT_ReplaceRelOptions" => Some(Self::AtReplaceRelOptions),
+            "AT_EnableTrig" => Some(Self::AtEnableTrig),
+            "AT_EnableAlwaysTrig" => Some(Self::AtEnableAlwaysTrig),
+            "AT_EnableReplicaTrig" => Some(Self::AtEnableReplicaTrig),
+            "AT_DisableTrig" => Some(Self::AtDisableTrig),
+            "AT_EnableTrigAll" => Some(Self::AtEnableTrigAll),
+            "AT_DisableTrigAll" => Some(Self::AtDisableTrigAll),
+            "AT_EnableTrigUser" => Some(Self::AtEnableTrigUser),
+            "AT_DisableTrigUser" => Some(Self::AtDisableTrigUser),
+            "AT_EnableRule" => Some(Self::AtEnableRule),
+            "AT_EnableAlwaysRule" => Some(Self::AtEnableAlwaysRule),
+            "AT_EnableReplicaRule" => Some(Self::AtEnableReplicaRule),
+            "AT_DisableRule" => Some(Self::AtDisableRule),
+            "AT_AddInherit" => Some(Self::AtAddInherit),
+            "AT_DropInherit" => Some(Self::AtDropInherit),
+            "AT_AddOf" => Some(Self::AtAddOf),
+            "AT_DropOf" => Some(Self::AtDropOf),
+            "AT_ReplicaIdentity" => Some(Self::AtReplicaIdentity),
+            "AT_EnableRowSecurity" => Some(Self::AtEnableRowSecurity),
+            "AT_DisableRowSecurity" => Some(Self::AtDisableRowSecurity),
+            "AT_ForceRowSecurity" => Some(Self::AtForceRowSecurity),
+            "AT_NoForceRowSecurity" => Some(Self::AtNoForceRowSecurity),
+            "AT_GenericOptions" => Some(Self::AtGenericOptions),
+            "AT_AttachPartition" => Some(Self::AtAttachPartition),
+            "AT_DetachPartition" => Some(Self::AtDetachPartition),
+            "AT_DetachPartitionFinalize" => Some(Self::AtDetachPartitionFinalize),
+            "AT_AddIdentity" => Some(Self::AtAddIdentity),
+            "AT_SetIdentity" => Some(Self::AtSetIdentity),
+            "AT_DropIdentity" => Some(Self::AtDropIdentity),
+            "AT_ReAddStatistics" => Some(Self::AtReAddStatistics),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum GrantTargetType {
+    Undefined = 0,
+    AclTargetObject = 1,
+    AclTargetAllInSchema = 2,
+    AclTargetDefaults = 3,
+}
+impl GrantTargetType {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "GRANT_TARGET_TYPE_UNDEFINED",
+            Self::AclTargetObject => "ACL_TARGET_OBJECT",
+            Self::AclTargetAllInSchema => "ACL_TARGET_ALL_IN_SCHEMA",
+            Self::AclTargetDefaults => "ACL_TARGET_DEFAULTS",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "GRANT_TARGET_TYPE_UNDEFINED" => Some(Self::Undefined),
+            "ACL_TARGET_OBJECT" => Some(Self::AclTargetObject),
+            "ACL_TARGET_ALL_IN_SCHEMA" => Some(Self::AclTargetAllInSchema),
+            "ACL_TARGET_DEFAULTS" => Some(Self::AclTargetDefaults),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum VariableSetKind {
+    Undefined = 0,
+    VarSetValue = 1,
+    VarSetDefault = 2,
+    VarSetCurrent = 3,
+    VarSetMulti = 4,
+    VarReset = 5,
+    VarResetAll = 6,
+}
+impl VariableSetKind {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "VARIABLE_SET_KIND_UNDEFINED",
+            Self::VarSetValue => "VAR_SET_VALUE",
+            Self::VarSetDefault => "VAR_SET_DEFAULT",
+            Self::VarSetCurrent => "VAR_SET_CURRENT",
+            Self::VarSetMulti => "VAR_SET_MULTI",
+            Self::VarReset => "VAR_RESET",
+            Self::VarResetAll => "VAR_RESET_ALL",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "VARIABLE_SET_KIND_UNDEFINED" => Some(Self::Undefined),
+            "VAR_SET_VALUE" => Some(Self::VarSetValue),
+            "VAR_SET_DEFAULT" => Some(Self::VarSetDefault),
+            "VAR_SET_CURRENT" => Some(Self::VarSetCurrent),
+            "VAR_SET_MULTI" => Some(Self::VarSetMulti),
+            "VAR_RESET" => Some(Self::VarReset),
+            "VAR_RESET_ALL" => Some(Self::VarResetAll),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum ConstrType {
+    Undefined = 0,
+    ConstrNull = 1,
+    ConstrNotnull = 2,
+    ConstrDefault = 3,
+    ConstrIdentity = 4,
+    ConstrGenerated = 5,
+    ConstrCheck = 6,
+    ConstrPrimary = 7,
+    ConstrUnique = 8,
+    ConstrExclusion = 9,
+    ConstrForeign = 10,
+    ConstrAttrDeferrable = 11,
+    ConstrAttrNotDeferrable = 12,
+    ConstrAttrDeferred = 13,
+    ConstrAttrImmediate = 14,
+}
+impl ConstrType {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "CONSTR_TYPE_UNDEFINED",
+            Self::ConstrNull => "CONSTR_NULL",
+            Self::ConstrNotnull => "CONSTR_NOTNULL",
+            Self::ConstrDefault => "CONSTR_DEFAULT",
+            Self::ConstrIdentity => "CONSTR_IDENTITY",
+            Self::ConstrGenerated => "CONSTR_GENERATED",
+            Self::ConstrCheck => "CONSTR_CHECK",
+            Self::ConstrPrimary => "CONSTR_PRIMARY",
+            Self::ConstrUnique => "CONSTR_UNIQUE",
+            Self::ConstrExclusion => "CONSTR_EXCLUSION",
+            Self::ConstrForeign => "CONSTR_FOREIGN",
+            Self::ConstrAttrDeferrable => "CONSTR_ATTR_DEFERRABLE",
+            Self::ConstrAttrNotDeferrable => "CONSTR_ATTR_NOT_DEFERRABLE",
+            Self::ConstrAttrDeferred => "CONSTR_ATTR_DEFERRED",
+            Self::ConstrAttrImmediate => "CONSTR_ATTR_IMMEDIATE",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "CONSTR_TYPE_UNDEFINED" => Some(Self::Undefined),
+            "CONSTR_NULL" => Some(Self::ConstrNull),
+            "CONSTR_NOTNULL" => Some(Self::ConstrNotnull),
+            "CONSTR_DEFAULT" => Some(Self::ConstrDefault),
+            "CONSTR_IDENTITY" => Some(Self::ConstrIdentity),
+            "CONSTR_GENERATED" => Some(Self::ConstrGenerated),
+            "CONSTR_CHECK" => Some(Self::ConstrCheck),
+            "CONSTR_PRIMARY" => Some(Self::ConstrPrimary),
+            "CONSTR_UNIQUE" => Some(Self::ConstrUnique),
+            "CONSTR_EXCLUSION" => Some(Self::ConstrExclusion),
+            "CONSTR_FOREIGN" => Some(Self::ConstrForeign),
+            "CONSTR_ATTR_DEFERRABLE" => Some(Self::ConstrAttrDeferrable),
+            "CONSTR_ATTR_NOT_DEFERRABLE" => Some(Self::ConstrAttrNotDeferrable),
+            "CONSTR_ATTR_DEFERRED" => Some(Self::ConstrAttrDeferred),
+            "CONSTR_ATTR_IMMEDIATE" => Some(Self::ConstrAttrImmediate),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum ImportForeignSchemaType {
+    Undefined = 0,
+    FdwImportSchemaAll = 1,
+    FdwImportSchemaLimitTo = 2,
+    FdwImportSchemaExcept = 3,
+}
+impl ImportForeignSchemaType {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "IMPORT_FOREIGN_SCHEMA_TYPE_UNDEFINED",
+            Self::FdwImportSchemaAll => "FDW_IMPORT_SCHEMA_ALL",
+            Self::FdwImportSchemaLimitTo => "FDW_IMPORT_SCHEMA_LIMIT_TO",
+            Self::FdwImportSchemaExcept => "FDW_IMPORT_SCHEMA_EXCEPT",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "IMPORT_FOREIGN_SCHEMA_TYPE_UNDEFINED" => Some(Self::Undefined),
+            "FDW_IMPORT_SCHEMA_ALL" => Some(Self::FdwImportSchemaAll),
+            "FDW_IMPORT_SCHEMA_LIMIT_TO" => Some(Self::FdwImportSchemaLimitTo),
+            "FDW_IMPORT_SCHEMA_EXCEPT" => Some(Self::FdwImportSchemaExcept),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum RoleStmtType {
+    Undefined = 0,
+    RolestmtRole = 1,
+    RolestmtUser = 2,
+    RolestmtGroup = 3,
+}
+impl RoleStmtType {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "ROLE_STMT_TYPE_UNDEFINED", + Self::RolestmtRole => "ROLESTMT_ROLE", + Self::RolestmtUser => "ROLESTMT_USER", + Self::RolestmtGroup => "ROLESTMT_GROUP", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ROLE_STMT_TYPE_UNDEFINED" => Some(Self::Undefined), + "ROLESTMT_ROLE" => Some(Self::RolestmtRole), + "ROLESTMT_USER" => Some(Self::RolestmtUser), + "ROLESTMT_GROUP" => Some(Self::RolestmtGroup), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum FetchDirection { + Undefined = 0, + FetchForward = 1, + FetchBackward = 2, + FetchAbsolute = 3, + FetchRelative = 4, +} +impl FetchDirection { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "FETCH_DIRECTION_UNDEFINED", + Self::FetchForward => "FETCH_FORWARD", + Self::FetchBackward => "FETCH_BACKWARD", + Self::FetchAbsolute => "FETCH_ABSOLUTE", + Self::FetchRelative => "FETCH_RELATIVE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "FETCH_DIRECTION_UNDEFINED" => Some(Self::Undefined), + "FETCH_FORWARD" => Some(Self::FetchForward), + "FETCH_BACKWARD" => Some(Self::FetchBackward), + "FETCH_ABSOLUTE" => Some(Self::FetchAbsolute), + "FETCH_RELATIVE" => Some(Self::FetchRelative), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum FunctionParameterMode { + Undefined = 0, + FuncParamIn = 1, + FuncParamOut = 2, + FuncParamInout = 3, + FuncParamVariadic = 4, + FuncParamTable = 5, + FuncParamDefault = 6, +} +impl FunctionParameterMode { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "FUNCTION_PARAMETER_MODE_UNDEFINED", + Self::FuncParamIn => "FUNC_PARAM_IN", + Self::FuncParamOut => "FUNC_PARAM_OUT", + Self::FuncParamInout => "FUNC_PARAM_INOUT", + Self::FuncParamVariadic => "FUNC_PARAM_VARIADIC", + Self::FuncParamTable => "FUNC_PARAM_TABLE", + Self::FuncParamDefault => "FUNC_PARAM_DEFAULT", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "FUNCTION_PARAMETER_MODE_UNDEFINED" => Some(Self::Undefined), + "FUNC_PARAM_IN" => Some(Self::FuncParamIn), + "FUNC_PARAM_OUT" => Some(Self::FuncParamOut), + "FUNC_PARAM_INOUT" => Some(Self::FuncParamInout), + "FUNC_PARAM_VARIADIC" => Some(Self::FuncParamVariadic), + "FUNC_PARAM_TABLE" => Some(Self::FuncParamTable), + "FUNC_PARAM_DEFAULT" => Some(Self::FuncParamDefault), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum TransactionStmtKind { + Undefined = 0, + TransStmtBegin = 1, + TransStmtStart = 2, + TransStmtCommit = 3, + TransStmtRollback = 4, + TransStmtSavepoint = 5, + TransStmtRelease = 6, + TransStmtRollbackTo = 7, + TransStmtPrepare = 8, + TransStmtCommitPrepared = 9, + TransStmtRollbackPrepared = 10, +} +impl TransactionStmtKind { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "TRANSACTION_STMT_KIND_UNDEFINED", + Self::TransStmtBegin => "TRANS_STMT_BEGIN", + Self::TransStmtStart => "TRANS_STMT_START", + Self::TransStmtCommit => "TRANS_STMT_COMMIT", + Self::TransStmtRollback => "TRANS_STMT_ROLLBACK", + Self::TransStmtSavepoint => "TRANS_STMT_SAVEPOINT", + Self::TransStmtRelease => "TRANS_STMT_RELEASE", + Self::TransStmtRollbackTo => "TRANS_STMT_ROLLBACK_TO", + Self::TransStmtPrepare => "TRANS_STMT_PREPARE", + Self::TransStmtCommitPrepared => "TRANS_STMT_COMMIT_PREPARED", + Self::TransStmtRollbackPrepared => "TRANS_STMT_ROLLBACK_PREPARED", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "TRANSACTION_STMT_KIND_UNDEFINED" => Some(Self::Undefined), + "TRANS_STMT_BEGIN" => Some(Self::TransStmtBegin), + "TRANS_STMT_START" => Some(Self::TransStmtStart), + "TRANS_STMT_COMMIT" => Some(Self::TransStmtCommit), + "TRANS_STMT_ROLLBACK" => Some(Self::TransStmtRollback), + "TRANS_STMT_SAVEPOINT" => Some(Self::TransStmtSavepoint), + "TRANS_STMT_RELEASE" => Some(Self::TransStmtRelease), + "TRANS_STMT_ROLLBACK_TO" => Some(Self::TransStmtRollbackTo), + "TRANS_STMT_PREPARE" => Some(Self::TransStmtPrepare), + "TRANS_STMT_COMMIT_PREPARED" => Some(Self::TransStmtCommitPrepared), + "TRANS_STMT_ROLLBACK_PREPARED" => Some(Self::TransStmtRollbackPrepared), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ViewCheckOption { + Undefined = 0, + NoCheckOption = 1, + LocalCheckOption = 2, + CascadedCheckOption = 3, +} +impl ViewCheckOption { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "VIEW_CHECK_OPTION_UNDEFINED", + Self::NoCheckOption => "NO_CHECK_OPTION", + Self::LocalCheckOption => "LOCAL_CHECK_OPTION", + Self::CascadedCheckOption => "CASCADED_CHECK_OPTION", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "VIEW_CHECK_OPTION_UNDEFINED" => Some(Self::Undefined), + "NO_CHECK_OPTION" => Some(Self::NoCheckOption), + "LOCAL_CHECK_OPTION" => Some(Self::LocalCheckOption), + "CASCADED_CHECK_OPTION" => Some(Self::CascadedCheckOption), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum DiscardMode { + Undefined = 0, + DiscardAll = 1, + DiscardPlans = 2, + DiscardSequences = 3, + DiscardTemp = 4, +} +impl DiscardMode { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "DISCARD_MODE_UNDEFINED", + Self::DiscardAll => "DISCARD_ALL", + Self::DiscardPlans => "DISCARD_PLANS", + Self::DiscardSequences => "DISCARD_SEQUENCES", + Self::DiscardTemp => "DISCARD_TEMP", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "DISCARD_MODE_UNDEFINED" => Some(Self::Undefined), + "DISCARD_ALL" => Some(Self::DiscardAll), + "DISCARD_PLANS" => Some(Self::DiscardPlans), + "DISCARD_SEQUENCES" => Some(Self::DiscardSequences), + "DISCARD_TEMP" => Some(Self::DiscardTemp), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ReindexObjectType { + Undefined = 0, + ReindexObjectIndex = 1, + ReindexObjectTable = 2, + ReindexObjectSchema = 3, + ReindexObjectSystem = 4, + ReindexObjectDatabase = 5, +} +impl ReindexObjectType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "REINDEX_OBJECT_TYPE_UNDEFINED", + Self::ReindexObjectIndex => "REINDEX_OBJECT_INDEX", + Self::ReindexObjectTable => "REINDEX_OBJECT_TABLE", + Self::ReindexObjectSchema => "REINDEX_OBJECT_SCHEMA", + Self::ReindexObjectSystem => "REINDEX_OBJECT_SYSTEM", + Self::ReindexObjectDatabase => "REINDEX_OBJECT_DATABASE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "REINDEX_OBJECT_TYPE_UNDEFINED" => Some(Self::Undefined), + "REINDEX_OBJECT_INDEX" => Some(Self::ReindexObjectIndex), + "REINDEX_OBJECT_TABLE" => Some(Self::ReindexObjectTable), + "REINDEX_OBJECT_SCHEMA" => Some(Self::ReindexObjectSchema), + "REINDEX_OBJECT_SYSTEM" => Some(Self::ReindexObjectSystem), + "REINDEX_OBJECT_DATABASE" => Some(Self::ReindexObjectDatabase), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum AlterTsConfigType { + AlterTsconfigTypeUndefined = 0, + AlterTsconfigAddMapping = 1, + AlterTsconfigAlterMappingForToken = 2, + AlterTsconfigReplaceDict = 3, + AlterTsconfigReplaceDictForToken = 4, + AlterTsconfigDropMapping = 5, +} +impl AlterTsConfigType { + /// String value of the enum field names used in the ProtoBuf definition. 
+ /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::AlterTsconfigTypeUndefined => "ALTER_TSCONFIG_TYPE_UNDEFINED", + Self::AlterTsconfigAddMapping => "ALTER_TSCONFIG_ADD_MAPPING", + Self::AlterTsconfigAlterMappingForToken => { + "ALTER_TSCONFIG_ALTER_MAPPING_FOR_TOKEN" + } + Self::AlterTsconfigReplaceDict => "ALTER_TSCONFIG_REPLACE_DICT", + Self::AlterTsconfigReplaceDictForToken => { + "ALTER_TSCONFIG_REPLACE_DICT_FOR_TOKEN" + } + Self::AlterTsconfigDropMapping => "ALTER_TSCONFIG_DROP_MAPPING", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ALTER_TSCONFIG_TYPE_UNDEFINED" => Some(Self::AlterTsconfigTypeUndefined), + "ALTER_TSCONFIG_ADD_MAPPING" => Some(Self::AlterTsconfigAddMapping), + "ALTER_TSCONFIG_ALTER_MAPPING_FOR_TOKEN" => { + Some(Self::AlterTsconfigAlterMappingForToken) + } + "ALTER_TSCONFIG_REPLACE_DICT" => Some(Self::AlterTsconfigReplaceDict), + "ALTER_TSCONFIG_REPLACE_DICT_FOR_TOKEN" => { + Some(Self::AlterTsconfigReplaceDictForToken) + } + "ALTER_TSCONFIG_DROP_MAPPING" => Some(Self::AlterTsconfigDropMapping), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum PublicationObjSpecType { + Undefined = 0, + PublicationobjTable = 1, + PublicationobjTablesInSchema = 2, + PublicationobjTablesInCurSchema = 3, + PublicationobjContinuation = 4, +} +impl PublicationObjSpecType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "PUBLICATION_OBJ_SPEC_TYPE_UNDEFINED", + Self::PublicationobjTable => "PUBLICATIONOBJ_TABLE", + Self::PublicationobjTablesInSchema => "PUBLICATIONOBJ_TABLES_IN_SCHEMA", + Self::PublicationobjTablesInCurSchema => { + "PUBLICATIONOBJ_TABLES_IN_CUR_SCHEMA" + } + Self::PublicationobjContinuation => "PUBLICATIONOBJ_CONTINUATION", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "PUBLICATION_OBJ_SPEC_TYPE_UNDEFINED" => Some(Self::Undefined), + "PUBLICATIONOBJ_TABLE" => Some(Self::PublicationobjTable), + "PUBLICATIONOBJ_TABLES_IN_SCHEMA" => Some(Self::PublicationobjTablesInSchema), + "PUBLICATIONOBJ_TABLES_IN_CUR_SCHEMA" => { + Some(Self::PublicationobjTablesInCurSchema) + } + "PUBLICATIONOBJ_CONTINUATION" => Some(Self::PublicationobjContinuation), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum AlterPublicationAction { + Undefined = 0, + ApAddObjects = 1, + ApDropObjects = 2, + ApSetObjects = 3, +} +impl AlterPublicationAction { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "ALTER_PUBLICATION_ACTION_UNDEFINED", + Self::ApAddObjects => "AP_AddObjects", + Self::ApDropObjects => "AP_DropObjects", + Self::ApSetObjects => "AP_SetObjects", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ALTER_PUBLICATION_ACTION_UNDEFINED" => Some(Self::Undefined), + "AP_AddObjects" => Some(Self::ApAddObjects), + "AP_DropObjects" => Some(Self::ApDropObjects), + "AP_SetObjects" => Some(Self::ApSetObjects), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum AlterSubscriptionType { + Undefined = 0, + AlterSubscriptionOptions = 1, + AlterSubscriptionConnection = 2, + AlterSubscriptionSetPublication = 3, + AlterSubscriptionAddPublication = 4, + AlterSubscriptionDropPublication = 5, + AlterSubscriptionRefresh = 6, + AlterSubscriptionEnabled = 7, + AlterSubscriptionSkip = 8, +} +impl AlterSubscriptionType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "ALTER_SUBSCRIPTION_TYPE_UNDEFINED", + Self::AlterSubscriptionOptions => "ALTER_SUBSCRIPTION_OPTIONS", + Self::AlterSubscriptionConnection => "ALTER_SUBSCRIPTION_CONNECTION", + Self::AlterSubscriptionSetPublication => "ALTER_SUBSCRIPTION_SET_PUBLICATION", + Self::AlterSubscriptionAddPublication => "ALTER_SUBSCRIPTION_ADD_PUBLICATION", + Self::AlterSubscriptionDropPublication => { + "ALTER_SUBSCRIPTION_DROP_PUBLICATION" + } + Self::AlterSubscriptionRefresh => "ALTER_SUBSCRIPTION_REFRESH", + Self::AlterSubscriptionEnabled => "ALTER_SUBSCRIPTION_ENABLED", + Self::AlterSubscriptionSkip => "ALTER_SUBSCRIPTION_SKIP", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ALTER_SUBSCRIPTION_TYPE_UNDEFINED" => Some(Self::Undefined), + "ALTER_SUBSCRIPTION_OPTIONS" => Some(Self::AlterSubscriptionOptions), + "ALTER_SUBSCRIPTION_CONNECTION" => Some(Self::AlterSubscriptionConnection), + "ALTER_SUBSCRIPTION_SET_PUBLICATION" => { + Some(Self::AlterSubscriptionSetPublication) + } + "ALTER_SUBSCRIPTION_ADD_PUBLICATION" => { + Some(Self::AlterSubscriptionAddPublication) + } + "ALTER_SUBSCRIPTION_DROP_PUBLICATION" => { + Some(Self::AlterSubscriptionDropPublication) + } + "ALTER_SUBSCRIPTION_REFRESH" => Some(Self::AlterSubscriptionRefresh), + "ALTER_SUBSCRIPTION_ENABLED" => Some(Self::AlterSubscriptionEnabled), + "ALTER_SUBSCRIPTION_SKIP" => Some(Self::AlterSubscriptionSkip), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum OverridingKind { + Undefined = 0, + OverridingNotSet = 1, + OverridingUserValue = 2, + OverridingSystemValue = 3, +} +impl OverridingKind { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "OVERRIDING_KIND_UNDEFINED", + Self::OverridingNotSet => "OVERRIDING_NOT_SET", + Self::OverridingUserValue => "OVERRIDING_USER_VALUE", + Self::OverridingSystemValue => "OVERRIDING_SYSTEM_VALUE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "OVERRIDING_KIND_UNDEFINED" => Some(Self::Undefined), + "OVERRIDING_NOT_SET" => Some(Self::OverridingNotSet), + "OVERRIDING_USER_VALUE" => Some(Self::OverridingUserValue), + "OVERRIDING_SYSTEM_VALUE" => Some(Self::OverridingSystemValue), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum OnCommitAction { + Undefined = 0, + OncommitNoop = 1, + OncommitPreserveRows = 2, + OncommitDeleteRows = 3, + OncommitDrop = 4, +} +impl OnCommitAction { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "ON_COMMIT_ACTION_UNDEFINED", + Self::OncommitNoop => "ONCOMMIT_NOOP", + Self::OncommitPreserveRows => "ONCOMMIT_PRESERVE_ROWS", + Self::OncommitDeleteRows => "ONCOMMIT_DELETE_ROWS", + Self::OncommitDrop => "ONCOMMIT_DROP", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ON_COMMIT_ACTION_UNDEFINED" => Some(Self::Undefined), + "ONCOMMIT_NOOP" => Some(Self::OncommitNoop), + "ONCOMMIT_PRESERVE_ROWS" => Some(Self::OncommitPreserveRows), + "ONCOMMIT_DELETE_ROWS" => Some(Self::OncommitDeleteRows), + "ONCOMMIT_DROP" => Some(Self::OncommitDrop), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum TableFuncType { + Undefined = 0, + TftXmltable = 1, + TftJsonTable = 2, +} +impl TableFuncType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "TABLE_FUNC_TYPE_UNDEFINED", + Self::TftXmltable => "TFT_XMLTABLE", + Self::TftJsonTable => "TFT_JSON_TABLE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "TABLE_FUNC_TYPE_UNDEFINED" => Some(Self::Undefined), + "TFT_XMLTABLE" => Some(Self::TftXmltable), + "TFT_JSON_TABLE" => Some(Self::TftJsonTable), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ParamKind { + Undefined = 0, + ParamExtern = 1, + ParamExec = 2, + ParamSublink = 3, + ParamMultiexpr = 4, +} +impl ParamKind { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
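+ // Usage sketch (hand-written, not prost output): the `#[repr(i32)]` layout with
+ // explicit discriminants means a plain cast yields the ProtoBuf wire number.
+ //
+ //     assert_eq!(ParamKind::Undefined as i32, 0);
+ //     assert_eq!(ParamKind::ParamSublink as i32, 3);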
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "PARAM_KIND_UNDEFINED", + Self::ParamExtern => "PARAM_EXTERN", + Self::ParamExec => "PARAM_EXEC", + Self::ParamSublink => "PARAM_SUBLINK", + Self::ParamMultiexpr => "PARAM_MULTIEXPR", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "PARAM_KIND_UNDEFINED" => Some(Self::Undefined), + "PARAM_EXTERN" => Some(Self::ParamExtern), + "PARAM_EXEC" => Some(Self::ParamExec), + "PARAM_SUBLINK" => Some(Self::ParamSublink), + "PARAM_MULTIEXPR" => Some(Self::ParamMultiexpr), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum CoercionContext { + Undefined = 0, + CoercionImplicit = 1, + CoercionAssignment = 2, + CoercionPlpgsql = 3, + CoercionExplicit = 4, +} +impl CoercionContext { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "COERCION_CONTEXT_UNDEFINED", + Self::CoercionImplicit => "COERCION_IMPLICIT", + Self::CoercionAssignment => "COERCION_ASSIGNMENT", + Self::CoercionPlpgsql => "COERCION_PLPGSQL", + Self::CoercionExplicit => "COERCION_EXPLICIT", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "COERCION_CONTEXT_UNDEFINED" => Some(Self::Undefined), + "COERCION_IMPLICIT" => Some(Self::CoercionImplicit), + "COERCION_ASSIGNMENT" => Some(Self::CoercionAssignment), + "COERCION_PLPGSQL" => Some(Self::CoercionPlpgsql), + "COERCION_EXPLICIT" => Some(Self::CoercionExplicit), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum CoercionForm { + Undefined = 0, + CoerceExplicitCall = 1, + CoerceExplicitCast = 2, + CoerceImplicitCast = 3, + CoerceSqlSyntax = 4, +} +impl CoercionForm { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "COERCION_FORM_UNDEFINED", + Self::CoerceExplicitCall => "COERCE_EXPLICIT_CALL", + Self::CoerceExplicitCast => "COERCE_EXPLICIT_CAST", + Self::CoerceImplicitCast => "COERCE_IMPLICIT_CAST", + Self::CoerceSqlSyntax => "COERCE_SQL_SYNTAX", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "COERCION_FORM_UNDEFINED" => Some(Self::Undefined), + "COERCE_EXPLICIT_CALL" => Some(Self::CoerceExplicitCall), + "COERCE_EXPLICIT_CAST" => Some(Self::CoerceExplicitCast), + "COERCE_IMPLICIT_CAST" => Some(Self::CoerceImplicitCast), + "COERCE_SQL_SYNTAX" => Some(Self::CoerceSqlSyntax), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum BoolExprType { + Undefined = 0, + AndExpr = 1, + OrExpr = 2, + NotExpr = 3, +} +impl BoolExprType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "BOOL_EXPR_TYPE_UNDEFINED", + Self::AndExpr => "AND_EXPR", + Self::OrExpr => "OR_EXPR", + Self::NotExpr => "NOT_EXPR", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "BOOL_EXPR_TYPE_UNDEFINED" => Some(Self::Undefined), + "AND_EXPR" => Some(Self::AndExpr), + "OR_EXPR" => Some(Self::OrExpr), + "NOT_EXPR" => Some(Self::NotExpr), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum SubLinkType { + Undefined = 0, + ExistsSublink = 1, + AllSublink = 2, + AnySublink = 3, + RowcompareSublink = 4, + ExprSublink = 5, + MultiexprSublink = 6, + ArraySublink = 7, + CteSublink = 8, +} +impl SubLinkType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "SUB_LINK_TYPE_UNDEFINED", + Self::ExistsSublink => "EXISTS_SUBLINK", + Self::AllSublink => "ALL_SUBLINK", + Self::AnySublink => "ANY_SUBLINK", + Self::RowcompareSublink => "ROWCOMPARE_SUBLINK", + Self::ExprSublink => "EXPR_SUBLINK", + Self::MultiexprSublink => "MULTIEXPR_SUBLINK", + Self::ArraySublink => "ARRAY_SUBLINK", + Self::CteSublink => "CTE_SUBLINK", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SUB_LINK_TYPE_UNDEFINED" => Some(Self::Undefined), + "EXISTS_SUBLINK" => Some(Self::ExistsSublink), + "ALL_SUBLINK" => Some(Self::AllSublink), + "ANY_SUBLINK" => Some(Self::AnySublink), + "ROWCOMPARE_SUBLINK" => Some(Self::RowcompareSublink), + "EXPR_SUBLINK" => Some(Self::ExprSublink), + "MULTIEXPR_SUBLINK" => Some(Self::MultiexprSublink), + "ARRAY_SUBLINK" => Some(Self::ArraySublink), + "CTE_SUBLINK" => Some(Self::CteSublink), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum RowCompareType { + Undefined = 0, + RowcompareLt = 1, + RowcompareLe = 2, + RowcompareEq = 3, + RowcompareGe = 4, + RowcompareGt = 5, + RowcompareNe = 6, +} +impl RowCompareType { + /// String value of the enum field names used in the ProtoBuf definition. 
+ /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "ROW_COMPARE_TYPE_UNDEFINED", + Self::RowcompareLt => "ROWCOMPARE_LT", + Self::RowcompareLe => "ROWCOMPARE_LE", + Self::RowcompareEq => "ROWCOMPARE_EQ", + Self::RowcompareGe => "ROWCOMPARE_GE", + Self::RowcompareGt => "ROWCOMPARE_GT", + Self::RowcompareNe => "ROWCOMPARE_NE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ROW_COMPARE_TYPE_UNDEFINED" => Some(Self::Undefined), + "ROWCOMPARE_LT" => Some(Self::RowcompareLt), + "ROWCOMPARE_LE" => Some(Self::RowcompareLe), + "ROWCOMPARE_EQ" => Some(Self::RowcompareEq), + "ROWCOMPARE_GE" => Some(Self::RowcompareGe), + "ROWCOMPARE_GT" => Some(Self::RowcompareGt), + "ROWCOMPARE_NE" => Some(Self::RowcompareNe), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum MinMaxOp { + Undefined = 0, + IsGreatest = 1, + IsLeast = 2, +} +impl MinMaxOp { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "MIN_MAX_OP_UNDEFINED", + Self::IsGreatest => "IS_GREATEST", + Self::IsLeast => "IS_LEAST", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "MIN_MAX_OP_UNDEFINED" => Some(Self::Undefined), + "IS_GREATEST" => Some(Self::IsGreatest), + "IS_LEAST" => Some(Self::IsLeast), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum SqlValueFunctionOp { + SqlvalueFunctionOpUndefined = 0, + SvfopCurrentDate = 1, + SvfopCurrentTime = 2, + SvfopCurrentTimeN = 3, + SvfopCurrentTimestamp = 4, + SvfopCurrentTimestampN = 5, + SvfopLocaltime = 6, + SvfopLocaltimeN = 7, + SvfopLocaltimestamp = 8, + SvfopLocaltimestampN = 9, + SvfopCurrentRole = 10, + SvfopCurrentUser = 11, + SvfopUser = 12, + SvfopSessionUser = 13, + SvfopCurrentCatalog = 14, + SvfopCurrentSchema = 15, +} +impl SqlValueFunctionOp { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::SqlvalueFunctionOpUndefined => "SQLVALUE_FUNCTION_OP_UNDEFINED", + Self::SvfopCurrentDate => "SVFOP_CURRENT_DATE", + Self::SvfopCurrentTime => "SVFOP_CURRENT_TIME", + Self::SvfopCurrentTimeN => "SVFOP_CURRENT_TIME_N", + Self::SvfopCurrentTimestamp => "SVFOP_CURRENT_TIMESTAMP", + Self::SvfopCurrentTimestampN => "SVFOP_CURRENT_TIMESTAMP_N", + Self::SvfopLocaltime => "SVFOP_LOCALTIME", + Self::SvfopLocaltimeN => "SVFOP_LOCALTIME_N", + Self::SvfopLocaltimestamp => "SVFOP_LOCALTIMESTAMP", + Self::SvfopLocaltimestampN => "SVFOP_LOCALTIMESTAMP_N", + Self::SvfopCurrentRole => "SVFOP_CURRENT_ROLE", + Self::SvfopCurrentUser => "SVFOP_CURRENT_USER", + Self::SvfopUser => "SVFOP_USER", + Self::SvfopSessionUser => "SVFOP_SESSION_USER", + Self::SvfopCurrentCatalog => "SVFOP_CURRENT_CATALOG", + Self::SvfopCurrentSchema => "SVFOP_CURRENT_SCHEMA", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SQLVALUE_FUNCTION_OP_UNDEFINED" => Some(Self::SqlvalueFunctionOpUndefined), + "SVFOP_CURRENT_DATE" => Some(Self::SvfopCurrentDate), + "SVFOP_CURRENT_TIME" => Some(Self::SvfopCurrentTime), + "SVFOP_CURRENT_TIME_N" => Some(Self::SvfopCurrentTimeN), + "SVFOP_CURRENT_TIMESTAMP" => Some(Self::SvfopCurrentTimestamp), + "SVFOP_CURRENT_TIMESTAMP_N" => Some(Self::SvfopCurrentTimestampN), + "SVFOP_LOCALTIME" => Some(Self::SvfopLocaltime), + "SVFOP_LOCALTIME_N" => Some(Self::SvfopLocaltimeN), + "SVFOP_LOCALTIMESTAMP" => Some(Self::SvfopLocaltimestamp), + "SVFOP_LOCALTIMESTAMP_N" => Some(Self::SvfopLocaltimestampN), + "SVFOP_CURRENT_ROLE" => Some(Self::SvfopCurrentRole), + "SVFOP_CURRENT_USER" => Some(Self::SvfopCurrentUser), + "SVFOP_USER" => Some(Self::SvfopUser), + "SVFOP_SESSION_USER" => Some(Self::SvfopSessionUser), + "SVFOP_CURRENT_CATALOG" => Some(Self::SvfopCurrentCatalog), + "SVFOP_CURRENT_SCHEMA" => Some(Self::SvfopCurrentSchema), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum XmlExprOp { + Undefined = 0, + IsXmlconcat = 1, + IsXmlelement = 2, + IsXmlforest = 3, + IsXmlparse = 4, + IsXmlpi = 5, + IsXmlroot = 6, + IsXmlserialize = 7, + IsDocument = 8, +} +impl XmlExprOp { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "XML_EXPR_OP_UNDEFINED", + Self::IsXmlconcat => "IS_XMLCONCAT", + Self::IsXmlelement => "IS_XMLELEMENT", + Self::IsXmlforest => "IS_XMLFOREST", + Self::IsXmlparse => "IS_XMLPARSE", + Self::IsXmlpi => "IS_XMLPI", + Self::IsXmlroot => "IS_XMLROOT", + Self::IsXmlserialize => "IS_XMLSERIALIZE", + Self::IsDocument => "IS_DOCUMENT", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "XML_EXPR_OP_UNDEFINED" => Some(Self::Undefined), + "IS_XMLCONCAT" => Some(Self::IsXmlconcat), + "IS_XMLELEMENT" => Some(Self::IsXmlelement), + "IS_XMLFOREST" => Some(Self::IsXmlforest), + "IS_XMLPARSE" => Some(Self::IsXmlparse), + "IS_XMLPI" => Some(Self::IsXmlpi), + "IS_XMLROOT" => Some(Self::IsXmlroot), + "IS_XMLSERIALIZE" => Some(Self::IsXmlserialize), + "IS_DOCUMENT" => Some(Self::IsDocument), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum XmlOptionType { + Undefined = 0, + XmloptionDocument = 1, + XmloptionContent = 2, +} +impl XmlOptionType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "XML_OPTION_TYPE_UNDEFINED", + Self::XmloptionDocument => "XMLOPTION_DOCUMENT", + Self::XmloptionContent => "XMLOPTION_CONTENT", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "XML_OPTION_TYPE_UNDEFINED" => Some(Self::Undefined), + "XMLOPTION_DOCUMENT" => Some(Self::XmloptionDocument), + "XMLOPTION_CONTENT" => Some(Self::XmloptionContent), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum JsonEncoding { + Undefined = 0, + JsEncDefault = 1, + JsEncUtf8 = 2, + JsEncUtf16 = 3, + JsEncUtf32 = 4, +} +impl JsonEncoding { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "JSON_ENCODING_UNDEFINED", + Self::JsEncDefault => "JS_ENC_DEFAULT", + Self::JsEncUtf8 => "JS_ENC_UTF8", + Self::JsEncUtf16 => "JS_ENC_UTF16", + Self::JsEncUtf32 => "JS_ENC_UTF32", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "JSON_ENCODING_UNDEFINED" => Some(Self::Undefined), + "JS_ENC_DEFAULT" => Some(Self::JsEncDefault), + "JS_ENC_UTF8" => Some(Self::JsEncUtf8), + "JS_ENC_UTF16" => Some(Self::JsEncUtf16), + "JS_ENC_UTF32" => Some(Self::JsEncUtf32), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum JsonFormatType { + Undefined = 0, + JsFormatDefault = 1, + JsFormatJson = 2, + JsFormatJsonb = 3, +} +impl JsonFormatType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "JSON_FORMAT_TYPE_UNDEFINED", + Self::JsFormatDefault => "JS_FORMAT_DEFAULT", + Self::JsFormatJson => "JS_FORMAT_JSON", + Self::JsFormatJsonb => "JS_FORMAT_JSONB", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "JSON_FORMAT_TYPE_UNDEFINED" => Some(Self::Undefined), + "JS_FORMAT_DEFAULT" => Some(Self::JsFormatDefault), + "JS_FORMAT_JSON" => Some(Self::JsFormatJson), + "JS_FORMAT_JSONB" => Some(Self::JsFormatJsonb), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum JsonConstructorType { + Undefined = 0, + JsctorJsonObject = 1, + JsctorJsonArray = 2, + JsctorJsonObjectagg = 3, + JsctorJsonArrayagg = 4, + JsctorJsonParse = 5, + JsctorJsonScalar = 6, + JsctorJsonSerialize = 7, +} +impl JsonConstructorType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "JSON_CONSTRUCTOR_TYPE_UNDEFINED", + Self::JsctorJsonObject => "JSCTOR_JSON_OBJECT", + Self::JsctorJsonArray => "JSCTOR_JSON_ARRAY", + Self::JsctorJsonObjectagg => "JSCTOR_JSON_OBJECTAGG", + Self::JsctorJsonArrayagg => "JSCTOR_JSON_ARRAYAGG", + Self::JsctorJsonParse => "JSCTOR_JSON_PARSE", + Self::JsctorJsonScalar => "JSCTOR_JSON_SCALAR", + Self::JsctorJsonSerialize => "JSCTOR_JSON_SERIALIZE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "JSON_CONSTRUCTOR_TYPE_UNDEFINED" => Some(Self::Undefined), + "JSCTOR_JSON_OBJECT" => Some(Self::JsctorJsonObject), + "JSCTOR_JSON_ARRAY" => Some(Self::JsctorJsonArray), + "JSCTOR_JSON_OBJECTAGG" => Some(Self::JsctorJsonObjectagg), + "JSCTOR_JSON_ARRAYAGG" => Some(Self::JsctorJsonArrayagg), + "JSCTOR_JSON_PARSE" => Some(Self::JsctorJsonParse), + "JSCTOR_JSON_SCALAR" => Some(Self::JsctorJsonScalar), + "JSCTOR_JSON_SERIALIZE" => Some(Self::JsctorJsonSerialize), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum JsonValueType { + Undefined = 0, + JsTypeAny = 1, + JsTypeObject = 2, + JsTypeArray = 3, + JsTypeScalar = 4, +} +impl JsonValueType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "JSON_VALUE_TYPE_UNDEFINED", + Self::JsTypeAny => "JS_TYPE_ANY", + Self::JsTypeObject => "JS_TYPE_OBJECT", + Self::JsTypeArray => "JS_TYPE_ARRAY", + Self::JsTypeScalar => "JS_TYPE_SCALAR", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "JSON_VALUE_TYPE_UNDEFINED" => Some(Self::Undefined), + "JS_TYPE_ANY" => Some(Self::JsTypeAny), + "JS_TYPE_OBJECT" => Some(Self::JsTypeObject), + "JS_TYPE_ARRAY" => Some(Self::JsTypeArray), + "JS_TYPE_SCALAR" => Some(Self::JsTypeScalar), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum JsonWrapper { + Undefined = 0, + JswUnspec = 1, + JswNone = 2, + JswConditional = 3, + JswUnconditional = 4, +} +impl JsonWrapper { + /// String value of the enum field names used in the ProtoBuf definition. 
+ /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "JSON_WRAPPER_UNDEFINED", + Self::JswUnspec => "JSW_UNSPEC", + Self::JswNone => "JSW_NONE", + Self::JswConditional => "JSW_CONDITIONAL", + Self::JswUnconditional => "JSW_UNCONDITIONAL", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "JSON_WRAPPER_UNDEFINED" => Some(Self::Undefined), + "JSW_UNSPEC" => Some(Self::JswUnspec), + "JSW_NONE" => Some(Self::JswNone), + "JSW_CONDITIONAL" => Some(Self::JswConditional), + "JSW_UNCONDITIONAL" => Some(Self::JswUnconditional), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum JsonBehaviorType { + Undefined = 0, + JsonBehaviorNull = 1, + JsonBehaviorError = 2, + JsonBehaviorEmpty = 3, + JsonBehaviorTrue = 4, + JsonBehaviorFalse = 5, + JsonBehaviorUnknown = 6, + JsonBehaviorEmptyArray = 7, + JsonBehaviorEmptyObject = 8, + JsonBehaviorDefault = 9, +} +impl JsonBehaviorType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "JSON_BEHAVIOR_TYPE_UNDEFINED", + Self::JsonBehaviorNull => "JSON_BEHAVIOR_NULL", + Self::JsonBehaviorError => "JSON_BEHAVIOR_ERROR", + Self::JsonBehaviorEmpty => "JSON_BEHAVIOR_EMPTY", + Self::JsonBehaviorTrue => "JSON_BEHAVIOR_TRUE", + Self::JsonBehaviorFalse => "JSON_BEHAVIOR_FALSE", + Self::JsonBehaviorUnknown => "JSON_BEHAVIOR_UNKNOWN", + Self::JsonBehaviorEmptyArray => "JSON_BEHAVIOR_EMPTY_ARRAY", + Self::JsonBehaviorEmptyObject => "JSON_BEHAVIOR_EMPTY_OBJECT", + Self::JsonBehaviorDefault => "JSON_BEHAVIOR_DEFAULT", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "JSON_BEHAVIOR_TYPE_UNDEFINED" => Some(Self::Undefined), + "JSON_BEHAVIOR_NULL" => Some(Self::JsonBehaviorNull), + "JSON_BEHAVIOR_ERROR" => Some(Self::JsonBehaviorError), + "JSON_BEHAVIOR_EMPTY" => Some(Self::JsonBehaviorEmpty), + "JSON_BEHAVIOR_TRUE" => Some(Self::JsonBehaviorTrue), + "JSON_BEHAVIOR_FALSE" => Some(Self::JsonBehaviorFalse), + "JSON_BEHAVIOR_UNKNOWN" => Some(Self::JsonBehaviorUnknown), + "JSON_BEHAVIOR_EMPTY_ARRAY" => Some(Self::JsonBehaviorEmptyArray), + "JSON_BEHAVIOR_EMPTY_OBJECT" => Some(Self::JsonBehaviorEmptyObject), + "JSON_BEHAVIOR_DEFAULT" => Some(Self::JsonBehaviorDefault), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum JsonExprOp { + Undefined = 0, + JsonExistsOp = 1, + JsonQueryOp = 2, + JsonValueOp = 3, + JsonTableOp = 4, +} +impl JsonExprOp { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "JSON_EXPR_OP_UNDEFINED", + Self::JsonExistsOp => "JSON_EXISTS_OP", + Self::JsonQueryOp => "JSON_QUERY_OP", + Self::JsonValueOp => "JSON_VALUE_OP", + Self::JsonTableOp => "JSON_TABLE_OP", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "JSON_EXPR_OP_UNDEFINED" => Some(Self::Undefined), + "JSON_EXISTS_OP" => Some(Self::JsonExistsOp), + "JSON_QUERY_OP" => Some(Self::JsonQueryOp), + "JSON_VALUE_OP" => Some(Self::JsonValueOp), + "JSON_TABLE_OP" => Some(Self::JsonTableOp), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum NullTestType { + Undefined = 0, + IsNull = 1, + IsNotNull = 2, +} +impl NullTestType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "NULL_TEST_TYPE_UNDEFINED", + Self::IsNull => "IS_NULL", + Self::IsNotNull => "IS_NOT_NULL", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "NULL_TEST_TYPE_UNDEFINED" => Some(Self::Undefined), + "IS_NULL" => Some(Self::IsNull), + "IS_NOT_NULL" => Some(Self::IsNotNull), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum BoolTestType { + Undefined = 0, + IsTrue = 1, + IsNotTrue = 2, + IsFalse = 3, + IsNotFalse = 4, + IsUnknown = 5, + IsNotUnknown = 6, +} +impl BoolTestType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "BOOL_TEST_TYPE_UNDEFINED", + Self::IsTrue => "IS_TRUE", + Self::IsNotTrue => "IS_NOT_TRUE", + Self::IsFalse => "IS_FALSE", + Self::IsNotFalse => "IS_NOT_FALSE", + Self::IsUnknown => "IS_UNKNOWN", + Self::IsNotUnknown => "IS_NOT_UNKNOWN", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "BOOL_TEST_TYPE_UNDEFINED" => Some(Self::Undefined), + "IS_TRUE" => Some(Self::IsTrue), + "IS_NOT_TRUE" => Some(Self::IsNotTrue), + "IS_FALSE" => Some(Self::IsFalse), + "IS_NOT_FALSE" => Some(Self::IsNotFalse), + "IS_UNKNOWN" => Some(Self::IsUnknown), + "IS_NOT_UNKNOWN" => Some(Self::IsNotUnknown), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum MergeMatchKind { + Undefined = 0, + MergeWhenMatched = 1, + MergeWhenNotMatchedBySource = 2, + MergeWhenNotMatchedByTarget = 3, +} +impl MergeMatchKind { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
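+ // Usage sketch (hand-written, not prost output): the returned `&'static str`
+ // borrows from the binary, so it can go straight into logs without allocating.
+ //
+ //     let kind = MergeMatchKind::MergeWhenMatched;
+ //     println!("merge clause: {}", kind.as_str_name()); // MERGE_WHEN_MATCHED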
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "MERGE_MATCH_KIND_UNDEFINED", + Self::MergeWhenMatched => "MERGE_WHEN_MATCHED", + Self::MergeWhenNotMatchedBySource => "MERGE_WHEN_NOT_MATCHED_BY_SOURCE", + Self::MergeWhenNotMatchedByTarget => "MERGE_WHEN_NOT_MATCHED_BY_TARGET", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "MERGE_MATCH_KIND_UNDEFINED" => Some(Self::Undefined), + "MERGE_WHEN_MATCHED" => Some(Self::MergeWhenMatched), + "MERGE_WHEN_NOT_MATCHED_BY_SOURCE" => Some(Self::MergeWhenNotMatchedBySource), + "MERGE_WHEN_NOT_MATCHED_BY_TARGET" => Some(Self::MergeWhenNotMatchedByTarget), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum CmdType { + Undefined = 0, + CmdUnknown = 1, + CmdSelect = 2, + CmdUpdate = 3, + CmdInsert = 4, + CmdDelete = 5, + CmdMerge = 6, + CmdUtility = 7, + CmdNothing = 8, +} +impl CmdType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "CMD_TYPE_UNDEFINED", + Self::CmdUnknown => "CMD_UNKNOWN", + Self::CmdSelect => "CMD_SELECT", + Self::CmdUpdate => "CMD_UPDATE", + Self::CmdInsert => "CMD_INSERT", + Self::CmdDelete => "CMD_DELETE", + Self::CmdMerge => "CMD_MERGE", + Self::CmdUtility => "CMD_UTILITY", + Self::CmdNothing => "CMD_NOTHING", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "CMD_TYPE_UNDEFINED" => Some(Self::Undefined), + "CMD_UNKNOWN" => Some(Self::CmdUnknown), + "CMD_SELECT" => Some(Self::CmdSelect), + "CMD_UPDATE" => Some(Self::CmdUpdate), + "CMD_INSERT" => Some(Self::CmdInsert), + "CMD_DELETE" => Some(Self::CmdDelete), + "CMD_MERGE" => Some(Self::CmdMerge), + "CMD_UTILITY" => Some(Self::CmdUtility), + "CMD_NOTHING" => Some(Self::CmdNothing), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum JoinType { + Undefined = 0, + JoinInner = 1, + JoinLeft = 2, + JoinFull = 3, + JoinRight = 4, + JoinSemi = 5, + JoinAnti = 6, + JoinRightAnti = 7, + JoinUniqueOuter = 8, + JoinUniqueInner = 9, +} +impl JoinType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "JOIN_TYPE_UNDEFINED", + Self::JoinInner => "JOIN_INNER", + Self::JoinLeft => "JOIN_LEFT", + Self::JoinFull => "JOIN_FULL", + Self::JoinRight => "JOIN_RIGHT", + Self::JoinSemi => "JOIN_SEMI", + Self::JoinAnti => "JOIN_ANTI", + Self::JoinRightAnti => "JOIN_RIGHT_ANTI", + Self::JoinUniqueOuter => "JOIN_UNIQUE_OUTER", + Self::JoinUniqueInner => "JOIN_UNIQUE_INNER", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "JOIN_TYPE_UNDEFINED" => Some(Self::Undefined), + "JOIN_INNER" => Some(Self::JoinInner), + "JOIN_LEFT" => Some(Self::JoinLeft), + "JOIN_FULL" => Some(Self::JoinFull), + "JOIN_RIGHT" => Some(Self::JoinRight), + "JOIN_SEMI" => Some(Self::JoinSemi), + "JOIN_ANTI" => Some(Self::JoinAnti), + "JOIN_RIGHT_ANTI" => Some(Self::JoinRightAnti), + "JOIN_UNIQUE_OUTER" => Some(Self::JoinUniqueOuter), + "JOIN_UNIQUE_INNER" => Some(Self::JoinUniqueInner), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum AggStrategy { + Undefined = 0, + AggPlain = 1, + AggSorted = 2, + AggHashed = 3, + AggMixed = 4, +} +impl AggStrategy { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "AGG_STRATEGY_UNDEFINED", + Self::AggPlain => "AGG_PLAIN", + Self::AggSorted => "AGG_SORTED", + Self::AggHashed => "AGG_HASHED", + Self::AggMixed => "AGG_MIXED", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "AGG_STRATEGY_UNDEFINED" => Some(Self::Undefined), + "AGG_PLAIN" => Some(Self::AggPlain), + "AGG_SORTED" => Some(Self::AggSorted), + "AGG_HASHED" => Some(Self::AggHashed), + "AGG_MIXED" => Some(Self::AggMixed), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum AggSplit { + Undefined = 0, + AggsplitSimple = 1, + AggsplitInitialSerial = 2, + AggsplitFinalDeserial = 3, +} +impl AggSplit { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "AGG_SPLIT_UNDEFINED", + Self::AggsplitSimple => "AGGSPLIT_SIMPLE", + Self::AggsplitInitialSerial => "AGGSPLIT_INITIAL_SERIAL", + Self::AggsplitFinalDeserial => "AGGSPLIT_FINAL_DESERIAL", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "AGG_SPLIT_UNDEFINED" => Some(Self::Undefined), + "AGGSPLIT_SIMPLE" => Some(Self::AggsplitSimple), + "AGGSPLIT_INITIAL_SERIAL" => Some(Self::AggsplitInitialSerial), + "AGGSPLIT_FINAL_DESERIAL" => Some(Self::AggsplitFinalDeserial), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum SetOpCmd { + Undefined = 0, + SetopcmdIntersect = 1, + SetopcmdIntersectAll = 2, + SetopcmdExcept = 3, + SetopcmdExceptAll = 4, +} +impl SetOpCmd { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "SET_OP_CMD_UNDEFINED", + Self::SetopcmdIntersect => "SETOPCMD_INTERSECT", + Self::SetopcmdIntersectAll => "SETOPCMD_INTERSECT_ALL", + Self::SetopcmdExcept => "SETOPCMD_EXCEPT", + Self::SetopcmdExceptAll => "SETOPCMD_EXCEPT_ALL", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SET_OP_CMD_UNDEFINED" => Some(Self::Undefined), + "SETOPCMD_INTERSECT" => Some(Self::SetopcmdIntersect), + "SETOPCMD_INTERSECT_ALL" => Some(Self::SetopcmdIntersectAll), + "SETOPCMD_EXCEPT" => Some(Self::SetopcmdExcept), + "SETOPCMD_EXCEPT_ALL" => Some(Self::SetopcmdExceptAll), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum SetOpStrategy { + Undefined = 0, + SetopSorted = 1, + SetopHashed = 2, +} +impl SetOpStrategy { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "SET_OP_STRATEGY_UNDEFINED", + Self::SetopSorted => "SETOP_SORTED", + Self::SetopHashed => "SETOP_HASHED", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SET_OP_STRATEGY_UNDEFINED" => Some(Self::Undefined), + "SETOP_SORTED" => Some(Self::SetopSorted), + "SETOP_HASHED" => Some(Self::SetopHashed), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum OnConflictAction { + Undefined = 0, + OnconflictNone = 1, + OnconflictNothing = 2, + OnconflictUpdate = 3, +} +impl OnConflictAction { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "ON_CONFLICT_ACTION_UNDEFINED", + Self::OnconflictNone => "ONCONFLICT_NONE", + Self::OnconflictNothing => "ONCONFLICT_NOTHING", + Self::OnconflictUpdate => "ONCONFLICT_UPDATE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ON_CONFLICT_ACTION_UNDEFINED" => Some(Self::Undefined), + "ONCONFLICT_NONE" => Some(Self::OnconflictNone), + "ONCONFLICT_NOTHING" => Some(Self::OnconflictNothing), + "ONCONFLICT_UPDATE" => Some(Self::OnconflictUpdate), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum LimitOption { + Undefined = 0, + Default = 1, + Count = 2, + WithTies = 3, +} +impl LimitOption { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
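+ // Naming note (hand-written, not prost output): prost strips the shared
+ // `LIMIT_OPTION_` prefix from the Rust variant identifiers, while `as_str_name`
+ // still returns the full ProtoBuf name:
+ //
+ //     assert_eq!(LimitOption::WithTies.as_str_name(), "LIMIT_OPTION_WITH_TIES");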
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "LIMIT_OPTION_UNDEFINED", + Self::Default => "LIMIT_OPTION_DEFAULT", + Self::Count => "LIMIT_OPTION_COUNT", + Self::WithTies => "LIMIT_OPTION_WITH_TIES", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "LIMIT_OPTION_UNDEFINED" => Some(Self::Undefined), + "LIMIT_OPTION_DEFAULT" => Some(Self::Default), + "LIMIT_OPTION_COUNT" => Some(Self::Count), + "LIMIT_OPTION_WITH_TIES" => Some(Self::WithTies), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum LockClauseStrength { + Undefined = 0, + LcsNone = 1, + LcsForkeyshare = 2, + LcsForshare = 3, + LcsFornokeyupdate = 4, + LcsForupdate = 5, +} +impl LockClauseStrength { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "LOCK_CLAUSE_STRENGTH_UNDEFINED", + Self::LcsNone => "LCS_NONE", + Self::LcsForkeyshare => "LCS_FORKEYSHARE", + Self::LcsForshare => "LCS_FORSHARE", + Self::LcsFornokeyupdate => "LCS_FORNOKEYUPDATE", + Self::LcsForupdate => "LCS_FORUPDATE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "LOCK_CLAUSE_STRENGTH_UNDEFINED" => Some(Self::Undefined), + "LCS_NONE" => Some(Self::LcsNone), + "LCS_FORKEYSHARE" => Some(Self::LcsForkeyshare), + "LCS_FORSHARE" => Some(Self::LcsForshare), + "LCS_FORNOKEYUPDATE" => Some(Self::LcsFornokeyupdate), + "LCS_FORUPDATE" => Some(Self::LcsForupdate), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum LockWaitPolicy { + Undefined = 0, + LockWaitBlock = 1, + LockWaitSkip = 2, + LockWaitError = 3, +} +impl LockWaitPolicy { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Undefined => "LOCK_WAIT_POLICY_UNDEFINED", + Self::LockWaitBlock => "LockWaitBlock", + Self::LockWaitSkip => "LockWaitSkip", + Self::LockWaitError => "LockWaitError", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "LOCK_WAIT_POLICY_UNDEFINED" => Some(Self::Undefined), + "LockWaitBlock" => Some(Self::LockWaitBlock), + "LockWaitSkip" => Some(Self::LockWaitSkip), + "LockWaitError" => Some(Self::LockWaitError), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum LockTupleMode { + Undefined = 0, + LockTupleKeyShare = 1, + LockTupleShare = 2, + LockTupleNoKeyExclusive = 3, + LockTupleExclusive = 4, +} +impl LockTupleMode { + /// String value of the enum field names used in the ProtoBuf definition. 
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Undefined => "LOCK_TUPLE_MODE_UNDEFINED",
+            Self::LockTupleKeyShare => "LockTupleKeyShare",
+            Self::LockTupleShare => "LockTupleShare",
+            Self::LockTupleNoKeyExclusive => "LockTupleNoKeyExclusive",
+            Self::LockTupleExclusive => "LockTupleExclusive",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "LOCK_TUPLE_MODE_UNDEFINED" => Some(Self::Undefined),
+            "LockTupleKeyShare" => Some(Self::LockTupleKeyShare),
+            "LockTupleShare" => Some(Self::LockTupleShare),
+            "LockTupleNoKeyExclusive" => Some(Self::LockTupleNoKeyExclusive),
+            "LockTupleExclusive" => Some(Self::LockTupleExclusive),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum KeywordKind {
+    NoKeyword = 0,
+    UnreservedKeyword = 1,
+    ColNameKeyword = 2,
+    TypeFuncNameKeyword = 3,
+    ReservedKeyword = 4,
+}
+impl KeywordKind {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::NoKeyword => "NO_KEYWORD",
+            Self::UnreservedKeyword => "UNRESERVED_KEYWORD",
+            Self::ColNameKeyword => "COL_NAME_KEYWORD",
+            Self::TypeFuncNameKeyword => "TYPE_FUNC_NAME_KEYWORD",
+            Self::ReservedKeyword => "RESERVED_KEYWORD",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "NO_KEYWORD" => Some(Self::NoKeyword),
+            "UNRESERVED_KEYWORD" => Some(Self::UnreservedKeyword),
+            "COL_NAME_KEYWORD" => Some(Self::ColNameKeyword),
+            "TYPE_FUNC_NAME_KEYWORD" => Some(Self::TypeFuncNameKeyword),
+            "RESERVED_KEYWORD" => Some(Self::ReservedKeyword),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum Token {
+    Nul = 0,
+    /// Single-character tokens that are returned 1:1 (identical with "self" list in scan.l)
+    /// Either supporting syntax, or single-character operators (some can be both)
+    /// Also see
+    ///
+    /// "$"
+    Ascii36 = 36,
+    /// "%"
+    Ascii37 = 37,
+    /// "("
+    Ascii40 = 40,
+    /// ")"
+    Ascii41 = 41,
+    /// "*"
+    Ascii42 = 42,
+    /// "+"
+    Ascii43 = 43,
+    /// ","
+    Ascii44 = 44,
+    /// "-"
+    Ascii45 = 45,
+    /// "."
+    Ascii46 = 46,
+    /// "/"
+    Ascii47 = 47,
+    /// ":"
+    Ascii58 = 58,
+    /// ";"
+    Ascii59 = 59,
+    /// "<"
+    Ascii60 = 60,
+    /// "="
+    Ascii61 = 61,
+    /// ">"
+    Ascii62 = 62,
+    /// "?"
+ Ascii63 = 63, + /// "[" + Ascii91 = 91, + /// "\" + Ascii92 = 92, + /// "]" + Ascii93 = 93, + /// "^" + Ascii94 = 94, + /// Named tokens in scan.l + Ident = 258, + Uident = 259, + Fconst = 260, + Sconst = 261, + Usconst = 262, + Bconst = 263, + Xconst = 264, + Op = 265, + Iconst = 266, + Param = 267, + Typecast = 268, + DotDot = 269, + ColonEquals = 270, + EqualsGreater = 271, + LessEquals = 272, + GreaterEquals = 273, + NotEquals = 274, + SqlComment = 275, + CComment = 276, + AbortP = 277, + Absent = 278, + AbsoluteP = 279, + Access = 280, + Action = 281, + AddP = 282, + Admin = 283, + After = 284, + Aggregate = 285, + All = 286, + Also = 287, + Alter = 288, + Always = 289, + Analyse = 290, + Analyze = 291, + And = 292, + Any = 293, + Array = 294, + As = 295, + Asc = 296, + Asensitive = 297, + Assertion = 298, + Assignment = 299, + Asymmetric = 300, + Atomic = 301, + At = 302, + Attach = 303, + Attribute = 304, + Authorization = 305, + Backward = 306, + Before = 307, + BeginP = 308, + Between = 309, + Bigint = 310, + Binary = 311, + Bit = 312, + BooleanP = 313, + Both = 314, + Breadth = 315, + By = 316, + Cache = 317, + Call = 318, + Called = 319, + Cascade = 320, + Cascaded = 321, + Case = 322, + Cast = 323, + CatalogP = 324, + Chain = 325, + CharP = 326, + Character = 327, + Characteristics = 328, + Check = 329, + Checkpoint = 330, + Class = 331, + Close = 332, + Cluster = 333, + Coalesce = 334, + Collate = 335, + Collation = 336, + Column = 337, + Columns = 338, + Comment = 339, + Comments = 340, + Commit = 341, + Committed = 342, + Compression = 343, + Concurrently = 344, + Conditional = 345, + Configuration = 346, + Conflict = 347, + Connection = 348, + Constraint = 349, + Constraints = 350, + ContentP = 351, + ContinueP = 352, + ConversionP = 353, + Copy = 354, + Cost = 355, + Create = 356, + Cross = 357, + Csv = 358, + Cube = 359, + CurrentP = 360, + CurrentCatalog = 361, + CurrentDate = 362, + CurrentRole = 363, + CurrentSchema = 364, + CurrentTime = 365, + CurrentTimestamp = 366, + CurrentUser = 367, + Cursor = 368, + Cycle = 369, + DataP = 370, + Database = 371, + DayP = 372, + Deallocate = 373, + Dec = 374, + DecimalP = 375, + Declare = 376, + Default = 377, + Defaults = 378, + Deferrable = 379, + Deferred = 380, + Definer = 381, + DeleteP = 382, + Delimiter = 383, + Delimiters = 384, + Depends = 385, + Depth = 386, + Desc = 387, + Detach = 388, + Dictionary = 389, + DisableP = 390, + Discard = 391, + Distinct = 392, + Do = 393, + DocumentP = 394, + DomainP = 395, + DoubleP = 396, + Drop = 397, + Each = 398, + Else = 399, + EmptyP = 400, + EnableP = 401, + Encoding = 402, + Encrypted = 403, + EndP = 404, + EnumP = 405, + ErrorP = 406, + Escape = 407, + Event = 408, + Except = 409, + Exclude = 410, + Excluding = 411, + Exclusive = 412, + Execute = 413, + Exists = 414, + Explain = 415, + Expression = 416, + Extension = 417, + External = 418, + Extract = 419, + FalseP = 420, + Family = 421, + Fetch = 422, + Filter = 423, + Finalize = 424, + FirstP = 425, + FloatP = 426, + Following = 427, + For = 428, + Force = 429, + Foreign = 430, + Format = 431, + Forward = 432, + Freeze = 433, + From = 434, + Full = 435, + Function = 436, + Functions = 437, + Generated = 438, + Global = 439, + Grant = 440, + Granted = 441, + Greatest = 442, + GroupP = 443, + Grouping = 444, + Groups = 445, + Handler = 446, + Having = 447, + HeaderP = 448, + Hold = 449, + HourP = 450, + IdentityP = 451, + IfP = 452, + Ilike = 453, + Immediate = 454, + Immutable = 455, + ImplicitP = 456, + ImportP = 457, + InP 
= 458, + Include = 459, + Including = 460, + Increment = 461, + Indent = 462, + Index = 463, + Indexes = 464, + Inherit = 465, + Inherits = 466, + Initially = 467, + InlineP = 468, + InnerP = 469, + Inout = 470, + InputP = 471, + Insensitive = 472, + Insert = 473, + Instead = 474, + IntP = 475, + Integer = 476, + Intersect = 477, + Interval = 478, + Into = 479, + Invoker = 480, + Is = 481, + Isnull = 482, + Isolation = 483, + Join = 484, + Json = 485, + JsonArray = 486, + JsonArrayagg = 487, + JsonExists = 488, + JsonObject = 489, + JsonObjectagg = 490, + JsonQuery = 491, + JsonScalar = 492, + JsonSerialize = 493, + JsonTable = 494, + JsonValue = 495, + Keep = 496, + Key = 497, + Keys = 498, + Label = 499, + Language = 500, + LargeP = 501, + LastP = 502, + LateralP = 503, + Leading = 504, + Leakproof = 505, + Least = 506, + Left = 507, + Level = 508, + Like = 509, + Limit = 510, + Listen = 511, + Load = 512, + Local = 513, + Localtime = 514, + Localtimestamp = 515, + Location = 516, + LockP = 517, + Locked = 518, + Logged = 519, + Mapping = 520, + Match = 521, + Matched = 522, + Materialized = 523, + Maxvalue = 524, + Merge = 525, + MergeAction = 526, + Method = 527, + MinuteP = 528, + Minvalue = 529, + Mode = 530, + MonthP = 531, + Move = 532, + NameP = 533, + Names = 534, + National = 535, + Natural = 536, + Nchar = 537, + Nested = 538, + New = 539, + Next = 540, + Nfc = 541, + Nfd = 542, + Nfkc = 543, + Nfkd = 544, + No = 545, + None = 546, + Normalize = 547, + Normalized = 548, + Not = 549, + Nothing = 550, + Notify = 551, + Notnull = 552, + Nowait = 553, + NullP = 554, + Nullif = 555, + NullsP = 556, + Numeric = 557, + ObjectP = 558, + Of = 559, + Off = 560, + Offset = 561, + Oids = 562, + Old = 563, + Omit = 564, + On = 565, + Only = 566, + Operator = 567, + Option = 568, + Options = 569, + Or = 570, + Order = 571, + Ordinality = 572, + Others = 573, + OutP = 574, + OuterP = 575, + Over = 576, + Overlaps = 577, + Overlay = 578, + Overriding = 579, + Owned = 580, + Owner = 581, + Parallel = 582, + Parameter = 583, + Parser = 584, + Partial = 585, + Partition = 586, + Passing = 587, + Password = 588, + Path = 589, + Placing = 590, + Plan = 591, + Plans = 592, + Policy = 593, + Position = 594, + Preceding = 595, + Precision = 596, + Preserve = 597, + Prepare = 598, + Prepared = 599, + Primary = 600, + Prior = 601, + Privileges = 602, + Procedural = 603, + Procedure = 604, + Procedures = 605, + Program = 606, + Publication = 607, + Quote = 608, + Quotes = 609, + Range = 610, + Read = 611, + Real = 612, + Reassign = 613, + Recheck = 614, + Recursive = 615, + RefP = 616, + References = 617, + Referencing = 618, + Refresh = 619, + Reindex = 620, + RelativeP = 621, + Release = 622, + Rename = 623, + Repeatable = 624, + Replace = 625, + Replica = 626, + Reset = 627, + Restart = 628, + Restrict = 629, + Return = 630, + Returning = 631, + Returns = 632, + Revoke = 633, + Right = 634, + Role = 635, + Rollback = 636, + Rollup = 637, + Routine = 638, + Routines = 639, + Row = 640, + Rows = 641, + Rule = 642, + Savepoint = 643, + Scalar = 644, + Schema = 645, + Schemas = 646, + Scroll = 647, + Search = 648, + SecondP = 649, + Security = 650, + Select = 651, + Sequence = 652, + Sequences = 653, + Serializable = 654, + Server = 655, + Session = 656, + SessionUser = 657, + Set = 658, + Sets = 659, + Setof = 660, + Share = 661, + Show = 662, + Similar = 663, + Simple = 664, + Skip = 665, + Smallint = 666, + Snapshot = 667, + Some = 668, + Source = 669, + SqlP = 670, + Stable = 671, + StandaloneP = 672, 
+ Start = 673, + Statement = 674, + Statistics = 675, + Stdin = 676, + Stdout = 677, + Storage = 678, + Stored = 679, + StrictP = 680, + StringP = 681, + StripP = 682, + Subscription = 683, + Substring = 684, + Support = 685, + Symmetric = 686, + Sysid = 687, + SystemP = 688, + SystemUser = 689, + Table = 690, + Tables = 691, + Tablesample = 692, + Tablespace = 693, + Target = 694, + Temp = 695, + Template = 696, + Temporary = 697, + TextP = 698, + Then = 699, + Ties = 700, + Time = 701, + Timestamp = 702, + To = 703, + Trailing = 704, + Transaction = 705, + Transform = 706, + Treat = 707, + Trigger = 708, + Trim = 709, + TrueP = 710, + Truncate = 711, + Trusted = 712, + TypeP = 713, + TypesP = 714, + Uescape = 715, + Unbounded = 716, + Unconditional = 717, + Uncommitted = 718, + Unencrypted = 719, + Union = 720, + Unique = 721, + Unknown = 722, + Unlisten = 723, + Unlogged = 724, + Until = 725, + Update = 726, + User = 727, + Using = 728, + Vacuum = 729, + Valid = 730, + Validate = 731, + Validator = 732, + ValueP = 733, + Values = 734, + Varchar = 735, + Variadic = 736, + Varying = 737, + Verbose = 738, + VersionP = 739, + View = 740, + Views = 741, + Volatile = 742, + When = 743, + Where = 744, + WhitespaceP = 745, + Window = 746, + With = 747, + Within = 748, + Without = 749, + Work = 750, + Wrapper = 751, + Write = 752, + XmlP = 753, + Xmlattributes = 754, + Xmlconcat = 755, + Xmlelement = 756, + Xmlexists = 757, + Xmlforest = 758, + Xmlnamespaces = 759, + Xmlparse = 760, + Xmlpi = 761, + Xmlroot = 762, + Xmlserialize = 763, + Xmltable = 764, + YearP = 765, + YesP = 766, + Zone = 767, + FormatLa = 768, + NotLa = 769, + NullsLa = 770, + WithLa = 771, + WithoutLa = 772, + ModeTypeName = 773, + ModePlpgsqlExpr = 774, + ModePlpgsqlAssign1 = 775, + ModePlpgsqlAssign2 = 776, + ModePlpgsqlAssign3 = 777, + Uminus = 778, +} +impl Token { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
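+    ///
+    /// ```rust
+    /// // Editor's sketch (not part of the prost output): keyword tokens map to
+    /// // their grammar names, single-character tokens to "ASCII_" plus their
+    /// // code point; assumes this module is exposed as `pgt_query::protobuf`.
+    /// use pgt_query::protobuf::Token;
+    ///
+    /// assert_eq!(Token::Select.as_str_name(), "SELECT");
+    /// assert_eq!(Token::Ascii59.as_str_name(), "ASCII_59"); // ";"
+    /// ```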
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::Nul => "NUL", + Self::Ascii36 => "ASCII_36", + Self::Ascii37 => "ASCII_37", + Self::Ascii40 => "ASCII_40", + Self::Ascii41 => "ASCII_41", + Self::Ascii42 => "ASCII_42", + Self::Ascii43 => "ASCII_43", + Self::Ascii44 => "ASCII_44", + Self::Ascii45 => "ASCII_45", + Self::Ascii46 => "ASCII_46", + Self::Ascii47 => "ASCII_47", + Self::Ascii58 => "ASCII_58", + Self::Ascii59 => "ASCII_59", + Self::Ascii60 => "ASCII_60", + Self::Ascii61 => "ASCII_61", + Self::Ascii62 => "ASCII_62", + Self::Ascii63 => "ASCII_63", + Self::Ascii91 => "ASCII_91", + Self::Ascii92 => "ASCII_92", + Self::Ascii93 => "ASCII_93", + Self::Ascii94 => "ASCII_94", + Self::Ident => "IDENT", + Self::Uident => "UIDENT", + Self::Fconst => "FCONST", + Self::Sconst => "SCONST", + Self::Usconst => "USCONST", + Self::Bconst => "BCONST", + Self::Xconst => "XCONST", + Self::Op => "Op", + Self::Iconst => "ICONST", + Self::Param => "PARAM", + Self::Typecast => "TYPECAST", + Self::DotDot => "DOT_DOT", + Self::ColonEquals => "COLON_EQUALS", + Self::EqualsGreater => "EQUALS_GREATER", + Self::LessEquals => "LESS_EQUALS", + Self::GreaterEquals => "GREATER_EQUALS", + Self::NotEquals => "NOT_EQUALS", + Self::SqlComment => "SQL_COMMENT", + Self::CComment => "C_COMMENT", + Self::AbortP => "ABORT_P", + Self::Absent => "ABSENT", + Self::AbsoluteP => "ABSOLUTE_P", + Self::Access => "ACCESS", + Self::Action => "ACTION", + Self::AddP => "ADD_P", + Self::Admin => "ADMIN", + Self::After => "AFTER", + Self::Aggregate => "AGGREGATE", + Self::All => "ALL", + Self::Also => "ALSO", + Self::Alter => "ALTER", + Self::Always => "ALWAYS", + Self::Analyse => "ANALYSE", + Self::Analyze => "ANALYZE", + Self::And => "AND", + Self::Any => "ANY", + Self::Array => "ARRAY", + Self::As => "AS", + Self::Asc => "ASC", + Self::Asensitive => "ASENSITIVE", + Self::Assertion => "ASSERTION", + Self::Assignment => "ASSIGNMENT", + Self::Asymmetric => "ASYMMETRIC", + Self::Atomic => "ATOMIC", + Self::At => "AT", + Self::Attach => "ATTACH", + Self::Attribute => "ATTRIBUTE", + Self::Authorization => "AUTHORIZATION", + Self::Backward => "BACKWARD", + Self::Before => "BEFORE", + Self::BeginP => "BEGIN_P", + Self::Between => "BETWEEN", + Self::Bigint => "BIGINT", + Self::Binary => "BINARY", + Self::Bit => "BIT", + Self::BooleanP => "BOOLEAN_P", + Self::Both => "BOTH", + Self::Breadth => "BREADTH", + Self::By => "BY", + Self::Cache => "CACHE", + Self::Call => "CALL", + Self::Called => "CALLED", + Self::Cascade => "CASCADE", + Self::Cascaded => "CASCADED", + Self::Case => "CASE", + Self::Cast => "CAST", + Self::CatalogP => "CATALOG_P", + Self::Chain => "CHAIN", + Self::CharP => "CHAR_P", + Self::Character => "CHARACTER", + Self::Characteristics => "CHARACTERISTICS", + Self::Check => "CHECK", + Self::Checkpoint => "CHECKPOINT", + Self::Class => "CLASS", + Self::Close => "CLOSE", + Self::Cluster => "CLUSTER", + Self::Coalesce => "COALESCE", + Self::Collate => "COLLATE", + Self::Collation => "COLLATION", + Self::Column => "COLUMN", + Self::Columns => "COLUMNS", + Self::Comment => "COMMENT", + Self::Comments => "COMMENTS", + Self::Commit => "COMMIT", + Self::Committed => "COMMITTED", + Self::Compression => "COMPRESSION", + Self::Concurrently => "CONCURRENTLY", + Self::Conditional => "CONDITIONAL", + Self::Configuration => "CONFIGURATION", + Self::Conflict => "CONFLICT", + Self::Connection => "CONNECTION", + Self::Constraint => "CONSTRAINT", + Self::Constraints => "CONSTRAINTS", + Self::ContentP => "CONTENT_P", + 
Self::ContinueP => "CONTINUE_P", + Self::ConversionP => "CONVERSION_P", + Self::Copy => "COPY", + Self::Cost => "COST", + Self::Create => "CREATE", + Self::Cross => "CROSS", + Self::Csv => "CSV", + Self::Cube => "CUBE", + Self::CurrentP => "CURRENT_P", + Self::CurrentCatalog => "CURRENT_CATALOG", + Self::CurrentDate => "CURRENT_DATE", + Self::CurrentRole => "CURRENT_ROLE", + Self::CurrentSchema => "CURRENT_SCHEMA", + Self::CurrentTime => "CURRENT_TIME", + Self::CurrentTimestamp => "CURRENT_TIMESTAMP", + Self::CurrentUser => "CURRENT_USER", + Self::Cursor => "CURSOR", + Self::Cycle => "CYCLE", + Self::DataP => "DATA_P", + Self::Database => "DATABASE", + Self::DayP => "DAY_P", + Self::Deallocate => "DEALLOCATE", + Self::Dec => "DEC", + Self::DecimalP => "DECIMAL_P", + Self::Declare => "DECLARE", + Self::Default => "DEFAULT", + Self::Defaults => "DEFAULTS", + Self::Deferrable => "DEFERRABLE", + Self::Deferred => "DEFERRED", + Self::Definer => "DEFINER", + Self::DeleteP => "DELETE_P", + Self::Delimiter => "DELIMITER", + Self::Delimiters => "DELIMITERS", + Self::Depends => "DEPENDS", + Self::Depth => "DEPTH", + Self::Desc => "DESC", + Self::Detach => "DETACH", + Self::Dictionary => "DICTIONARY", + Self::DisableP => "DISABLE_P", + Self::Discard => "DISCARD", + Self::Distinct => "DISTINCT", + Self::Do => "DO", + Self::DocumentP => "DOCUMENT_P", + Self::DomainP => "DOMAIN_P", + Self::DoubleP => "DOUBLE_P", + Self::Drop => "DROP", + Self::Each => "EACH", + Self::Else => "ELSE", + Self::EmptyP => "EMPTY_P", + Self::EnableP => "ENABLE_P", + Self::Encoding => "ENCODING", + Self::Encrypted => "ENCRYPTED", + Self::EndP => "END_P", + Self::EnumP => "ENUM_P", + Self::ErrorP => "ERROR_P", + Self::Escape => "ESCAPE", + Self::Event => "EVENT", + Self::Except => "EXCEPT", + Self::Exclude => "EXCLUDE", + Self::Excluding => "EXCLUDING", + Self::Exclusive => "EXCLUSIVE", + Self::Execute => "EXECUTE", + Self::Exists => "EXISTS", + Self::Explain => "EXPLAIN", + Self::Expression => "EXPRESSION", + Self::Extension => "EXTENSION", + Self::External => "EXTERNAL", + Self::Extract => "EXTRACT", + Self::FalseP => "FALSE_P", + Self::Family => "FAMILY", + Self::Fetch => "FETCH", + Self::Filter => "FILTER", + Self::Finalize => "FINALIZE", + Self::FirstP => "FIRST_P", + Self::FloatP => "FLOAT_P", + Self::Following => "FOLLOWING", + Self::For => "FOR", + Self::Force => "FORCE", + Self::Foreign => "FOREIGN", + Self::Format => "FORMAT", + Self::Forward => "FORWARD", + Self::Freeze => "FREEZE", + Self::From => "FROM", + Self::Full => "FULL", + Self::Function => "FUNCTION", + Self::Functions => "FUNCTIONS", + Self::Generated => "GENERATED", + Self::Global => "GLOBAL", + Self::Grant => "GRANT", + Self::Granted => "GRANTED", + Self::Greatest => "GREATEST", + Self::GroupP => "GROUP_P", + Self::Grouping => "GROUPING", + Self::Groups => "GROUPS", + Self::Handler => "HANDLER", + Self::Having => "HAVING", + Self::HeaderP => "HEADER_P", + Self::Hold => "HOLD", + Self::HourP => "HOUR_P", + Self::IdentityP => "IDENTITY_P", + Self::IfP => "IF_P", + Self::Ilike => "ILIKE", + Self::Immediate => "IMMEDIATE", + Self::Immutable => "IMMUTABLE", + Self::ImplicitP => "IMPLICIT_P", + Self::ImportP => "IMPORT_P", + Self::InP => "IN_P", + Self::Include => "INCLUDE", + Self::Including => "INCLUDING", + Self::Increment => "INCREMENT", + Self::Indent => "INDENT", + Self::Index => "INDEX", + Self::Indexes => "INDEXES", + Self::Inherit => "INHERIT", + Self::Inherits => "INHERITS", + Self::Initially => "INITIALLY", + Self::InlineP => "INLINE_P", + 
Self::InnerP => "INNER_P", + Self::Inout => "INOUT", + Self::InputP => "INPUT_P", + Self::Insensitive => "INSENSITIVE", + Self::Insert => "INSERT", + Self::Instead => "INSTEAD", + Self::IntP => "INT_P", + Self::Integer => "INTEGER", + Self::Intersect => "INTERSECT", + Self::Interval => "INTERVAL", + Self::Into => "INTO", + Self::Invoker => "INVOKER", + Self::Is => "IS", + Self::Isnull => "ISNULL", + Self::Isolation => "ISOLATION", + Self::Join => "JOIN", + Self::Json => "JSON", + Self::JsonArray => "JSON_ARRAY", + Self::JsonArrayagg => "JSON_ARRAYAGG", + Self::JsonExists => "JSON_EXISTS", + Self::JsonObject => "JSON_OBJECT", + Self::JsonObjectagg => "JSON_OBJECTAGG", + Self::JsonQuery => "JSON_QUERY", + Self::JsonScalar => "JSON_SCALAR", + Self::JsonSerialize => "JSON_SERIALIZE", + Self::JsonTable => "JSON_TABLE", + Self::JsonValue => "JSON_VALUE", + Self::Keep => "KEEP", + Self::Key => "KEY", + Self::Keys => "KEYS", + Self::Label => "LABEL", + Self::Language => "LANGUAGE", + Self::LargeP => "LARGE_P", + Self::LastP => "LAST_P", + Self::LateralP => "LATERAL_P", + Self::Leading => "LEADING", + Self::Leakproof => "LEAKPROOF", + Self::Least => "LEAST", + Self::Left => "LEFT", + Self::Level => "LEVEL", + Self::Like => "LIKE", + Self::Limit => "LIMIT", + Self::Listen => "LISTEN", + Self::Load => "LOAD", + Self::Local => "LOCAL", + Self::Localtime => "LOCALTIME", + Self::Localtimestamp => "LOCALTIMESTAMP", + Self::Location => "LOCATION", + Self::LockP => "LOCK_P", + Self::Locked => "LOCKED", + Self::Logged => "LOGGED", + Self::Mapping => "MAPPING", + Self::Match => "MATCH", + Self::Matched => "MATCHED", + Self::Materialized => "MATERIALIZED", + Self::Maxvalue => "MAXVALUE", + Self::Merge => "MERGE", + Self::MergeAction => "MERGE_ACTION", + Self::Method => "METHOD", + Self::MinuteP => "MINUTE_P", + Self::Minvalue => "MINVALUE", + Self::Mode => "MODE", + Self::MonthP => "MONTH_P", + Self::Move => "MOVE", + Self::NameP => "NAME_P", + Self::Names => "NAMES", + Self::National => "NATIONAL", + Self::Natural => "NATURAL", + Self::Nchar => "NCHAR", + Self::Nested => "NESTED", + Self::New => "NEW", + Self::Next => "NEXT", + Self::Nfc => "NFC", + Self::Nfd => "NFD", + Self::Nfkc => "NFKC", + Self::Nfkd => "NFKD", + Self::No => "NO", + Self::None => "NONE", + Self::Normalize => "NORMALIZE", + Self::Normalized => "NORMALIZED", + Self::Not => "NOT", + Self::Nothing => "NOTHING", + Self::Notify => "NOTIFY", + Self::Notnull => "NOTNULL", + Self::Nowait => "NOWAIT", + Self::NullP => "NULL_P", + Self::Nullif => "NULLIF", + Self::NullsP => "NULLS_P", + Self::Numeric => "NUMERIC", + Self::ObjectP => "OBJECT_P", + Self::Of => "OF", + Self::Off => "OFF", + Self::Offset => "OFFSET", + Self::Oids => "OIDS", + Self::Old => "OLD", + Self::Omit => "OMIT", + Self::On => "ON", + Self::Only => "ONLY", + Self::Operator => "OPERATOR", + Self::Option => "OPTION", + Self::Options => "OPTIONS", + Self::Or => "OR", + Self::Order => "ORDER", + Self::Ordinality => "ORDINALITY", + Self::Others => "OTHERS", + Self::OutP => "OUT_P", + Self::OuterP => "OUTER_P", + Self::Over => "OVER", + Self::Overlaps => "OVERLAPS", + Self::Overlay => "OVERLAY", + Self::Overriding => "OVERRIDING", + Self::Owned => "OWNED", + Self::Owner => "OWNER", + Self::Parallel => "PARALLEL", + Self::Parameter => "PARAMETER", + Self::Parser => "PARSER", + Self::Partial => "PARTIAL", + Self::Partition => "PARTITION", + Self::Passing => "PASSING", + Self::Password => "PASSWORD", + Self::Path => "PATH", + Self::Placing => "PLACING", + Self::Plan => "PLAN", + 
Self::Plans => "PLANS", + Self::Policy => "POLICY", + Self::Position => "POSITION", + Self::Preceding => "PRECEDING", + Self::Precision => "PRECISION", + Self::Preserve => "PRESERVE", + Self::Prepare => "PREPARE", + Self::Prepared => "PREPARED", + Self::Primary => "PRIMARY", + Self::Prior => "PRIOR", + Self::Privileges => "PRIVILEGES", + Self::Procedural => "PROCEDURAL", + Self::Procedure => "PROCEDURE", + Self::Procedures => "PROCEDURES", + Self::Program => "PROGRAM", + Self::Publication => "PUBLICATION", + Self::Quote => "QUOTE", + Self::Quotes => "QUOTES", + Self::Range => "RANGE", + Self::Read => "READ", + Self::Real => "REAL", + Self::Reassign => "REASSIGN", + Self::Recheck => "RECHECK", + Self::Recursive => "RECURSIVE", + Self::RefP => "REF_P", + Self::References => "REFERENCES", + Self::Referencing => "REFERENCING", + Self::Refresh => "REFRESH", + Self::Reindex => "REINDEX", + Self::RelativeP => "RELATIVE_P", + Self::Release => "RELEASE", + Self::Rename => "RENAME", + Self::Repeatable => "REPEATABLE", + Self::Replace => "REPLACE", + Self::Replica => "REPLICA", + Self::Reset => "RESET", + Self::Restart => "RESTART", + Self::Restrict => "RESTRICT", + Self::Return => "RETURN", + Self::Returning => "RETURNING", + Self::Returns => "RETURNS", + Self::Revoke => "REVOKE", + Self::Right => "RIGHT", + Self::Role => "ROLE", + Self::Rollback => "ROLLBACK", + Self::Rollup => "ROLLUP", + Self::Routine => "ROUTINE", + Self::Routines => "ROUTINES", + Self::Row => "ROW", + Self::Rows => "ROWS", + Self::Rule => "RULE", + Self::Savepoint => "SAVEPOINT", + Self::Scalar => "SCALAR", + Self::Schema => "SCHEMA", + Self::Schemas => "SCHEMAS", + Self::Scroll => "SCROLL", + Self::Search => "SEARCH", + Self::SecondP => "SECOND_P", + Self::Security => "SECURITY", + Self::Select => "SELECT", + Self::Sequence => "SEQUENCE", + Self::Sequences => "SEQUENCES", + Self::Serializable => "SERIALIZABLE", + Self::Server => "SERVER", + Self::Session => "SESSION", + Self::SessionUser => "SESSION_USER", + Self::Set => "SET", + Self::Sets => "SETS", + Self::Setof => "SETOF", + Self::Share => "SHARE", + Self::Show => "SHOW", + Self::Similar => "SIMILAR", + Self::Simple => "SIMPLE", + Self::Skip => "SKIP", + Self::Smallint => "SMALLINT", + Self::Snapshot => "SNAPSHOT", + Self::Some => "SOME", + Self::Source => "SOURCE", + Self::SqlP => "SQL_P", + Self::Stable => "STABLE", + Self::StandaloneP => "STANDALONE_P", + Self::Start => "START", + Self::Statement => "STATEMENT", + Self::Statistics => "STATISTICS", + Self::Stdin => "STDIN", + Self::Stdout => "STDOUT", + Self::Storage => "STORAGE", + Self::Stored => "STORED", + Self::StrictP => "STRICT_P", + Self::StringP => "STRING_P", + Self::StripP => "STRIP_P", + Self::Subscription => "SUBSCRIPTION", + Self::Substring => "SUBSTRING", + Self::Support => "SUPPORT", + Self::Symmetric => "SYMMETRIC", + Self::Sysid => "SYSID", + Self::SystemP => "SYSTEM_P", + Self::SystemUser => "SYSTEM_USER", + Self::Table => "TABLE", + Self::Tables => "TABLES", + Self::Tablesample => "TABLESAMPLE", + Self::Tablespace => "TABLESPACE", + Self::Target => "TARGET", + Self::Temp => "TEMP", + Self::Template => "TEMPLATE", + Self::Temporary => "TEMPORARY", + Self::TextP => "TEXT_P", + Self::Then => "THEN", + Self::Ties => "TIES", + Self::Time => "TIME", + Self::Timestamp => "TIMESTAMP", + Self::To => "TO", + Self::Trailing => "TRAILING", + Self::Transaction => "TRANSACTION", + Self::Transform => "TRANSFORM", + Self::Treat => "TREAT", + Self::Trigger => "TRIGGER", + Self::Trim => "TRIM", + Self::TrueP => 
"TRUE_P", + Self::Truncate => "TRUNCATE", + Self::Trusted => "TRUSTED", + Self::TypeP => "TYPE_P", + Self::TypesP => "TYPES_P", + Self::Uescape => "UESCAPE", + Self::Unbounded => "UNBOUNDED", + Self::Unconditional => "UNCONDITIONAL", + Self::Uncommitted => "UNCOMMITTED", + Self::Unencrypted => "UNENCRYPTED", + Self::Union => "UNION", + Self::Unique => "UNIQUE", + Self::Unknown => "UNKNOWN", + Self::Unlisten => "UNLISTEN", + Self::Unlogged => "UNLOGGED", + Self::Until => "UNTIL", + Self::Update => "UPDATE", + Self::User => "USER", + Self::Using => "USING", + Self::Vacuum => "VACUUM", + Self::Valid => "VALID", + Self::Validate => "VALIDATE", + Self::Validator => "VALIDATOR", + Self::ValueP => "VALUE_P", + Self::Values => "VALUES", + Self::Varchar => "VARCHAR", + Self::Variadic => "VARIADIC", + Self::Varying => "VARYING", + Self::Verbose => "VERBOSE", + Self::VersionP => "VERSION_P", + Self::View => "VIEW", + Self::Views => "VIEWS", + Self::Volatile => "VOLATILE", + Self::When => "WHEN", + Self::Where => "WHERE", + Self::WhitespaceP => "WHITESPACE_P", + Self::Window => "WINDOW", + Self::With => "WITH", + Self::Within => "WITHIN", + Self::Without => "WITHOUT", + Self::Work => "WORK", + Self::Wrapper => "WRAPPER", + Self::Write => "WRITE", + Self::XmlP => "XML_P", + Self::Xmlattributes => "XMLATTRIBUTES", + Self::Xmlconcat => "XMLCONCAT", + Self::Xmlelement => "XMLELEMENT", + Self::Xmlexists => "XMLEXISTS", + Self::Xmlforest => "XMLFOREST", + Self::Xmlnamespaces => "XMLNAMESPACES", + Self::Xmlparse => "XMLPARSE", + Self::Xmlpi => "XMLPI", + Self::Xmlroot => "XMLROOT", + Self::Xmlserialize => "XMLSERIALIZE", + Self::Xmltable => "XMLTABLE", + Self::YearP => "YEAR_P", + Self::YesP => "YES_P", + Self::Zone => "ZONE", + Self::FormatLa => "FORMAT_LA", + Self::NotLa => "NOT_LA", + Self::NullsLa => "NULLS_LA", + Self::WithLa => "WITH_LA", + Self::WithoutLa => "WITHOUT_LA", + Self::ModeTypeName => "MODE_TYPE_NAME", + Self::ModePlpgsqlExpr => "MODE_PLPGSQL_EXPR", + Self::ModePlpgsqlAssign1 => "MODE_PLPGSQL_ASSIGN1", + Self::ModePlpgsqlAssign2 => "MODE_PLPGSQL_ASSIGN2", + Self::ModePlpgsqlAssign3 => "MODE_PLPGSQL_ASSIGN3", + Self::Uminus => "UMINUS", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "NUL" => Some(Self::Nul),
+            "ASCII_36" => Some(Self::Ascii36),
+            "ASCII_37" => Some(Self::Ascii37),
+            "ASCII_40" => Some(Self::Ascii40),
+            "ASCII_41" => Some(Self::Ascii41),
+            "ASCII_42" => Some(Self::Ascii42),
+            "ASCII_43" => Some(Self::Ascii43),
+            "ASCII_44" => Some(Self::Ascii44),
+            "ASCII_45" => Some(Self::Ascii45),
+            "ASCII_46" => Some(Self::Ascii46),
+            "ASCII_47" => Some(Self::Ascii47),
+            "ASCII_58" => Some(Self::Ascii58),
+            "ASCII_59" => Some(Self::Ascii59),
+            "ASCII_60" => Some(Self::Ascii60),
+            "ASCII_61" => Some(Self::Ascii61),
+            "ASCII_62" => Some(Self::Ascii62),
+            "ASCII_63" => Some(Self::Ascii63),
+            "ASCII_91" => Some(Self::Ascii91),
+            "ASCII_92" => Some(Self::Ascii92),
+            "ASCII_93" => Some(Self::Ascii93),
+            "ASCII_94" => Some(Self::Ascii94),
+            "IDENT" => Some(Self::Ident),
+            "UIDENT" => Some(Self::Uident),
+            "FCONST" => Some(Self::Fconst),
+            "SCONST" => Some(Self::Sconst),
+            "USCONST" => Some(Self::Usconst),
+            "BCONST" => Some(Self::Bconst),
+            "XCONST" => Some(Self::Xconst),
+            "Op" => Some(Self::Op),
+            "ICONST" => Some(Self::Iconst),
+            "PARAM" => Some(Self::Param),
+            "TYPECAST" => Some(Self::Typecast),
+            "DOT_DOT" => Some(Self::DotDot),
+            "COLON_EQUALS" => Some(Self::ColonEquals),
+            "EQUALS_GREATER" => Some(Self::EqualsGreater),
+            "LESS_EQUALS" => Some(Self::LessEquals),
+            "GREATER_EQUALS" => Some(Self::GreaterEquals),
+            "NOT_EQUALS" => Some(Self::NotEquals),
+            "SQL_COMMENT" => Some(Self::SqlComment),
+            "C_COMMENT" => Some(Self::CComment),
+            "ABORT_P" => Some(Self::AbortP),
+            "ABSENT" => Some(Self::Absent),
+            "ABSOLUTE_P" => Some(Self::AbsoluteP),
+            "ACCESS" => Some(Self::Access),
+            "ACTION" => Some(Self::Action),
+            "ADD_P" => Some(Self::AddP),
+            "ADMIN" => Some(Self::Admin),
+            "AFTER" => Some(Self::After),
+            "AGGREGATE" => Some(Self::Aggregate),
+            "ALL" => Some(Self::All),
+            "ALSO" => Some(Self::Also),
+            "ALTER" => Some(Self::Alter),
+            "ALWAYS" => Some(Self::Always),
+            "ANALYSE" => Some(Self::Analyse),
+            "ANALYZE" => Some(Self::Analyze),
+            "AND" => Some(Self::And),
+            "ANY" => Some(Self::Any),
+            "ARRAY" => Some(Self::Array),
+            "AS" => Some(Self::As),
+            "ASC" => Some(Self::Asc),
+            "ASENSITIVE" => Some(Self::Asensitive),
+            "ASSERTION" => Some(Self::Assertion),
+            "ASSIGNMENT" => Some(Self::Assignment),
+            "ASYMMETRIC" => Some(Self::Asymmetric),
+            "ATOMIC" => Some(Self::Atomic),
+            "AT" => Some(Self::At),
+            "ATTACH" => Some(Self::Attach),
+            "ATTRIBUTE" => Some(Self::Attribute),
+            "AUTHORIZATION" => Some(Self::Authorization),
+            "BACKWARD" => Some(Self::Backward),
+            "BEFORE" => Some(Self::Before),
+            "BEGIN_P" => Some(Self::BeginP),
+            "BETWEEN" => Some(Self::Between),
+            "BIGINT" => Some(Self::Bigint),
+            "BINARY" => Some(Self::Binary),
+            "BIT" => Some(Self::Bit),
+            "BOOLEAN_P" => Some(Self::BooleanP),
+            "BOTH" => Some(Self::Both),
+            "BREADTH" => Some(Self::Breadth),
+            "BY" => Some(Self::By),
+            "CACHE" => Some(Self::Cache),
+            "CALL" => Some(Self::Call),
+            "CALLED" => Some(Self::Called),
+            "CASCADE" => Some(Self::Cascade),
+            "CASCADED" => Some(Self::Cascaded),
+            "CASE" => Some(Self::Case),
+            "CAST" => Some(Self::Cast),
+            "CATALOG_P" => Some(Self::CatalogP),
+            "CHAIN" => Some(Self::Chain),
+            "CHAR_P" => Some(Self::CharP),
+            "CHARACTER" => Some(Self::Character),
+            "CHARACTERISTICS" => Some(Self::Characteristics),
+            "CHECK" => Some(Self::Check),
+            "CHECKPOINT" => Some(Self::Checkpoint),
+            "CLASS" => Some(Self::Class),
+            "CLOSE" => Some(Self::Close),
+            "CLUSTER" => Some(Self::Cluster),
+            "COALESCE" =>
Some(Self::Coalesce), + "COLLATE" => Some(Self::Collate), + "COLLATION" => Some(Self::Collation), + "COLUMN" => Some(Self::Column), + "COLUMNS" => Some(Self::Columns), + "COMMENT" => Some(Self::Comment), + "COMMENTS" => Some(Self::Comments), + "COMMIT" => Some(Self::Commit), + "COMMITTED" => Some(Self::Committed), + "COMPRESSION" => Some(Self::Compression), + "CONCURRENTLY" => Some(Self::Concurrently), + "CONDITIONAL" => Some(Self::Conditional), + "CONFIGURATION" => Some(Self::Configuration), + "CONFLICT" => Some(Self::Conflict), + "CONNECTION" => Some(Self::Connection), + "CONSTRAINT" => Some(Self::Constraint), + "CONSTRAINTS" => Some(Self::Constraints), + "CONTENT_P" => Some(Self::ContentP), + "CONTINUE_P" => Some(Self::ContinueP), + "CONVERSION_P" => Some(Self::ConversionP), + "COPY" => Some(Self::Copy), + "COST" => Some(Self::Cost), + "CREATE" => Some(Self::Create), + "CROSS" => Some(Self::Cross), + "CSV" => Some(Self::Csv), + "CUBE" => Some(Self::Cube), + "CURRENT_P" => Some(Self::CurrentP), + "CURRENT_CATALOG" => Some(Self::CurrentCatalog), + "CURRENT_DATE" => Some(Self::CurrentDate), + "CURRENT_ROLE" => Some(Self::CurrentRole), + "CURRENT_SCHEMA" => Some(Self::CurrentSchema), + "CURRENT_TIME" => Some(Self::CurrentTime), + "CURRENT_TIMESTAMP" => Some(Self::CurrentTimestamp), + "CURRENT_USER" => Some(Self::CurrentUser), + "CURSOR" => Some(Self::Cursor), + "CYCLE" => Some(Self::Cycle), + "DATA_P" => Some(Self::DataP), + "DATABASE" => Some(Self::Database), + "DAY_P" => Some(Self::DayP), + "DEALLOCATE" => Some(Self::Deallocate), + "DEC" => Some(Self::Dec), + "DECIMAL_P" => Some(Self::DecimalP), + "DECLARE" => Some(Self::Declare), + "DEFAULT" => Some(Self::Default), + "DEFAULTS" => Some(Self::Defaults), + "DEFERRABLE" => Some(Self::Deferrable), + "DEFERRED" => Some(Self::Deferred), + "DEFINER" => Some(Self::Definer), + "DELETE_P" => Some(Self::DeleteP), + "DELIMITER" => Some(Self::Delimiter), + "DELIMITERS" => Some(Self::Delimiters), + "DEPENDS" => Some(Self::Depends), + "DEPTH" => Some(Self::Depth), + "DESC" => Some(Self::Desc), + "DETACH" => Some(Self::Detach), + "DICTIONARY" => Some(Self::Dictionary), + "DISABLE_P" => Some(Self::DisableP), + "DISCARD" => Some(Self::Discard), + "DISTINCT" => Some(Self::Distinct), + "DO" => Some(Self::Do), + "DOCUMENT_P" => Some(Self::DocumentP), + "DOMAIN_P" => Some(Self::DomainP), + "DOUBLE_P" => Some(Self::DoubleP), + "DROP" => Some(Self::Drop), + "EACH" => Some(Self::Each), + "ELSE" => Some(Self::Else), + "EMPTY_P" => Some(Self::EmptyP), + "ENABLE_P" => Some(Self::EnableP), + "ENCODING" => Some(Self::Encoding), + "ENCRYPTED" => Some(Self::Encrypted), + "END_P" => Some(Self::EndP), + "ENUM_P" => Some(Self::EnumP), + "ERROR_P" => Some(Self::ErrorP), + "ESCAPE" => Some(Self::Escape), + "EVENT" => Some(Self::Event), + "EXCEPT" => Some(Self::Except), + "EXCLUDE" => Some(Self::Exclude), + "EXCLUDING" => Some(Self::Excluding), + "EXCLUSIVE" => Some(Self::Exclusive), + "EXECUTE" => Some(Self::Execute), + "EXISTS" => Some(Self::Exists), + "EXPLAIN" => Some(Self::Explain), + "EXPRESSION" => Some(Self::Expression), + "EXTENSION" => Some(Self::Extension), + "EXTERNAL" => Some(Self::External), + "EXTRACT" => Some(Self::Extract), + "FALSE_P" => Some(Self::FalseP), + "FAMILY" => Some(Self::Family), + "FETCH" => Some(Self::Fetch), + "FILTER" => Some(Self::Filter), + "FINALIZE" => Some(Self::Finalize), + "FIRST_P" => Some(Self::FirstP), + "FLOAT_P" => Some(Self::FloatP), + "FOLLOWING" => Some(Self::Following), + "FOR" => Some(Self::For), + "FORCE" => 
Some(Self::Force), + "FOREIGN" => Some(Self::Foreign), + "FORMAT" => Some(Self::Format), + "FORWARD" => Some(Self::Forward), + "FREEZE" => Some(Self::Freeze), + "FROM" => Some(Self::From), + "FULL" => Some(Self::Full), + "FUNCTION" => Some(Self::Function), + "FUNCTIONS" => Some(Self::Functions), + "GENERATED" => Some(Self::Generated), + "GLOBAL" => Some(Self::Global), + "GRANT" => Some(Self::Grant), + "GRANTED" => Some(Self::Granted), + "GREATEST" => Some(Self::Greatest), + "GROUP_P" => Some(Self::GroupP), + "GROUPING" => Some(Self::Grouping), + "GROUPS" => Some(Self::Groups), + "HANDLER" => Some(Self::Handler), + "HAVING" => Some(Self::Having), + "HEADER_P" => Some(Self::HeaderP), + "HOLD" => Some(Self::Hold), + "HOUR_P" => Some(Self::HourP), + "IDENTITY_P" => Some(Self::IdentityP), + "IF_P" => Some(Self::IfP), + "ILIKE" => Some(Self::Ilike), + "IMMEDIATE" => Some(Self::Immediate), + "IMMUTABLE" => Some(Self::Immutable), + "IMPLICIT_P" => Some(Self::ImplicitP), + "IMPORT_P" => Some(Self::ImportP), + "IN_P" => Some(Self::InP), + "INCLUDE" => Some(Self::Include), + "INCLUDING" => Some(Self::Including), + "INCREMENT" => Some(Self::Increment), + "INDENT" => Some(Self::Indent), + "INDEX" => Some(Self::Index), + "INDEXES" => Some(Self::Indexes), + "INHERIT" => Some(Self::Inherit), + "INHERITS" => Some(Self::Inherits), + "INITIALLY" => Some(Self::Initially), + "INLINE_P" => Some(Self::InlineP), + "INNER_P" => Some(Self::InnerP), + "INOUT" => Some(Self::Inout), + "INPUT_P" => Some(Self::InputP), + "INSENSITIVE" => Some(Self::Insensitive), + "INSERT" => Some(Self::Insert), + "INSTEAD" => Some(Self::Instead), + "INT_P" => Some(Self::IntP), + "INTEGER" => Some(Self::Integer), + "INTERSECT" => Some(Self::Intersect), + "INTERVAL" => Some(Self::Interval), + "INTO" => Some(Self::Into), + "INVOKER" => Some(Self::Invoker), + "IS" => Some(Self::Is), + "ISNULL" => Some(Self::Isnull), + "ISOLATION" => Some(Self::Isolation), + "JOIN" => Some(Self::Join), + "JSON" => Some(Self::Json), + "JSON_ARRAY" => Some(Self::JsonArray), + "JSON_ARRAYAGG" => Some(Self::JsonArrayagg), + "JSON_EXISTS" => Some(Self::JsonExists), + "JSON_OBJECT" => Some(Self::JsonObject), + "JSON_OBJECTAGG" => Some(Self::JsonObjectagg), + "JSON_QUERY" => Some(Self::JsonQuery), + "JSON_SCALAR" => Some(Self::JsonScalar), + "JSON_SERIALIZE" => Some(Self::JsonSerialize), + "JSON_TABLE" => Some(Self::JsonTable), + "JSON_VALUE" => Some(Self::JsonValue), + "KEEP" => Some(Self::Keep), + "KEY" => Some(Self::Key), + "KEYS" => Some(Self::Keys), + "LABEL" => Some(Self::Label), + "LANGUAGE" => Some(Self::Language), + "LARGE_P" => Some(Self::LargeP), + "LAST_P" => Some(Self::LastP), + "LATERAL_P" => Some(Self::LateralP), + "LEADING" => Some(Self::Leading), + "LEAKPROOF" => Some(Self::Leakproof), + "LEAST" => Some(Self::Least), + "LEFT" => Some(Self::Left), + "LEVEL" => Some(Self::Level), + "LIKE" => Some(Self::Like), + "LIMIT" => Some(Self::Limit), + "LISTEN" => Some(Self::Listen), + "LOAD" => Some(Self::Load), + "LOCAL" => Some(Self::Local), + "LOCALTIME" => Some(Self::Localtime), + "LOCALTIMESTAMP" => Some(Self::Localtimestamp), + "LOCATION" => Some(Self::Location), + "LOCK_P" => Some(Self::LockP), + "LOCKED" => Some(Self::Locked), + "LOGGED" => Some(Self::Logged), + "MAPPING" => Some(Self::Mapping), + "MATCH" => Some(Self::Match), + "MATCHED" => Some(Self::Matched), + "MATERIALIZED" => Some(Self::Materialized), + "MAXVALUE" => Some(Self::Maxvalue), + "MERGE" => Some(Self::Merge), + "MERGE_ACTION" => Some(Self::MergeAction), + "METHOD" => 
Some(Self::Method), + "MINUTE_P" => Some(Self::MinuteP), + "MINVALUE" => Some(Self::Minvalue), + "MODE" => Some(Self::Mode), + "MONTH_P" => Some(Self::MonthP), + "MOVE" => Some(Self::Move), + "NAME_P" => Some(Self::NameP), + "NAMES" => Some(Self::Names), + "NATIONAL" => Some(Self::National), + "NATURAL" => Some(Self::Natural), + "NCHAR" => Some(Self::Nchar), + "NESTED" => Some(Self::Nested), + "NEW" => Some(Self::New), + "NEXT" => Some(Self::Next), + "NFC" => Some(Self::Nfc), + "NFD" => Some(Self::Nfd), + "NFKC" => Some(Self::Nfkc), + "NFKD" => Some(Self::Nfkd), + "NO" => Some(Self::No), + "NONE" => Some(Self::None), + "NORMALIZE" => Some(Self::Normalize), + "NORMALIZED" => Some(Self::Normalized), + "NOT" => Some(Self::Not), + "NOTHING" => Some(Self::Nothing), + "NOTIFY" => Some(Self::Notify), + "NOTNULL" => Some(Self::Notnull), + "NOWAIT" => Some(Self::Nowait), + "NULL_P" => Some(Self::NullP), + "NULLIF" => Some(Self::Nullif), + "NULLS_P" => Some(Self::NullsP), + "NUMERIC" => Some(Self::Numeric), + "OBJECT_P" => Some(Self::ObjectP), + "OF" => Some(Self::Of), + "OFF" => Some(Self::Off), + "OFFSET" => Some(Self::Offset), + "OIDS" => Some(Self::Oids), + "OLD" => Some(Self::Old), + "OMIT" => Some(Self::Omit), + "ON" => Some(Self::On), + "ONLY" => Some(Self::Only), + "OPERATOR" => Some(Self::Operator), + "OPTION" => Some(Self::Option), + "OPTIONS" => Some(Self::Options), + "OR" => Some(Self::Or), + "ORDER" => Some(Self::Order), + "ORDINALITY" => Some(Self::Ordinality), + "OTHERS" => Some(Self::Others), + "OUT_P" => Some(Self::OutP), + "OUTER_P" => Some(Self::OuterP), + "OVER" => Some(Self::Over), + "OVERLAPS" => Some(Self::Overlaps), + "OVERLAY" => Some(Self::Overlay), + "OVERRIDING" => Some(Self::Overriding), + "OWNED" => Some(Self::Owned), + "OWNER" => Some(Self::Owner), + "PARALLEL" => Some(Self::Parallel), + "PARAMETER" => Some(Self::Parameter), + "PARSER" => Some(Self::Parser), + "PARTIAL" => Some(Self::Partial), + "PARTITION" => Some(Self::Partition), + "PASSING" => Some(Self::Passing), + "PASSWORD" => Some(Self::Password), + "PATH" => Some(Self::Path), + "PLACING" => Some(Self::Placing), + "PLAN" => Some(Self::Plan), + "PLANS" => Some(Self::Plans), + "POLICY" => Some(Self::Policy), + "POSITION" => Some(Self::Position), + "PRECEDING" => Some(Self::Preceding), + "PRECISION" => Some(Self::Precision), + "PRESERVE" => Some(Self::Preserve), + "PREPARE" => Some(Self::Prepare), + "PREPARED" => Some(Self::Prepared), + "PRIMARY" => Some(Self::Primary), + "PRIOR" => Some(Self::Prior), + "PRIVILEGES" => Some(Self::Privileges), + "PROCEDURAL" => Some(Self::Procedural), + "PROCEDURE" => Some(Self::Procedure), + "PROCEDURES" => Some(Self::Procedures), + "PROGRAM" => Some(Self::Program), + "PUBLICATION" => Some(Self::Publication), + "QUOTE" => Some(Self::Quote), + "QUOTES" => Some(Self::Quotes), + "RANGE" => Some(Self::Range), + "READ" => Some(Self::Read), + "REAL" => Some(Self::Real), + "REASSIGN" => Some(Self::Reassign), + "RECHECK" => Some(Self::Recheck), + "RECURSIVE" => Some(Self::Recursive), + "REF_P" => Some(Self::RefP), + "REFERENCES" => Some(Self::References), + "REFERENCING" => Some(Self::Referencing), + "REFRESH" => Some(Self::Refresh), + "REINDEX" => Some(Self::Reindex), + "RELATIVE_P" => Some(Self::RelativeP), + "RELEASE" => Some(Self::Release), + "RENAME" => Some(Self::Rename), + "REPEATABLE" => Some(Self::Repeatable), + "REPLACE" => Some(Self::Replace), + "REPLICA" => Some(Self::Replica), + "RESET" => Some(Self::Reset), + "RESTART" => Some(Self::Restart), + "RESTRICT" => 
Some(Self::Restrict), + "RETURN" => Some(Self::Return), + "RETURNING" => Some(Self::Returning), + "RETURNS" => Some(Self::Returns), + "REVOKE" => Some(Self::Revoke), + "RIGHT" => Some(Self::Right), + "ROLE" => Some(Self::Role), + "ROLLBACK" => Some(Self::Rollback), + "ROLLUP" => Some(Self::Rollup), + "ROUTINE" => Some(Self::Routine), + "ROUTINES" => Some(Self::Routines), + "ROW" => Some(Self::Row), + "ROWS" => Some(Self::Rows), + "RULE" => Some(Self::Rule), + "SAVEPOINT" => Some(Self::Savepoint), + "SCALAR" => Some(Self::Scalar), + "SCHEMA" => Some(Self::Schema), + "SCHEMAS" => Some(Self::Schemas), + "SCROLL" => Some(Self::Scroll), + "SEARCH" => Some(Self::Search), + "SECOND_P" => Some(Self::SecondP), + "SECURITY" => Some(Self::Security), + "SELECT" => Some(Self::Select), + "SEQUENCE" => Some(Self::Sequence), + "SEQUENCES" => Some(Self::Sequences), + "SERIALIZABLE" => Some(Self::Serializable), + "SERVER" => Some(Self::Server), + "SESSION" => Some(Self::Session), + "SESSION_USER" => Some(Self::SessionUser), + "SET" => Some(Self::Set), + "SETS" => Some(Self::Sets), + "SETOF" => Some(Self::Setof), + "SHARE" => Some(Self::Share), + "SHOW" => Some(Self::Show), + "SIMILAR" => Some(Self::Similar), + "SIMPLE" => Some(Self::Simple), + "SKIP" => Some(Self::Skip), + "SMALLINT" => Some(Self::Smallint), + "SNAPSHOT" => Some(Self::Snapshot), + "SOME" => Some(Self::Some), + "SOURCE" => Some(Self::Source), + "SQL_P" => Some(Self::SqlP), + "STABLE" => Some(Self::Stable), + "STANDALONE_P" => Some(Self::StandaloneP), + "START" => Some(Self::Start), + "STATEMENT" => Some(Self::Statement), + "STATISTICS" => Some(Self::Statistics), + "STDIN" => Some(Self::Stdin), + "STDOUT" => Some(Self::Stdout), + "STORAGE" => Some(Self::Storage), + "STORED" => Some(Self::Stored), + "STRICT_P" => Some(Self::StrictP), + "STRING_P" => Some(Self::StringP), + "STRIP_P" => Some(Self::StripP), + "SUBSCRIPTION" => Some(Self::Subscription), + "SUBSTRING" => Some(Self::Substring), + "SUPPORT" => Some(Self::Support), + "SYMMETRIC" => Some(Self::Symmetric), + "SYSID" => Some(Self::Sysid), + "SYSTEM_P" => Some(Self::SystemP), + "SYSTEM_USER" => Some(Self::SystemUser), + "TABLE" => Some(Self::Table), + "TABLES" => Some(Self::Tables), + "TABLESAMPLE" => Some(Self::Tablesample), + "TABLESPACE" => Some(Self::Tablespace), + "TARGET" => Some(Self::Target), + "TEMP" => Some(Self::Temp), + "TEMPLATE" => Some(Self::Template), + "TEMPORARY" => Some(Self::Temporary), + "TEXT_P" => Some(Self::TextP), + "THEN" => Some(Self::Then), + "TIES" => Some(Self::Ties), + "TIME" => Some(Self::Time), + "TIMESTAMP" => Some(Self::Timestamp), + "TO" => Some(Self::To), + "TRAILING" => Some(Self::Trailing), + "TRANSACTION" => Some(Self::Transaction), + "TRANSFORM" => Some(Self::Transform), + "TREAT" => Some(Self::Treat), + "TRIGGER" => Some(Self::Trigger), + "TRIM" => Some(Self::Trim), + "TRUE_P" => Some(Self::TrueP), + "TRUNCATE" => Some(Self::Truncate), + "TRUSTED" => Some(Self::Trusted), + "TYPE_P" => Some(Self::TypeP), + "TYPES_P" => Some(Self::TypesP), + "UESCAPE" => Some(Self::Uescape), + "UNBOUNDED" => Some(Self::Unbounded), + "UNCONDITIONAL" => Some(Self::Unconditional), + "UNCOMMITTED" => Some(Self::Uncommitted), + "UNENCRYPTED" => Some(Self::Unencrypted), + "UNION" => Some(Self::Union), + "UNIQUE" => Some(Self::Unique), + "UNKNOWN" => Some(Self::Unknown), + "UNLISTEN" => Some(Self::Unlisten), + "UNLOGGED" => Some(Self::Unlogged), + "UNTIL" => Some(Self::Until), + "UPDATE" => Some(Self::Update), + "USER" => Some(Self::User), + "USING" => Some(Self::Using), + 
"VACUUM" => Some(Self::Vacuum), + "VALID" => Some(Self::Valid), + "VALIDATE" => Some(Self::Validate), + "VALIDATOR" => Some(Self::Validator), + "VALUE_P" => Some(Self::ValueP), + "VALUES" => Some(Self::Values), + "VARCHAR" => Some(Self::Varchar), + "VARIADIC" => Some(Self::Variadic), + "VARYING" => Some(Self::Varying), + "VERBOSE" => Some(Self::Verbose), + "VERSION_P" => Some(Self::VersionP), + "VIEW" => Some(Self::View), + "VIEWS" => Some(Self::Views), + "VOLATILE" => Some(Self::Volatile), + "WHEN" => Some(Self::When), + "WHERE" => Some(Self::Where), + "WHITESPACE_P" => Some(Self::WhitespaceP), + "WINDOW" => Some(Self::Window), + "WITH" => Some(Self::With), + "WITHIN" => Some(Self::Within), + "WITHOUT" => Some(Self::Without), + "WORK" => Some(Self::Work), + "WRAPPER" => Some(Self::Wrapper), + "WRITE" => Some(Self::Write), + "XML_P" => Some(Self::XmlP), + "XMLATTRIBUTES" => Some(Self::Xmlattributes), + "XMLCONCAT" => Some(Self::Xmlconcat), + "XMLELEMENT" => Some(Self::Xmlelement), + "XMLEXISTS" => Some(Self::Xmlexists), + "XMLFOREST" => Some(Self::Xmlforest), + "XMLNAMESPACES" => Some(Self::Xmlnamespaces), + "XMLPARSE" => Some(Self::Xmlparse), + "XMLPI" => Some(Self::Xmlpi), + "XMLROOT" => Some(Self::Xmlroot), + "XMLSERIALIZE" => Some(Self::Xmlserialize), + "XMLTABLE" => Some(Self::Xmltable), + "YEAR_P" => Some(Self::YearP), + "YES_P" => Some(Self::YesP), + "ZONE" => Some(Self::Zone), + "FORMAT_LA" => Some(Self::FormatLa), + "NOT_LA" => Some(Self::NotLa), + "NULLS_LA" => Some(Self::NullsLa), + "WITH_LA" => Some(Self::WithLa), + "WITHOUT_LA" => Some(Self::WithoutLa), + "MODE_TYPE_NAME" => Some(Self::ModeTypeName), + "MODE_PLPGSQL_EXPR" => Some(Self::ModePlpgsqlExpr), + "MODE_PLPGSQL_ASSIGN1" => Some(Self::ModePlpgsqlAssign1), + "MODE_PLPGSQL_ASSIGN2" => Some(Self::ModePlpgsqlAssign2), + "MODE_PLPGSQL_ASSIGN3" => Some(Self::ModePlpgsqlAssign3), + "UMINUS" => Some(Self::Uminus), + _ => None, + } + } +} diff --git a/crates/pgt_query/src/scan.rs b/crates/pgt_query/src/scan.rs new file mode 100644 index 00000000..b12061e7 --- /dev/null +++ b/crates/pgt_query/src/scan.rs @@ -0,0 +1,33 @@ +use std::ffi::{CStr, CString}; + +use crate::bindings::*; +use crate::error::*; +use crate::protobuf; + +use prost::Message; + +/// Scans (lexes) the given SQL statement into tokens. +/// +/// # Example +/// +/// ```rust +/// let result = pgt_query::scan("SELECT * FROM contacts"); +/// assert!(result.is_ok()); +/// ``` +pub fn scan(sql: &str) -> Result { + let input = CString::new(sql)?; + let result = unsafe { pg_query_scan(input.as_ptr()) }; + let scan_result = if !result.error.is_null() { + let message = unsafe { CStr::from_ptr((*result.error).message) } + .to_string_lossy() + .to_string(); + Err(Error::Scan(message)) + } else { + let data = unsafe { + std::slice::from_raw_parts(result.pbuf.data as *const u8, result.pbuf.len as usize) + }; + protobuf::ScanResult::decode(data).map_err(Error::Decode) + }; + unsafe { pg_query_free_scan_result(result) }; + scan_result +} diff --git a/crates/pgt_query/src/split.rs b/crates/pgt_query/src/split.rs new file mode 100644 index 00000000..abb95eb8 --- /dev/null +++ b/crates/pgt_query/src/split.rs @@ -0,0 +1,86 @@ +use std::ffi::{CStr, CString}; + +use crate::bindings::*; +use crate::error::*; + +/// Split a well-formed query into separate statements. 
+///
+/// # Example
+///
+/// ```rust
+/// let query = r#"select /*;*/ 1; select "2;", (select 3);"#;
+/// let statements = pgt_query::split_with_parser(query).unwrap();
+/// assert_eq!(statements, vec!["select /*;*/ 1", r#" select "2;", (select 3)"#]);
+/// ```
+///
+/// However, `split_with_parser` will fail on malformed statements:
+///
+/// ```rust
+/// let query = "select 1; this statement is not sql; select 2;";
+/// let result = pgt_query::split_with_parser(query);
+/// let err = r#"syntax error at or near "this""#;
+/// assert_eq!(result, Err(pgt_query::Error::Split(err.to_string())));
+/// ```
+pub fn split_with_parser(query: &str) -> Result<Vec<&str>> {
+    let input = CString::new(query)?;
+    let result = unsafe { pg_query_split_with_parser(input.as_ptr()) };
+    let split_result = if !result.error.is_null() {
+        let message = unsafe { CStr::from_ptr((*result.error).message) }
+            .to_string_lossy()
+            .to_string();
+        Err(Error::Split(message))
+    } else {
+        let n_stmts = result.n_stmts as usize;
+        let mut statements = Vec::with_capacity(n_stmts);
+        for offset in 0..n_stmts {
+            let split_stmt = unsafe { *result.stmts.add(offset).read() };
+            let start = split_stmt.stmt_location as usize;
+            let end = start + split_stmt.stmt_len as usize;
+            statements.push(&query[start..end]);
+            // NOTE: the start..end byte slice may not hold up for non-UTF-8 charsets
+        }
+        Ok(statements)
+    };
+    unsafe { pg_query_free_split_result(result) };
+    split_result
+}
+
+/// Split a potentially-malformed query into separate statements. Note that
+/// invalid tokens will be skipped:
+/// ```rust
+/// let query = r#"select /*;*/ 1; asdf; select "2;", (select 3); asdf"#;
+/// let statements = pgt_query::split_with_scanner(query).unwrap();
+/// assert_eq!(statements, vec![
+///     "select /*;*/ 1",
+///     // skipped " asdf" since it was an invalid token
+///     r#" select "2;", (select 3)"#,
+/// ]);
+/// ```
+pub fn split_with_scanner(query: &str) -> Result<Vec<&str>> {
+    let input = CString::new(query)?;
+    let result = unsafe { pg_query_split_with_scanner(input.as_ptr()) };
+    let split_result = if !result.error.is_null() {
+        let message = unsafe { CStr::from_ptr((*result.error).message) }
+            .to_string_lossy()
+            .to_string();
+        Err(Error::Split(message))
+    } else {
+        // don't use result.stderr_buffer since it appears unused unless
+        // libpg_query is compiled with DEBUG defined.
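+        // The same byte-offset caveat as in `split_with_parser` applies to
+        // the `start..end` slices below.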
+        let n_stmts = result.n_stmts as usize;
+        let mut start: usize;
+        let mut end: usize;
+        let mut statements = Vec::with_capacity(n_stmts);
+        for offset in 0..n_stmts {
+            let split_stmt = unsafe { *result.stmts.add(offset).read() };
+            start = split_stmt.stmt_location as usize;
+            // TODO: consider comparing the new value of start to the old value
+            // of end to see if any region larger than a statement-separator got skipped
+            end = start + split_stmt.stmt_len as usize;
+            statements.push(&query[start..end]);
+        }
+        Ok(statements)
+    };
+    unsafe { pg_query_free_split_result(result) };
+    split_result
+}
diff --git a/crates/pgt_query_ext/Cargo.toml b/crates/pgt_query_ext/Cargo.toml
index 3e6b57c1..9b4bfa1d 100644
--- a/crates/pgt_query_ext/Cargo.toml
+++ b/crates/pgt_query_ext/Cargo.toml
@@ -12,10 +12,8 @@ version = "0.0.0"
 
 [dependencies]
 
-petgraph = "0.6.4"
-
-pg_query.workspace = true
 pgt_diagnostics.workspace = true
+pgt_query.workspace = true
 pgt_text_size.workspace = true
 
 [lib]
diff --git a/crates/pgt_query_ext/src/diagnostics.rs b/crates/pgt_query_ext/src/diagnostics.rs
index 1a068dc0..4b5d92e9 100644
--- a/crates/pgt_query_ext/src/diagnostics.rs
+++ b/crates/pgt_query_ext/src/diagnostics.rs
@@ -30,8 +30,8 @@ impl SyntaxDiagnostic {
     }
 }
 
-impl From<pg_query::Error> for SyntaxDiagnostic {
-    fn from(err: pg_query::Error) -> Self {
+impl From<pgt_query::Error> for SyntaxDiagnostic {
+    fn from(err: pgt_query::Error) -> Self {
         SyntaxDiagnostic {
             span: None,
             message: MessageAndDescription::from(err.to_string()),
diff --git a/crates/pgt_query_ext/src/lib.rs b/crates/pgt_query_ext/src/lib.rs
index 5882a778..4c630487 100644
--- a/crates/pgt_query_ext/src/lib.rs
+++ b/crates/pgt_query_ext/src/lib.rs
@@ -1,62 +1 @@
-//! Postgres Statement Parser
-//!
-//! Simple wrapper crate for `pg_query` to expose types and a function to get the root node for an
-//! SQL statement.
-//!
-//! It also host any "extensions" to the `pg_query` crate that are not yet contributed upstream.
-//! Extensions include
-//! - `get_location` to get the location of a node
-//! - `get_node_properties` to get the properties of a node
-//! - `get_nodes` to get all the nodes in the AST as a petgraph tree
-//! - `ChildrenIterator` to iterate over the children of a node
 pub mod diagnostics;
-
-pub use pg_query::protobuf;
-pub use pg_query::{Error, NodeEnum, Result};
-
-pub fn parse(sql: &str) -> Result<NodeEnum> {
-    pg_query::parse(sql).map(|parsed| {
-        parsed
-            .protobuf
-            .nodes()
-            .iter()
-            .find(|n| n.1 == 1)
-            .map(|n| n.0.to_enum())
-            .ok_or_else(|| Error::Parse("Unable to find root node".to_string()))
-    })?
-}
-
-/// This function parses a PL/pgSQL function.
-///
-/// It expects the entire `CREATE FUNCTION` statement.
-pub fn parse_plpgsql(sql: &str) -> Result<()> {
-    // we swallow the error until we have a proper binding
-    let _ = pg_query::parse_plpgsql(sql)?;
-
-    Ok(())
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_parse_plpgsql_err() {
-        let input = "
-create function test_organisation_id ()
-    returns setof text
-    language plpgsql
-    security invoker
-    as $$
-    -- syntax error here
-    decare
-        v_organisation_id uuid;
-begin
-    select 1;
-end
-$$;
-    ";
-
-        assert!(parse_plpgsql(input).is_err());
-    }
-}
diff --git a/crates/pgt_query_proto_parser/Cargo.toml b/crates/pgt_query_macros/Cargo.toml
similarity index 51%
rename from crates/pgt_query_proto_parser/Cargo.toml
rename to crates/pgt_query_macros/Cargo.toml
index 729c94b4..0fcc52cf 100644
--- a/crates/pgt_query_proto_parser/Cargo.toml
+++ b/crates/pgt_query_macros/Cargo.toml
@@ -6,15 +6,19 @@ edition.workspace = true
 homepage.workspace = true
 keywords.workspace = true
 license.workspace = true
-name = "pgt_query_proto_parser"
+name = "pgt_query_macros"
 repository.workspace = true
 version = "0.0.0"
-
 [dependencies]
-convert_case = "0.6.0"
-protobuf = "3.3.0"
-protobuf-parse = "3.3.0"
+convert_case = { workspace = true }
+proc-macro2.workspace = true
+prost-reflect = { workspace = true }
+protox = { workspace = true }
+quote.workspace = true
 
 [lib]
-doctest = false
+proc-macro = true
+
+[build-dependencies]
+ureq = "2.9"
diff --git a/crates/pgt_query_macros/build.rs b/crates/pgt_query_macros/build.rs
new file mode 100644
index 00000000..db83ce86
--- /dev/null
+++ b/crates/pgt_query_macros/build.rs
@@ -0,0 +1,59 @@
+use std::env;
+use std::fs;
+use std::io::Write;
+use std::path::PathBuf;
+
+// This should match the version used by the pgt_query crate.
+// You can configure it via the PG_QUERY_VERSION environment variable if needed.
+static LIBPG_QUERY_TAG: &str = "17-6.1.0";
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // Allow version override via environment variable
+    let version = env::var("PG_QUERY_VERSION").unwrap_or_else(|_| LIBPG_QUERY_TAG.to_string());
+
+    // Get the manifest directory (source directory)
+    let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR")?);
+    let postgres_dir = manifest_dir.join("postgres");
+    let proto_filename = format!("{}.proto", version);
+    let proto_path = postgres_dir.join(&proto_filename);
+
+    // Download proto file if not already present in source directory
+    if !proto_path.exists() {
+        println!(
+            "cargo:warning=Downloading pg_query.proto for libpg_query {} to source directory",
+            version
+        );
+
+        // Create postgres directory if it doesn't exist
+        fs::create_dir_all(&postgres_dir)?;
+
+        // Download the proto file
+        let proto_url = format!(
+            "https://raw.githubusercontent.com/pganalyze/libpg_query/{}/protobuf/pg_query.proto",
+            version
+        );
+
+        let response = ureq::get(&proto_url).call()?;
+        let proto_content = response.into_string()?;
+
+        // Write proto file to source directory
+        let mut file = fs::File::create(&proto_path)?;
+        file.write_all(proto_content.as_bytes())?;
+
+        println!(
+            "cargo:warning=Successfully downloaded pg_query.proto to {}",
+            proto_path.display()
+        );
+    }
+
+    // Set environment variable for the proc macro
+    println!(
+        "cargo:rustc-env=PG_QUERY_PROTO_PATH={}",
+        proto_path.display()
+    );
+
+    // Tell cargo to rerun if the proto file changes
+    println!("cargo:rerun-if-changed={}", proto_path.display());
+
+    Ok(())
+}
diff --git a/crates/pgt_query_macros/postgres/17-6.1.0.proto b/crates/pgt_query_macros/postgres/17-6.1.0.proto
new file mode 100644
index 00000000..24a8f14c
---
/dev/null +++ b/crates/pgt_query_macros/postgres/17-6.1.0.proto @@ -0,0 +1,4110 @@ +// This file is autogenerated by ./scripts/generate_protobuf_and_funcs.rb + +syntax = "proto3"; + +package pg_query; + +message ParseResult { + int32 version = 1; + repeated RawStmt stmts = 2; +} + +message ScanResult { + int32 version = 1; + repeated ScanToken tokens = 2; +} + +message Node { + oneof node { + Alias alias = 1 [json_name="Alias"]; + RangeVar range_var = 2 [json_name="RangeVar"]; + TableFunc table_func = 3 [json_name="TableFunc"]; + IntoClause into_clause = 4 [json_name="IntoClause"]; + Var var = 5 [json_name="Var"]; + Param param = 6 [json_name="Param"]; + Aggref aggref = 7 [json_name="Aggref"]; + GroupingFunc grouping_func = 8 [json_name="GroupingFunc"]; + WindowFunc window_func = 9 [json_name="WindowFunc"]; + WindowFuncRunCondition window_func_run_condition = 10 [json_name="WindowFuncRunCondition"]; + MergeSupportFunc merge_support_func = 11 [json_name="MergeSupportFunc"]; + SubscriptingRef subscripting_ref = 12 [json_name="SubscriptingRef"]; + FuncExpr func_expr = 13 [json_name="FuncExpr"]; + NamedArgExpr named_arg_expr = 14 [json_name="NamedArgExpr"]; + OpExpr op_expr = 15 [json_name="OpExpr"]; + DistinctExpr distinct_expr = 16 [json_name="DistinctExpr"]; + NullIfExpr null_if_expr = 17 [json_name="NullIfExpr"]; + ScalarArrayOpExpr scalar_array_op_expr = 18 [json_name="ScalarArrayOpExpr"]; + BoolExpr bool_expr = 19 [json_name="BoolExpr"]; + SubLink sub_link = 20 [json_name="SubLink"]; + SubPlan sub_plan = 21 [json_name="SubPlan"]; + AlternativeSubPlan alternative_sub_plan = 22 [json_name="AlternativeSubPlan"]; + FieldSelect field_select = 23 [json_name="FieldSelect"]; + FieldStore field_store = 24 [json_name="FieldStore"]; + RelabelType relabel_type = 25 [json_name="RelabelType"]; + CoerceViaIO coerce_via_io = 26 [json_name="CoerceViaIO"]; + ArrayCoerceExpr array_coerce_expr = 27 [json_name="ArrayCoerceExpr"]; + ConvertRowtypeExpr convert_rowtype_expr = 28 [json_name="ConvertRowtypeExpr"]; + CollateExpr collate_expr = 29 [json_name="CollateExpr"]; + CaseExpr case_expr = 30 [json_name="CaseExpr"]; + CaseWhen case_when = 31 [json_name="CaseWhen"]; + CaseTestExpr case_test_expr = 32 [json_name="CaseTestExpr"]; + ArrayExpr array_expr = 33 [json_name="ArrayExpr"]; + RowExpr row_expr = 34 [json_name="RowExpr"]; + RowCompareExpr row_compare_expr = 35 [json_name="RowCompareExpr"]; + CoalesceExpr coalesce_expr = 36 [json_name="CoalesceExpr"]; + MinMaxExpr min_max_expr = 37 [json_name="MinMaxExpr"]; + SQLValueFunction sqlvalue_function = 38 [json_name="SQLValueFunction"]; + XmlExpr xml_expr = 39 [json_name="XmlExpr"]; + JsonFormat json_format = 40 [json_name="JsonFormat"]; + JsonReturning json_returning = 41 [json_name="JsonReturning"]; + JsonValueExpr json_value_expr = 42 [json_name="JsonValueExpr"]; + JsonConstructorExpr json_constructor_expr = 43 [json_name="JsonConstructorExpr"]; + JsonIsPredicate json_is_predicate = 44 [json_name="JsonIsPredicate"]; + JsonBehavior json_behavior = 45 [json_name="JsonBehavior"]; + JsonExpr json_expr = 46 [json_name="JsonExpr"]; + JsonTablePath json_table_path = 47 [json_name="JsonTablePath"]; + JsonTablePathScan json_table_path_scan = 48 [json_name="JsonTablePathScan"]; + JsonTableSiblingJoin json_table_sibling_join = 49 [json_name="JsonTableSiblingJoin"]; + NullTest null_test = 50 [json_name="NullTest"]; + BooleanTest boolean_test = 51 [json_name="BooleanTest"]; + MergeAction merge_action = 52 [json_name="MergeAction"]; + CoerceToDomain coerce_to_domain = 53 
[json_name="CoerceToDomain"]; + CoerceToDomainValue coerce_to_domain_value = 54 [json_name="CoerceToDomainValue"]; + SetToDefault set_to_default = 55 [json_name="SetToDefault"]; + CurrentOfExpr current_of_expr = 56 [json_name="CurrentOfExpr"]; + NextValueExpr next_value_expr = 57 [json_name="NextValueExpr"]; + InferenceElem inference_elem = 58 [json_name="InferenceElem"]; + TargetEntry target_entry = 59 [json_name="TargetEntry"]; + RangeTblRef range_tbl_ref = 60 [json_name="RangeTblRef"]; + JoinExpr join_expr = 61 [json_name="JoinExpr"]; + FromExpr from_expr = 62 [json_name="FromExpr"]; + OnConflictExpr on_conflict_expr = 63 [json_name="OnConflictExpr"]; + Query query = 64 [json_name="Query"]; + TypeName type_name = 65 [json_name="TypeName"]; + ColumnRef column_ref = 66 [json_name="ColumnRef"]; + ParamRef param_ref = 67 [json_name="ParamRef"]; + A_Expr a_expr = 68 [json_name="A_Expr"]; + TypeCast type_cast = 69 [json_name="TypeCast"]; + CollateClause collate_clause = 70 [json_name="CollateClause"]; + RoleSpec role_spec = 71 [json_name="RoleSpec"]; + FuncCall func_call = 72 [json_name="FuncCall"]; + A_Star a_star = 73 [json_name="A_Star"]; + A_Indices a_indices = 74 [json_name="A_Indices"]; + A_Indirection a_indirection = 75 [json_name="A_Indirection"]; + A_ArrayExpr a_array_expr = 76 [json_name="A_ArrayExpr"]; + ResTarget res_target = 77 [json_name="ResTarget"]; + MultiAssignRef multi_assign_ref = 78 [json_name="MultiAssignRef"]; + SortBy sort_by = 79 [json_name="SortBy"]; + WindowDef window_def = 80 [json_name="WindowDef"]; + RangeSubselect range_subselect = 81 [json_name="RangeSubselect"]; + RangeFunction range_function = 82 [json_name="RangeFunction"]; + RangeTableFunc range_table_func = 83 [json_name="RangeTableFunc"]; + RangeTableFuncCol range_table_func_col = 84 [json_name="RangeTableFuncCol"]; + RangeTableSample range_table_sample = 85 [json_name="RangeTableSample"]; + ColumnDef column_def = 86 [json_name="ColumnDef"]; + TableLikeClause table_like_clause = 87 [json_name="TableLikeClause"]; + IndexElem index_elem = 88 [json_name="IndexElem"]; + DefElem def_elem = 89 [json_name="DefElem"]; + LockingClause locking_clause = 90 [json_name="LockingClause"]; + XmlSerialize xml_serialize = 91 [json_name="XmlSerialize"]; + PartitionElem partition_elem = 92 [json_name="PartitionElem"]; + PartitionSpec partition_spec = 93 [json_name="PartitionSpec"]; + PartitionBoundSpec partition_bound_spec = 94 [json_name="PartitionBoundSpec"]; + PartitionRangeDatum partition_range_datum = 95 [json_name="PartitionRangeDatum"]; + SinglePartitionSpec single_partition_spec = 96 [json_name="SinglePartitionSpec"]; + PartitionCmd partition_cmd = 97 [json_name="PartitionCmd"]; + RangeTblEntry range_tbl_entry = 98 [json_name="RangeTblEntry"]; + RTEPermissionInfo rtepermission_info = 99 [json_name="RTEPermissionInfo"]; + RangeTblFunction range_tbl_function = 100 [json_name="RangeTblFunction"]; + TableSampleClause table_sample_clause = 101 [json_name="TableSampleClause"]; + WithCheckOption with_check_option = 102 [json_name="WithCheckOption"]; + SortGroupClause sort_group_clause = 103 [json_name="SortGroupClause"]; + GroupingSet grouping_set = 104 [json_name="GroupingSet"]; + WindowClause window_clause = 105 [json_name="WindowClause"]; + RowMarkClause row_mark_clause = 106 [json_name="RowMarkClause"]; + WithClause with_clause = 107 [json_name="WithClause"]; + InferClause infer_clause = 108 [json_name="InferClause"]; + OnConflictClause on_conflict_clause = 109 [json_name="OnConflictClause"]; + CTESearchClause 
ctesearch_clause = 110 [json_name="CTESearchClause"]; + CTECycleClause ctecycle_clause = 111 [json_name="CTECycleClause"]; + CommonTableExpr common_table_expr = 112 [json_name="CommonTableExpr"]; + MergeWhenClause merge_when_clause = 113 [json_name="MergeWhenClause"]; + TriggerTransition trigger_transition = 114 [json_name="TriggerTransition"]; + JsonOutput json_output = 115 [json_name="JsonOutput"]; + JsonArgument json_argument = 116 [json_name="JsonArgument"]; + JsonFuncExpr json_func_expr = 117 [json_name="JsonFuncExpr"]; + JsonTablePathSpec json_table_path_spec = 118 [json_name="JsonTablePathSpec"]; + JsonTable json_table = 119 [json_name="JsonTable"]; + JsonTableColumn json_table_column = 120 [json_name="JsonTableColumn"]; + JsonKeyValue json_key_value = 121 [json_name="JsonKeyValue"]; + JsonParseExpr json_parse_expr = 122 [json_name="JsonParseExpr"]; + JsonScalarExpr json_scalar_expr = 123 [json_name="JsonScalarExpr"]; + JsonSerializeExpr json_serialize_expr = 124 [json_name="JsonSerializeExpr"]; + JsonObjectConstructor json_object_constructor = 125 [json_name="JsonObjectConstructor"]; + JsonArrayConstructor json_array_constructor = 126 [json_name="JsonArrayConstructor"]; + JsonArrayQueryConstructor json_array_query_constructor = 127 [json_name="JsonArrayQueryConstructor"]; + JsonAggConstructor json_agg_constructor = 128 [json_name="JsonAggConstructor"]; + JsonObjectAgg json_object_agg = 129 [json_name="JsonObjectAgg"]; + JsonArrayAgg json_array_agg = 130 [json_name="JsonArrayAgg"]; + RawStmt raw_stmt = 131 [json_name="RawStmt"]; + InsertStmt insert_stmt = 132 [json_name="InsertStmt"]; + DeleteStmt delete_stmt = 133 [json_name="DeleteStmt"]; + UpdateStmt update_stmt = 134 [json_name="UpdateStmt"]; + MergeStmt merge_stmt = 135 [json_name="MergeStmt"]; + SelectStmt select_stmt = 136 [json_name="SelectStmt"]; + SetOperationStmt set_operation_stmt = 137 [json_name="SetOperationStmt"]; + ReturnStmt return_stmt = 138 [json_name="ReturnStmt"]; + PLAssignStmt plassign_stmt = 139 [json_name="PLAssignStmt"]; + CreateSchemaStmt create_schema_stmt = 140 [json_name="CreateSchemaStmt"]; + AlterTableStmt alter_table_stmt = 141 [json_name="AlterTableStmt"]; + ReplicaIdentityStmt replica_identity_stmt = 142 [json_name="ReplicaIdentityStmt"]; + AlterTableCmd alter_table_cmd = 143 [json_name="AlterTableCmd"]; + AlterCollationStmt alter_collation_stmt = 144 [json_name="AlterCollationStmt"]; + AlterDomainStmt alter_domain_stmt = 145 [json_name="AlterDomainStmt"]; + GrantStmt grant_stmt = 146 [json_name="GrantStmt"]; + ObjectWithArgs object_with_args = 147 [json_name="ObjectWithArgs"]; + AccessPriv access_priv = 148 [json_name="AccessPriv"]; + GrantRoleStmt grant_role_stmt = 149 [json_name="GrantRoleStmt"]; + AlterDefaultPrivilegesStmt alter_default_privileges_stmt = 150 [json_name="AlterDefaultPrivilegesStmt"]; + CopyStmt copy_stmt = 151 [json_name="CopyStmt"]; + VariableSetStmt variable_set_stmt = 152 [json_name="VariableSetStmt"]; + VariableShowStmt variable_show_stmt = 153 [json_name="VariableShowStmt"]; + CreateStmt create_stmt = 154 [json_name="CreateStmt"]; + Constraint constraint = 155 [json_name="Constraint"]; + CreateTableSpaceStmt create_table_space_stmt = 156 [json_name="CreateTableSpaceStmt"]; + DropTableSpaceStmt drop_table_space_stmt = 157 [json_name="DropTableSpaceStmt"]; + AlterTableSpaceOptionsStmt alter_table_space_options_stmt = 158 [json_name="AlterTableSpaceOptionsStmt"]; + AlterTableMoveAllStmt alter_table_move_all_stmt = 159 [json_name="AlterTableMoveAllStmt"]; + 
CreateExtensionStmt create_extension_stmt = 160 [json_name="CreateExtensionStmt"]; + AlterExtensionStmt alter_extension_stmt = 161 [json_name="AlterExtensionStmt"]; + AlterExtensionContentsStmt alter_extension_contents_stmt = 162 [json_name="AlterExtensionContentsStmt"]; + CreateFdwStmt create_fdw_stmt = 163 [json_name="CreateFdwStmt"]; + AlterFdwStmt alter_fdw_stmt = 164 [json_name="AlterFdwStmt"]; + CreateForeignServerStmt create_foreign_server_stmt = 165 [json_name="CreateForeignServerStmt"]; + AlterForeignServerStmt alter_foreign_server_stmt = 166 [json_name="AlterForeignServerStmt"]; + CreateForeignTableStmt create_foreign_table_stmt = 167 [json_name="CreateForeignTableStmt"]; + CreateUserMappingStmt create_user_mapping_stmt = 168 [json_name="CreateUserMappingStmt"]; + AlterUserMappingStmt alter_user_mapping_stmt = 169 [json_name="AlterUserMappingStmt"]; + DropUserMappingStmt drop_user_mapping_stmt = 170 [json_name="DropUserMappingStmt"]; + ImportForeignSchemaStmt import_foreign_schema_stmt = 171 [json_name="ImportForeignSchemaStmt"]; + CreatePolicyStmt create_policy_stmt = 172 [json_name="CreatePolicyStmt"]; + AlterPolicyStmt alter_policy_stmt = 173 [json_name="AlterPolicyStmt"]; + CreateAmStmt create_am_stmt = 174 [json_name="CreateAmStmt"]; + CreateTrigStmt create_trig_stmt = 175 [json_name="CreateTrigStmt"]; + CreateEventTrigStmt create_event_trig_stmt = 176 [json_name="CreateEventTrigStmt"]; + AlterEventTrigStmt alter_event_trig_stmt = 177 [json_name="AlterEventTrigStmt"]; + CreatePLangStmt create_plang_stmt = 178 [json_name="CreatePLangStmt"]; + CreateRoleStmt create_role_stmt = 179 [json_name="CreateRoleStmt"]; + AlterRoleStmt alter_role_stmt = 180 [json_name="AlterRoleStmt"]; + AlterRoleSetStmt alter_role_set_stmt = 181 [json_name="AlterRoleSetStmt"]; + DropRoleStmt drop_role_stmt = 182 [json_name="DropRoleStmt"]; + CreateSeqStmt create_seq_stmt = 183 [json_name="CreateSeqStmt"]; + AlterSeqStmt alter_seq_stmt = 184 [json_name="AlterSeqStmt"]; + DefineStmt define_stmt = 185 [json_name="DefineStmt"]; + CreateDomainStmt create_domain_stmt = 186 [json_name="CreateDomainStmt"]; + CreateOpClassStmt create_op_class_stmt = 187 [json_name="CreateOpClassStmt"]; + CreateOpClassItem create_op_class_item = 188 [json_name="CreateOpClassItem"]; + CreateOpFamilyStmt create_op_family_stmt = 189 [json_name="CreateOpFamilyStmt"]; + AlterOpFamilyStmt alter_op_family_stmt = 190 [json_name="AlterOpFamilyStmt"]; + DropStmt drop_stmt = 191 [json_name="DropStmt"]; + TruncateStmt truncate_stmt = 192 [json_name="TruncateStmt"]; + CommentStmt comment_stmt = 193 [json_name="CommentStmt"]; + SecLabelStmt sec_label_stmt = 194 [json_name="SecLabelStmt"]; + DeclareCursorStmt declare_cursor_stmt = 195 [json_name="DeclareCursorStmt"]; + ClosePortalStmt close_portal_stmt = 196 [json_name="ClosePortalStmt"]; + FetchStmt fetch_stmt = 197 [json_name="FetchStmt"]; + IndexStmt index_stmt = 198 [json_name="IndexStmt"]; + CreateStatsStmt create_stats_stmt = 199 [json_name="CreateStatsStmt"]; + StatsElem stats_elem = 200 [json_name="StatsElem"]; + AlterStatsStmt alter_stats_stmt = 201 [json_name="AlterStatsStmt"]; + CreateFunctionStmt create_function_stmt = 202 [json_name="CreateFunctionStmt"]; + FunctionParameter function_parameter = 203 [json_name="FunctionParameter"]; + AlterFunctionStmt alter_function_stmt = 204 [json_name="AlterFunctionStmt"]; + DoStmt do_stmt = 205 [json_name="DoStmt"]; + InlineCodeBlock inline_code_block = 206 [json_name="InlineCodeBlock"]; + CallStmt call_stmt = 207 [json_name="CallStmt"]; + 
CallContext call_context = 208 [json_name="CallContext"]; + RenameStmt rename_stmt = 209 [json_name="RenameStmt"]; + AlterObjectDependsStmt alter_object_depends_stmt = 210 [json_name="AlterObjectDependsStmt"]; + AlterObjectSchemaStmt alter_object_schema_stmt = 211 [json_name="AlterObjectSchemaStmt"]; + AlterOwnerStmt alter_owner_stmt = 212 [json_name="AlterOwnerStmt"]; + AlterOperatorStmt alter_operator_stmt = 213 [json_name="AlterOperatorStmt"]; + AlterTypeStmt alter_type_stmt = 214 [json_name="AlterTypeStmt"]; + RuleStmt rule_stmt = 215 [json_name="RuleStmt"]; + NotifyStmt notify_stmt = 216 [json_name="NotifyStmt"]; + ListenStmt listen_stmt = 217 [json_name="ListenStmt"]; + UnlistenStmt unlisten_stmt = 218 [json_name="UnlistenStmt"]; + TransactionStmt transaction_stmt = 219 [json_name="TransactionStmt"]; + CompositeTypeStmt composite_type_stmt = 220 [json_name="CompositeTypeStmt"]; + CreateEnumStmt create_enum_stmt = 221 [json_name="CreateEnumStmt"]; + CreateRangeStmt create_range_stmt = 222 [json_name="CreateRangeStmt"]; + AlterEnumStmt alter_enum_stmt = 223 [json_name="AlterEnumStmt"]; + ViewStmt view_stmt = 224 [json_name="ViewStmt"]; + LoadStmt load_stmt = 225 [json_name="LoadStmt"]; + CreatedbStmt createdb_stmt = 226 [json_name="CreatedbStmt"]; + AlterDatabaseStmt alter_database_stmt = 227 [json_name="AlterDatabaseStmt"]; + AlterDatabaseRefreshCollStmt alter_database_refresh_coll_stmt = 228 [json_name="AlterDatabaseRefreshCollStmt"]; + AlterDatabaseSetStmt alter_database_set_stmt = 229 [json_name="AlterDatabaseSetStmt"]; + DropdbStmt dropdb_stmt = 230 [json_name="DropdbStmt"]; + AlterSystemStmt alter_system_stmt = 231 [json_name="AlterSystemStmt"]; + ClusterStmt cluster_stmt = 232 [json_name="ClusterStmt"]; + VacuumStmt vacuum_stmt = 233 [json_name="VacuumStmt"]; + VacuumRelation vacuum_relation = 234 [json_name="VacuumRelation"]; + ExplainStmt explain_stmt = 235 [json_name="ExplainStmt"]; + CreateTableAsStmt create_table_as_stmt = 236 [json_name="CreateTableAsStmt"]; + RefreshMatViewStmt refresh_mat_view_stmt = 237 [json_name="RefreshMatViewStmt"]; + CheckPointStmt check_point_stmt = 238 [json_name="CheckPointStmt"]; + DiscardStmt discard_stmt = 239 [json_name="DiscardStmt"]; + LockStmt lock_stmt = 240 [json_name="LockStmt"]; + ConstraintsSetStmt constraints_set_stmt = 241 [json_name="ConstraintsSetStmt"]; + ReindexStmt reindex_stmt = 242 [json_name="ReindexStmt"]; + CreateConversionStmt create_conversion_stmt = 243 [json_name="CreateConversionStmt"]; + CreateCastStmt create_cast_stmt = 244 [json_name="CreateCastStmt"]; + CreateTransformStmt create_transform_stmt = 245 [json_name="CreateTransformStmt"]; + PrepareStmt prepare_stmt = 246 [json_name="PrepareStmt"]; + ExecuteStmt execute_stmt = 247 [json_name="ExecuteStmt"]; + DeallocateStmt deallocate_stmt = 248 [json_name="DeallocateStmt"]; + DropOwnedStmt drop_owned_stmt = 249 [json_name="DropOwnedStmt"]; + ReassignOwnedStmt reassign_owned_stmt = 250 [json_name="ReassignOwnedStmt"]; + AlterTSDictionaryStmt alter_tsdictionary_stmt = 251 [json_name="AlterTSDictionaryStmt"]; + AlterTSConfigurationStmt alter_tsconfiguration_stmt = 252 [json_name="AlterTSConfigurationStmt"]; + PublicationTable publication_table = 253 [json_name="PublicationTable"]; + PublicationObjSpec publication_obj_spec = 254 [json_name="PublicationObjSpec"]; + CreatePublicationStmt create_publication_stmt = 255 [json_name="CreatePublicationStmt"]; + AlterPublicationStmt alter_publication_stmt = 256 [json_name="AlterPublicationStmt"]; + CreateSubscriptionStmt 
create_subscription_stmt = 257 [json_name="CreateSubscriptionStmt"]; + AlterSubscriptionStmt alter_subscription_stmt = 258 [json_name="AlterSubscriptionStmt"]; + DropSubscriptionStmt drop_subscription_stmt = 259 [json_name="DropSubscriptionStmt"]; + Integer integer = 260 [json_name="Integer"]; + Float float = 261 [json_name="Float"]; + Boolean boolean = 262 [json_name="Boolean"]; + String string = 263 [json_name="String"]; + BitString bit_string = 264 [json_name="BitString"]; + List list = 265 [json_name="List"]; + IntList int_list = 266 [json_name="IntList"]; + OidList oid_list = 267 [json_name="OidList"]; + A_Const a_const = 268 [json_name="A_Const"]; + } +} + +message Integer +{ + int32 ival = 1; /* machine integer */ +} + +message Float +{ + string fval = 1; /* string */ +} + +message Boolean +{ + bool boolval = 1; +} + +message String +{ + string sval = 1; /* string */ +} + +message BitString +{ + string bsval = 1; /* string */ +} + +message List +{ + repeated Node items = 1; +} + +message OidList +{ + repeated Node items = 1; +} + +message IntList +{ + repeated Node items = 1; +} + +message A_Const +{ + oneof val { + Integer ival = 1; + Float fval = 2; + Boolean boolval = 3; + String sval = 4; + BitString bsval = 5; + } + bool isnull = 10; + int32 location = 11; +} + +message Alias +{ + string aliasname = 1 [json_name="aliasname"]; + repeated Node colnames = 2 [json_name="colnames"]; +} + +message RangeVar +{ + string catalogname = 1 [json_name="catalogname"]; + string schemaname = 2 [json_name="schemaname"]; + string relname = 3 [json_name="relname"]; + bool inh = 4 [json_name="inh"]; + string relpersistence = 5 [json_name="relpersistence"]; + Alias alias = 6 [json_name="alias"]; + int32 location = 7 [json_name="location"]; +} + +message TableFunc +{ + TableFuncType functype = 1 [json_name="functype"]; + repeated Node ns_uris = 2 [json_name="ns_uris"]; + repeated Node ns_names = 3 [json_name="ns_names"]; + Node docexpr = 4 [json_name="docexpr"]; + Node rowexpr = 5 [json_name="rowexpr"]; + repeated Node colnames = 6 [json_name="colnames"]; + repeated Node coltypes = 7 [json_name="coltypes"]; + repeated Node coltypmods = 8 [json_name="coltypmods"]; + repeated Node colcollations = 9 [json_name="colcollations"]; + repeated Node colexprs = 10 [json_name="colexprs"]; + repeated Node coldefexprs = 11 [json_name="coldefexprs"]; + repeated Node colvalexprs = 12 [json_name="colvalexprs"]; + repeated Node passingvalexprs = 13 [json_name="passingvalexprs"]; + repeated uint64 notnulls = 14 [json_name="notnulls"]; + Node plan = 15 [json_name="plan"]; + int32 ordinalitycol = 16 [json_name="ordinalitycol"]; + int32 location = 17 [json_name="location"]; +} + +message IntoClause +{ + RangeVar rel = 1 [json_name="rel"]; + repeated Node col_names = 2 [json_name="colNames"]; + string access_method = 3 [json_name="accessMethod"]; + repeated Node options = 4 [json_name="options"]; + OnCommitAction on_commit = 5 [json_name="onCommit"]; + string table_space_name = 6 [json_name="tableSpaceName"]; + Node view_query = 7 [json_name="viewQuery"]; + bool skip_data = 8 [json_name="skipData"]; +} + +message Var +{ + Node xpr = 1 [json_name="xpr"]; + int32 varno = 2 [json_name="varno"]; + int32 varattno = 3 [json_name="varattno"]; + uint32 vartype = 4 [json_name="vartype"]; + int32 vartypmod = 5 [json_name="vartypmod"]; + uint32 varcollid = 6 [json_name="varcollid"]; + repeated uint64 varnullingrels = 7 [json_name="varnullingrels"]; + uint32 varlevelsup = 8 [json_name="varlevelsup"]; + int32 location = 9 
[json_name="location"]; +} + +message Param +{ + Node xpr = 1 [json_name="xpr"]; + ParamKind paramkind = 2 [json_name="paramkind"]; + int32 paramid = 3 [json_name="paramid"]; + uint32 paramtype = 4 [json_name="paramtype"]; + int32 paramtypmod = 5 [json_name="paramtypmod"]; + uint32 paramcollid = 6 [json_name="paramcollid"]; + int32 location = 7 [json_name="location"]; +} + +message Aggref +{ + Node xpr = 1 [json_name="xpr"]; + uint32 aggfnoid = 2 [json_name="aggfnoid"]; + uint32 aggtype = 3 [json_name="aggtype"]; + uint32 aggcollid = 4 [json_name="aggcollid"]; + uint32 inputcollid = 5 [json_name="inputcollid"]; + repeated Node aggargtypes = 6 [json_name="aggargtypes"]; + repeated Node aggdirectargs = 7 [json_name="aggdirectargs"]; + repeated Node args = 8 [json_name="args"]; + repeated Node aggorder = 9 [json_name="aggorder"]; + repeated Node aggdistinct = 10 [json_name="aggdistinct"]; + Node aggfilter = 11 [json_name="aggfilter"]; + bool aggstar = 12 [json_name="aggstar"]; + bool aggvariadic = 13 [json_name="aggvariadic"]; + string aggkind = 14 [json_name="aggkind"]; + uint32 agglevelsup = 15 [json_name="agglevelsup"]; + AggSplit aggsplit = 16 [json_name="aggsplit"]; + int32 aggno = 17 [json_name="aggno"]; + int32 aggtransno = 18 [json_name="aggtransno"]; + int32 location = 19 [json_name="location"]; +} + +message GroupingFunc +{ + Node xpr = 1 [json_name="xpr"]; + repeated Node args = 2 [json_name="args"]; + repeated Node refs = 3 [json_name="refs"]; + uint32 agglevelsup = 4 [json_name="agglevelsup"]; + int32 location = 5 [json_name="location"]; +} + +message WindowFunc +{ + Node xpr = 1 [json_name="xpr"]; + uint32 winfnoid = 2 [json_name="winfnoid"]; + uint32 wintype = 3 [json_name="wintype"]; + uint32 wincollid = 4 [json_name="wincollid"]; + uint32 inputcollid = 5 [json_name="inputcollid"]; + repeated Node args = 6 [json_name="args"]; + Node aggfilter = 7 [json_name="aggfilter"]; + repeated Node run_condition = 8 [json_name="runCondition"]; + uint32 winref = 9 [json_name="winref"]; + bool winstar = 10 [json_name="winstar"]; + bool winagg = 11 [json_name="winagg"]; + int32 location = 12 [json_name="location"]; +} + +message WindowFuncRunCondition +{ + Node xpr = 1 [json_name="xpr"]; + uint32 opno = 2 [json_name="opno"]; + uint32 inputcollid = 3 [json_name="inputcollid"]; + bool wfunc_left = 4 [json_name="wfunc_left"]; + Node arg = 5 [json_name="arg"]; +} + +message MergeSupportFunc +{ + Node xpr = 1 [json_name="xpr"]; + uint32 msftype = 2 [json_name="msftype"]; + uint32 msfcollid = 3 [json_name="msfcollid"]; + int32 location = 4 [json_name="location"]; +} + +message SubscriptingRef +{ + Node xpr = 1 [json_name="xpr"]; + uint32 refcontainertype = 2 [json_name="refcontainertype"]; + uint32 refelemtype = 3 [json_name="refelemtype"]; + uint32 refrestype = 4 [json_name="refrestype"]; + int32 reftypmod = 5 [json_name="reftypmod"]; + uint32 refcollid = 6 [json_name="refcollid"]; + repeated Node refupperindexpr = 7 [json_name="refupperindexpr"]; + repeated Node reflowerindexpr = 8 [json_name="reflowerindexpr"]; + Node refexpr = 9 [json_name="refexpr"]; + Node refassgnexpr = 10 [json_name="refassgnexpr"]; +} + +message FuncExpr +{ + Node xpr = 1 [json_name="xpr"]; + uint32 funcid = 2 [json_name="funcid"]; + uint32 funcresulttype = 3 [json_name="funcresulttype"]; + bool funcretset = 4 [json_name="funcretset"]; + bool funcvariadic = 5 [json_name="funcvariadic"]; + CoercionForm funcformat = 6 [json_name="funcformat"]; + uint32 funccollid = 7 [json_name="funccollid"]; + uint32 inputcollid = 8 
[json_name="inputcollid"]; + repeated Node args = 9 [json_name="args"]; + int32 location = 10 [json_name="location"]; +} + +message NamedArgExpr +{ + Node xpr = 1 [json_name="xpr"]; + Node arg = 2 [json_name="arg"]; + string name = 3 [json_name="name"]; + int32 argnumber = 4 [json_name="argnumber"]; + int32 location = 5 [json_name="location"]; +} + +message OpExpr +{ + Node xpr = 1 [json_name="xpr"]; + uint32 opno = 2 [json_name="opno"]; + uint32 opresulttype = 3 [json_name="opresulttype"]; + bool opretset = 4 [json_name="opretset"]; + uint32 opcollid = 5 [json_name="opcollid"]; + uint32 inputcollid = 6 [json_name="inputcollid"]; + repeated Node args = 7 [json_name="args"]; + int32 location = 8 [json_name="location"]; +} + +message DistinctExpr +{ + Node xpr = 1 [json_name="xpr"]; + uint32 opno = 2 [json_name="opno"]; + uint32 opresulttype = 3 [json_name="opresulttype"]; + bool opretset = 4 [json_name="opretset"]; + uint32 opcollid = 5 [json_name="opcollid"]; + uint32 inputcollid = 6 [json_name="inputcollid"]; + repeated Node args = 7 [json_name="args"]; + int32 location = 8 [json_name="location"]; +} + +message NullIfExpr +{ + Node xpr = 1 [json_name="xpr"]; + uint32 opno = 2 [json_name="opno"]; + uint32 opresulttype = 3 [json_name="opresulttype"]; + bool opretset = 4 [json_name="opretset"]; + uint32 opcollid = 5 [json_name="opcollid"]; + uint32 inputcollid = 6 [json_name="inputcollid"]; + repeated Node args = 7 [json_name="args"]; + int32 location = 8 [json_name="location"]; +} + +message ScalarArrayOpExpr +{ + Node xpr = 1 [json_name="xpr"]; + uint32 opno = 2 [json_name="opno"]; + bool use_or = 3 [json_name="useOr"]; + uint32 inputcollid = 4 [json_name="inputcollid"]; + repeated Node args = 5 [json_name="args"]; + int32 location = 6 [json_name="location"]; +} + +message BoolExpr +{ + Node xpr = 1 [json_name="xpr"]; + BoolExprType boolop = 2 [json_name="boolop"]; + repeated Node args = 3 [json_name="args"]; + int32 location = 4 [json_name="location"]; +} + +message SubLink +{ + Node xpr = 1 [json_name="xpr"]; + SubLinkType sub_link_type = 2 [json_name="subLinkType"]; + int32 sub_link_id = 3 [json_name="subLinkId"]; + Node testexpr = 4 [json_name="testexpr"]; + repeated Node oper_name = 5 [json_name="operName"]; + Node subselect = 6 [json_name="subselect"]; + int32 location = 7 [json_name="location"]; +} + +message SubPlan +{ + Node xpr = 1 [json_name="xpr"]; + SubLinkType sub_link_type = 2 [json_name="subLinkType"]; + Node testexpr = 3 [json_name="testexpr"]; + repeated Node param_ids = 4 [json_name="paramIds"]; + int32 plan_id = 5 [json_name="plan_id"]; + string plan_name = 6 [json_name="plan_name"]; + uint32 first_col_type = 7 [json_name="firstColType"]; + int32 first_col_typmod = 8 [json_name="firstColTypmod"]; + uint32 first_col_collation = 9 [json_name="firstColCollation"]; + bool use_hash_table = 10 [json_name="useHashTable"]; + bool unknown_eq_false = 11 [json_name="unknownEqFalse"]; + bool parallel_safe = 12 [json_name="parallel_safe"]; + repeated Node set_param = 13 [json_name="setParam"]; + repeated Node par_param = 14 [json_name="parParam"]; + repeated Node args = 15 [json_name="args"]; + double startup_cost = 16 [json_name="startup_cost"]; + double per_call_cost = 17 [json_name="per_call_cost"]; +} + +message AlternativeSubPlan +{ + Node xpr = 1 [json_name="xpr"]; + repeated Node subplans = 2 [json_name="subplans"]; +} + +message FieldSelect +{ + Node xpr = 1 [json_name="xpr"]; + Node arg = 2 [json_name="arg"]; + int32 fieldnum = 3 [json_name="fieldnum"]; + uint32 resulttype 
= 4 [json_name="resulttype"]; + int32 resulttypmod = 5 [json_name="resulttypmod"]; + uint32 resultcollid = 6 [json_name="resultcollid"]; +} + +message FieldStore +{ + Node xpr = 1 [json_name="xpr"]; + Node arg = 2 [json_name="arg"]; + repeated Node newvals = 3 [json_name="newvals"]; + repeated Node fieldnums = 4 [json_name="fieldnums"]; + uint32 resulttype = 5 [json_name="resulttype"]; +} + +message RelabelType +{ + Node xpr = 1 [json_name="xpr"]; + Node arg = 2 [json_name="arg"]; + uint32 resulttype = 3 [json_name="resulttype"]; + int32 resulttypmod = 4 [json_name="resulttypmod"]; + uint32 resultcollid = 5 [json_name="resultcollid"]; + CoercionForm relabelformat = 6 [json_name="relabelformat"]; + int32 location = 7 [json_name="location"]; +} + +message CoerceViaIO +{ + Node xpr = 1 [json_name="xpr"]; + Node arg = 2 [json_name="arg"]; + uint32 resulttype = 3 [json_name="resulttype"]; + uint32 resultcollid = 4 [json_name="resultcollid"]; + CoercionForm coerceformat = 5 [json_name="coerceformat"]; + int32 location = 6 [json_name="location"]; +} + +message ArrayCoerceExpr +{ + Node xpr = 1 [json_name="xpr"]; + Node arg = 2 [json_name="arg"]; + Node elemexpr = 3 [json_name="elemexpr"]; + uint32 resulttype = 4 [json_name="resulttype"]; + int32 resulttypmod = 5 [json_name="resulttypmod"]; + uint32 resultcollid = 6 [json_name="resultcollid"]; + CoercionForm coerceformat = 7 [json_name="coerceformat"]; + int32 location = 8 [json_name="location"]; +} + +message ConvertRowtypeExpr +{ + Node xpr = 1 [json_name="xpr"]; + Node arg = 2 [json_name="arg"]; + uint32 resulttype = 3 [json_name="resulttype"]; + CoercionForm convertformat = 4 [json_name="convertformat"]; + int32 location = 5 [json_name="location"]; +} + +message CollateExpr +{ + Node xpr = 1 [json_name="xpr"]; + Node arg = 2 [json_name="arg"]; + uint32 coll_oid = 3 [json_name="collOid"]; + int32 location = 4 [json_name="location"]; +} + +message CaseExpr +{ + Node xpr = 1 [json_name="xpr"]; + uint32 casetype = 2 [json_name="casetype"]; + uint32 casecollid = 3 [json_name="casecollid"]; + Node arg = 4 [json_name="arg"]; + repeated Node args = 5 [json_name="args"]; + Node defresult = 6 [json_name="defresult"]; + int32 location = 7 [json_name="location"]; +} + +message CaseWhen +{ + Node xpr = 1 [json_name="xpr"]; + Node expr = 2 [json_name="expr"]; + Node result = 3 [json_name="result"]; + int32 location = 4 [json_name="location"]; +} + +message CaseTestExpr +{ + Node xpr = 1 [json_name="xpr"]; + uint32 type_id = 2 [json_name="typeId"]; + int32 type_mod = 3 [json_name="typeMod"]; + uint32 collation = 4 [json_name="collation"]; +} + +message ArrayExpr +{ + Node xpr = 1 [json_name="xpr"]; + uint32 array_typeid = 2 [json_name="array_typeid"]; + uint32 array_collid = 3 [json_name="array_collid"]; + uint32 element_typeid = 4 [json_name="element_typeid"]; + repeated Node elements = 5 [json_name="elements"]; + bool multidims = 6 [json_name="multidims"]; + int32 location = 7 [json_name="location"]; +} + +message RowExpr +{ + Node xpr = 1 [json_name="xpr"]; + repeated Node args = 2 [json_name="args"]; + uint32 row_typeid = 3 [json_name="row_typeid"]; + CoercionForm row_format = 4 [json_name="row_format"]; + repeated Node colnames = 5 [json_name="colnames"]; + int32 location = 6 [json_name="location"]; +} + +message RowCompareExpr +{ + Node xpr = 1 [json_name="xpr"]; + RowCompareType rctype = 2 [json_name="rctype"]; + repeated Node opnos = 3 [json_name="opnos"]; + repeated Node opfamilies = 4 [json_name="opfamilies"]; + repeated Node inputcollids = 5 
[json_name="inputcollids"]; + repeated Node largs = 6 [json_name="largs"]; + repeated Node rargs = 7 [json_name="rargs"]; +} + +message CoalesceExpr +{ + Node xpr = 1 [json_name="xpr"]; + uint32 coalescetype = 2 [json_name="coalescetype"]; + uint32 coalescecollid = 3 [json_name="coalescecollid"]; + repeated Node args = 4 [json_name="args"]; + int32 location = 5 [json_name="location"]; +} + +message MinMaxExpr +{ + Node xpr = 1 [json_name="xpr"]; + uint32 minmaxtype = 2 [json_name="minmaxtype"]; + uint32 minmaxcollid = 3 [json_name="minmaxcollid"]; + uint32 inputcollid = 4 [json_name="inputcollid"]; + MinMaxOp op = 5 [json_name="op"]; + repeated Node args = 6 [json_name="args"]; + int32 location = 7 [json_name="location"]; +} + +message SQLValueFunction +{ + Node xpr = 1 [json_name="xpr"]; + SQLValueFunctionOp op = 2 [json_name="op"]; + uint32 type = 3 [json_name="type"]; + int32 typmod = 4 [json_name="typmod"]; + int32 location = 5 [json_name="location"]; +} + +message XmlExpr +{ + Node xpr = 1 [json_name="xpr"]; + XmlExprOp op = 2 [json_name="op"]; + string name = 3 [json_name="name"]; + repeated Node named_args = 4 [json_name="named_args"]; + repeated Node arg_names = 5 [json_name="arg_names"]; + repeated Node args = 6 [json_name="args"]; + XmlOptionType xmloption = 7 [json_name="xmloption"]; + bool indent = 8 [json_name="indent"]; + uint32 type = 9 [json_name="type"]; + int32 typmod = 10 [json_name="typmod"]; + int32 location = 11 [json_name="location"]; +} + +message JsonFormat +{ + JsonFormatType format_type = 1 [json_name="format_type"]; + JsonEncoding encoding = 2 [json_name="encoding"]; + int32 location = 3 [json_name="location"]; +} + +message JsonReturning +{ + JsonFormat format = 1 [json_name="format"]; + uint32 typid = 2 [json_name="typid"]; + int32 typmod = 3 [json_name="typmod"]; +} + +message JsonValueExpr +{ + Node raw_expr = 1 [json_name="raw_expr"]; + Node formatted_expr = 2 [json_name="formatted_expr"]; + JsonFormat format = 3 [json_name="format"]; +} + +message JsonConstructorExpr +{ + Node xpr = 1 [json_name="xpr"]; + JsonConstructorType type = 2 [json_name="type"]; + repeated Node args = 3 [json_name="args"]; + Node func = 4 [json_name="func"]; + Node coercion = 5 [json_name="coercion"]; + JsonReturning returning = 6 [json_name="returning"]; + bool absent_on_null = 7 [json_name="absent_on_null"]; + bool unique = 8 [json_name="unique"]; + int32 location = 9 [json_name="location"]; +} + +message JsonIsPredicate +{ + Node expr = 1 [json_name="expr"]; + JsonFormat format = 2 [json_name="format"]; + JsonValueType item_type = 3 [json_name="item_type"]; + bool unique_keys = 4 [json_name="unique_keys"]; + int32 location = 5 [json_name="location"]; +} + +message JsonBehavior +{ + JsonBehaviorType btype = 1 [json_name="btype"]; + Node expr = 2 [json_name="expr"]; + bool coerce = 3 [json_name="coerce"]; + int32 location = 4 [json_name="location"]; +} + +message JsonExpr +{ + Node xpr = 1 [json_name="xpr"]; + JsonExprOp op = 2 [json_name="op"]; + string column_name = 3 [json_name="column_name"]; + Node formatted_expr = 4 [json_name="formatted_expr"]; + JsonFormat format = 5 [json_name="format"]; + Node path_spec = 6 [json_name="path_spec"]; + JsonReturning returning = 7 [json_name="returning"]; + repeated Node passing_names = 8 [json_name="passing_names"]; + repeated Node passing_values = 9 [json_name="passing_values"]; + JsonBehavior on_empty = 10 [json_name="on_empty"]; + JsonBehavior on_error = 11 [json_name="on_error"]; + bool use_io_coercion = 12 
[json_name="use_io_coercion"]; + bool use_json_coercion = 13 [json_name="use_json_coercion"]; + JsonWrapper wrapper = 14 [json_name="wrapper"]; + bool omit_quotes = 15 [json_name="omit_quotes"]; + uint32 collation = 16 [json_name="collation"]; + int32 location = 17 [json_name="location"]; +} + +message JsonTablePath +{ + string name = 1 [json_name="name"]; +} + +message JsonTablePathScan +{ + Node plan = 1 [json_name="plan"]; + JsonTablePath path = 2 [json_name="path"]; + bool error_on_error = 3 [json_name="errorOnError"]; + Node child = 4 [json_name="child"]; + int32 col_min = 5 [json_name="colMin"]; + int32 col_max = 6 [json_name="colMax"]; +} + +message JsonTableSiblingJoin +{ + Node plan = 1 [json_name="plan"]; + Node lplan = 2 [json_name="lplan"]; + Node rplan = 3 [json_name="rplan"]; +} + +message NullTest +{ + Node xpr = 1 [json_name="xpr"]; + Node arg = 2 [json_name="arg"]; + NullTestType nulltesttype = 3 [json_name="nulltesttype"]; + bool argisrow = 4 [json_name="argisrow"]; + int32 location = 5 [json_name="location"]; +} + +message BooleanTest +{ + Node xpr = 1 [json_name="xpr"]; + Node arg = 2 [json_name="arg"]; + BoolTestType booltesttype = 3 [json_name="booltesttype"]; + int32 location = 4 [json_name="location"]; +} + +message MergeAction +{ + MergeMatchKind match_kind = 1 [json_name="matchKind"]; + CmdType command_type = 2 [json_name="commandType"]; + OverridingKind override = 3 [json_name="override"]; + Node qual = 4 [json_name="qual"]; + repeated Node target_list = 5 [json_name="targetList"]; + repeated Node update_colnos = 6 [json_name="updateColnos"]; +} + +message CoerceToDomain +{ + Node xpr = 1 [json_name="xpr"]; + Node arg = 2 [json_name="arg"]; + uint32 resulttype = 3 [json_name="resulttype"]; + int32 resulttypmod = 4 [json_name="resulttypmod"]; + uint32 resultcollid = 5 [json_name="resultcollid"]; + CoercionForm coercionformat = 6 [json_name="coercionformat"]; + int32 location = 7 [json_name="location"]; +} + +message CoerceToDomainValue +{ + Node xpr = 1 [json_name="xpr"]; + uint32 type_id = 2 [json_name="typeId"]; + int32 type_mod = 3 [json_name="typeMod"]; + uint32 collation = 4 [json_name="collation"]; + int32 location = 5 [json_name="location"]; +} + +message SetToDefault +{ + Node xpr = 1 [json_name="xpr"]; + uint32 type_id = 2 [json_name="typeId"]; + int32 type_mod = 3 [json_name="typeMod"]; + uint32 collation = 4 [json_name="collation"]; + int32 location = 5 [json_name="location"]; +} + +message CurrentOfExpr +{ + Node xpr = 1 [json_name="xpr"]; + uint32 cvarno = 2 [json_name="cvarno"]; + string cursor_name = 3 [json_name="cursor_name"]; + int32 cursor_param = 4 [json_name="cursor_param"]; +} + +message NextValueExpr +{ + Node xpr = 1 [json_name="xpr"]; + uint32 seqid = 2 [json_name="seqid"]; + uint32 type_id = 3 [json_name="typeId"]; +} + +message InferenceElem +{ + Node xpr = 1 [json_name="xpr"]; + Node expr = 2 [json_name="expr"]; + uint32 infercollid = 3 [json_name="infercollid"]; + uint32 inferopclass = 4 [json_name="inferopclass"]; +} + +message TargetEntry +{ + Node xpr = 1 [json_name="xpr"]; + Node expr = 2 [json_name="expr"]; + int32 resno = 3 [json_name="resno"]; + string resname = 4 [json_name="resname"]; + uint32 ressortgroupref = 5 [json_name="ressortgroupref"]; + uint32 resorigtbl = 6 [json_name="resorigtbl"]; + int32 resorigcol = 7 [json_name="resorigcol"]; + bool resjunk = 8 [json_name="resjunk"]; +} + +message RangeTblRef +{ + int32 rtindex = 1 [json_name="rtindex"]; +} + +message JoinExpr +{ + JoinType jointype = 1 [json_name="jointype"]; + 
bool is_natural = 2 [json_name="isNatural"]; + Node larg = 3 [json_name="larg"]; + Node rarg = 4 [json_name="rarg"]; + repeated Node using_clause = 5 [json_name="usingClause"]; + Alias join_using_alias = 6 [json_name="join_using_alias"]; + Node quals = 7 [json_name="quals"]; + Alias alias = 8 [json_name="alias"]; + int32 rtindex = 9 [json_name="rtindex"]; +} + +message FromExpr +{ + repeated Node fromlist = 1 [json_name="fromlist"]; + Node quals = 2 [json_name="quals"]; +} + +message OnConflictExpr +{ + OnConflictAction action = 1 [json_name="action"]; + repeated Node arbiter_elems = 2 [json_name="arbiterElems"]; + Node arbiter_where = 3 [json_name="arbiterWhere"]; + uint32 constraint = 4 [json_name="constraint"]; + repeated Node on_conflict_set = 5 [json_name="onConflictSet"]; + Node on_conflict_where = 6 [json_name="onConflictWhere"]; + int32 excl_rel_index = 7 [json_name="exclRelIndex"]; + repeated Node excl_rel_tlist = 8 [json_name="exclRelTlist"]; +} + +message Query +{ + CmdType command_type = 1 [json_name="commandType"]; + QuerySource query_source = 2 [json_name="querySource"]; + bool can_set_tag = 3 [json_name="canSetTag"]; + Node utility_stmt = 4 [json_name="utilityStmt"]; + int32 result_relation = 5 [json_name="resultRelation"]; + bool has_aggs = 6 [json_name="hasAggs"]; + bool has_window_funcs = 7 [json_name="hasWindowFuncs"]; + bool has_target_srfs = 8 [json_name="hasTargetSRFs"]; + bool has_sub_links = 9 [json_name="hasSubLinks"]; + bool has_distinct_on = 10 [json_name="hasDistinctOn"]; + bool has_recursive = 11 [json_name="hasRecursive"]; + bool has_modifying_cte = 12 [json_name="hasModifyingCTE"]; + bool has_for_update = 13 [json_name="hasForUpdate"]; + bool has_row_security = 14 [json_name="hasRowSecurity"]; + bool is_return = 15 [json_name="isReturn"]; + repeated Node cte_list = 16 [json_name="cteList"]; + repeated Node rtable = 17 [json_name="rtable"]; + repeated Node rteperminfos = 18 [json_name="rteperminfos"]; + FromExpr jointree = 19 [json_name="jointree"]; + repeated Node merge_action_list = 20 [json_name="mergeActionList"]; + int32 merge_target_relation = 21 [json_name="mergeTargetRelation"]; + Node merge_join_condition = 22 [json_name="mergeJoinCondition"]; + repeated Node target_list = 23 [json_name="targetList"]; + OverridingKind override = 24 [json_name="override"]; + OnConflictExpr on_conflict = 25 [json_name="onConflict"]; + repeated Node returning_list = 26 [json_name="returningList"]; + repeated Node group_clause = 27 [json_name="groupClause"]; + bool group_distinct = 28 [json_name="groupDistinct"]; + repeated Node grouping_sets = 29 [json_name="groupingSets"]; + Node having_qual = 30 [json_name="havingQual"]; + repeated Node window_clause = 31 [json_name="windowClause"]; + repeated Node distinct_clause = 32 [json_name="distinctClause"]; + repeated Node sort_clause = 33 [json_name="sortClause"]; + Node limit_offset = 34 [json_name="limitOffset"]; + Node limit_count = 35 [json_name="limitCount"]; + LimitOption limit_option = 36 [json_name="limitOption"]; + repeated Node row_marks = 37 [json_name="rowMarks"]; + Node set_operations = 38 [json_name="setOperations"]; + repeated Node constraint_deps = 39 [json_name="constraintDeps"]; + repeated Node with_check_options = 40 [json_name="withCheckOptions"]; + int32 stmt_location = 41 [json_name="stmt_location"]; + int32 stmt_len = 42 [json_name="stmt_len"]; +} + +message TypeName +{ + repeated Node names = 1 [json_name="names"]; + uint32 type_oid = 2 [json_name="typeOid"]; + bool setof = 3 [json_name="setof"]; + bool 
pct_type = 4 [json_name="pct_type"]; + repeated Node typmods = 5 [json_name="typmods"]; + int32 typemod = 6 [json_name="typemod"]; + repeated Node array_bounds = 7 [json_name="arrayBounds"]; + int32 location = 8 [json_name="location"]; +} + +message ColumnRef +{ + repeated Node fields = 1 [json_name="fields"]; + int32 location = 2 [json_name="location"]; +} + +message ParamRef +{ + int32 number = 1 [json_name="number"]; + int32 location = 2 [json_name="location"]; +} + +message A_Expr +{ + A_Expr_Kind kind = 1 [json_name="kind"]; + repeated Node name = 2 [json_name="name"]; + Node lexpr = 3 [json_name="lexpr"]; + Node rexpr = 4 [json_name="rexpr"]; + int32 location = 5 [json_name="location"]; +} + +message TypeCast +{ + Node arg = 1 [json_name="arg"]; + TypeName type_name = 2 [json_name="typeName"]; + int32 location = 3 [json_name="location"]; +} + +message CollateClause +{ + Node arg = 1 [json_name="arg"]; + repeated Node collname = 2 [json_name="collname"]; + int32 location = 3 [json_name="location"]; +} + +message RoleSpec +{ + RoleSpecType roletype = 1 [json_name="roletype"]; + string rolename = 2 [json_name="rolename"]; + int32 location = 3 [json_name="location"]; +} + +message FuncCall +{ + repeated Node funcname = 1 [json_name="funcname"]; + repeated Node args = 2 [json_name="args"]; + repeated Node agg_order = 3 [json_name="agg_order"]; + Node agg_filter = 4 [json_name="agg_filter"]; + WindowDef over = 5 [json_name="over"]; + bool agg_within_group = 6 [json_name="agg_within_group"]; + bool agg_star = 7 [json_name="agg_star"]; + bool agg_distinct = 8 [json_name="agg_distinct"]; + bool func_variadic = 9 [json_name="func_variadic"]; + CoercionForm funcformat = 10 [json_name="funcformat"]; + int32 location = 11 [json_name="location"]; +} + +message A_Star +{ +} + +message A_Indices +{ + bool is_slice = 1 [json_name="is_slice"]; + Node lidx = 2 [json_name="lidx"]; + Node uidx = 3 [json_name="uidx"]; +} + +message A_Indirection +{ + Node arg = 1 [json_name="arg"]; + repeated Node indirection = 2 [json_name="indirection"]; +} + +message A_ArrayExpr +{ + repeated Node elements = 1 [json_name="elements"]; + int32 location = 2 [json_name="location"]; +} + +message ResTarget +{ + string name = 1 [json_name="name"]; + repeated Node indirection = 2 [json_name="indirection"]; + Node val = 3 [json_name="val"]; + int32 location = 4 [json_name="location"]; +} + +message MultiAssignRef +{ + Node source = 1 [json_name="source"]; + int32 colno = 2 [json_name="colno"]; + int32 ncolumns = 3 [json_name="ncolumns"]; +} + +message SortBy +{ + Node node = 1 [json_name="node"]; + SortByDir sortby_dir = 2 [json_name="sortby_dir"]; + SortByNulls sortby_nulls = 3 [json_name="sortby_nulls"]; + repeated Node use_op = 4 [json_name="useOp"]; + int32 location = 5 [json_name="location"]; +} + +message WindowDef +{ + string name = 1 [json_name="name"]; + string refname = 2 [json_name="refname"]; + repeated Node partition_clause = 3 [json_name="partitionClause"]; + repeated Node order_clause = 4 [json_name="orderClause"]; + int32 frame_options = 5 [json_name="frameOptions"]; + Node start_offset = 6 [json_name="startOffset"]; + Node end_offset = 7 [json_name="endOffset"]; + int32 location = 8 [json_name="location"]; +} + +message RangeSubselect +{ + bool lateral = 1 [json_name="lateral"]; + Node subquery = 2 [json_name="subquery"]; + Alias alias = 3 [json_name="alias"]; +} + +message RangeFunction +{ + bool lateral = 1 [json_name="lateral"]; + bool ordinality = 2 [json_name="ordinality"]; + bool is_rowsfrom = 3 
[json_name="is_rowsfrom"]; + repeated Node functions = 4 [json_name="functions"]; + Alias alias = 5 [json_name="alias"]; + repeated Node coldeflist = 6 [json_name="coldeflist"]; +} + +message RangeTableFunc +{ + bool lateral = 1 [json_name="lateral"]; + Node docexpr = 2 [json_name="docexpr"]; + Node rowexpr = 3 [json_name="rowexpr"]; + repeated Node namespaces = 4 [json_name="namespaces"]; + repeated Node columns = 5 [json_name="columns"]; + Alias alias = 6 [json_name="alias"]; + int32 location = 7 [json_name="location"]; +} + +message RangeTableFuncCol +{ + string colname = 1 [json_name="colname"]; + TypeName type_name = 2 [json_name="typeName"]; + bool for_ordinality = 3 [json_name="for_ordinality"]; + bool is_not_null = 4 [json_name="is_not_null"]; + Node colexpr = 5 [json_name="colexpr"]; + Node coldefexpr = 6 [json_name="coldefexpr"]; + int32 location = 7 [json_name="location"]; +} + +message RangeTableSample +{ + Node relation = 1 [json_name="relation"]; + repeated Node method = 2 [json_name="method"]; + repeated Node args = 3 [json_name="args"]; + Node repeatable = 4 [json_name="repeatable"]; + int32 location = 5 [json_name="location"]; +} + +message ColumnDef +{ + string colname = 1 [json_name="colname"]; + TypeName type_name = 2 [json_name="typeName"]; + string compression = 3 [json_name="compression"]; + int32 inhcount = 4 [json_name="inhcount"]; + bool is_local = 5 [json_name="is_local"]; + bool is_not_null = 6 [json_name="is_not_null"]; + bool is_from_type = 7 [json_name="is_from_type"]; + string storage = 8 [json_name="storage"]; + string storage_name = 9 [json_name="storage_name"]; + Node raw_default = 10 [json_name="raw_default"]; + Node cooked_default = 11 [json_name="cooked_default"]; + string identity = 12 [json_name="identity"]; + RangeVar identity_sequence = 13 [json_name="identitySequence"]; + string generated = 14 [json_name="generated"]; + CollateClause coll_clause = 15 [json_name="collClause"]; + uint32 coll_oid = 16 [json_name="collOid"]; + repeated Node constraints = 17 [json_name="constraints"]; + repeated Node fdwoptions = 18 [json_name="fdwoptions"]; + int32 location = 19 [json_name="location"]; +} + +message TableLikeClause +{ + RangeVar relation = 1 [json_name="relation"]; + uint32 options = 2 [json_name="options"]; + uint32 relation_oid = 3 [json_name="relationOid"]; +} + +message IndexElem +{ + string name = 1 [json_name="name"]; + Node expr = 2 [json_name="expr"]; + string indexcolname = 3 [json_name="indexcolname"]; + repeated Node collation = 4 [json_name="collation"]; + repeated Node opclass = 5 [json_name="opclass"]; + repeated Node opclassopts = 6 [json_name="opclassopts"]; + SortByDir ordering = 7 [json_name="ordering"]; + SortByNulls nulls_ordering = 8 [json_name="nulls_ordering"]; +} + +message DefElem +{ + string defnamespace = 1 [json_name="defnamespace"]; + string defname = 2 [json_name="defname"]; + Node arg = 3 [json_name="arg"]; + DefElemAction defaction = 4 [json_name="defaction"]; + int32 location = 5 [json_name="location"]; +} + +message LockingClause +{ + repeated Node locked_rels = 1 [json_name="lockedRels"]; + LockClauseStrength strength = 2 [json_name="strength"]; + LockWaitPolicy wait_policy = 3 [json_name="waitPolicy"]; +} + +message XmlSerialize +{ + XmlOptionType xmloption = 1 [json_name="xmloption"]; + Node expr = 2 [json_name="expr"]; + TypeName type_name = 3 [json_name="typeName"]; + bool indent = 4 [json_name="indent"]; + int32 location = 5 [json_name="location"]; +} + +message PartitionElem +{ + string name = 1 
[json_name="name"]; + Node expr = 2 [json_name="expr"]; + repeated Node collation = 3 [json_name="collation"]; + repeated Node opclass = 4 [json_name="opclass"]; + int32 location = 5 [json_name="location"]; +} + +message PartitionSpec +{ + PartitionStrategy strategy = 1 [json_name="strategy"]; + repeated Node part_params = 2 [json_name="partParams"]; + int32 location = 3 [json_name="location"]; +} + +message PartitionBoundSpec +{ + string strategy = 1 [json_name="strategy"]; + bool is_default = 2 [json_name="is_default"]; + int32 modulus = 3 [json_name="modulus"]; + int32 remainder = 4 [json_name="remainder"]; + repeated Node listdatums = 5 [json_name="listdatums"]; + repeated Node lowerdatums = 6 [json_name="lowerdatums"]; + repeated Node upperdatums = 7 [json_name="upperdatums"]; + int32 location = 8 [json_name="location"]; +} + +message PartitionRangeDatum +{ + PartitionRangeDatumKind kind = 1 [json_name="kind"]; + Node value = 2 [json_name="value"]; + int32 location = 3 [json_name="location"]; +} + +message SinglePartitionSpec +{ +} + +message PartitionCmd +{ + RangeVar name = 1 [json_name="name"]; + PartitionBoundSpec bound = 2 [json_name="bound"]; + bool concurrent = 3 [json_name="concurrent"]; +} + +message RangeTblEntry +{ + Alias alias = 1 [json_name="alias"]; + Alias eref = 2 [json_name="eref"]; + RTEKind rtekind = 3 [json_name="rtekind"]; + uint32 relid = 4 [json_name="relid"]; + bool inh = 5 [json_name="inh"]; + string relkind = 6 [json_name="relkind"]; + int32 rellockmode = 7 [json_name="rellockmode"]; + uint32 perminfoindex = 8 [json_name="perminfoindex"]; + TableSampleClause tablesample = 9 [json_name="tablesample"]; + Query subquery = 10 [json_name="subquery"]; + bool security_barrier = 11 [json_name="security_barrier"]; + JoinType jointype = 12 [json_name="jointype"]; + int32 joinmergedcols = 13 [json_name="joinmergedcols"]; + repeated Node joinaliasvars = 14 [json_name="joinaliasvars"]; + repeated Node joinleftcols = 15 [json_name="joinleftcols"]; + repeated Node joinrightcols = 16 [json_name="joinrightcols"]; + Alias join_using_alias = 17 [json_name="join_using_alias"]; + repeated Node functions = 18 [json_name="functions"]; + bool funcordinality = 19 [json_name="funcordinality"]; + TableFunc tablefunc = 20 [json_name="tablefunc"]; + repeated Node values_lists = 21 [json_name="values_lists"]; + string ctename = 22 [json_name="ctename"]; + uint32 ctelevelsup = 23 [json_name="ctelevelsup"]; + bool self_reference = 24 [json_name="self_reference"]; + repeated Node coltypes = 25 [json_name="coltypes"]; + repeated Node coltypmods = 26 [json_name="coltypmods"]; + repeated Node colcollations = 27 [json_name="colcollations"]; + string enrname = 28 [json_name="enrname"]; + double enrtuples = 29 [json_name="enrtuples"]; + bool lateral = 30 [json_name="lateral"]; + bool in_from_cl = 31 [json_name="inFromCl"]; + repeated Node security_quals = 32 [json_name="securityQuals"]; +} + +message RTEPermissionInfo +{ + uint32 relid = 1 [json_name="relid"]; + bool inh = 2 [json_name="inh"]; + uint64 required_perms = 3 [json_name="requiredPerms"]; + uint32 check_as_user = 4 [json_name="checkAsUser"]; + repeated uint64 selected_cols = 5 [json_name="selectedCols"]; + repeated uint64 inserted_cols = 6 [json_name="insertedCols"]; + repeated uint64 updated_cols = 7 [json_name="updatedCols"]; +} + +message RangeTblFunction +{ + Node funcexpr = 1 [json_name="funcexpr"]; + int32 funccolcount = 2 [json_name="funccolcount"]; + repeated Node funccolnames = 3 [json_name="funccolnames"]; + repeated Node 
funccoltypes = 4 [json_name="funccoltypes"]; + repeated Node funccoltypmods = 5 [json_name="funccoltypmods"]; + repeated Node funccolcollations = 6 [json_name="funccolcollations"]; + repeated uint64 funcparams = 7 [json_name="funcparams"]; +} + +message TableSampleClause +{ + uint32 tsmhandler = 1 [json_name="tsmhandler"]; + repeated Node args = 2 [json_name="args"]; + Node repeatable = 3 [json_name="repeatable"]; +} + +message WithCheckOption +{ + WCOKind kind = 1 [json_name="kind"]; + string relname = 2 [json_name="relname"]; + string polname = 3 [json_name="polname"]; + Node qual = 4 [json_name="qual"]; + bool cascaded = 5 [json_name="cascaded"]; +} + +message SortGroupClause +{ + uint32 tle_sort_group_ref = 1 [json_name="tleSortGroupRef"]; + uint32 eqop = 2 [json_name="eqop"]; + uint32 sortop = 3 [json_name="sortop"]; + bool nulls_first = 4 [json_name="nulls_first"]; + bool hashable = 5 [json_name="hashable"]; +} + +message GroupingSet +{ + GroupingSetKind kind = 1 [json_name="kind"]; + repeated Node content = 2 [json_name="content"]; + int32 location = 3 [json_name="location"]; +} + +message WindowClause +{ + string name = 1 [json_name="name"]; + string refname = 2 [json_name="refname"]; + repeated Node partition_clause = 3 [json_name="partitionClause"]; + repeated Node order_clause = 4 [json_name="orderClause"]; + int32 frame_options = 5 [json_name="frameOptions"]; + Node start_offset = 6 [json_name="startOffset"]; + Node end_offset = 7 [json_name="endOffset"]; + uint32 start_in_range_func = 8 [json_name="startInRangeFunc"]; + uint32 end_in_range_func = 9 [json_name="endInRangeFunc"]; + uint32 in_range_coll = 10 [json_name="inRangeColl"]; + bool in_range_asc = 11 [json_name="inRangeAsc"]; + bool in_range_nulls_first = 12 [json_name="inRangeNullsFirst"]; + uint32 winref = 13 [json_name="winref"]; + bool copied_order = 14 [json_name="copiedOrder"]; +} + +message RowMarkClause +{ + uint32 rti = 1 [json_name="rti"]; + LockClauseStrength strength = 2 [json_name="strength"]; + LockWaitPolicy wait_policy = 3 [json_name="waitPolicy"]; + bool pushed_down = 4 [json_name="pushedDown"]; +} + +message WithClause +{ + repeated Node ctes = 1 [json_name="ctes"]; + bool recursive = 2 [json_name="recursive"]; + int32 location = 3 [json_name="location"]; +} + +message InferClause +{ + repeated Node index_elems = 1 [json_name="indexElems"]; + Node where_clause = 2 [json_name="whereClause"]; + string conname = 3 [json_name="conname"]; + int32 location = 4 [json_name="location"]; +} + +message OnConflictClause +{ + OnConflictAction action = 1 [json_name="action"]; + InferClause infer = 2 [json_name="infer"]; + repeated Node target_list = 3 [json_name="targetList"]; + Node where_clause = 4 [json_name="whereClause"]; + int32 location = 5 [json_name="location"]; +} + +message CTESearchClause +{ + repeated Node search_col_list = 1 [json_name="search_col_list"]; + bool search_breadth_first = 2 [json_name="search_breadth_first"]; + string search_seq_column = 3 [json_name="search_seq_column"]; + int32 location = 4 [json_name="location"]; +} + +message CTECycleClause +{ + repeated Node cycle_col_list = 1 [json_name="cycle_col_list"]; + string cycle_mark_column = 2 [json_name="cycle_mark_column"]; + Node cycle_mark_value = 3 [json_name="cycle_mark_value"]; + Node cycle_mark_default = 4 [json_name="cycle_mark_default"]; + string cycle_path_column = 5 [json_name="cycle_path_column"]; + int32 location = 6 [json_name="location"]; + uint32 cycle_mark_type = 7 [json_name="cycle_mark_type"]; + int32 cycle_mark_typmod = 8 
[json_name="cycle_mark_typmod"]; + uint32 cycle_mark_collation = 9 [json_name="cycle_mark_collation"]; + uint32 cycle_mark_neop = 10 [json_name="cycle_mark_neop"]; +} + +message CommonTableExpr +{ + string ctename = 1 [json_name="ctename"]; + repeated Node aliascolnames = 2 [json_name="aliascolnames"]; + CTEMaterialize ctematerialized = 3 [json_name="ctematerialized"]; + Node ctequery = 4 [json_name="ctequery"]; + CTESearchClause search_clause = 5 [json_name="search_clause"]; + CTECycleClause cycle_clause = 6 [json_name="cycle_clause"]; + int32 location = 7 [json_name="location"]; + bool cterecursive = 8 [json_name="cterecursive"]; + int32 cterefcount = 9 [json_name="cterefcount"]; + repeated Node ctecolnames = 10 [json_name="ctecolnames"]; + repeated Node ctecoltypes = 11 [json_name="ctecoltypes"]; + repeated Node ctecoltypmods = 12 [json_name="ctecoltypmods"]; + repeated Node ctecolcollations = 13 [json_name="ctecolcollations"]; +} + +message MergeWhenClause +{ + MergeMatchKind match_kind = 1 [json_name="matchKind"]; + CmdType command_type = 2 [json_name="commandType"]; + OverridingKind override = 3 [json_name="override"]; + Node condition = 4 [json_name="condition"]; + repeated Node target_list = 5 [json_name="targetList"]; + repeated Node values = 6 [json_name="values"]; +} + +message TriggerTransition +{ + string name = 1 [json_name="name"]; + bool is_new = 2 [json_name="isNew"]; + bool is_table = 3 [json_name="isTable"]; +} + +message JsonOutput +{ + TypeName type_name = 1 [json_name="typeName"]; + JsonReturning returning = 2 [json_name="returning"]; +} + +message JsonArgument +{ + JsonValueExpr val = 1 [json_name="val"]; + string name = 2 [json_name="name"]; +} + +message JsonFuncExpr +{ + JsonExprOp op = 1 [json_name="op"]; + string column_name = 2 [json_name="column_name"]; + JsonValueExpr context_item = 3 [json_name="context_item"]; + Node pathspec = 4 [json_name="pathspec"]; + repeated Node passing = 5 [json_name="passing"]; + JsonOutput output = 6 [json_name="output"]; + JsonBehavior on_empty = 7 [json_name="on_empty"]; + JsonBehavior on_error = 8 [json_name="on_error"]; + JsonWrapper wrapper = 9 [json_name="wrapper"]; + JsonQuotes quotes = 10 [json_name="quotes"]; + int32 location = 11 [json_name="location"]; +} + +message JsonTablePathSpec +{ + Node string = 1 [json_name="string"]; + string name = 2 [json_name="name"]; + int32 name_location = 3 [json_name="name_location"]; + int32 location = 4 [json_name="location"]; +} + +message JsonTable +{ + JsonValueExpr context_item = 1 [json_name="context_item"]; + JsonTablePathSpec pathspec = 2 [json_name="pathspec"]; + repeated Node passing = 3 [json_name="passing"]; + repeated Node columns = 4 [json_name="columns"]; + JsonBehavior on_error = 5 [json_name="on_error"]; + Alias alias = 6 [json_name="alias"]; + bool lateral = 7 [json_name="lateral"]; + int32 location = 8 [json_name="location"]; +} + +message JsonTableColumn +{ + JsonTableColumnType coltype = 1 [json_name="coltype"]; + string name = 2 [json_name="name"]; + TypeName type_name = 3 [json_name="typeName"]; + JsonTablePathSpec pathspec = 4 [json_name="pathspec"]; + JsonFormat format = 5 [json_name="format"]; + JsonWrapper wrapper = 6 [json_name="wrapper"]; + JsonQuotes quotes = 7 [json_name="quotes"]; + repeated Node columns = 8 [json_name="columns"]; + JsonBehavior on_empty = 9 [json_name="on_empty"]; + JsonBehavior on_error = 10 [json_name="on_error"]; + int32 location = 11 [json_name="location"]; +} + +message JsonKeyValue +{ + Node key = 1 [json_name="key"]; + 
JsonValueExpr value = 2 [json_name="value"]; +} + +message JsonParseExpr +{ + JsonValueExpr expr = 1 [json_name="expr"]; + JsonOutput output = 2 [json_name="output"]; + bool unique_keys = 3 [json_name="unique_keys"]; + int32 location = 4 [json_name="location"]; +} + +message JsonScalarExpr +{ + Node expr = 1 [json_name="expr"]; + JsonOutput output = 2 [json_name="output"]; + int32 location = 3 [json_name="location"]; +} + +message JsonSerializeExpr +{ + JsonValueExpr expr = 1 [json_name="expr"]; + JsonOutput output = 2 [json_name="output"]; + int32 location = 3 [json_name="location"]; +} + +message JsonObjectConstructor +{ + repeated Node exprs = 1 [json_name="exprs"]; + JsonOutput output = 2 [json_name="output"]; + bool absent_on_null = 3 [json_name="absent_on_null"]; + bool unique = 4 [json_name="unique"]; + int32 location = 5 [json_name="location"]; +} + +message JsonArrayConstructor +{ + repeated Node exprs = 1 [json_name="exprs"]; + JsonOutput output = 2 [json_name="output"]; + bool absent_on_null = 3 [json_name="absent_on_null"]; + int32 location = 4 [json_name="location"]; +} + +message JsonArrayQueryConstructor +{ + Node query = 1 [json_name="query"]; + JsonOutput output = 2 [json_name="output"]; + JsonFormat format = 3 [json_name="format"]; + bool absent_on_null = 4 [json_name="absent_on_null"]; + int32 location = 5 [json_name="location"]; +} + +message JsonAggConstructor +{ + JsonOutput output = 1 [json_name="output"]; + Node agg_filter = 2 [json_name="agg_filter"]; + repeated Node agg_order = 3 [json_name="agg_order"]; + WindowDef over = 4 [json_name="over"]; + int32 location = 5 [json_name="location"]; +} + +message JsonObjectAgg +{ + JsonAggConstructor constructor = 1 [json_name="constructor"]; + JsonKeyValue arg = 2 [json_name="arg"]; + bool absent_on_null = 3 [json_name="absent_on_null"]; + bool unique = 4 [json_name="unique"]; +} + +message JsonArrayAgg +{ + JsonAggConstructor constructor = 1 [json_name="constructor"]; + JsonValueExpr arg = 2 [json_name="arg"]; + bool absent_on_null = 3 [json_name="absent_on_null"]; +} + +message RawStmt +{ + Node stmt = 1 [json_name="stmt"]; + int32 stmt_location = 2 [json_name="stmt_location"]; + int32 stmt_len = 3 [json_name="stmt_len"]; +} + +message InsertStmt +{ + RangeVar relation = 1 [json_name="relation"]; + repeated Node cols = 2 [json_name="cols"]; + Node select_stmt = 3 [json_name="selectStmt"]; + OnConflictClause on_conflict_clause = 4 [json_name="onConflictClause"]; + repeated Node returning_list = 5 [json_name="returningList"]; + WithClause with_clause = 6 [json_name="withClause"]; + OverridingKind override = 7 [json_name="override"]; +} + +message DeleteStmt +{ + RangeVar relation = 1 [json_name="relation"]; + repeated Node using_clause = 2 [json_name="usingClause"]; + Node where_clause = 3 [json_name="whereClause"]; + repeated Node returning_list = 4 [json_name="returningList"]; + WithClause with_clause = 5 [json_name="withClause"]; +} + +message UpdateStmt +{ + RangeVar relation = 1 [json_name="relation"]; + repeated Node target_list = 2 [json_name="targetList"]; + Node where_clause = 3 [json_name="whereClause"]; + repeated Node from_clause = 4 [json_name="fromClause"]; + repeated Node returning_list = 5 [json_name="returningList"]; + WithClause with_clause = 6 [json_name="withClause"]; +} + +message MergeStmt +{ + RangeVar relation = 1 [json_name="relation"]; + Node source_relation = 2 [json_name="sourceRelation"]; + Node join_condition = 3 [json_name="joinCondition"]; + repeated Node merge_when_clauses = 4 
[json_name="mergeWhenClauses"]; + repeated Node returning_list = 5 [json_name="returningList"]; + WithClause with_clause = 6 [json_name="withClause"]; +} + +message SelectStmt +{ + repeated Node distinct_clause = 1 [json_name="distinctClause"]; + IntoClause into_clause = 2 [json_name="intoClause"]; + repeated Node target_list = 3 [json_name="targetList"]; + repeated Node from_clause = 4 [json_name="fromClause"]; + Node where_clause = 5 [json_name="whereClause"]; + repeated Node group_clause = 6 [json_name="groupClause"]; + bool group_distinct = 7 [json_name="groupDistinct"]; + Node having_clause = 8 [json_name="havingClause"]; + repeated Node window_clause = 9 [json_name="windowClause"]; + repeated Node values_lists = 10 [json_name="valuesLists"]; + repeated Node sort_clause = 11 [json_name="sortClause"]; + Node limit_offset = 12 [json_name="limitOffset"]; + Node limit_count = 13 [json_name="limitCount"]; + LimitOption limit_option = 14 [json_name="limitOption"]; + repeated Node locking_clause = 15 [json_name="lockingClause"]; + WithClause with_clause = 16 [json_name="withClause"]; + SetOperation op = 17 [json_name="op"]; + bool all = 18 [json_name="all"]; + SelectStmt larg = 19 [json_name="larg"]; + SelectStmt rarg = 20 [json_name="rarg"]; +} + +message SetOperationStmt +{ + SetOperation op = 1 [json_name="op"]; + bool all = 2 [json_name="all"]; + Node larg = 3 [json_name="larg"]; + Node rarg = 4 [json_name="rarg"]; + repeated Node col_types = 5 [json_name="colTypes"]; + repeated Node col_typmods = 6 [json_name="colTypmods"]; + repeated Node col_collations = 7 [json_name="colCollations"]; + repeated Node group_clauses = 8 [json_name="groupClauses"]; +} + +message ReturnStmt +{ + Node returnval = 1 [json_name="returnval"]; +} + +message PLAssignStmt +{ + string name = 1 [json_name="name"]; + repeated Node indirection = 2 [json_name="indirection"]; + int32 nnames = 3 [json_name="nnames"]; + SelectStmt val = 4 [json_name="val"]; + int32 location = 5 [json_name="location"]; +} + +message CreateSchemaStmt +{ + string schemaname = 1 [json_name="schemaname"]; + RoleSpec authrole = 2 [json_name="authrole"]; + repeated Node schema_elts = 3 [json_name="schemaElts"]; + bool if_not_exists = 4 [json_name="if_not_exists"]; +} + +message AlterTableStmt +{ + RangeVar relation = 1 [json_name="relation"]; + repeated Node cmds = 2 [json_name="cmds"]; + ObjectType objtype = 3 [json_name="objtype"]; + bool missing_ok = 4 [json_name="missing_ok"]; +} + +message ReplicaIdentityStmt +{ + string identity_type = 1 [json_name="identity_type"]; + string name = 2 [json_name="name"]; +} + +message AlterTableCmd +{ + AlterTableType subtype = 1 [json_name="subtype"]; + string name = 2 [json_name="name"]; + int32 num = 3 [json_name="num"]; + RoleSpec newowner = 4 [json_name="newowner"]; + Node def = 5 [json_name="def"]; + DropBehavior behavior = 6 [json_name="behavior"]; + bool missing_ok = 7 [json_name="missing_ok"]; + bool recurse = 8 [json_name="recurse"]; +} + +message AlterCollationStmt +{ + repeated Node collname = 1 [json_name="collname"]; +} + +message AlterDomainStmt +{ + string subtype = 1 [json_name="subtype"]; + repeated Node type_name = 2 [json_name="typeName"]; + string name = 3 [json_name="name"]; + Node def = 4 [json_name="def"]; + DropBehavior behavior = 5 [json_name="behavior"]; + bool missing_ok = 6 [json_name="missing_ok"]; +} + +message GrantStmt +{ + bool is_grant = 1 [json_name="is_grant"]; + GrantTargetType targtype = 2 [json_name="targtype"]; + ObjectType objtype = 3 [json_name="objtype"]; + 
repeated Node objects = 4 [json_name="objects"]; + repeated Node privileges = 5 [json_name="privileges"]; + repeated Node grantees = 6 [json_name="grantees"]; + bool grant_option = 7 [json_name="grant_option"]; + RoleSpec grantor = 8 [json_name="grantor"]; + DropBehavior behavior = 9 [json_name="behavior"]; +} + +message ObjectWithArgs +{ + repeated Node objname = 1 [json_name="objname"]; + repeated Node objargs = 2 [json_name="objargs"]; + repeated Node objfuncargs = 3 [json_name="objfuncargs"]; + bool args_unspecified = 4 [json_name="args_unspecified"]; +} + +message AccessPriv +{ + string priv_name = 1 [json_name="priv_name"]; + repeated Node cols = 2 [json_name="cols"]; +} + +message GrantRoleStmt +{ + repeated Node granted_roles = 1 [json_name="granted_roles"]; + repeated Node grantee_roles = 2 [json_name="grantee_roles"]; + bool is_grant = 3 [json_name="is_grant"]; + repeated Node opt = 4 [json_name="opt"]; + RoleSpec grantor = 5 [json_name="grantor"]; + DropBehavior behavior = 6 [json_name="behavior"]; +} + +message AlterDefaultPrivilegesStmt +{ + repeated Node options = 1 [json_name="options"]; + GrantStmt action = 2 [json_name="action"]; +} + +message CopyStmt +{ + RangeVar relation = 1 [json_name="relation"]; + Node query = 2 [json_name="query"]; + repeated Node attlist = 3 [json_name="attlist"]; + bool is_from = 4 [json_name="is_from"]; + bool is_program = 5 [json_name="is_program"]; + string filename = 6 [json_name="filename"]; + repeated Node options = 7 [json_name="options"]; + Node where_clause = 8 [json_name="whereClause"]; +} + +message VariableSetStmt +{ + VariableSetKind kind = 1 [json_name="kind"]; + string name = 2 [json_name="name"]; + repeated Node args = 3 [json_name="args"]; + bool is_local = 4 [json_name="is_local"]; +} + +message VariableShowStmt +{ + string name = 1 [json_name="name"]; +} + +message CreateStmt +{ + RangeVar relation = 1 [json_name="relation"]; + repeated Node table_elts = 2 [json_name="tableElts"]; + repeated Node inh_relations = 3 [json_name="inhRelations"]; + PartitionBoundSpec partbound = 4 [json_name="partbound"]; + PartitionSpec partspec = 5 [json_name="partspec"]; + TypeName of_typename = 6 [json_name="ofTypename"]; + repeated Node constraints = 7 [json_name="constraints"]; + repeated Node options = 8 [json_name="options"]; + OnCommitAction oncommit = 9 [json_name="oncommit"]; + string tablespacename = 10 [json_name="tablespacename"]; + string access_method = 11 [json_name="accessMethod"]; + bool if_not_exists = 12 [json_name="if_not_exists"]; +} + +message Constraint +{ + ConstrType contype = 1 [json_name="contype"]; + string conname = 2 [json_name="conname"]; + bool deferrable = 3 [json_name="deferrable"]; + bool initdeferred = 4 [json_name="initdeferred"]; + bool skip_validation = 5 [json_name="skip_validation"]; + bool initially_valid = 6 [json_name="initially_valid"]; + bool is_no_inherit = 7 [json_name="is_no_inherit"]; + Node raw_expr = 8 [json_name="raw_expr"]; + string cooked_expr = 9 [json_name="cooked_expr"]; + string generated_when = 10 [json_name="generated_when"]; + int32 inhcount = 11 [json_name="inhcount"]; + bool nulls_not_distinct = 12 [json_name="nulls_not_distinct"]; + repeated Node keys = 13 [json_name="keys"]; + repeated Node including = 14 [json_name="including"]; + repeated Node exclusions = 15 [json_name="exclusions"]; + repeated Node options = 16 [json_name="options"]; + string indexname = 17 [json_name="indexname"]; + string indexspace = 18 [json_name="indexspace"]; + bool reset_default_tblspc = 19 
[json_name="reset_default_tblspc"]; + string access_method = 20 [json_name="access_method"]; + Node where_clause = 21 [json_name="where_clause"]; + RangeVar pktable = 22 [json_name="pktable"]; + repeated Node fk_attrs = 23 [json_name="fk_attrs"]; + repeated Node pk_attrs = 24 [json_name="pk_attrs"]; + string fk_matchtype = 25 [json_name="fk_matchtype"]; + string fk_upd_action = 26 [json_name="fk_upd_action"]; + string fk_del_action = 27 [json_name="fk_del_action"]; + repeated Node fk_del_set_cols = 28 [json_name="fk_del_set_cols"]; + repeated Node old_conpfeqop = 29 [json_name="old_conpfeqop"]; + uint32 old_pktable_oid = 30 [json_name="old_pktable_oid"]; + int32 location = 31 [json_name="location"]; +} + +message CreateTableSpaceStmt +{ + string tablespacename = 1 [json_name="tablespacename"]; + RoleSpec owner = 2 [json_name="owner"]; + string location = 3 [json_name="location"]; + repeated Node options = 4 [json_name="options"]; +} + +message DropTableSpaceStmt +{ + string tablespacename = 1 [json_name="tablespacename"]; + bool missing_ok = 2 [json_name="missing_ok"]; +} + +message AlterTableSpaceOptionsStmt +{ + string tablespacename = 1 [json_name="tablespacename"]; + repeated Node options = 2 [json_name="options"]; + bool is_reset = 3 [json_name="isReset"]; +} + +message AlterTableMoveAllStmt +{ + string orig_tablespacename = 1 [json_name="orig_tablespacename"]; + ObjectType objtype = 2 [json_name="objtype"]; + repeated Node roles = 3 [json_name="roles"]; + string new_tablespacename = 4 [json_name="new_tablespacename"]; + bool nowait = 5 [json_name="nowait"]; +} + +message CreateExtensionStmt +{ + string extname = 1 [json_name="extname"]; + bool if_not_exists = 2 [json_name="if_not_exists"]; + repeated Node options = 3 [json_name="options"]; +} + +message AlterExtensionStmt +{ + string extname = 1 [json_name="extname"]; + repeated Node options = 2 [json_name="options"]; +} + +message AlterExtensionContentsStmt +{ + string extname = 1 [json_name="extname"]; + int32 action = 2 [json_name="action"]; + ObjectType objtype = 3 [json_name="objtype"]; + Node object = 4 [json_name="object"]; +} + +message CreateFdwStmt +{ + string fdwname = 1 [json_name="fdwname"]; + repeated Node func_options = 2 [json_name="func_options"]; + repeated Node options = 3 [json_name="options"]; +} + +message AlterFdwStmt +{ + string fdwname = 1 [json_name="fdwname"]; + repeated Node func_options = 2 [json_name="func_options"]; + repeated Node options = 3 [json_name="options"]; +} + +message CreateForeignServerStmt +{ + string servername = 1 [json_name="servername"]; + string servertype = 2 [json_name="servertype"]; + string version = 3 [json_name="version"]; + string fdwname = 4 [json_name="fdwname"]; + bool if_not_exists = 5 [json_name="if_not_exists"]; + repeated Node options = 6 [json_name="options"]; +} + +message AlterForeignServerStmt +{ + string servername = 1 [json_name="servername"]; + string version = 2 [json_name="version"]; + repeated Node options = 3 [json_name="options"]; + bool has_version = 4 [json_name="has_version"]; +} + +message CreateForeignTableStmt +{ + CreateStmt base_stmt = 1 [json_name="base"]; + string servername = 2 [json_name="servername"]; + repeated Node options = 3 [json_name="options"]; +} + +message CreateUserMappingStmt +{ + RoleSpec user = 1 [json_name="user"]; + string servername = 2 [json_name="servername"]; + bool if_not_exists = 3 [json_name="if_not_exists"]; + repeated Node options = 4 [json_name="options"]; +} + +message AlterUserMappingStmt +{ + RoleSpec user = 1 
[json_name="user"]; + string servername = 2 [json_name="servername"]; + repeated Node options = 3 [json_name="options"]; +} + +message DropUserMappingStmt +{ + RoleSpec user = 1 [json_name="user"]; + string servername = 2 [json_name="servername"]; + bool missing_ok = 3 [json_name="missing_ok"]; +} + +message ImportForeignSchemaStmt +{ + string server_name = 1 [json_name="server_name"]; + string remote_schema = 2 [json_name="remote_schema"]; + string local_schema = 3 [json_name="local_schema"]; + ImportForeignSchemaType list_type = 4 [json_name="list_type"]; + repeated Node table_list = 5 [json_name="table_list"]; + repeated Node options = 6 [json_name="options"]; +} + +message CreatePolicyStmt +{ + string policy_name = 1 [json_name="policy_name"]; + RangeVar table = 2 [json_name="table"]; + string cmd_name = 3 [json_name="cmd_name"]; + bool permissive = 4 [json_name="permissive"]; + repeated Node roles = 5 [json_name="roles"]; + Node qual = 6 [json_name="qual"]; + Node with_check = 7 [json_name="with_check"]; +} + +message AlterPolicyStmt +{ + string policy_name = 1 [json_name="policy_name"]; + RangeVar table = 2 [json_name="table"]; + repeated Node roles = 3 [json_name="roles"]; + Node qual = 4 [json_name="qual"]; + Node with_check = 5 [json_name="with_check"]; +} + +message CreateAmStmt +{ + string amname = 1 [json_name="amname"]; + repeated Node handler_name = 2 [json_name="handler_name"]; + string amtype = 3 [json_name="amtype"]; +} + +message CreateTrigStmt +{ + bool replace = 1 [json_name="replace"]; + bool isconstraint = 2 [json_name="isconstraint"]; + string trigname = 3 [json_name="trigname"]; + RangeVar relation = 4 [json_name="relation"]; + repeated Node funcname = 5 [json_name="funcname"]; + repeated Node args = 6 [json_name="args"]; + bool row = 7 [json_name="row"]; + int32 timing = 8 [json_name="timing"]; + int32 events = 9 [json_name="events"]; + repeated Node columns = 10 [json_name="columns"]; + Node when_clause = 11 [json_name="whenClause"]; + repeated Node transition_rels = 12 [json_name="transitionRels"]; + bool deferrable = 13 [json_name="deferrable"]; + bool initdeferred = 14 [json_name="initdeferred"]; + RangeVar constrrel = 15 [json_name="constrrel"]; +} + +message CreateEventTrigStmt +{ + string trigname = 1 [json_name="trigname"]; + string eventname = 2 [json_name="eventname"]; + repeated Node whenclause = 3 [json_name="whenclause"]; + repeated Node funcname = 4 [json_name="funcname"]; +} + +message AlterEventTrigStmt +{ + string trigname = 1 [json_name="trigname"]; + string tgenabled = 2 [json_name="tgenabled"]; +} + +message CreatePLangStmt +{ + bool replace = 1 [json_name="replace"]; + string plname = 2 [json_name="plname"]; + repeated Node plhandler = 3 [json_name="plhandler"]; + repeated Node plinline = 4 [json_name="plinline"]; + repeated Node plvalidator = 5 [json_name="plvalidator"]; + bool pltrusted = 6 [json_name="pltrusted"]; +} + +message CreateRoleStmt +{ + RoleStmtType stmt_type = 1 [json_name="stmt_type"]; + string role = 2 [json_name="role"]; + repeated Node options = 3 [json_name="options"]; +} + +message AlterRoleStmt +{ + RoleSpec role = 1 [json_name="role"]; + repeated Node options = 2 [json_name="options"]; + int32 action = 3 [json_name="action"]; +} + +message AlterRoleSetStmt +{ + RoleSpec role = 1 [json_name="role"]; + string database = 2 [json_name="database"]; + VariableSetStmt setstmt = 3 [json_name="setstmt"]; +} + +message DropRoleStmt +{ + repeated Node roles = 1 [json_name="roles"]; + bool missing_ok = 2 [json_name="missing_ok"]; +} 
+ +message CreateSeqStmt +{ + RangeVar sequence = 1 [json_name="sequence"]; + repeated Node options = 2 [json_name="options"]; + uint32 owner_id = 3 [json_name="ownerId"]; + bool for_identity = 4 [json_name="for_identity"]; + bool if_not_exists = 5 [json_name="if_not_exists"]; +} + +message AlterSeqStmt +{ + RangeVar sequence = 1 [json_name="sequence"]; + repeated Node options = 2 [json_name="options"]; + bool for_identity = 3 [json_name="for_identity"]; + bool missing_ok = 4 [json_name="missing_ok"]; +} + +message DefineStmt +{ + ObjectType kind = 1 [json_name="kind"]; + bool oldstyle = 2 [json_name="oldstyle"]; + repeated Node defnames = 3 [json_name="defnames"]; + repeated Node args = 4 [json_name="args"]; + repeated Node definition = 5 [json_name="definition"]; + bool if_not_exists = 6 [json_name="if_not_exists"]; + bool replace = 7 [json_name="replace"]; +} + +message CreateDomainStmt +{ + repeated Node domainname = 1 [json_name="domainname"]; + TypeName type_name = 2 [json_name="typeName"]; + CollateClause coll_clause = 3 [json_name="collClause"]; + repeated Node constraints = 4 [json_name="constraints"]; +} + +message CreateOpClassStmt +{ + repeated Node opclassname = 1 [json_name="opclassname"]; + repeated Node opfamilyname = 2 [json_name="opfamilyname"]; + string amname = 3 [json_name="amname"]; + TypeName datatype = 4 [json_name="datatype"]; + repeated Node items = 5 [json_name="items"]; + bool is_default = 6 [json_name="isDefault"]; +} + +message CreateOpClassItem +{ + int32 itemtype = 1 [json_name="itemtype"]; + ObjectWithArgs name = 2 [json_name="name"]; + int32 number = 3 [json_name="number"]; + repeated Node order_family = 4 [json_name="order_family"]; + repeated Node class_args = 5 [json_name="class_args"]; + TypeName storedtype = 6 [json_name="storedtype"]; +} + +message CreateOpFamilyStmt +{ + repeated Node opfamilyname = 1 [json_name="opfamilyname"]; + string amname = 2 [json_name="amname"]; +} + +message AlterOpFamilyStmt +{ + repeated Node opfamilyname = 1 [json_name="opfamilyname"]; + string amname = 2 [json_name="amname"]; + bool is_drop = 3 [json_name="isDrop"]; + repeated Node items = 4 [json_name="items"]; +} + +message DropStmt +{ + repeated Node objects = 1 [json_name="objects"]; + ObjectType remove_type = 2 [json_name="removeType"]; + DropBehavior behavior = 3 [json_name="behavior"]; + bool missing_ok = 4 [json_name="missing_ok"]; + bool concurrent = 5 [json_name="concurrent"]; +} + +message TruncateStmt +{ + repeated Node relations = 1 [json_name="relations"]; + bool restart_seqs = 2 [json_name="restart_seqs"]; + DropBehavior behavior = 3 [json_name="behavior"]; +} + +message CommentStmt +{ + ObjectType objtype = 1 [json_name="objtype"]; + Node object = 2 [json_name="object"]; + string comment = 3 [json_name="comment"]; +} + +message SecLabelStmt +{ + ObjectType objtype = 1 [json_name="objtype"]; + Node object = 2 [json_name="object"]; + string provider = 3 [json_name="provider"]; + string label = 4 [json_name="label"]; +} + +message DeclareCursorStmt +{ + string portalname = 1 [json_name="portalname"]; + int32 options = 2 [json_name="options"]; + Node query = 3 [json_name="query"]; +} + +message ClosePortalStmt +{ + string portalname = 1 [json_name="portalname"]; +} + +message FetchStmt +{ + FetchDirection direction = 1 [json_name="direction"]; + int64 how_many = 2 [json_name="howMany"]; + string portalname = 3 [json_name="portalname"]; + bool ismove = 4 [json_name="ismove"]; +} + +message IndexStmt +{ + string idxname = 1 [json_name="idxname"]; + RangeVar 
relation = 2 [json_name="relation"]; + string access_method = 3 [json_name="accessMethod"]; + string table_space = 4 [json_name="tableSpace"]; + repeated Node index_params = 5 [json_name="indexParams"]; + repeated Node index_including_params = 6 [json_name="indexIncludingParams"]; + repeated Node options = 7 [json_name="options"]; + Node where_clause = 8 [json_name="whereClause"]; + repeated Node exclude_op_names = 9 [json_name="excludeOpNames"]; + string idxcomment = 10 [json_name="idxcomment"]; + uint32 index_oid = 11 [json_name="indexOid"]; + uint32 old_number = 12 [json_name="oldNumber"]; + uint32 old_create_subid = 13 [json_name="oldCreateSubid"]; + uint32 old_first_relfilelocator_subid = 14 [json_name="oldFirstRelfilelocatorSubid"]; + bool unique = 15 [json_name="unique"]; + bool nulls_not_distinct = 16 [json_name="nulls_not_distinct"]; + bool primary = 17 [json_name="primary"]; + bool isconstraint = 18 [json_name="isconstraint"]; + bool deferrable = 19 [json_name="deferrable"]; + bool initdeferred = 20 [json_name="initdeferred"]; + bool transformed = 21 [json_name="transformed"]; + bool concurrent = 22 [json_name="concurrent"]; + bool if_not_exists = 23 [json_name="if_not_exists"]; + bool reset_default_tblspc = 24 [json_name="reset_default_tblspc"]; +} + +message CreateStatsStmt +{ + repeated Node defnames = 1 [json_name="defnames"]; + repeated Node stat_types = 2 [json_name="stat_types"]; + repeated Node exprs = 3 [json_name="exprs"]; + repeated Node relations = 4 [json_name="relations"]; + string stxcomment = 5 [json_name="stxcomment"]; + bool transformed = 6 [json_name="transformed"]; + bool if_not_exists = 7 [json_name="if_not_exists"]; +} + +message StatsElem +{ + string name = 1 [json_name="name"]; + Node expr = 2 [json_name="expr"]; +} + +message AlterStatsStmt +{ + repeated Node defnames = 1 [json_name="defnames"]; + Node stxstattarget = 2 [json_name="stxstattarget"]; + bool missing_ok = 3 [json_name="missing_ok"]; +} + +message CreateFunctionStmt +{ + bool is_procedure = 1 [json_name="is_procedure"]; + bool replace = 2 [json_name="replace"]; + repeated Node funcname = 3 [json_name="funcname"]; + repeated Node parameters = 4 [json_name="parameters"]; + TypeName return_type = 5 [json_name="returnType"]; + repeated Node options = 6 [json_name="options"]; + Node sql_body = 7 [json_name="sql_body"]; +} + +message FunctionParameter +{ + string name = 1 [json_name="name"]; + TypeName arg_type = 2 [json_name="argType"]; + FunctionParameterMode mode = 3 [json_name="mode"]; + Node defexpr = 4 [json_name="defexpr"]; +} + +message AlterFunctionStmt +{ + ObjectType objtype = 1 [json_name="objtype"]; + ObjectWithArgs func = 2 [json_name="func"]; + repeated Node actions = 3 [json_name="actions"]; +} + +message DoStmt +{ + repeated Node args = 1 [json_name="args"]; +} + +message InlineCodeBlock +{ + string source_text = 1 [json_name="source_text"]; + uint32 lang_oid = 2 [json_name="langOid"]; + bool lang_is_trusted = 3 [json_name="langIsTrusted"]; + bool atomic = 4 [json_name="atomic"]; +} + +message CallStmt +{ + FuncCall funccall = 1 [json_name="funccall"]; + FuncExpr funcexpr = 2 [json_name="funcexpr"]; + repeated Node outargs = 3 [json_name="outargs"]; +} + +message CallContext +{ + bool atomic = 1 [json_name="atomic"]; +} + +message RenameStmt +{ + ObjectType rename_type = 1 [json_name="renameType"]; + ObjectType relation_type = 2 [json_name="relationType"]; + RangeVar relation = 3 [json_name="relation"]; + Node object = 4 [json_name="object"]; + string subname = 5 
[json_name="subname"]; + string newname = 6 [json_name="newname"]; + DropBehavior behavior = 7 [json_name="behavior"]; + bool missing_ok = 8 [json_name="missing_ok"]; +} + +message AlterObjectDependsStmt +{ + ObjectType object_type = 1 [json_name="objectType"]; + RangeVar relation = 2 [json_name="relation"]; + Node object = 3 [json_name="object"]; + String extname = 4 [json_name="extname"]; + bool remove = 5 [json_name="remove"]; +} + +message AlterObjectSchemaStmt +{ + ObjectType object_type = 1 [json_name="objectType"]; + RangeVar relation = 2 [json_name="relation"]; + Node object = 3 [json_name="object"]; + string newschema = 4 [json_name="newschema"]; + bool missing_ok = 5 [json_name="missing_ok"]; +} + +message AlterOwnerStmt +{ + ObjectType object_type = 1 [json_name="objectType"]; + RangeVar relation = 2 [json_name="relation"]; + Node object = 3 [json_name="object"]; + RoleSpec newowner = 4 [json_name="newowner"]; +} + +message AlterOperatorStmt +{ + ObjectWithArgs opername = 1 [json_name="opername"]; + repeated Node options = 2 [json_name="options"]; +} + +message AlterTypeStmt +{ + repeated Node type_name = 1 [json_name="typeName"]; + repeated Node options = 2 [json_name="options"]; +} + +message RuleStmt +{ + RangeVar relation = 1 [json_name="relation"]; + string rulename = 2 [json_name="rulename"]; + Node where_clause = 3 [json_name="whereClause"]; + CmdType event = 4 [json_name="event"]; + bool instead = 5 [json_name="instead"]; + repeated Node actions = 6 [json_name="actions"]; + bool replace = 7 [json_name="replace"]; +} + +message NotifyStmt +{ + string conditionname = 1 [json_name="conditionname"]; + string payload = 2 [json_name="payload"]; +} + +message ListenStmt +{ + string conditionname = 1 [json_name="conditionname"]; +} + +message UnlistenStmt +{ + string conditionname = 1 [json_name="conditionname"]; +} + +message TransactionStmt +{ + TransactionStmtKind kind = 1 [json_name="kind"]; + repeated Node options = 2 [json_name="options"]; + string savepoint_name = 3 [json_name="savepoint_name"]; + string gid = 4 [json_name="gid"]; + bool chain = 5 [json_name="chain"]; + int32 location = 6 [json_name="location"]; +} + +message CompositeTypeStmt +{ + RangeVar typevar = 1 [json_name="typevar"]; + repeated Node coldeflist = 2 [json_name="coldeflist"]; +} + +message CreateEnumStmt +{ + repeated Node type_name = 1 [json_name="typeName"]; + repeated Node vals = 2 [json_name="vals"]; +} + +message CreateRangeStmt +{ + repeated Node type_name = 1 [json_name="typeName"]; + repeated Node params = 2 [json_name="params"]; +} + +message AlterEnumStmt +{ + repeated Node type_name = 1 [json_name="typeName"]; + string old_val = 2 [json_name="oldVal"]; + string new_val = 3 [json_name="newVal"]; + string new_val_neighbor = 4 [json_name="newValNeighbor"]; + bool new_val_is_after = 5 [json_name="newValIsAfter"]; + bool skip_if_new_val_exists = 6 [json_name="skipIfNewValExists"]; +} + +message ViewStmt +{ + RangeVar view = 1 [json_name="view"]; + repeated Node aliases = 2 [json_name="aliases"]; + Node query = 3 [json_name="query"]; + bool replace = 4 [json_name="replace"]; + repeated Node options = 5 [json_name="options"]; + ViewCheckOption with_check_option = 6 [json_name="withCheckOption"]; +} + +message LoadStmt +{ + string filename = 1 [json_name="filename"]; +} + +message CreatedbStmt +{ + string dbname = 1 [json_name="dbname"]; + repeated Node options = 2 [json_name="options"]; +} + +message AlterDatabaseStmt +{ + string dbname = 1 [json_name="dbname"]; + repeated Node options = 2 
[json_name="options"]; +} + +message AlterDatabaseRefreshCollStmt +{ + string dbname = 1 [json_name="dbname"]; +} + +message AlterDatabaseSetStmt +{ + string dbname = 1 [json_name="dbname"]; + VariableSetStmt setstmt = 2 [json_name="setstmt"]; +} + +message DropdbStmt +{ + string dbname = 1 [json_name="dbname"]; + bool missing_ok = 2 [json_name="missing_ok"]; + repeated Node options = 3 [json_name="options"]; +} + +message AlterSystemStmt +{ + VariableSetStmt setstmt = 1 [json_name="setstmt"]; +} + +message ClusterStmt +{ + RangeVar relation = 1 [json_name="relation"]; + string indexname = 2 [json_name="indexname"]; + repeated Node params = 3 [json_name="params"]; +} + +message VacuumStmt +{ + repeated Node options = 1 [json_name="options"]; + repeated Node rels = 2 [json_name="rels"]; + bool is_vacuumcmd = 3 [json_name="is_vacuumcmd"]; +} + +message VacuumRelation +{ + RangeVar relation = 1 [json_name="relation"]; + uint32 oid = 2 [json_name="oid"]; + repeated Node va_cols = 3 [json_name="va_cols"]; +} + +message ExplainStmt +{ + Node query = 1 [json_name="query"]; + repeated Node options = 2 [json_name="options"]; +} + +message CreateTableAsStmt +{ + Node query = 1 [json_name="query"]; + IntoClause into = 2 [json_name="into"]; + ObjectType objtype = 3 [json_name="objtype"]; + bool is_select_into = 4 [json_name="is_select_into"]; + bool if_not_exists = 5 [json_name="if_not_exists"]; +} + +message RefreshMatViewStmt +{ + bool concurrent = 1 [json_name="concurrent"]; + bool skip_data = 2 [json_name="skipData"]; + RangeVar relation = 3 [json_name="relation"]; +} + +message CheckPointStmt +{ +} + +message DiscardStmt +{ + DiscardMode target = 1 [json_name="target"]; +} + +message LockStmt +{ + repeated Node relations = 1 [json_name="relations"]; + int32 mode = 2 [json_name="mode"]; + bool nowait = 3 [json_name="nowait"]; +} + +message ConstraintsSetStmt +{ + repeated Node constraints = 1 [json_name="constraints"]; + bool deferred = 2 [json_name="deferred"]; +} + +message ReindexStmt +{ + ReindexObjectType kind = 1 [json_name="kind"]; + RangeVar relation = 2 [json_name="relation"]; + string name = 3 [json_name="name"]; + repeated Node params = 4 [json_name="params"]; +} + +message CreateConversionStmt +{ + repeated Node conversion_name = 1 [json_name="conversion_name"]; + string for_encoding_name = 2 [json_name="for_encoding_name"]; + string to_encoding_name = 3 [json_name="to_encoding_name"]; + repeated Node func_name = 4 [json_name="func_name"]; + bool def = 5 [json_name="def"]; +} + +message CreateCastStmt +{ + TypeName sourcetype = 1 [json_name="sourcetype"]; + TypeName targettype = 2 [json_name="targettype"]; + ObjectWithArgs func = 3 [json_name="func"]; + CoercionContext context = 4 [json_name="context"]; + bool inout = 5 [json_name="inout"]; +} + +message CreateTransformStmt +{ + bool replace = 1 [json_name="replace"]; + TypeName type_name = 2 [json_name="type_name"]; + string lang = 3 [json_name="lang"]; + ObjectWithArgs fromsql = 4 [json_name="fromsql"]; + ObjectWithArgs tosql = 5 [json_name="tosql"]; +} + +message PrepareStmt +{ + string name = 1 [json_name="name"]; + repeated Node argtypes = 2 [json_name="argtypes"]; + Node query = 3 [json_name="query"]; +} + +message ExecuteStmt +{ + string name = 1 [json_name="name"]; + repeated Node params = 2 [json_name="params"]; +} + +message DeallocateStmt +{ + string name = 1 [json_name="name"]; + bool isall = 2 [json_name="isall"]; + int32 location = 3 [json_name="location"]; +} + +message DropOwnedStmt +{ + repeated Node roles = 1 
[json_name="roles"]; + DropBehavior behavior = 2 [json_name="behavior"]; +} + +message ReassignOwnedStmt +{ + repeated Node roles = 1 [json_name="roles"]; + RoleSpec newrole = 2 [json_name="newrole"]; +} + +message AlterTSDictionaryStmt +{ + repeated Node dictname = 1 [json_name="dictname"]; + repeated Node options = 2 [json_name="options"]; +} + +message AlterTSConfigurationStmt +{ + AlterTSConfigType kind = 1 [json_name="kind"]; + repeated Node cfgname = 2 [json_name="cfgname"]; + repeated Node tokentype = 3 [json_name="tokentype"]; + repeated Node dicts = 4 [json_name="dicts"]; + bool override = 5 [json_name="override"]; + bool replace = 6 [json_name="replace"]; + bool missing_ok = 7 [json_name="missing_ok"]; +} + +message PublicationTable +{ + RangeVar relation = 1 [json_name="relation"]; + Node where_clause = 2 [json_name="whereClause"]; + repeated Node columns = 3 [json_name="columns"]; +} + +message PublicationObjSpec +{ + PublicationObjSpecType pubobjtype = 1 [json_name="pubobjtype"]; + string name = 2 [json_name="name"]; + PublicationTable pubtable = 3 [json_name="pubtable"]; + int32 location = 4 [json_name="location"]; +} + +message CreatePublicationStmt +{ + string pubname = 1 [json_name="pubname"]; + repeated Node options = 2 [json_name="options"]; + repeated Node pubobjects = 3 [json_name="pubobjects"]; + bool for_all_tables = 4 [json_name="for_all_tables"]; +} + +message AlterPublicationStmt +{ + string pubname = 1 [json_name="pubname"]; + repeated Node options = 2 [json_name="options"]; + repeated Node pubobjects = 3 [json_name="pubobjects"]; + bool for_all_tables = 4 [json_name="for_all_tables"]; + AlterPublicationAction action = 5 [json_name="action"]; +} + +message CreateSubscriptionStmt +{ + string subname = 1 [json_name="subname"]; + string conninfo = 2 [json_name="conninfo"]; + repeated Node publication = 3 [json_name="publication"]; + repeated Node options = 4 [json_name="options"]; +} + +message AlterSubscriptionStmt +{ + AlterSubscriptionType kind = 1 [json_name="kind"]; + string subname = 2 [json_name="subname"]; + string conninfo = 3 [json_name="conninfo"]; + repeated Node publication = 4 [json_name="publication"]; + repeated Node options = 5 [json_name="options"]; +} + +message DropSubscriptionStmt +{ + string subname = 1 [json_name="subname"]; + bool missing_ok = 2 [json_name="missing_ok"]; + DropBehavior behavior = 3 [json_name="behavior"]; +} + +enum QuerySource +{ + QUERY_SOURCE_UNDEFINED = 0; + QSRC_ORIGINAL = 1; + QSRC_PARSER = 2; + QSRC_INSTEAD_RULE = 3; + QSRC_QUAL_INSTEAD_RULE = 4; + QSRC_NON_INSTEAD_RULE = 5; +} + +enum SortByDir +{ + SORT_BY_DIR_UNDEFINED = 0; + SORTBY_DEFAULT = 1; + SORTBY_ASC = 2; + SORTBY_DESC = 3; + SORTBY_USING = 4; +} + +enum SortByNulls +{ + SORT_BY_NULLS_UNDEFINED = 0; + SORTBY_NULLS_DEFAULT = 1; + SORTBY_NULLS_FIRST = 2; + SORTBY_NULLS_LAST = 3; +} + +enum SetQuantifier +{ + SET_QUANTIFIER_UNDEFINED = 0; + SET_QUANTIFIER_DEFAULT = 1; + SET_QUANTIFIER_ALL = 2; + SET_QUANTIFIER_DISTINCT = 3; +} + +enum A_Expr_Kind +{ + A_EXPR_KIND_UNDEFINED = 0; + AEXPR_OP = 1; + AEXPR_OP_ANY = 2; + AEXPR_OP_ALL = 3; + AEXPR_DISTINCT = 4; + AEXPR_NOT_DISTINCT = 5; + AEXPR_NULLIF = 6; + AEXPR_IN = 7; + AEXPR_LIKE = 8; + AEXPR_ILIKE = 9; + AEXPR_SIMILAR = 10; + AEXPR_BETWEEN = 11; + AEXPR_NOT_BETWEEN = 12; + AEXPR_BETWEEN_SYM = 13; + AEXPR_NOT_BETWEEN_SYM = 14; +} + +enum RoleSpecType +{ + ROLE_SPEC_TYPE_UNDEFINED = 0; + ROLESPEC_CSTRING = 1; + ROLESPEC_CURRENT_ROLE = 2; + ROLESPEC_CURRENT_USER = 3; + ROLESPEC_SESSION_USER = 4; + ROLESPEC_PUBLIC = 
5; +} + +enum TableLikeOption +{ + TABLE_LIKE_OPTION_UNDEFINED = 0; + CREATE_TABLE_LIKE_COMMENTS = 1; + CREATE_TABLE_LIKE_COMPRESSION = 2; + CREATE_TABLE_LIKE_CONSTRAINTS = 3; + CREATE_TABLE_LIKE_DEFAULTS = 4; + CREATE_TABLE_LIKE_GENERATED = 5; + CREATE_TABLE_LIKE_IDENTITY = 6; + CREATE_TABLE_LIKE_INDEXES = 7; + CREATE_TABLE_LIKE_STATISTICS = 8; + CREATE_TABLE_LIKE_STORAGE = 9; + CREATE_TABLE_LIKE_ALL = 10; +} + +enum DefElemAction +{ + DEF_ELEM_ACTION_UNDEFINED = 0; + DEFELEM_UNSPEC = 1; + DEFELEM_SET = 2; + DEFELEM_ADD = 3; + DEFELEM_DROP = 4; +} + +enum PartitionStrategy +{ + PARTITION_STRATEGY_UNDEFINED = 0; + PARTITION_STRATEGY_LIST = 1; + PARTITION_STRATEGY_RANGE = 2; + PARTITION_STRATEGY_HASH = 3; +} + +enum PartitionRangeDatumKind +{ + PARTITION_RANGE_DATUM_KIND_UNDEFINED = 0; + PARTITION_RANGE_DATUM_MINVALUE = 1; + PARTITION_RANGE_DATUM_VALUE = 2; + PARTITION_RANGE_DATUM_MAXVALUE = 3; +} + +enum RTEKind +{ + RTEKIND_UNDEFINED = 0; + RTE_RELATION = 1; + RTE_SUBQUERY = 2; + RTE_JOIN = 3; + RTE_FUNCTION = 4; + RTE_TABLEFUNC = 5; + RTE_VALUES = 6; + RTE_CTE = 7; + RTE_NAMEDTUPLESTORE = 8; + RTE_RESULT = 9; +} + +enum WCOKind +{ + WCOKIND_UNDEFINED = 0; + WCO_VIEW_CHECK = 1; + WCO_RLS_INSERT_CHECK = 2; + WCO_RLS_UPDATE_CHECK = 3; + WCO_RLS_CONFLICT_CHECK = 4; + WCO_RLS_MERGE_UPDATE_CHECK = 5; + WCO_RLS_MERGE_DELETE_CHECK = 6; +} + +enum GroupingSetKind +{ + GROUPING_SET_KIND_UNDEFINED = 0; + GROUPING_SET_EMPTY = 1; + GROUPING_SET_SIMPLE = 2; + GROUPING_SET_ROLLUP = 3; + GROUPING_SET_CUBE = 4; + GROUPING_SET_SETS = 5; +} + +enum CTEMaterialize +{ + CTEMATERIALIZE_UNDEFINED = 0; + CTEMaterializeDefault = 1; + CTEMaterializeAlways = 2; + CTEMaterializeNever = 3; +} + +enum JsonQuotes +{ + JSON_QUOTES_UNDEFINED = 0; + JS_QUOTES_UNSPEC = 1; + JS_QUOTES_KEEP = 2; + JS_QUOTES_OMIT = 3; +} + +enum JsonTableColumnType +{ + JSON_TABLE_COLUMN_TYPE_UNDEFINED = 0; + JTC_FOR_ORDINALITY = 1; + JTC_REGULAR = 2; + JTC_EXISTS = 3; + JTC_FORMATTED = 4; + JTC_NESTED = 5; +} + +enum SetOperation +{ + SET_OPERATION_UNDEFINED = 0; + SETOP_NONE = 1; + SETOP_UNION = 2; + SETOP_INTERSECT = 3; + SETOP_EXCEPT = 4; +} + +enum ObjectType +{ + OBJECT_TYPE_UNDEFINED = 0; + OBJECT_ACCESS_METHOD = 1; + OBJECT_AGGREGATE = 2; + OBJECT_AMOP = 3; + OBJECT_AMPROC = 4; + OBJECT_ATTRIBUTE = 5; + OBJECT_CAST = 6; + OBJECT_COLUMN = 7; + OBJECT_COLLATION = 8; + OBJECT_CONVERSION = 9; + OBJECT_DATABASE = 10; + OBJECT_DEFAULT = 11; + OBJECT_DEFACL = 12; + OBJECT_DOMAIN = 13; + OBJECT_DOMCONSTRAINT = 14; + OBJECT_EVENT_TRIGGER = 15; + OBJECT_EXTENSION = 16; + OBJECT_FDW = 17; + OBJECT_FOREIGN_SERVER = 18; + OBJECT_FOREIGN_TABLE = 19; + OBJECT_FUNCTION = 20; + OBJECT_INDEX = 21; + OBJECT_LANGUAGE = 22; + OBJECT_LARGEOBJECT = 23; + OBJECT_MATVIEW = 24; + OBJECT_OPCLASS = 25; + OBJECT_OPERATOR = 26; + OBJECT_OPFAMILY = 27; + OBJECT_PARAMETER_ACL = 28; + OBJECT_POLICY = 29; + OBJECT_PROCEDURE = 30; + OBJECT_PUBLICATION = 31; + OBJECT_PUBLICATION_NAMESPACE = 32; + OBJECT_PUBLICATION_REL = 33; + OBJECT_ROLE = 34; + OBJECT_ROUTINE = 35; + OBJECT_RULE = 36; + OBJECT_SCHEMA = 37; + OBJECT_SEQUENCE = 38; + OBJECT_SUBSCRIPTION = 39; + OBJECT_STATISTIC_EXT = 40; + OBJECT_TABCONSTRAINT = 41; + OBJECT_TABLE = 42; + OBJECT_TABLESPACE = 43; + OBJECT_TRANSFORM = 44; + OBJECT_TRIGGER = 45; + OBJECT_TSCONFIGURATION = 46; + OBJECT_TSDICTIONARY = 47; + OBJECT_TSPARSER = 48; + OBJECT_TSTEMPLATE = 49; + OBJECT_TYPE = 50; + OBJECT_USER_MAPPING = 51; + OBJECT_VIEW = 52; +} + +enum DropBehavior +{ + DROP_BEHAVIOR_UNDEFINED = 0; + DROP_RESTRICT = 1; + 
DROP_CASCADE = 2; +} + +enum AlterTableType +{ + ALTER_TABLE_TYPE_UNDEFINED = 0; + AT_AddColumn = 1; + AT_AddColumnToView = 2; + AT_ColumnDefault = 3; + AT_CookedColumnDefault = 4; + AT_DropNotNull = 5; + AT_SetNotNull = 6; + AT_SetExpression = 7; + AT_DropExpression = 8; + AT_CheckNotNull = 9; + AT_SetStatistics = 10; + AT_SetOptions = 11; + AT_ResetOptions = 12; + AT_SetStorage = 13; + AT_SetCompression = 14; + AT_DropColumn = 15; + AT_AddIndex = 16; + AT_ReAddIndex = 17; + AT_AddConstraint = 18; + AT_ReAddConstraint = 19; + AT_ReAddDomainConstraint = 20; + AT_AlterConstraint = 21; + AT_ValidateConstraint = 22; + AT_AddIndexConstraint = 23; + AT_DropConstraint = 24; + AT_ReAddComment = 25; + AT_AlterColumnType = 26; + AT_AlterColumnGenericOptions = 27; + AT_ChangeOwner = 28; + AT_ClusterOn = 29; + AT_DropCluster = 30; + AT_SetLogged = 31; + AT_SetUnLogged = 32; + AT_DropOids = 33; + AT_SetAccessMethod = 34; + AT_SetTableSpace = 35; + AT_SetRelOptions = 36; + AT_ResetRelOptions = 37; + AT_ReplaceRelOptions = 38; + AT_EnableTrig = 39; + AT_EnableAlwaysTrig = 40; + AT_EnableReplicaTrig = 41; + AT_DisableTrig = 42; + AT_EnableTrigAll = 43; + AT_DisableTrigAll = 44; + AT_EnableTrigUser = 45; + AT_DisableTrigUser = 46; + AT_EnableRule = 47; + AT_EnableAlwaysRule = 48; + AT_EnableReplicaRule = 49; + AT_DisableRule = 50; + AT_AddInherit = 51; + AT_DropInherit = 52; + AT_AddOf = 53; + AT_DropOf = 54; + AT_ReplicaIdentity = 55; + AT_EnableRowSecurity = 56; + AT_DisableRowSecurity = 57; + AT_ForceRowSecurity = 58; + AT_NoForceRowSecurity = 59; + AT_GenericOptions = 60; + AT_AttachPartition = 61; + AT_DetachPartition = 62; + AT_DetachPartitionFinalize = 63; + AT_AddIdentity = 64; + AT_SetIdentity = 65; + AT_DropIdentity = 66; + AT_ReAddStatistics = 67; +} + +enum GrantTargetType +{ + GRANT_TARGET_TYPE_UNDEFINED = 0; + ACL_TARGET_OBJECT = 1; + ACL_TARGET_ALL_IN_SCHEMA = 2; + ACL_TARGET_DEFAULTS = 3; +} + +enum VariableSetKind +{ + VARIABLE_SET_KIND_UNDEFINED = 0; + VAR_SET_VALUE = 1; + VAR_SET_DEFAULT = 2; + VAR_SET_CURRENT = 3; + VAR_SET_MULTI = 4; + VAR_RESET = 5; + VAR_RESET_ALL = 6; +} + +enum ConstrType +{ + CONSTR_TYPE_UNDEFINED = 0; + CONSTR_NULL = 1; + CONSTR_NOTNULL = 2; + CONSTR_DEFAULT = 3; + CONSTR_IDENTITY = 4; + CONSTR_GENERATED = 5; + CONSTR_CHECK = 6; + CONSTR_PRIMARY = 7; + CONSTR_UNIQUE = 8; + CONSTR_EXCLUSION = 9; + CONSTR_FOREIGN = 10; + CONSTR_ATTR_DEFERRABLE = 11; + CONSTR_ATTR_NOT_DEFERRABLE = 12; + CONSTR_ATTR_DEFERRED = 13; + CONSTR_ATTR_IMMEDIATE = 14; +} + +enum ImportForeignSchemaType +{ + IMPORT_FOREIGN_SCHEMA_TYPE_UNDEFINED = 0; + FDW_IMPORT_SCHEMA_ALL = 1; + FDW_IMPORT_SCHEMA_LIMIT_TO = 2; + FDW_IMPORT_SCHEMA_EXCEPT = 3; +} + +enum RoleStmtType +{ + ROLE_STMT_TYPE_UNDEFINED = 0; + ROLESTMT_ROLE = 1; + ROLESTMT_USER = 2; + ROLESTMT_GROUP = 3; +} + +enum FetchDirection +{ + FETCH_DIRECTION_UNDEFINED = 0; + FETCH_FORWARD = 1; + FETCH_BACKWARD = 2; + FETCH_ABSOLUTE = 3; + FETCH_RELATIVE = 4; +} + +enum FunctionParameterMode +{ + FUNCTION_PARAMETER_MODE_UNDEFINED = 0; + FUNC_PARAM_IN = 1; + FUNC_PARAM_OUT = 2; + FUNC_PARAM_INOUT = 3; + FUNC_PARAM_VARIADIC = 4; + FUNC_PARAM_TABLE = 5; + FUNC_PARAM_DEFAULT = 6; +} + +enum TransactionStmtKind +{ + TRANSACTION_STMT_KIND_UNDEFINED = 0; + TRANS_STMT_BEGIN = 1; + TRANS_STMT_START = 2; + TRANS_STMT_COMMIT = 3; + TRANS_STMT_ROLLBACK = 4; + TRANS_STMT_SAVEPOINT = 5; + TRANS_STMT_RELEASE = 6; + TRANS_STMT_ROLLBACK_TO = 7; + TRANS_STMT_PREPARE = 8; + TRANS_STMT_COMMIT_PREPARED = 9; + TRANS_STMT_ROLLBACK_PREPARED = 10; +} + +enum 
ViewCheckOption +{ + VIEW_CHECK_OPTION_UNDEFINED = 0; + NO_CHECK_OPTION = 1; + LOCAL_CHECK_OPTION = 2; + CASCADED_CHECK_OPTION = 3; +} + +enum DiscardMode +{ + DISCARD_MODE_UNDEFINED = 0; + DISCARD_ALL = 1; + DISCARD_PLANS = 2; + DISCARD_SEQUENCES = 3; + DISCARD_TEMP = 4; +} + +enum ReindexObjectType +{ + REINDEX_OBJECT_TYPE_UNDEFINED = 0; + REINDEX_OBJECT_INDEX = 1; + REINDEX_OBJECT_TABLE = 2; + REINDEX_OBJECT_SCHEMA = 3; + REINDEX_OBJECT_SYSTEM = 4; + REINDEX_OBJECT_DATABASE = 5; +} + +enum AlterTSConfigType +{ + ALTER_TSCONFIG_TYPE_UNDEFINED = 0; + ALTER_TSCONFIG_ADD_MAPPING = 1; + ALTER_TSCONFIG_ALTER_MAPPING_FOR_TOKEN = 2; + ALTER_TSCONFIG_REPLACE_DICT = 3; + ALTER_TSCONFIG_REPLACE_DICT_FOR_TOKEN = 4; + ALTER_TSCONFIG_DROP_MAPPING = 5; +} + +enum PublicationObjSpecType +{ + PUBLICATION_OBJ_SPEC_TYPE_UNDEFINED = 0; + PUBLICATIONOBJ_TABLE = 1; + PUBLICATIONOBJ_TABLES_IN_SCHEMA = 2; + PUBLICATIONOBJ_TABLES_IN_CUR_SCHEMA = 3; + PUBLICATIONOBJ_CONTINUATION = 4; +} + +enum AlterPublicationAction +{ + ALTER_PUBLICATION_ACTION_UNDEFINED = 0; + AP_AddObjects = 1; + AP_DropObjects = 2; + AP_SetObjects = 3; +} + +enum AlterSubscriptionType +{ + ALTER_SUBSCRIPTION_TYPE_UNDEFINED = 0; + ALTER_SUBSCRIPTION_OPTIONS = 1; + ALTER_SUBSCRIPTION_CONNECTION = 2; + ALTER_SUBSCRIPTION_SET_PUBLICATION = 3; + ALTER_SUBSCRIPTION_ADD_PUBLICATION = 4; + ALTER_SUBSCRIPTION_DROP_PUBLICATION = 5; + ALTER_SUBSCRIPTION_REFRESH = 6; + ALTER_SUBSCRIPTION_ENABLED = 7; + ALTER_SUBSCRIPTION_SKIP = 8; +} + +enum OverridingKind +{ + OVERRIDING_KIND_UNDEFINED = 0; + OVERRIDING_NOT_SET = 1; + OVERRIDING_USER_VALUE = 2; + OVERRIDING_SYSTEM_VALUE = 3; +} + +enum OnCommitAction +{ + ON_COMMIT_ACTION_UNDEFINED = 0; + ONCOMMIT_NOOP = 1; + ONCOMMIT_PRESERVE_ROWS = 2; + ONCOMMIT_DELETE_ROWS = 3; + ONCOMMIT_DROP = 4; +} + +enum TableFuncType +{ + TABLE_FUNC_TYPE_UNDEFINED = 0; + TFT_XMLTABLE = 1; + TFT_JSON_TABLE = 2; +} + +enum ParamKind +{ + PARAM_KIND_UNDEFINED = 0; + PARAM_EXTERN = 1; + PARAM_EXEC = 2; + PARAM_SUBLINK = 3; + PARAM_MULTIEXPR = 4; +} + +enum CoercionContext +{ + COERCION_CONTEXT_UNDEFINED = 0; + COERCION_IMPLICIT = 1; + COERCION_ASSIGNMENT = 2; + COERCION_PLPGSQL = 3; + COERCION_EXPLICIT = 4; +} + +enum CoercionForm +{ + COERCION_FORM_UNDEFINED = 0; + COERCE_EXPLICIT_CALL = 1; + COERCE_EXPLICIT_CAST = 2; + COERCE_IMPLICIT_CAST = 3; + COERCE_SQL_SYNTAX = 4; +} + +enum BoolExprType +{ + BOOL_EXPR_TYPE_UNDEFINED = 0; + AND_EXPR = 1; + OR_EXPR = 2; + NOT_EXPR = 3; +} + +enum SubLinkType +{ + SUB_LINK_TYPE_UNDEFINED = 0; + EXISTS_SUBLINK = 1; + ALL_SUBLINK = 2; + ANY_SUBLINK = 3; + ROWCOMPARE_SUBLINK = 4; + EXPR_SUBLINK = 5; + MULTIEXPR_SUBLINK = 6; + ARRAY_SUBLINK = 7; + CTE_SUBLINK = 8; +} + +enum RowCompareType +{ + ROW_COMPARE_TYPE_UNDEFINED = 0; + ROWCOMPARE_LT = 1; + ROWCOMPARE_LE = 2; + ROWCOMPARE_EQ = 3; + ROWCOMPARE_GE = 4; + ROWCOMPARE_GT = 5; + ROWCOMPARE_NE = 6; +} + +enum MinMaxOp +{ + MIN_MAX_OP_UNDEFINED = 0; + IS_GREATEST = 1; + IS_LEAST = 2; +} + +enum SQLValueFunctionOp +{ + SQLVALUE_FUNCTION_OP_UNDEFINED = 0; + SVFOP_CURRENT_DATE = 1; + SVFOP_CURRENT_TIME = 2; + SVFOP_CURRENT_TIME_N = 3; + SVFOP_CURRENT_TIMESTAMP = 4; + SVFOP_CURRENT_TIMESTAMP_N = 5; + SVFOP_LOCALTIME = 6; + SVFOP_LOCALTIME_N = 7; + SVFOP_LOCALTIMESTAMP = 8; + SVFOP_LOCALTIMESTAMP_N = 9; + SVFOP_CURRENT_ROLE = 10; + SVFOP_CURRENT_USER = 11; + SVFOP_USER = 12; + SVFOP_SESSION_USER = 13; + SVFOP_CURRENT_CATALOG = 14; + SVFOP_CURRENT_SCHEMA = 15; +} + +enum XmlExprOp +{ + XML_EXPR_OP_UNDEFINED = 0; + IS_XMLCONCAT = 1; + IS_XMLELEMENT = 
2; + IS_XMLFOREST = 3; + IS_XMLPARSE = 4; + IS_XMLPI = 5; + IS_XMLROOT = 6; + IS_XMLSERIALIZE = 7; + IS_DOCUMENT = 8; +} + +enum XmlOptionType +{ + XML_OPTION_TYPE_UNDEFINED = 0; + XMLOPTION_DOCUMENT = 1; + XMLOPTION_CONTENT = 2; +} + +enum JsonEncoding +{ + JSON_ENCODING_UNDEFINED = 0; + JS_ENC_DEFAULT = 1; + JS_ENC_UTF8 = 2; + JS_ENC_UTF16 = 3; + JS_ENC_UTF32 = 4; +} + +enum JsonFormatType +{ + JSON_FORMAT_TYPE_UNDEFINED = 0; + JS_FORMAT_DEFAULT = 1; + JS_FORMAT_JSON = 2; + JS_FORMAT_JSONB = 3; +} + +enum JsonConstructorType +{ + JSON_CONSTRUCTOR_TYPE_UNDEFINED = 0; + JSCTOR_JSON_OBJECT = 1; + JSCTOR_JSON_ARRAY = 2; + JSCTOR_JSON_OBJECTAGG = 3; + JSCTOR_JSON_ARRAYAGG = 4; + JSCTOR_JSON_PARSE = 5; + JSCTOR_JSON_SCALAR = 6; + JSCTOR_JSON_SERIALIZE = 7; +} + +enum JsonValueType +{ + JSON_VALUE_TYPE_UNDEFINED = 0; + JS_TYPE_ANY = 1; + JS_TYPE_OBJECT = 2; + JS_TYPE_ARRAY = 3; + JS_TYPE_SCALAR = 4; +} + +enum JsonWrapper +{ + JSON_WRAPPER_UNDEFINED = 0; + JSW_UNSPEC = 1; + JSW_NONE = 2; + JSW_CONDITIONAL = 3; + JSW_UNCONDITIONAL = 4; +} + +enum JsonBehaviorType +{ + JSON_BEHAVIOR_TYPE_UNDEFINED = 0; + JSON_BEHAVIOR_NULL = 1; + JSON_BEHAVIOR_ERROR = 2; + JSON_BEHAVIOR_EMPTY = 3; + JSON_BEHAVIOR_TRUE = 4; + JSON_BEHAVIOR_FALSE = 5; + JSON_BEHAVIOR_UNKNOWN = 6; + JSON_BEHAVIOR_EMPTY_ARRAY = 7; + JSON_BEHAVIOR_EMPTY_OBJECT = 8; + JSON_BEHAVIOR_DEFAULT = 9; +} + +enum JsonExprOp +{ + JSON_EXPR_OP_UNDEFINED = 0; + JSON_EXISTS_OP = 1; + JSON_QUERY_OP = 2; + JSON_VALUE_OP = 3; + JSON_TABLE_OP = 4; +} + +enum NullTestType +{ + NULL_TEST_TYPE_UNDEFINED = 0; + IS_NULL = 1; + IS_NOT_NULL = 2; +} + +enum BoolTestType +{ + BOOL_TEST_TYPE_UNDEFINED = 0; + IS_TRUE = 1; + IS_NOT_TRUE = 2; + IS_FALSE = 3; + IS_NOT_FALSE = 4; + IS_UNKNOWN = 5; + IS_NOT_UNKNOWN = 6; +} + +enum MergeMatchKind +{ + MERGE_MATCH_KIND_UNDEFINED = 0; + MERGE_WHEN_MATCHED = 1; + MERGE_WHEN_NOT_MATCHED_BY_SOURCE = 2; + MERGE_WHEN_NOT_MATCHED_BY_TARGET = 3; +} + +enum CmdType +{ + CMD_TYPE_UNDEFINED = 0; + CMD_UNKNOWN = 1; + CMD_SELECT = 2; + CMD_UPDATE = 3; + CMD_INSERT = 4; + CMD_DELETE = 5; + CMD_MERGE = 6; + CMD_UTILITY = 7; + CMD_NOTHING = 8; +} + +enum JoinType +{ + JOIN_TYPE_UNDEFINED = 0; + JOIN_INNER = 1; + JOIN_LEFT = 2; + JOIN_FULL = 3; + JOIN_RIGHT = 4; + JOIN_SEMI = 5; + JOIN_ANTI = 6; + JOIN_RIGHT_ANTI = 7; + JOIN_UNIQUE_OUTER = 8; + JOIN_UNIQUE_INNER = 9; +} + +enum AggStrategy +{ + AGG_STRATEGY_UNDEFINED = 0; + AGG_PLAIN = 1; + AGG_SORTED = 2; + AGG_HASHED = 3; + AGG_MIXED = 4; +} + +enum AggSplit +{ + AGG_SPLIT_UNDEFINED = 0; + AGGSPLIT_SIMPLE = 1; + AGGSPLIT_INITIAL_SERIAL = 2; + AGGSPLIT_FINAL_DESERIAL = 3; +} + +enum SetOpCmd +{ + SET_OP_CMD_UNDEFINED = 0; + SETOPCMD_INTERSECT = 1; + SETOPCMD_INTERSECT_ALL = 2; + SETOPCMD_EXCEPT = 3; + SETOPCMD_EXCEPT_ALL = 4; +} + +enum SetOpStrategy +{ + SET_OP_STRATEGY_UNDEFINED = 0; + SETOP_SORTED = 1; + SETOP_HASHED = 2; +} + +enum OnConflictAction +{ + ON_CONFLICT_ACTION_UNDEFINED = 0; + ONCONFLICT_NONE = 1; + ONCONFLICT_NOTHING = 2; + ONCONFLICT_UPDATE = 3; +} + +enum LimitOption +{ + LIMIT_OPTION_UNDEFINED = 0; + LIMIT_OPTION_DEFAULT = 1; + LIMIT_OPTION_COUNT = 2; + LIMIT_OPTION_WITH_TIES = 3; +} + +enum LockClauseStrength +{ + LOCK_CLAUSE_STRENGTH_UNDEFINED = 0; + LCS_NONE = 1; + LCS_FORKEYSHARE = 2; + LCS_FORSHARE = 3; + LCS_FORNOKEYUPDATE = 4; + LCS_FORUPDATE = 5; +} + +enum LockWaitPolicy +{ + LOCK_WAIT_POLICY_UNDEFINED = 0; + LockWaitBlock = 1; + LockWaitSkip = 2; + LockWaitError = 3; +} + +enum LockTupleMode +{ + LOCK_TUPLE_MODE_UNDEFINED = 0; + LockTupleKeyShare = 1; + 
LockTupleShare = 2; + LockTupleNoKeyExclusive = 3; + LockTupleExclusive = 4; +} + +message ScanToken { + int32 start = 1; + int32 end = 2; + Token token = 4; + KeywordKind keyword_kind = 5; +} + +enum KeywordKind { + NO_KEYWORD = 0; + UNRESERVED_KEYWORD = 1; + COL_NAME_KEYWORD = 2; + TYPE_FUNC_NAME_KEYWORD = 3; + RESERVED_KEYWORD = 4; +} + +enum Token { + NUL = 0; + // Single-character tokens that are returned 1:1 (identical with "self" list in scan.l) + // Either supporting syntax, or single-character operators (some can be both) + // Also see https://www.postgresql.org/docs/12/sql-syntax-lexical.html#SQL-SYNTAX-SPECIAL-CHARS + ASCII_36 = 36; // "$" + ASCII_37 = 37; // "%" + ASCII_40 = 40; // "(" + ASCII_41 = 41; // ")" + ASCII_42 = 42; // "*" + ASCII_43 = 43; // "+" + ASCII_44 = 44; // "," + ASCII_45 = 45; // "-" + ASCII_46 = 46; // "." + ASCII_47 = 47; // "/" + ASCII_58 = 58; // ":" + ASCII_59 = 59; // ";" + ASCII_60 = 60; // "<" + ASCII_61 = 61; // "=" + ASCII_62 = 62; // ">" + ASCII_63 = 63; // "?" + ASCII_91 = 91; // "[" + ASCII_92 = 92; // "\" + ASCII_93 = 93; // "]" + ASCII_94 = 94; // "^" + // Named tokens in scan.l + IDENT = 258; + UIDENT = 259; + FCONST = 260; + SCONST = 261; + USCONST = 262; + BCONST = 263; + XCONST = 264; + Op = 265; + ICONST = 266; + PARAM = 267; + TYPECAST = 268; + DOT_DOT = 269; + COLON_EQUALS = 270; + EQUALS_GREATER = 271; + LESS_EQUALS = 272; + GREATER_EQUALS = 273; + NOT_EQUALS = 274; + SQL_COMMENT = 275; + C_COMMENT = 276; + ABORT_P = 277; + ABSENT = 278; + ABSOLUTE_P = 279; + ACCESS = 280; + ACTION = 281; + ADD_P = 282; + ADMIN = 283; + AFTER = 284; + AGGREGATE = 285; + ALL = 286; + ALSO = 287; + ALTER = 288; + ALWAYS = 289; + ANALYSE = 290; + ANALYZE = 291; + AND = 292; + ANY = 293; + ARRAY = 294; + AS = 295; + ASC = 296; + ASENSITIVE = 297; + ASSERTION = 298; + ASSIGNMENT = 299; + ASYMMETRIC = 300; + ATOMIC = 301; + AT = 302; + ATTACH = 303; + ATTRIBUTE = 304; + AUTHORIZATION = 305; + BACKWARD = 306; + BEFORE = 307; + BEGIN_P = 308; + BETWEEN = 309; + BIGINT = 310; + BINARY = 311; + BIT = 312; + BOOLEAN_P = 313; + BOTH = 314; + BREADTH = 315; + BY = 316; + CACHE = 317; + CALL = 318; + CALLED = 319; + CASCADE = 320; + CASCADED = 321; + CASE = 322; + CAST = 323; + CATALOG_P = 324; + CHAIN = 325; + CHAR_P = 326; + CHARACTER = 327; + CHARACTERISTICS = 328; + CHECK = 329; + CHECKPOINT = 330; + CLASS = 331; + CLOSE = 332; + CLUSTER = 333; + COALESCE = 334; + COLLATE = 335; + COLLATION = 336; + COLUMN = 337; + COLUMNS = 338; + COMMENT = 339; + COMMENTS = 340; + COMMIT = 341; + COMMITTED = 342; + COMPRESSION = 343; + CONCURRENTLY = 344; + CONDITIONAL = 345; + CONFIGURATION = 346; + CONFLICT = 347; + CONNECTION = 348; + CONSTRAINT = 349; + CONSTRAINTS = 350; + CONTENT_P = 351; + CONTINUE_P = 352; + CONVERSION_P = 353; + COPY = 354; + COST = 355; + CREATE = 356; + CROSS = 357; + CSV = 358; + CUBE = 359; + CURRENT_P = 360; + CURRENT_CATALOG = 361; + CURRENT_DATE = 362; + CURRENT_ROLE = 363; + CURRENT_SCHEMA = 364; + CURRENT_TIME = 365; + CURRENT_TIMESTAMP = 366; + CURRENT_USER = 367; + CURSOR = 368; + CYCLE = 369; + DATA_P = 370; + DATABASE = 371; + DAY_P = 372; + DEALLOCATE = 373; + DEC = 374; + DECIMAL_P = 375; + DECLARE = 376; + DEFAULT = 377; + DEFAULTS = 378; + DEFERRABLE = 379; + DEFERRED = 380; + DEFINER = 381; + DELETE_P = 382; + DELIMITER = 383; + DELIMITERS = 384; + DEPENDS = 385; + DEPTH = 386; + DESC = 387; + DETACH = 388; + DICTIONARY = 389; + DISABLE_P = 390; + DISCARD = 391; + DISTINCT = 392; + DO = 393; + DOCUMENT_P = 394; + DOMAIN_P = 395; + 
DOUBLE_P = 396; + DROP = 397; + EACH = 398; + ELSE = 399; + EMPTY_P = 400; + ENABLE_P = 401; + ENCODING = 402; + ENCRYPTED = 403; + END_P = 404; + ENUM_P = 405; + ERROR_P = 406; + ESCAPE = 407; + EVENT = 408; + EXCEPT = 409; + EXCLUDE = 410; + EXCLUDING = 411; + EXCLUSIVE = 412; + EXECUTE = 413; + EXISTS = 414; + EXPLAIN = 415; + EXPRESSION = 416; + EXTENSION = 417; + EXTERNAL = 418; + EXTRACT = 419; + FALSE_P = 420; + FAMILY = 421; + FETCH = 422; + FILTER = 423; + FINALIZE = 424; + FIRST_P = 425; + FLOAT_P = 426; + FOLLOWING = 427; + FOR = 428; + FORCE = 429; + FOREIGN = 430; + FORMAT = 431; + FORWARD = 432; + FREEZE = 433; + FROM = 434; + FULL = 435; + FUNCTION = 436; + FUNCTIONS = 437; + GENERATED = 438; + GLOBAL = 439; + GRANT = 440; + GRANTED = 441; + GREATEST = 442; + GROUP_P = 443; + GROUPING = 444; + GROUPS = 445; + HANDLER = 446; + HAVING = 447; + HEADER_P = 448; + HOLD = 449; + HOUR_P = 450; + IDENTITY_P = 451; + IF_P = 452; + ILIKE = 453; + IMMEDIATE = 454; + IMMUTABLE = 455; + IMPLICIT_P = 456; + IMPORT_P = 457; + IN_P = 458; + INCLUDE = 459; + INCLUDING = 460; + INCREMENT = 461; + INDENT = 462; + INDEX = 463; + INDEXES = 464; + INHERIT = 465; + INHERITS = 466; + INITIALLY = 467; + INLINE_P = 468; + INNER_P = 469; + INOUT = 470; + INPUT_P = 471; + INSENSITIVE = 472; + INSERT = 473; + INSTEAD = 474; + INT_P = 475; + INTEGER = 476; + INTERSECT = 477; + INTERVAL = 478; + INTO = 479; + INVOKER = 480; + IS = 481; + ISNULL = 482; + ISOLATION = 483; + JOIN = 484; + JSON = 485; + JSON_ARRAY = 486; + JSON_ARRAYAGG = 487; + JSON_EXISTS = 488; + JSON_OBJECT = 489; + JSON_OBJECTAGG = 490; + JSON_QUERY = 491; + JSON_SCALAR = 492; + JSON_SERIALIZE = 493; + JSON_TABLE = 494; + JSON_VALUE = 495; + KEEP = 496; + KEY = 497; + KEYS = 498; + LABEL = 499; + LANGUAGE = 500; + LARGE_P = 501; + LAST_P = 502; + LATERAL_P = 503; + LEADING = 504; + LEAKPROOF = 505; + LEAST = 506; + LEFT = 507; + LEVEL = 508; + LIKE = 509; + LIMIT = 510; + LISTEN = 511; + LOAD = 512; + LOCAL = 513; + LOCALTIME = 514; + LOCALTIMESTAMP = 515; + LOCATION = 516; + LOCK_P = 517; + LOCKED = 518; + LOGGED = 519; + MAPPING = 520; + MATCH = 521; + MATCHED = 522; + MATERIALIZED = 523; + MAXVALUE = 524; + MERGE = 525; + MERGE_ACTION = 526; + METHOD = 527; + MINUTE_P = 528; + MINVALUE = 529; + MODE = 530; + MONTH_P = 531; + MOVE = 532; + NAME_P = 533; + NAMES = 534; + NATIONAL = 535; + NATURAL = 536; + NCHAR = 537; + NESTED = 538; + NEW = 539; + NEXT = 540; + NFC = 541; + NFD = 542; + NFKC = 543; + NFKD = 544; + NO = 545; + NONE = 546; + NORMALIZE = 547; + NORMALIZED = 548; + NOT = 549; + NOTHING = 550; + NOTIFY = 551; + NOTNULL = 552; + NOWAIT = 553; + NULL_P = 554; + NULLIF = 555; + NULLS_P = 556; + NUMERIC = 557; + OBJECT_P = 558; + OF = 559; + OFF = 560; + OFFSET = 561; + OIDS = 562; + OLD = 563; + OMIT = 564; + ON = 565; + ONLY = 566; + OPERATOR = 567; + OPTION = 568; + OPTIONS = 569; + OR = 570; + ORDER = 571; + ORDINALITY = 572; + OTHERS = 573; + OUT_P = 574; + OUTER_P = 575; + OVER = 576; + OVERLAPS = 577; + OVERLAY = 578; + OVERRIDING = 579; + OWNED = 580; + OWNER = 581; + PARALLEL = 582; + PARAMETER = 583; + PARSER = 584; + PARTIAL = 585; + PARTITION = 586; + PASSING = 587; + PASSWORD = 588; + PATH = 589; + PLACING = 590; + PLAN = 591; + PLANS = 592; + POLICY = 593; + POSITION = 594; + PRECEDING = 595; + PRECISION = 596; + PRESERVE = 597; + PREPARE = 598; + PREPARED = 599; + PRIMARY = 600; + PRIOR = 601; + PRIVILEGES = 602; + PROCEDURAL = 603; + PROCEDURE = 604; + PROCEDURES = 605; + PROGRAM = 606; + PUBLICATION = 607; + 
QUOTE = 608; + QUOTES = 609; + RANGE = 610; + READ = 611; + REAL = 612; + REASSIGN = 613; + RECHECK = 614; + RECURSIVE = 615; + REF_P = 616; + REFERENCES = 617; + REFERENCING = 618; + REFRESH = 619; + REINDEX = 620; + RELATIVE_P = 621; + RELEASE = 622; + RENAME = 623; + REPEATABLE = 624; + REPLACE = 625; + REPLICA = 626; + RESET = 627; + RESTART = 628; + RESTRICT = 629; + RETURN = 630; + RETURNING = 631; + RETURNS = 632; + REVOKE = 633; + RIGHT = 634; + ROLE = 635; + ROLLBACK = 636; + ROLLUP = 637; + ROUTINE = 638; + ROUTINES = 639; + ROW = 640; + ROWS = 641; + RULE = 642; + SAVEPOINT = 643; + SCALAR = 644; + SCHEMA = 645; + SCHEMAS = 646; + SCROLL = 647; + SEARCH = 648; + SECOND_P = 649; + SECURITY = 650; + SELECT = 651; + SEQUENCE = 652; + SEQUENCES = 653; + SERIALIZABLE = 654; + SERVER = 655; + SESSION = 656; + SESSION_USER = 657; + SET = 658; + SETS = 659; + SETOF = 660; + SHARE = 661; + SHOW = 662; + SIMILAR = 663; + SIMPLE = 664; + SKIP = 665; + SMALLINT = 666; + SNAPSHOT = 667; + SOME = 668; + SOURCE = 669; + SQL_P = 670; + STABLE = 671; + STANDALONE_P = 672; + START = 673; + STATEMENT = 674; + STATISTICS = 675; + STDIN = 676; + STDOUT = 677; + STORAGE = 678; + STORED = 679; + STRICT_P = 680; + STRING_P = 681; + STRIP_P = 682; + SUBSCRIPTION = 683; + SUBSTRING = 684; + SUPPORT = 685; + SYMMETRIC = 686; + SYSID = 687; + SYSTEM_P = 688; + SYSTEM_USER = 689; + TABLE = 690; + TABLES = 691; + TABLESAMPLE = 692; + TABLESPACE = 693; + TARGET = 694; + TEMP = 695; + TEMPLATE = 696; + TEMPORARY = 697; + TEXT_P = 698; + THEN = 699; + TIES = 700; + TIME = 701; + TIMESTAMP = 702; + TO = 703; + TRAILING = 704; + TRANSACTION = 705; + TRANSFORM = 706; + TREAT = 707; + TRIGGER = 708; + TRIM = 709; + TRUE_P = 710; + TRUNCATE = 711; + TRUSTED = 712; + TYPE_P = 713; + TYPES_P = 714; + UESCAPE = 715; + UNBOUNDED = 716; + UNCONDITIONAL = 717; + UNCOMMITTED = 718; + UNENCRYPTED = 719; + UNION = 720; + UNIQUE = 721; + UNKNOWN = 722; + UNLISTEN = 723; + UNLOGGED = 724; + UNTIL = 725; + UPDATE = 726; + USER = 727; + USING = 728; + VACUUM = 729; + VALID = 730; + VALIDATE = 731; + VALIDATOR = 732; + VALUE_P = 733; + VALUES = 734; + VARCHAR = 735; + VARIADIC = 736; + VARYING = 737; + VERBOSE = 738; + VERSION_P = 739; + VIEW = 740; + VIEWS = 741; + VOLATILE = 742; + WHEN = 743; + WHERE = 744; + WHITESPACE_P = 745; + WINDOW = 746; + WITH = 747; + WITHIN = 748; + WITHOUT = 749; + WORK = 750; + WRAPPER = 751; + WRITE = 752; + XML_P = 753; + XMLATTRIBUTES = 754; + XMLCONCAT = 755; + XMLELEMENT = 756; + XMLEXISTS = 757; + XMLFOREST = 758; + XMLNAMESPACES = 759; + XMLPARSE = 760; + XMLPI = 761; + XMLROOT = 762; + XMLSERIALIZE = 763; + XMLTABLE = 764; + YEAR_P = 765; + YES_P = 766; + ZONE = 767; + FORMAT_LA = 768; + NOT_LA = 769; + NULLS_LA = 770; + WITH_LA = 771; + WITHOUT_LA = 772; + MODE_TYPE_NAME = 773; + MODE_PLPGSQL_EXPR = 774; + MODE_PLPGSQL_ASSIGN1 = 775; + MODE_PLPGSQL_ASSIGN2 = 776; + MODE_PLPGSQL_ASSIGN3 = 777; + UMINUS = 778; +} diff --git a/crates/pgt_query_macros/src/iter_mut.rs b/crates/pgt_query_macros/src/iter_mut.rs new file mode 100644 index 00000000..b0bc10de --- /dev/null +++ b/crates/pgt_query_macros/src/iter_mut.rs @@ -0,0 +1,142 @@ +use proc_macro2::TokenStream; +use quote::{format_ident, quote}; + +use crate::proto_analyser::{FieldType, Node, ProtoAnalyzer}; + +pub fn iter_mut_mod(analyser: ProtoAnalyzer) -> proc_macro2::TokenStream { + let enum_variants = analyser.enum_variants(); + let nodes = analyser.nodes(); + + let mut node_variant_names = Vec::new(); + let mut node_property_handlers = 
Vec::new(); + + // Create a map from type name to enum variant name + let mut type_to_variant: std::collections::HashMap = + std::collections::HashMap::new(); + for variant in &enum_variants { + type_to_variant.insert(variant.type_name.clone(), variant.name.clone()); + } + + for node in &nodes { + // Use the enum variant name from the Node enum + if let Some(variant_name) = type_to_variant.get(&node.name) { + let variant_ident = format_ident!("{}", variant_name); + node_variant_names.push(variant_ident); + + let property_handlers = property_handlers(node); + node_property_handlers.push(property_handlers); + } + } + + quote! { + use std::collections::VecDeque; + + /// An iterator that provides mutable access to all nodes in an AST tree. + /// + /// This iterator performs a depth-first traversal of the AST, yielding mutable + /// references to each node. It uses unsafe operations internally to work with + /// raw pointers in the AST structure. + /// + /// # Safety Requirements + /// + /// Users of this iterator must ensure: + /// + /// - The root `NodeMut` passed to `new()` must point to a valid, properly + /// constructed AST that remains alive for the iterator's lifetime + /// - No other code concurrently accesses or modifies the AST while this + /// iterator is in use (exclusive access required) + /// - The AST structure must not be modified through other means while + /// iterating (e.g., don't modify parent nodes while iterating children) + /// + /// # Panics + /// + /// This iterator may panic or cause undefined behavior if the safety + /// requirements above are violated. + /// ``` + pub struct NodeMutIterator { + stack: VecDeque, + } + + impl NodeMutIterator { + /// Creates a new iterator starting from the given root node. + /// + /// # Safety + /// + /// The caller must ensure that `roots` points to valid AST nodes + /// and that the safety requirements documented on `NodeMutIterator` + /// are met throughout the iterator's lifetime. + pub fn new(root: NodeMut) -> Self { + Self { + stack: VecDeque::from([root]), + } + } + } + + impl Iterator for NodeMutIterator { + type Item = NodeMut; + + fn next(&mut self) -> Option { + if self.stack.is_empty() { + return None; + } + + let node = self.stack.pop_front().unwrap(); + + unsafe { + match node { + #(NodeMut::#node_variant_names(n) => {#node_property_handlers}),*, + _ => { + // Some node types don't have any child nodes to traverse + } + }; + } + + Some(node) + } + } + } +} + +fn property_handlers(node: &Node) -> TokenStream { + let handlers: Vec = node + .fields + .iter() + .filter_map(|field| { + let field_name = format_ident!("{}", field.name.as_str()); + if matches!(field.r#type, FieldType::Node(_)) && field.repeated { + Some(quote! { + n.#field_name + .iter_mut() + .for_each(|x| { + if let Some(n) = x.node.as_mut() { + self.stack.push_back(n.to_mut()); + } + }); + }) + } else if matches!(field.r#type, FieldType::Node(_)) && !field.is_one_of { + if field.r#type == FieldType::Node(None) { + Some(quote! { + if let Some(n) = n.#field_name.as_mut() { + if let Some(n) = n.node.as_mut() { + self.stack.push_back(n.to_mut()); + } + } + }) + } else { + Some(quote! { + if let Some(field_node) = n.#field_name.as_mut() { + self.stack.push_back(field_node.to_mut()); + } + }) + } + } else { + None // Filter out non-node fields + } + }) + .collect(); + + quote! 
{ + let n = n.as_mut().unwrap(); + #(#handlers)* + } +} diff --git a/crates/pgt_query_macros/src/iter_ref.rs b/crates/pgt_query_macros/src/iter_ref.rs new file mode 100644 index 00000000..8e232340 --- /dev/null +++ b/crates/pgt_query_macros/src/iter_ref.rs @@ -0,0 +1,105 @@ +use proc_macro2::TokenStream; +use quote::{format_ident, quote}; + +use crate::proto_analyser::{FieldType, Node, ProtoAnalyzer}; + +pub fn iter_ref_mod(analyser: ProtoAnalyzer) -> proc_macro2::TokenStream { + let enum_variants = analyser.enum_variants(); + let nodes = analyser.nodes(); + + let mut node_variant_names = Vec::new(); + let mut node_property_handlers = Vec::new(); + + let mut type_to_variant: std::collections::HashMap = + std::collections::HashMap::new(); + for variant in &enum_variants { + type_to_variant.insert(variant.type_name.clone(), variant.name.clone()); + } + + for node in &nodes { + if let Some(variant_name) = type_to_variant.get(&node.name) { + let variant_ident = format_ident!("{}", variant_name); + node_variant_names.push(variant_ident); + + let property_handlers = property_handlers(node); + node_property_handlers.push(quote! { + #(#property_handlers)* + }); + } + } + + quote! { + use std::collections::VecDeque; + + pub struct NodeRefIterator<'a>{ + stack: VecDeque>, + } + + impl<'a> NodeRefIterator<'a> { + pub fn new(root: NodeRef<'a>) -> Self { + Self { + stack: VecDeque::from([root]), + } + } + } + + impl<'a> Iterator for NodeRefIterator<'a> { + type Item = NodeRef<'a>; + + fn next(&mut self) -> Option { + if self.stack.is_empty() { + return None; + } + + let node = self.stack.pop_front().unwrap(); + + match &node { + #(NodeRef::#node_variant_names(n) => {#node_property_handlers}),*, + _ => { + // Some node types don't have any child nodes to traverse + } + }; + + Some(node) + } + } + } +} + +fn property_handlers(node: &Node) -> Vec { + node.fields + .iter() + .filter_map(|field| { + let field_name = format_ident!("{}", field.name.as_str()); + if matches!(field.r#type, FieldType::Node(_)) && field.repeated { + Some(quote! { + n.#field_name + .iter() + .for_each(|x| { + if let Some(n) = x.node.as_ref() { + self.stack.push_back(n.to_ref()); + } + }); + }) + } else if matches!(field.r#type, FieldType::Node(_)) && !field.is_one_of { + if field.r#type == FieldType::Node(None) { + Some(quote! { + if let Some(n) = &n.#field_name { + if let Some(n) = n.node.as_ref() { + self.stack.push_back(n.to_ref()); + } + } + }) + } else { + Some(quote! { + if let Some(field_node) = &n.#field_name { + self.stack.push_back(field_node.to_ref()); + } + }) + } + } else { + None // Filter out non-node fields + } + }) + .collect() +} diff --git a/crates/pgt_query_macros/src/lib.rs b/crates/pgt_query_macros/src/lib.rs new file mode 100644 index 00000000..718da161 --- /dev/null +++ b/crates/pgt_query_macros/src/lib.rs @@ -0,0 +1,106 @@ +use iter_mut::iter_mut_mod; +use iter_ref::iter_ref_mod; +use node_enum::node_enum_mod; +use node_mut::node_mut_mod; +use node_ref::node_ref_mod; +use node_structs::node_structs_mod; +use proto_analyser::ProtoAnalyzer; +use quote::quote; +use std::path; + +mod iter_mut; +mod iter_ref; +mod node_enum; +mod node_mut; +mod node_ref; +mod node_structs; +mod proto_analyser; + +#[proc_macro] +pub fn node_ref_codegen(_input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let analyser = ProtoAnalyzer::from(&proto_file_path()).unwrap(); + + let node_ref = node_ref_mod(analyser); + + quote! 
{ + use crate::*; + + #node_ref + } + .into() +} + +#[proc_macro] +pub fn node_mut_codegen(_input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let analyser = ProtoAnalyzer::from(&proto_file_path()).unwrap(); + + let node_mut = node_mut_mod(analyser); + + quote! { + use crate::*; + + #node_mut + } + .into() +} + +#[proc_macro] +pub fn node_structs_codegen(_input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let analyser = ProtoAnalyzer::from(&proto_file_path()).unwrap(); + + let conversions = node_structs_mod(analyser); + + quote! { + use crate::*; + + #conversions + } + .into() +} + +#[proc_macro] +pub fn node_enum_codegen(_input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let analyser = ProtoAnalyzer::from(&proto_file_path()).unwrap(); + + let node_enum = node_enum_mod(analyser); + + quote! { + use crate::*; + + #node_enum + } + .into() +} + +#[proc_macro] +pub fn iter_ref_codegen(_input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let analyser = ProtoAnalyzer::from(&proto_file_path()).unwrap(); + + let iterator = iter_ref_mod(analyser); + + quote! { + use crate::*; + + #iterator + } + .into() +} + +#[proc_macro] +pub fn iter_mut_codegen(_input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let analyser = ProtoAnalyzer::from(&proto_file_path()).unwrap(); + + let iterator = iter_mut_mod(analyser); + + quote! { + use crate::*; + + #iterator + } + .into() +} + +fn proto_file_path() -> path::PathBuf { + // Use the path set by the build script + path::PathBuf::from(env!("PG_QUERY_PROTO_PATH")) +} diff --git a/crates/pgt_query_macros/src/node_enum.rs b/crates/pgt_query_macros/src/node_enum.rs new file mode 100644 index 00000000..0801bbab --- /dev/null +++ b/crates/pgt_query_macros/src/node_enum.rs @@ -0,0 +1,44 @@ +use quote::{format_ident, quote}; + +use crate::proto_analyser::ProtoAnalyzer; + +pub fn node_enum_mod(analyser: ProtoAnalyzer) -> proc_macro2::TokenStream { + let node_variants = analyser.enum_variants(); + + let mut to_ref_matches = Vec::new(); + let mut to_mut_matches = Vec::new(); + + for variant in &node_variants { + let variant_ident = format_ident!("{}", &variant.name); + + to_ref_matches.push(quote! { + NodeEnum::#variant_ident(n) => NodeRef::#variant_ident(&n) + }); + + if variant.boxed { + to_mut_matches.push(quote! { + NodeEnum::#variant_ident(n) => NodeMut::#variant_ident(&mut **n as *mut _) + }); + } else { + to_mut_matches.push(quote! { + NodeEnum::#variant_ident(n) => NodeMut::#variant_ident(n as *mut _) + }); + } + } + + quote! { + impl NodeEnum { + pub fn to_ref(&self) -> NodeRef { + match self { + #(#to_ref_matches,)* + } + } + + pub fn to_mut(&mut self) -> NodeMut { + match self { + #(#to_mut_matches,)* + } + } + } + } +} diff --git a/crates/pgt_query_macros/src/node_mut.rs b/crates/pgt_query_macros/src/node_mut.rs new file mode 100644 index 00000000..52120c1a --- /dev/null +++ b/crates/pgt_query_macros/src/node_mut.rs @@ -0,0 +1,50 @@ +use quote::{format_ident, quote}; + +use crate::proto_analyser::ProtoAnalyzer; + +pub fn node_mut_mod(analyser: ProtoAnalyzer) -> proc_macro2::TokenStream { + let node_variants = analyser.enum_variants(); + + let mut to_enum_matches = Vec::new(); + let mut node_enum_variants = Vec::new(); + + for variant in &node_variants { + let variant_ident = format_ident!("{}", &variant.name); + let type_ident = format_ident!("{}", &variant.type_name); + + if variant.boxed { + // For boxed variants, we need to box the cloned value + to_enum_matches.push(quote! 
{ + NodeMut::#variant_ident(n) => Ok(NodeEnum::#variant_ident(Box::new(n.as_ref().ok_or(err)?.clone()))) + }); + } else { + // For non-boxed variants, clone directly + to_enum_matches.push(quote! { + NodeMut::#variant_ident(n) => Ok(NodeEnum::#variant_ident(n.as_ref().ok_or(err)?.clone())) + }); + } + + node_enum_variants.push(quote! { + #variant_ident(*mut protobuf::#type_ident) + }); + } + + quote! { + #[derive(Debug, Copy, Clone)] + pub enum NodeMut { + #(#node_enum_variants, )* + } + + impl NodeMut { + pub fn to_enum(self) -> Result { + unsafe { + let err = Error::InvalidPointer; + match self { + #(#to_enum_matches,)* + _ => Err(Error::InvalidPointer), + } + } + } + } + } +} diff --git a/crates/pgt_query_macros/src/node_ref.rs b/crates/pgt_query_macros/src/node_ref.rs new file mode 100644 index 00000000..64f9b7c4 --- /dev/null +++ b/crates/pgt_query_macros/src/node_ref.rs @@ -0,0 +1,46 @@ +use quote::{format_ident, quote}; + +use crate::proto_analyser::ProtoAnalyzer; + +pub fn node_ref_mod(analyser: ProtoAnalyzer) -> proc_macro2::TokenStream { + let node_variants = analyser.enum_variants(); + + let mut to_enum_matches = Vec::new(); + let mut node_enum_variants = Vec::new(); + + for variant in &node_variants { + let variant_ident = format_ident!("{}", &variant.name); + let type_ident = format_ident!("{}", &variant.type_name); + + if variant.boxed { + // For boxed variants, we need to box the cloned value + to_enum_matches.push(quote! { + NodeRef::#variant_ident(n) => NodeEnum::#variant_ident(::prost::alloc::boxed::Box::new((*n).clone())) + }); + } else { + // For non-boxed variants, clone directly + to_enum_matches.push(quote! { + NodeRef::#variant_ident(n) => NodeEnum::#variant_ident((*n).clone()) + }); + } + + node_enum_variants.push(quote! { + #variant_ident(&'a protobuf::#type_ident) + }); + } + + quote! { + #[derive(Debug, Copy, Clone)] + pub enum NodeRef<'a> { + #(#node_enum_variants,)* + } + + impl<'a> NodeRef<'a> { + pub fn to_enum(self) -> NodeEnum { + match self { + #(#to_enum_matches,)* + } + } + } + } +} diff --git a/crates/pgt_query_macros/src/node_structs.rs b/crates/pgt_query_macros/src/node_structs.rs new file mode 100644 index 00000000..52fca2e0 --- /dev/null +++ b/crates/pgt_query_macros/src/node_structs.rs @@ -0,0 +1,30 @@ +use quote::{format_ident, quote}; + +use crate::proto_analyser::ProtoAnalyzer; + +pub fn node_structs_mod(analyser: ProtoAnalyzer) -> proc_macro2::TokenStream { + let node_variants = analyser.enum_variants(); + + let mut impls = Vec::new(); + + for variant in &node_variants { + let node_ident = format_ident!("{}", &variant.name); + let type_ident = format_ident!("{}", &variant.type_name); + + impls.push(quote! { + impl protobuf::#type_ident { + pub fn to_ref(&self) -> NodeRef { + NodeRef::#node_ident(self) + } + + pub fn to_mut(&mut self) -> NodeMut { + NodeMut::#node_ident(self) + } + } + }); + } + + quote! 
{ + #(#impls)* + } +} diff --git a/crates/pgt_query_macros/src/proto_analyser.rs b/crates/pgt_query_macros/src/proto_analyser.rs new file mode 100644 index 00000000..26a18b60 --- /dev/null +++ b/crates/pgt_query_macros/src/proto_analyser.rs @@ -0,0 +1,252 @@ +use std::{ + collections::{HashMap, HashSet}, + path::Path, +}; + +use convert_case::{Case, Casing}; +use prost_reflect::{ + DescriptorError, DescriptorPool, FieldDescriptor, MessageDescriptor, + prost_types::{ + FieldDescriptorProto, + field_descriptor_proto::{Label, Type}, + }, +}; + +pub(crate) struct ProtoAnalyzer { + pool: DescriptorPool, + message_graph: HashMap>, +} + +pub(crate) struct EnumVariant { + pub name: String, + pub type_name: String, + pub boxed: bool, +} + +#[derive(Debug, PartialEq, Eq)] +pub(crate) enum FieldType { + Node(Option), + Enum(String), + Literal, +} + +pub(crate) struct Field { + pub name: String, + pub r#type: FieldType, + pub repeated: bool, + pub is_one_of: bool, +} + +pub(crate) struct Node { + pub name: String, + #[allow(dead_code)] + pub enum_variant_name: String, + pub fields: Vec, +} + +impl ProtoAnalyzer { + pub fn from(proto_file: &Path) -> Result { + let include_path = proto_file + .parent() + .expect("Proto file must have a parent directory"); + + // protox::compile expects the proto file to be relative to the include path + let file_name = proto_file + .file_name() + .expect("Proto file must have a file name"); + + let pool = DescriptorPool::from_file_descriptor_set( + protox::compile([file_name], [include_path]).expect("unable to parse"), + )?; + + let mut analyzer = ProtoAnalyzer { + pool, + message_graph: HashMap::new(), + }; + + // Build the message graph + analyzer.build_message_graph(); + + Ok(analyzer) + } + + pub fn nodes(&self) -> Vec { + let mut nodes = Vec::new(); + + for msg in self.pool.all_messages() { + if ["ParseResult", "ScanResult", "Node", "ScanToken"].contains(&msg.name()) { + continue; + } + let fields = msg + .fields() + .map(|f| { + let field_type = match f.field_descriptor_proto().r#type() { + Type::Message => match f.field_descriptor_proto().type_name() { + ".pg_query.Node" => FieldType::Node(None), + name => { + FieldType::Node(Some(name.to_string().replace(".pg_query.", ""))) + } + }, + Type::Enum => FieldType::Enum( + f.field_descriptor_proto() + .type_name() + .to_string() + .replace(".pg_query.", ""), + ), + _ => FieldType::Literal, + }; + + Field { + name: f.name().to_string(), + r#type: field_type, + repeated: f.is_list(), + is_one_of: f.containing_oneof().is_some(), + } + }) + .collect(); + + nodes.push(Node { + name: msg.name().to_string(), + enum_variant_name: msg.name().to_case(Case::Pascal), // Convert to PascalCase for enum variant name + fields, + }); + } + + nodes + } + + pub fn enum_variants(&self) -> Vec { + let node = self + .pool + .get_message_by_name(".pg_query.Node") + .expect("Node message not found"); + + let mut variants = Vec::new(); + for field in node.fields() { + // The prost-generated variant name is derived from the field name using snake_case to PascalCase conversion + // For example: ctesearch_clause -> CtesearchClause + let field_name = field.name(); + let variant_name = field_name.to_case(Case::Pascal); + + // Get the actual proto type name (the message type) + let proto_type_name = field + .field_descriptor_proto() + .type_name() + .split('.') + .next_back() + .unwrap_or(&variant_name); + + // The Rust type name is the proto type name converted to PascalCase + // For example: CTESearchClause -> CteSearchClause + let type_name = 
proto_type_name.to_case(Case::Pascal);
+
+            let boxed = self.is_field_boxed(&field, &node);
+
+            variants.push(EnumVariant {
+                name: variant_name,
+                type_name,
+                boxed,
+            });
+        }
+
+        variants
+    }
+
+    /// Build a graph of message dependencies for cycle detection
+    fn build_message_graph(&mut self) {
+        // Collect all messages first to avoid borrowing issues
+        let mut all_messages = Vec::new();
+        for file in self.pool.files() {
+            for message in file.messages() {
+                all_messages.push(message);
+            }
+        }
+
+        // Now add them to the graph
+        for message in all_messages {
+            self.add_message_to_graph(&message);
+        }
+    }
+
+    /// Add a message and its dependencies to the graph
+    fn add_message_to_graph(&mut self, message: &MessageDescriptor) {
+        let msg_fq_name = format!(".{}", message.full_name());
+        let mut dependencies = Vec::new();
+
+        // Check all fields for message type dependencies
+        for field in message.fields() {
+            if let Some(field_message) = field.kind().as_message() {
+                // Only add non-repeated message fields as dependencies
+                // since repeated fields are already heap allocated in Vec
+                if !field.is_list() {
+                    let field_fq_name = format!(".{}", field_message.full_name());
+                    dependencies.push(field_fq_name);
+                }
+            }
+        }
+
+        self.message_graph.insert(msg_fq_name, dependencies);
+
+        // Recursively add nested messages
+        for nested in message.child_messages() {
+            self.add_message_to_graph(&nested);
+        }
+    }
+
+    /// Detect if a field will be boxed by prost due to recursive nesting
+    fn is_field_boxed(&self, field: &FieldDescriptor, parent_message: &MessageDescriptor) -> bool {
+        // Check if this is a message field that should be boxed
+        let parent_fq_name = format!(".{}", parent_message.full_name());
+        self.is_boxed(&parent_fq_name, field.field_descriptor_proto())
+    }
+
+    /// Check if there's a path from parent_message to field_type in the message graph.
+    /// This indicates that field_type is transitively contained within parent_message.
+    fn is_nested(&self, parent_message_name: &str, field_type_name: &str) -> bool {
+        self.has_path(parent_message_name, field_type_name, &mut HashSet::new())
+    }
+
+    /// Recursive helper to find if there's a path from 'from' to 'to' in the message graph
+    fn has_path(&self, from: &str, to: &str, visited: &mut HashSet<String>) -> bool {
+        // If we've already visited this node, return false to avoid cycles
+        if visited.contains(from) {
+            return false;
+        }
+
+        // If we've reached the target, we found a path
+        if from == to {
+            return true;
+        }
+
+        visited.insert(from.to_string());
+
+        // Check all dependencies of the current message
+        if let Some(dependencies) = self.message_graph.get(from) {
+            for dep in dependencies {
+                if self.has_path(dep, to, visited) {
+                    return true;
+                }
+            }
+        }
+
+        false
+    }
+
+    /// Returns whether the Rust type for this message field is `Box<_>`.
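+    ///
+    /// For example, with this illustrative pair of messages (a sketch, not the
+    /// actual pg_query.proto definitions):
+    ///
+    /// ```proto
+    /// message Node { SelectStmt select_stmt = 1; }
+    /// message SelectStmt { Node where_clause = 1; }
+    /// ```
+    ///
+    /// `SelectStmt` transitively contains `Node`, so storing it inline would
+    /// make the generated types infinitely sized; prost-build boxes the field
+    /// instead (`select_stmt: Option<Box<SelectStmt>>`), and this check mirrors
+    /// that decision so the generated enum variants line up.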
+ fn is_boxed(&self, fq_message_name: &str, field: &FieldDescriptorProto) -> bool { + if field.label() == Label::Repeated { + // Repeated field are stored in Vec, therefore it is already heap allocated + return false; + } + let fd_type = field.r#type(); + if fd_type == Type::Message || fd_type == Type::Group { + // The field should be boxed if the field type transitively contains the parent message + // This prevents infinitely sized type definitions + if let Some(field_type_name) = field.type_name.as_ref() { + // IMPORTANT: Check if field_type_name contains fq_message_name (not the other way around) + return self.is_nested(field_type_name, fq_message_name); + } + } + false + } +} diff --git a/crates/pgt_query_proto_parser/src/lib.rs b/crates/pgt_query_proto_parser/src/lib.rs deleted file mode 100644 index 12f8cf9c..00000000 --- a/crates/pgt_query_proto_parser/src/lib.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! A parser for the libg_query proto file -//! -//! This crate provides a parser for the libg_query proto file, and a struct to represent and interact with the parsed file. - -mod proto_file; -mod proto_parser; - -pub use crate::proto_file::{Field, FieldType, Node, ProtoFile, Token}; -pub use crate::proto_parser::ProtoParser; diff --git a/crates/pgt_query_proto_parser/src/proto_file.rs b/crates/pgt_query_proto_parser/src/proto_file.rs deleted file mode 100644 index 2cc32798..00000000 --- a/crates/pgt_query_proto_parser/src/proto_file.rs +++ /dev/null @@ -1,60 +0,0 @@ -/// The FieldTypes of a protobuf message -#[derive(Debug, Eq, PartialEq)] -pub enum FieldType { - Node, - Double, - Float, - Int64, - Uint64, - Int32, - Fixed64, - Fixed32, - Bool, - String, - Group, - Message, - Bytes, - Uint32, - Enum, - Sfixed32, - Sfixed64, - Sint32, - Sint64, -} - -/// A libg_query token -#[derive(Debug)] -pub struct Token { - pub name: String, - pub value: i32, -} - -/// A libg_query field -#[derive(Debug)] -pub struct Field { - pub name: String, - pub node_name: Option, - pub enum_variant_name: Option, - pub field_type: FieldType, - pub repeated: bool, - pub is_one_of: bool, -} - -/// A libg_query node -#[derive(Debug)] -pub struct Node { - pub name: String, - pub fields: Vec, -} - -/// The libg_query proto file -pub struct ProtoFile { - pub tokens: Vec, - pub nodes: Vec, -} - -impl ProtoFile { - pub fn node(&self, name: &str) -> Option<&Node> { - self.nodes.iter().find(|n| n.name == name) - } -} diff --git a/crates/pgt_query_proto_parser/src/proto_parser.rs b/crates/pgt_query_proto_parser/src/proto_parser.rs deleted file mode 100644 index 56f93c6e..00000000 --- a/crates/pgt_query_proto_parser/src/proto_parser.rs +++ /dev/null @@ -1,179 +0,0 @@ -use convert_case::{Case, Casing}; -use protobuf::descriptor::{FileDescriptorProto, field_descriptor_proto::Label}; -use protobuf_parse::Parser; -use std::{ffi::OsStr, path::Path}; - -use crate::proto_file::{Field, FieldType, Node, ProtoFile, Token}; - -/// The parser for the libg_query proto file -pub struct ProtoParser { - inner: FileDescriptorProto, -} - -impl ProtoParser { - pub fn new(file_path: &impl AsRef) -> Self { - let proto_file = Path::new(file_path); - let proto_dir = proto_file.parent().unwrap(); - - let result = Parser::new() - .pure() - .include(proto_dir) - .input(proto_file) - .parse_and_typecheck() - .unwrap(); - - ProtoParser { - inner: result.file_descriptors[0].clone(), - } - } - - pub fn parse(&self) -> ProtoFile { - ProtoFile { - tokens: self.tokens(), - nodes: self.nodes(), - } - } - - fn tokens(&self) -> Vec { - self.inner - .enum_type - 
.iter() - .find(|e| e.name == Some("Token".into())) - .unwrap() - .value - .iter() - .map(|e| Token { - // token names in proto are UPPERCASE_SNAKE_CASE - name: e.name.clone().unwrap().to_case(Case::UpperCamel), - value: e.number.unwrap(), - }) - .collect() - } - - fn get_enum_variant_name(&self, type_name: &str) -> Option { - let variant = self - .inner - .message_type - .iter() - .find(|e| e.name == Some("Node".into())) - .unwrap() - .field - .iter() - .find(|e| e.type_name().split(".").last().unwrap() == type_name); - variant.map(|v| v.name.clone().unwrap().to_case(Case::UpperCamel)) - } - - fn nodes(&self) -> Vec { - self.inner - .message_type - .iter() - .find(|e| e.name == Some("Node".into())) - .unwrap() - .field - .iter() - .map(|e| { - let name: String = e.name.to_owned().unwrap().to_case(Case::UpperCamel); - let node = self - .inner - .message_type - .iter() - .find(|n| { - n.name.clone().unwrap().to_case(Case::UpperCamel) - == e.json_name.as_ref().unwrap().to_case(Case::UpperCamel) - }) - .unwrap(); - - let mut fields: Vec = Vec::new(); - // from node fields - fields.append(&mut - node - .field - .iter() - .filter_map(|e| { - // skip one of fields, they are handled separately - if e.has_oneof_index() { - return None; - } - // use label and type to get the field type - let type_name: FieldType = match e.type_name() { - "" => match e.type_() { - protobuf::descriptor::field_descriptor_proto::Type::TYPE_DOUBLE => FieldType::Double, - protobuf::descriptor::field_descriptor_proto::Type::TYPE_FLOAT => FieldType::Float, - protobuf::descriptor::field_descriptor_proto::Type::TYPE_INT64 => FieldType::Int64, - protobuf::descriptor::field_descriptor_proto::Type::TYPE_UINT64 => FieldType::Uint64, - protobuf::descriptor::field_descriptor_proto::Type::TYPE_INT32 => FieldType::Int32, - protobuf::descriptor::field_descriptor_proto::Type::TYPE_FIXED64 => FieldType::Fixed64, - protobuf::descriptor::field_descriptor_proto::Type::TYPE_FIXED32 => FieldType::Fixed32, - protobuf::descriptor::field_descriptor_proto::Type::TYPE_BOOL => FieldType::Bool, - protobuf::descriptor::field_descriptor_proto::Type::TYPE_STRING => FieldType::String, - protobuf::descriptor::field_descriptor_proto::Type::TYPE_GROUP => FieldType::Group, - protobuf::descriptor::field_descriptor_proto::Type::TYPE_MESSAGE => FieldType::Message, - protobuf::descriptor::field_descriptor_proto::Type::TYPE_BYTES => FieldType::Bytes, - protobuf::descriptor::field_descriptor_proto::Type::TYPE_UINT32 => FieldType::Uint32, - protobuf::descriptor::field_descriptor_proto::Type::TYPE_ENUM => FieldType::Enum, - protobuf::descriptor::field_descriptor_proto::Type::TYPE_SFIXED32 => FieldType::Sfixed32, - protobuf::descriptor::field_descriptor_proto::Type::TYPE_SFIXED64 => FieldType::Sfixed64, - protobuf::descriptor::field_descriptor_proto::Type::TYPE_SINT32 => FieldType::Sint32, - protobuf::descriptor::field_descriptor_proto::Type::TYPE_SINT64 => FieldType::Sint64, - }, - _ => { - if !e.type_name().starts_with(".pg_query") { - panic!("Unknown type: {}", e.type_name()); - - } - if e.type_() == protobuf::descriptor::field_descriptor_proto::Type::TYPE_ENUM { - FieldType::Enum - } else { - FieldType::Node - } - }, - }; - let mut node_name = None; - let mut enum_variant_name = None; - if e.type_name().starts_with(".pg_query") { - let n = e.type_name().split(".").last().unwrap().to_string(); - node_name = Some(n.clone()); - if n != "Node" { - enum_variant_name = self.get_enum_variant_name(e.type_name().split(".").last().unwrap().to_string().as_str()); - } - } 
- // TODO: node name must be derived from the property name in the node - // enum - Some(Field { - name: e.name.clone().unwrap(), - node_name, - enum_variant_name, - field_type: type_name, - repeated: e.label() == Label::LABEL_REPEATED, - is_one_of: false, - }) - }) - .collect() - ); - - // one of declarations - fields.append(&mut - node - .oneof_decl - .iter() - .map(|e| { - Field { - name: e.name.clone().unwrap(), - node_name: Some("Node".to_string()), - enum_variant_name: None, - field_type: FieldType::Node, - repeated: false, - is_one_of: true, - } - }) - .collect() - ); - Node { - // token names in proto are UPPERCASE_SNAKE_CASE - name: name.clone(), - fields, - } - }) - .collect() - } -} diff --git a/crates/pgt_statement_splitter/Cargo.toml b/crates/pgt_statement_splitter/Cargo.toml index bdd892a6..45a42ebc 100644 --- a/crates/pgt_statement_splitter/Cargo.toml +++ b/crates/pgt_statement_splitter/Cargo.toml @@ -14,7 +14,7 @@ version = "0.0.0" [dependencies] pgt_diagnostics = { workspace = true } pgt_lexer.workspace = true -pgt_query_ext.workspace = true +pgt_query.workspace = true pgt_text_size.workspace = true regex.workspace = true diff --git a/crates/pgt_type_resolver/Cargo.toml b/crates/pgt_type_resolver/Cargo.toml index 5d2a8eb1..9a190fdf 100644 --- a/crates/pgt_type_resolver/Cargo.toml +++ b/crates/pgt_type_resolver/Cargo.toml @@ -12,7 +12,7 @@ version = "0.0.0" [dependencies] -pgt_query_ext.workspace = true +pgt_query.workspace = true pgt_schema_cache.workspace = true [dev-dependencies] diff --git a/crates/pgt_type_resolver/src/functions.rs b/crates/pgt_type_resolver/src/functions.rs index 1b0036b5..208af30d 100644 --- a/crates/pgt_type_resolver/src/functions.rs +++ b/crates/pgt_type_resolver/src/functions.rs @@ -6,7 +6,7 @@ use crate::{ }; pub fn resolve_func_call<'b>( - node: &pgt_query_ext::protobuf::FuncCall, + node: &pgt_query::protobuf::FuncCall, schema_cache: &'b SchemaCache, ) -> Option<&'b Function> { let (schema, name) = resolve_func_identifier(node); @@ -30,7 +30,7 @@ pub fn resolve_func_call<'b>( if fns.len() == 1 { Some(fns[0]) } else { None } } -fn resolve_func_identifier(node: &pgt_query_ext::protobuf::FuncCall) -> (Option, String) { +fn resolve_func_identifier(node: &pgt_query::protobuf::FuncCall) -> (Option, String) { match node.funcname.as_slice() { [name] => (None, get_string_from_node(name)), [schema, name] => ( diff --git a/crates/pgt_type_resolver/src/types.rs b/crates/pgt_type_resolver/src/types.rs index b5560114..85e1d8d2 100644 --- a/crates/pgt_type_resolver/src/types.rs +++ b/crates/pgt_type_resolver/src/types.rs @@ -5,9 +5,9 @@ pub(crate) enum PossibleType { AnyOf(Vec), } -pub fn resolve_type(node: &pgt_query_ext::NodeEnum, schema_cache: &SchemaCache) -> PossibleType { +pub fn resolve_type(node: &pgt_query::NodeEnum, schema_cache: &SchemaCache) -> PossibleType { match node { - pgt_query_ext::NodeEnum::AConst(n) => { + pgt_query::NodeEnum::AConst(n) => { if n.isnull { PossibleType::Null } else { @@ -16,7 +16,7 @@ pub fn resolve_type(node: &pgt_query_ext::NodeEnum, schema_cache: &SchemaCache) .as_ref() .expect("expected non-nullable AConst to have a value") { - pgt_query_ext::protobuf::a_const::Val::Ival(_) => { + pgt_query::protobuf::a_const::Val::Ival(_) => { let types: Vec = ["int2", "int4", "int8"] .iter() .map(|s| s.to_string()) @@ -33,7 +33,7 @@ pub fn resolve_type(node: &pgt_query_ext::NodeEnum, schema_cache: &SchemaCache) .collect(), ) } - pgt_query_ext::protobuf::a_const::Val::Fval(_) => { + pgt_query::protobuf::a_const::Val::Fval(_) => { let 
types: Vec = ["float4", "float8"].iter().map(|s| s.to_string()).collect(); @@ -46,7 +46,7 @@ pub fn resolve_type(node: &pgt_query_ext::NodeEnum, schema_cache: &SchemaCache) .collect(), ) } - pgt_query_ext::protobuf::a_const::Val::Boolval(_) => PossibleType::AnyOf( + pgt_query::protobuf::a_const::Val::Boolval(_) => PossibleType::AnyOf( schema_cache .types .iter() @@ -54,7 +54,7 @@ pub fn resolve_type(node: &pgt_query_ext::NodeEnum, schema_cache: &SchemaCache) .map(|t| t.id) .collect(), ), - pgt_query_ext::protobuf::a_const::Val::Sval(v) => { + pgt_query::protobuf::a_const::Val::Sval(v) => { let types: Vec = ["text", "varchar"].iter().map(|s| s.to_string()).collect(); @@ -70,7 +70,7 @@ pub fn resolve_type(node: &pgt_query_ext::NodeEnum, schema_cache: &SchemaCache) .collect(), ) } - pgt_query_ext::protobuf::a_const::Val::Bsval(_) => todo!(), + pgt_query::protobuf::a_const::Val::Bsval(_) => todo!(), } } } diff --git a/crates/pgt_type_resolver/src/util.rs b/crates/pgt_type_resolver/src/util.rs index f10cf5bb..d31d1fa8 100644 --- a/crates/pgt_type_resolver/src/util.rs +++ b/crates/pgt_type_resolver/src/util.rs @@ -1,6 +1,6 @@ -pub(crate) fn get_string_from_node(node: &pgt_query_ext::protobuf::Node) -> String { +pub(crate) fn get_string_from_node(node: &pgt_query::protobuf::Node) -> String { match &node.node { - Some(pgt_query_ext::NodeEnum::String(s)) => s.sval.to_string(), + Some(pgt_query::NodeEnum::String(s)) => s.sval.to_string(), _ => "".to_string(), } } diff --git a/crates/pgt_typecheck/Cargo.toml b/crates/pgt_typecheck/Cargo.toml index 175ecd59..f61f6a37 100644 --- a/crates/pgt_typecheck/Cargo.toml +++ b/crates/pgt_typecheck/Cargo.toml @@ -14,7 +14,7 @@ version = "0.0.0" [dependencies] pgt_console.workspace = true pgt_diagnostics.workspace = true -pgt_query_ext.workspace = true +pgt_query.workspace = true pgt_schema_cache.workspace = true pgt_text_size.workspace = true pgt_treesitter.workspace = true diff --git a/crates/pgt_typecheck/src/lib.rs b/crates/pgt_typecheck/src/lib.rs index e1dcd259..a3dde01d 100644 --- a/crates/pgt_typecheck/src/lib.rs +++ b/crates/pgt_typecheck/src/lib.rs @@ -14,7 +14,7 @@ pub use typed_identifier::{IdentifierType, TypedIdentifier}; pub struct TypecheckParams<'a> { pub conn: &'a PgPool, pub sql: &'a str, - pub ast: &'a pgt_query_ext::NodeEnum, + pub ast: &'a pgt_query::NodeEnum, pub tree: &'a tree_sitter::Tree, pub schema_cache: &'a pgt_schema_cache::SchemaCache, pub identifiers: Vec, @@ -39,11 +39,11 @@ pub async fn check_sql( // Check if the AST is not a supported statement type if !matches!( params.ast, - pgt_query_ext::NodeEnum::SelectStmt(_) - | pgt_query_ext::NodeEnum::InsertStmt(_) - | pgt_query_ext::NodeEnum::UpdateStmt(_) - | pgt_query_ext::NodeEnum::DeleteStmt(_) - | pgt_query_ext::NodeEnum::CommonTableExpr(_) + pgt_query::NodeEnum::SelectStmt(_) + | pgt_query::NodeEnum::InsertStmt(_) + | pgt_query::NodeEnum::UpdateStmt(_) + | pgt_query::NodeEnum::DeleteStmt(_) + | pgt_query::NodeEnum::CommonTableExpr(_) ) { return Ok(None); } diff --git a/crates/pgt_typecheck/tests/diagnostics.rs b/crates/pgt_typecheck/tests/diagnostics.rs index a7448503..f21d9ef9 100644 --- a/crates/pgt_typecheck/tests/diagnostics.rs +++ b/crates/pgt_typecheck/tests/diagnostics.rs @@ -23,7 +23,10 @@ async fn test(name: &str, query: &str, setup: Option<&str>, test_db: &PgPool) { .await .expect("Failed to load Schema Cache"); - let root = pgt_query_ext::parse(query).unwrap(); + let root = pgt_query::parse(query) + .unwrap() + .into_root() + .expect("Failed to parse query"); let tree 
= parser.parse(query, None).unwrap(); let conn = &test_db; diff --git a/crates/pgt_workspace/Cargo.toml b/crates/pgt_workspace/Cargo.toml index e78f4391..4acc0600 100644 --- a/crates/pgt_workspace/Cargo.toml +++ b/crates/pgt_workspace/Cargo.toml @@ -26,6 +26,7 @@ pgt_console = { workspace = true } pgt_diagnostics = { workspace = true } pgt_fs = { workspace = true, features = ["serde"] } pgt_lexer = { workspace = true } +pgt_query = { workspace = true } pgt_query_ext = { workspace = true } pgt_schema_cache = { workspace = true } pgt_statement_splitter = { workspace = true } diff --git a/crates/pgt_workspace/src/workspace/server/document.rs b/crates/pgt_workspace/src/workspace/server/document.rs index c9f880ec..c8cdc1f1 100644 --- a/crates/pgt_workspace/src/workspace/server/document.rs +++ b/crates/pgt_workspace/src/workspace/server/document.rs @@ -191,12 +191,7 @@ impl<'a> StatementMapper<'a> for DefaultMapper { pub struct ExecuteStatementMapper; impl<'a> StatementMapper<'a> for ExecuteStatementMapper { - type Output = ( - StatementId, - TextRange, - String, - Option, - ); + type Output = (StatementId, TextRange, String, Option); fn map(&self, parser: &'a Document, id: StatementId, range: TextRange) -> Self::Output { let ast_result = parser.ast_db.get_or_cache_ast(&id); @@ -214,7 +209,7 @@ impl<'a> StatementMapper<'a> for TypecheckDiagnosticsMapper { type Output = ( StatementId, TextRange, - Option, + Option, Arc, Option, ); diff --git a/crates/pgt_workspace/src/workspace/server/function_utils.rs b/crates/pgt_workspace/src/workspace/server/function_utils.rs index cf02ceb1..74e76ff2 100644 --- a/crates/pgt_workspace/src/workspace/server/function_utils.rs +++ b/crates/pgt_workspace/src/workspace/server/function_utils.rs @@ -1,6 +1,6 @@ /// Helper function to find a specific option value from function options pub fn find_option_value( - create_fn: &pgt_query_ext::protobuf::CreateFunctionStmt, + create_fn: &pgt_query::protobuf::CreateFunctionStmt, option_name: &str, ) -> Option { create_fn @@ -8,18 +8,18 @@ pub fn find_option_value( .iter() .filter_map(|opt_wrapper| opt_wrapper.node.as_ref()) .find_map(|opt| { - if let pgt_query_ext::NodeEnum::DefElem(def_elem) = opt { + if let pgt_query::NodeEnum::DefElem(def_elem) = opt { if def_elem.defname == option_name { def_elem .arg .iter() .filter_map(|arg_wrapper| arg_wrapper.node.as_ref()) .find_map(|arg| { - if let pgt_query_ext::NodeEnum::String(s) = arg { + if let pgt_query::NodeEnum::String(s) = arg { Some(s.sval.clone()) - } else if let pgt_query_ext::NodeEnum::List(l) = arg { + } else if let pgt_query::NodeEnum::List(l) = arg { l.items.iter().find_map(|item_wrapper| { - if let Some(pgt_query_ext::NodeEnum::String(s)) = + if let Some(pgt_query::NodeEnum::String(s)) = item_wrapper.node.as_ref() { Some(s.sval.clone()) @@ -40,11 +40,11 @@ pub fn find_option_value( }) } -pub fn parse_name(nodes: &[pgt_query_ext::protobuf::Node]) -> Option<(Option, String)> { +pub fn parse_name(nodes: &[pgt_query::protobuf::Node]) -> Option<(Option, String)> { let names = nodes .iter() .map(|n| match &n.node { - Some(pgt_query_ext::NodeEnum::String(s)) => Some(s.sval.clone()), + Some(pgt_query::NodeEnum::String(s)) => Some(s.sval.clone()), _ => None, }) .collect::>(); diff --git a/crates/pgt_workspace/src/workspace/server/pg_query.rs b/crates/pgt_workspace/src/workspace/server/pg_query.rs index ba471dfa..e90dd41b 100644 --- a/crates/pgt_workspace/src/workspace/server/pg_query.rs +++ b/crates/pgt_workspace/src/workspace/server/pg_query.rs @@ -11,7 +11,7 @@ use 
super::statement_identifier::StatementId; const DEFAULT_CACHE_SIZE: usize = 1000; pub struct PgQueryStore { - ast_db: Mutex>>>, + ast_db: Mutex>>>, plpgsql_db: Mutex>>, } @@ -30,14 +30,22 @@ impl PgQueryStore { pub fn get_or_cache_ast( &self, statement: &StatementId, - ) -> Arc> { + ) -> Arc> { let mut cache = self.ast_db.lock().unwrap(); if let Some(existing) = cache.get(statement) { return existing.clone(); } - let r = Arc::new(pgt_query_ext::parse(statement.content()).map_err(SyntaxDiagnostic::from)); + let r = Arc::new( + pgt_query::parse(statement.content()) + .map_err(SyntaxDiagnostic::from) + .and_then(|ast| { + ast.into_root().ok_or_else(|| { + SyntaxDiagnostic::new("No root node found in parse result", None) + }) + }), + ); cache.put(statement.clone(), r.clone()); r } @@ -49,7 +57,7 @@ impl PgQueryStore { let ast = self.get_or_cache_ast(statement); let create_fn = match ast.as_ref() { - Ok(pgt_query_ext::NodeEnum::CreateFunctionStmt(node)) => node, + Ok(pgt_query::NodeEnum::CreateFunctionStmt(node)) => node, _ => return None, }; @@ -72,7 +80,7 @@ impl PgQueryStore { let range = TextRange::new(start.try_into().unwrap(), end.try_into().unwrap()); - let r = pgt_query_ext::parse_plpgsql(statement.content()) + let r = pgt_query::parse_plpgsql(statement.content()) .map_err(|err| SyntaxDiagnostic::new(err.to_string(), Some(range))); cache.put(statement.clone(), r.clone()); diff --git a/crates/pgt_workspace/src/workspace/server/sql_function.rs b/crates/pgt_workspace/src/workspace/server/sql_function.rs index 6161dda7..0b230edc 100644 --- a/crates/pgt_workspace/src/workspace/server/sql_function.rs +++ b/crates/pgt_workspace/src/workspace/server/sql_function.rs @@ -30,9 +30,9 @@ pub struct SQLFunctionBody { } /// Extracts the function signature from a SQL function definition -pub fn get_sql_fn_signature(ast: &pgt_query_ext::NodeEnum) -> Option { +pub fn get_sql_fn_signature(ast: &pgt_query::NodeEnum) -> Option { let create_fn = match ast { - pgt_query_ext::NodeEnum::CreateFunctionStmt(cf) => cf, + pgt_query::NodeEnum::CreateFunctionStmt(cf) => cf, _ => return None, }; @@ -49,7 +49,7 @@ pub fn get_sql_fn_signature(ast: &pgt_query_ext::NodeEnum) -> Option Option Option { +pub fn get_sql_fn_body(ast: &pgt_query::NodeEnum, content: &str) -> Option { let create_fn = match ast { - pgt_query_ext::NodeEnum::CreateFunctionStmt(cf) => cf, + pgt_query::NodeEnum::CreateFunctionStmt(cf) => cf, _ => return None, }; @@ -120,7 +120,7 @@ mod tests { IMMUTABLE RETURNS NULL ON NULL INPUT;"; - let ast = pgt_query_ext::parse(input).unwrap(); + let ast = pgt_query::parse(input).unwrap().into_root().unwrap(); let sig = get_sql_fn_signature(&ast); @@ -146,7 +146,7 @@ mod tests { IMMUTABLE RETURNS NULL ON NULL INPUT;"; - let ast = pgt_query_ext::parse(input).unwrap(); + let ast = pgt_query::parse(input).unwrap().into_root().unwrap(); let sig = get_sql_fn_signature(&ast); diff --git a/docs/codegen/Cargo.toml b/docs/codegen/Cargo.toml index 96092a7a..bf650ac9 100644 --- a/docs/codegen/Cargo.toml +++ b/docs/codegen/Cargo.toml @@ -26,6 +26,7 @@ pgt_cli = { workspace = true } pgt_analyse = { workspace = true } pgt_analyser = { workspace = true } pgt_diagnostics = { workspace = true } +pgt_query = { workspace = true } pgt_query_ext = { workspace = true } pgt_workspace = { workspace = true } pgt_statement_splitter = { workspace = true } diff --git a/docs/codegen/src/rules_docs.rs b/docs/codegen/src/rules_docs.rs index 1d4b86a9..67626237 100644 --- a/docs/codegen/src/rules_docs.rs +++ b/docs/codegen/src/rules_docs.rs @@ 
-444,28 +444,30 @@ fn print_diagnostics( // split and parse each statement let stmts = pgt_statement_splitter::split(code); for stmt_range in stmts.ranges { - match pgt_query_ext::parse(&code[stmt_range]) { + match pgt_query::parse(&code[stmt_range]) { Ok(ast) => { - for rule_diag in analyser.run(pgt_analyser::AnalyserParams { - schema_cache: None, - stmts: vec![AnalysableStatement { - range: stmt_range, - root: ast, - }], - }) { - let diag = pgt_diagnostics::serde::Diagnostic::new(rule_diag); - - let category = diag.category().expect("linter diagnostic has no code"); - let severity = settings.get_severity_from_rule_code(category).expect( + if let Some(root) = ast.into_root() { + for rule_diag in analyser.run(pgt_analyser::AnalyserParams { + schema_cache: None, + stmts: vec![AnalysableStatement { + range: stmt_range, + root, + }], + }) { + let diag = pgt_diagnostics::serde::Diagnostic::new(rule_diag); + + let category = diag.category().expect("linter diagnostic has no code"); + let severity = settings.get_severity_from_rule_code(category).expect( "If you see this error, it means you need to run cargo codegen-configuration", ); - let error = diag - .with_severity(severity) - .with_file_path(&file_path) - .with_file_source_code(code); + let error = diag + .with_severity(severity) + .with_file_path(&file_path) + .with_file_source_code(code); - write_diagnostic(code, error)?; + write_diagnostic(code, error)?; + } } } Err(e) => { diff --git a/libpg_query b/libpg_query deleted file mode 160000 index 1c1a32ed..00000000 --- a/libpg_query +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 1c1a32ed2f4c7799830d50bf4cb159222aafec48 diff --git a/xtask/rules_check/Cargo.toml b/xtask/rules_check/Cargo.toml index 3f0198d1..abd02a05 100644 --- a/xtask/rules_check/Cargo.toml +++ b/xtask/rules_check/Cargo.toml @@ -11,6 +11,7 @@ pgt_analyse = { workspace = true } pgt_analyser = { workspace = true } pgt_console = { workspace = true } pgt_diagnostics = { workspace = true } +pgt_query = { workspace = true } pgt_query_ext = { workspace = true } pgt_statement_splitter = { workspace = true } pgt_workspace = { workspace = true } diff --git a/xtask/rules_check/src/lib.rs b/xtask/rules_check/src/lib.rs index 0c57d06f..dfdd24ba 100644 --- a/xtask/rules_check/src/lib.rs +++ b/xtask/rules_check/src/lib.rs @@ -128,28 +128,30 @@ fn assert_lint( let result = pgt_statement_splitter::split(code); for stmt_range in result.ranges { - match pgt_query_ext::parse(&code[stmt_range]) { + match pgt_query::parse(&code[stmt_range]) { Ok(ast) => { - for rule_diag in analyser.run(pgt_analyser::AnalyserParams { - schema_cache: None, - stmts: vec![AnalysableStatement { - range: stmt_range, - root: ast, - }], - }) { - let diag = pgt_diagnostics::serde::Diagnostic::new(rule_diag); - - let category = diag.category().expect("linter diagnostic has no code"); - let severity = settings.get_severity_from_rule_code(category).expect( + if let Some(root) = ast.into_root() { + for rule_diag in analyser.run(pgt_analyser::AnalyserParams { + schema_cache: None, + stmts: vec![AnalysableStatement { + range: stmt_range, + root, + }], + }) { + let diag = pgt_diagnostics::serde::Diagnostic::new(rule_diag); + + let category = diag.category().expect("linter diagnostic has no code"); + let severity = settings.get_severity_from_rule_code(category).expect( "If you see this error, it means you need to run cargo codegen-configuration", ); - let error = diag - .with_severity(severity) - .with_file_path(&file_path) - .with_file_source_code(code); + let error = diag + 
.with_severity(severity) + .with_file_path(&file_path) + .with_file_source_code(code); - write_diagnostic(code, error)?; + write_diagnostic(code, error)?; + } } } Err(e) => { From b651c75726b58ae1dd3a52571ff76ac6ac907f70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Wed, 23 Jul 2025 10:38:39 +0200 Subject: [PATCH 111/114] chore: add extensions to schema cache (#468) --- ...a23f1440c250590de976c5c46c5edf6746faf.json | 44 +++++++++++++++++++ crates/pgt_schema_cache/src/extensions.rs | 22 ++++++++++ crates/pgt_schema_cache/src/lib.rs | 2 + .../src/queries/extensions.sql | 10 +++++ crates/pgt_schema_cache/src/schema_cache.rs | 8 ---- 5 files changed, 78 insertions(+), 8 deletions(-) create mode 100644 .sqlx/query-3ebf3d74eb9d0448d675882c7f8a23f1440c250590de976c5c46c5edf6746faf.json create mode 100644 crates/pgt_schema_cache/src/extensions.rs create mode 100644 crates/pgt_schema_cache/src/queries/extensions.sql diff --git a/.sqlx/query-3ebf3d74eb9d0448d675882c7f8a23f1440c250590de976c5c46c5edf6746faf.json b/.sqlx/query-3ebf3d74eb9d0448d675882c7f8a23f1440c250590de976c5c46c5edf6746faf.json new file mode 100644 index 00000000..1b922062 --- /dev/null +++ b/.sqlx/query-3ebf3d74eb9d0448d675882c7f8a23f1440c250590de976c5c46c5edf6746faf.json @@ -0,0 +1,44 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n e.name as \"name!\",\n n.nspname AS schema,\n e.default_version as \"default_version!\",\n x.extversion AS installed_version,\n e.comment\nFROM\n pg_available_extensions() e(name, default_version, comment)\n LEFT JOIN pg_extension x ON e.name = x.extname\n LEFT JOIN pg_namespace n ON x.extnamespace = n.oid\n", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "name!", + "type_info": "Name" + }, + { + "ordinal": 1, + "name": "schema", + "type_info": "Name" + }, + { + "ordinal": 2, + "name": "default_version!", + "type_info": "Text" + }, + { + "ordinal": 3, + "name": "installed_version", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "comment", + "type_info": "Text" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null, + true, + null, + true, + null + ] + }, + "hash": "3ebf3d74eb9d0448d675882c7f8a23f1440c250590de976c5c46c5edf6746faf" +} diff --git a/crates/pgt_schema_cache/src/extensions.rs b/crates/pgt_schema_cache/src/extensions.rs new file mode 100644 index 00000000..8494397c --- /dev/null +++ b/crates/pgt_schema_cache/src/extensions.rs @@ -0,0 +1,22 @@ +use sqlx::PgPool; + +use crate::schema_cache::SchemaCacheItem; + +#[derive(Debug, Default)] +pub struct Extension { + pub name: String, + pub schema: Option, + pub default_version: String, + pub installed_version: Option, + pub comment: Option, +} + +impl SchemaCacheItem for Extension { + type Item = Extension; + + async fn load(pool: &PgPool) -> Result, sqlx::Error> { + sqlx::query_file_as!(Extension, "src/queries/extensions.sql") + .fetch_all(pool) + .await + } +} diff --git a/crates/pgt_schema_cache/src/lib.rs b/crates/pgt_schema_cache/src/lib.rs index b67f9412..6440cd01 100644 --- a/crates/pgt_schema_cache/src/lib.rs +++ b/crates/pgt_schema_cache/src/lib.rs @@ -3,6 +3,7 @@ #![allow(dead_code)] mod columns; +mod extensions; mod functions; mod policies; mod roles; @@ -14,6 +15,7 @@ mod types; mod versions; pub use columns::*; +pub use extensions::Extension; pub use functions::{Behavior, Function, FunctionArg, FunctionArgs, ProcKind}; pub use policies::{Policy, PolicyCommand}; pub use roles::*; diff --git a/crates/pgt_schema_cache/src/queries/extensions.sql 
b/crates/pgt_schema_cache/src/queries/extensions.sql new file mode 100644 index 00000000..aedc71b2 --- /dev/null +++ b/crates/pgt_schema_cache/src/queries/extensions.sql @@ -0,0 +1,10 @@ +SELECT + e.name as "name!", + n.nspname AS schema, + e.default_version as "default_version!", + x.extversion AS installed_version, + e.comment +FROM + pg_available_extensions() e(name, default_version, comment) + LEFT JOIN pg_extension x ON e.name = x.extname + LEFT JOIN pg_namespace n ON x.extnamespace = n.oid diff --git a/crates/pgt_schema_cache/src/schema_cache.rs b/crates/pgt_schema_cache/src/schema_cache.rs index 8fb9683b..df7239ea 100644 --- a/crates/pgt_schema_cache/src/schema_cache.rs +++ b/crates/pgt_schema_cache/src/schema_cache.rs @@ -49,14 +49,6 @@ impl SchemaCache { }) } - /// Applies an AST node to the repository - /// - /// For example, alter table add column will add the column to the table if it does not exist - /// yet - pub fn mutate(&mut self) { - unimplemented!(); - } - pub fn find_table(&self, name: &str, schema: Option<&str>) -> Option<&Table> { self.tables .iter() From bb686b96e6ea78eebd576d6a992935925820f990 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Fri, 1 Aug 2025 08:29:16 +0200 Subject: [PATCH 112/114] feat: plpgsql check (#469) adds support for plpgsql_check. the approach is even simpler as described in the issue: if we encounter a create function statement, we start a transaction, run the statement, then run plpgsql_check on it, and then rollback the transaction. the remaining code is just about translating the location information we get from the extension into a good range. todo: - [x] integrate into workspace api - [x] to get span, move from the first word / occurrence in the line to the next semicolon - [x] handle return trigger by checking for all variations - [x] make sure we do not report create fn syntax errors if plpgsql_check is enabled - [x] check why "not a known variable" is not getting the right span closes #190 --- .github/workflows/pull_request.yml | 59 +- ...ae6f3075c4f754cd379b0555c205fff95a95c.json | 50 ++ ...117910b19f540f19393b76aa6434e9d1d8502.json | 8 +- ...75ba8951faa1be2ea6b2bf6714b1aa9127a6f.json | 44 - Cargo.lock | 19 + Cargo.toml | 1 + Dockerfile | 16 + .../src/categories.rs | 1 + crates/pgt_plpgsql_check/Cargo.toml | 30 + crates/pgt_plpgsql_check/src/diagnostics.rs | 245 ++++++ crates/pgt_plpgsql_check/src/lib.rs | 794 ++++++++++++++++++ crates/pgt_query_ext/src/lib.rs | 1 + .../src/utils.rs} | 43 + .../pgt_schema_cache/src/queries/triggers.sql | 33 +- crates/pgt_schema_cache/src/schema_cache.rs | 20 +- crates/pgt_schema_cache/src/triggers.rs | 39 +- crates/pgt_typecheck/src/lib.rs | 14 - crates/pgt_workspace/Cargo.toml | 1 + crates/pgt_workspace/src/workspace/server.rs | 65 +- .../src/workspace/server.tests.rs | 73 +- .../src/workspace/server/pg_query.rs | 5 +- .../src/workspace/server/sql_function.rs | 12 +- docker-compose.yml | 2 +- test.sql | 8 + 24 files changed, 1446 insertions(+), 137 deletions(-) create mode 100644 .sqlx/query-277e47bf46f8331549f55c8a0ebae6f3075c4f754cd379b0555c205fff95a95c.json delete mode 100644 .sqlx/query-df57cc22f7d63847abce1d0d15675ba8951faa1be2ea6b2bf6714b1aa9127a6f.json create mode 100644 Dockerfile create mode 100644 crates/pgt_plpgsql_check/Cargo.toml create mode 100644 crates/pgt_plpgsql_check/src/diagnostics.rs create mode 100644 crates/pgt_plpgsql_check/src/lib.rs rename crates/{pgt_workspace/src/workspace/server/function_utils.rs => pgt_query_ext/src/utils.rs} (63%) diff --git 
a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 8aa24265..20218378 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -79,15 +79,6 @@ jobs: lint: name: Lint Project runs-on: ubuntu-latest - services: - postgres: - image: postgres:latest - env: - POSTGRES_USER: postgres - POSTGRES_PASSWORD: postgres - POSTGRES_DB: postgres - ports: - - 5432:5432 steps: - name: Checkout PR Branch uses: actions/checkout@v4 @@ -103,6 +94,24 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # we need to use the same database as we do locally for sqlx prepare to output the same hashes + - name: Build and start PostgreSQL with plpgsql_check + run: | + docker build -t postgres-plpgsql-check:latest . + docker run -d --name postgres \ + -e POSTGRES_USER=postgres \ + -e POSTGRES_PASSWORD=postgres \ + -e POSTGRES_DB=postgres \ + -p 5432:5432 \ + postgres-plpgsql-check:latest + # Wait for postgres to be ready + for _ in {1..30}; do + if docker exec postgres pg_isready -U postgres; then + break + fi + sleep 1 + done + - name: Setup sqlx-cli run: cargo install sqlx-cli @@ -154,13 +163,37 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - # running containers via `services` only works on linux - # https://github.com/actions/runner/issues/1866 - - name: Setup postgres + # For Linux, use custom Docker image with plpgsql_check + - name: Build and start PostgreSQL with plpgsql_check + if: runner.os == 'Linux' + run: | + docker build -t postgres-plpgsql-check:latest . + docker run -d --name postgres \ + -e POSTGRES_USER=postgres \ + -e POSTGRES_PASSWORD=postgres \ + -e POSTGRES_DB=postgres \ + -p 5432:5432 \ + postgres-plpgsql-check:latest + # Wait for postgres to be ready + for _ in {1..30}; do + if docker exec postgres pg_isready -U postgres; then + break + fi + sleep 1 + done + # For Windows, use the action since PostgreSQL Docker image doesn't support Windows containers + - name: Setup postgres (Windows) + if: runner.os == 'Windows' id: postgres uses: ikalnytskyi/action-setup-postgres@v7 - name: Print Roles - run: psql ${{ steps.postgres.outputs.connection-uri }} -c "select rolname from pg_roles;" + run: | + if [[ "$RUNNER_OS" == "Linux" ]]; then + docker exec postgres psql -U postgres -c "select rolname from pg_roles;" + else + psql ${{ steps.postgres.outputs.connection-uri }} -c "select rolname from pg_roles;" + fi + shell: bash - name: Run tests run: cargo test --workspace diff --git a/.sqlx/query-277e47bf46f8331549f55c8a0ebae6f3075c4f754cd379b0555c205fff95a95c.json b/.sqlx/query-277e47bf46f8331549f55c8a0ebae6f3075c4f754cd379b0555c205fff95a95c.json new file mode 100644 index 00000000..db3f4a73 --- /dev/null +++ b/.sqlx/query-277e47bf46f8331549f55c8a0ebae6f3075c4f754cd379b0555c205fff95a95c.json @@ -0,0 +1,50 @@ +{ + "db_name": "PostgreSQL", + "query": "-- we need to join tables from the pg_catalog since \"TRUNCATE\" triggers are\n-- not available in the information_schema.trigger table.\nselect\n t.tgname as \"name!\",\n c.relname as \"table_name!\",\n p.proname as \"proc_name!\",\n proc_ns.nspname as \"proc_schema!\",\n table_ns.nspname as \"table_schema!\",\n t.tgtype as \"details_bitmask!\"\nfrom\n pg_catalog.pg_trigger t\nleft join pg_catalog.pg_proc p on t.tgfoid = p.oid\nleft join pg_catalog.pg_class c on t.tgrelid = c.oid\nleft join pg_catalog.pg_namespace table_ns on c.relnamespace = table_ns.oid\nleft join pg_catalog.pg_namespace proc_ns on p.pronamespace = proc_ns.oid\nwhere\n t.tgisinternal = false and\n t.tgconstraint = 
0;\n", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "name!", + "type_info": "Name" + }, + { + "ordinal": 1, + "name": "table_name!", + "type_info": "Name" + }, + { + "ordinal": 2, + "name": "proc_name!", + "type_info": "Name" + }, + { + "ordinal": 3, + "name": "proc_schema!", + "type_info": "Name" + }, + { + "ordinal": 4, + "name": "table_schema!", + "type_info": "Name" + }, + { + "ordinal": 5, + "name": "details_bitmask!", + "type_info": "Int2" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + true, + true, + true, + true, + false + ] + }, + "hash": "277e47bf46f8331549f55c8a0ebae6f3075c4f754cd379b0555c205fff95a95c" +} diff --git a/.sqlx/query-4ea19fee016f1daeafdc466647d117910b19f540f19393b76aa6434e9d1d8502.json b/.sqlx/query-4ea19fee016f1daeafdc466647d117910b19f540f19393b76aa6434e9d1d8502.json index 4980f4f3..400f031d 100644 --- a/.sqlx/query-4ea19fee016f1daeafdc466647d117910b19f540f19393b76aa6434e9d1d8502.json +++ b/.sqlx/query-4ea19fee016f1daeafdc466647d117910b19f540f19393b76aa6434e9d1d8502.json @@ -90,9 +90,9 @@ "nullable": [ null, true, + false, true, - true, - true, + false, null, null, null, @@ -101,9 +101,9 @@ null, null, null, - true, + false, null, - true + false ] }, "hash": "4ea19fee016f1daeafdc466647d117910b19f540f19393b76aa6434e9d1d8502" diff --git a/.sqlx/query-df57cc22f7d63847abce1d0d15675ba8951faa1be2ea6b2bf6714b1aa9127a6f.json b/.sqlx/query-df57cc22f7d63847abce1d0d15675ba8951faa1be2ea6b2bf6714b1aa9127a6f.json deleted file mode 100644 index b6fd2fc8..00000000 --- a/.sqlx/query-df57cc22f7d63847abce1d0d15675ba8951faa1be2ea6b2bf6714b1aa9127a6f.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "-- we need to join tables from the pg_catalog since \"TRUNCATE\" triggers are \n-- not available in the information_schema.trigger table.\nselect \n t.tgname as \"name!\",\n c.relname as \"table_name!\",\n p.proname as \"proc_name!\",\n n.nspname as \"schema_name!\",\n t.tgtype as \"details_bitmask!\"\nfrom \n pg_catalog.pg_trigger t \n left join pg_catalog.pg_proc p on t.tgfoid = p.oid\n left join pg_catalog.pg_class c on t.tgrelid = c.oid\n left join pg_catalog.pg_namespace n on c.relnamespace = n.oid\nwhere \n -- triggers enforcing constraints (e.g. 
unique fields) should not be included.\n t.tgisinternal = false and \n t.tgconstraint = 0;\n", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "name!", - "type_info": "Name" - }, - { - "ordinal": 1, - "name": "table_name!", - "type_info": "Name" - }, - { - "ordinal": 2, - "name": "proc_name!", - "type_info": "Name" - }, - { - "ordinal": 3, - "name": "schema_name!", - "type_info": "Name" - }, - { - "ordinal": 4, - "name": "details_bitmask!", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false, - true, - true, - true, - false - ] - }, - "hash": "df57cc22f7d63847abce1d0d15675ba8951faa1be2ea6b2bf6714b1aa9127a6f" -} diff --git a/Cargo.lock b/Cargo.lock index d76baca3..49143908 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2976,6 +2976,24 @@ dependencies = [ "quote", ] +[[package]] +name = "pgt_plpgsql_check" +version = "0.0.0" +dependencies = [ + "pgt_console", + "pgt_diagnostics", + "pgt_query", + "pgt_query_ext", + "pgt_schema_cache", + "pgt_test_utils", + "pgt_text_size", + "regex", + "serde", + "serde_json", + "sqlx", + "tree-sitter", +] + [[package]] name = "pgt_query" version = "0.0.0" @@ -3163,6 +3181,7 @@ dependencies = [ "pgt_diagnostics", "pgt_fs", "pgt_lexer", + "pgt_plpgsql_check", "pgt_query", "pgt_query_ext", "pgt_schema_cache", diff --git a/Cargo.toml b/Cargo.toml index e243ab3e..d68aafe0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -76,6 +76,7 @@ pgt_lexer = { path = "./crates/pgt_lexer", version = "0.0.0" } pgt_lexer_codegen = { path = "./crates/pgt_lexer_codegen", version = "0.0.0" } pgt_lsp = { path = "./crates/pgt_lsp", version = "0.0.0" } pgt_markup = { path = "./crates/pgt_markup", version = "0.0.0" } +pgt_plpgsql_check = { path = "./crates/pgt_plpgsql_check", version = "0.0.0" } pgt_query = { path = "./crates/pgt_query", version = "0.0.0" } pgt_query_ext = { path = "./crates/pgt_query_ext", version = "0.0.0" } pgt_query_macros = { path = "./crates/pgt_query_macros", version = "0.0.0" } diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..10353bb2 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,16 @@ +FROM postgres:15 + +# Install build dependencies +RUN apt-get update && \ + apt-get install -y postgresql-server-dev-15 gcc make git && \ + cd /tmp && \ + git clone https://github.com/okbob/plpgsql_check.git && \ + cd plpgsql_check && \ + make && \ + make install && \ + apt-get remove -y postgresql-server-dev-15 gcc make git && \ + apt-get autoremove -y && \ + rm -rf /tmp/plpgsql_check /var/lib/apt/lists/* + +# Add initialization script directly +RUN echo "CREATE EXTENSION IF NOT EXISTS plpgsql_check;" > /docker-entrypoint-initdb.d/01-create-extension.sql \ No newline at end of file diff --git a/crates/pgt_diagnostics_categories/src/categories.rs b/crates/pgt_diagnostics_categories/src/categories.rs index b9d29698..14df90b9 100644 --- a/crates/pgt_diagnostics_categories/src/categories.rs +++ b/crates/pgt_diagnostics_categories/src/categories.rs @@ -32,6 +32,7 @@ define_categories! 
{ "flags/invalid", "project", "typecheck", + "plpgsql_check", "internalError/panic", "syntax", "dummy", diff --git a/crates/pgt_plpgsql_check/Cargo.toml b/crates/pgt_plpgsql_check/Cargo.toml new file mode 100644 index 00000000..75d1a52b --- /dev/null +++ b/crates/pgt_plpgsql_check/Cargo.toml @@ -0,0 +1,30 @@ +[package] +authors.workspace = true +categories.workspace = true +description = "" +edition.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +name = "pgt_plpgsql_check" +repository.workspace = true +version = "0.0.0" + + +[dependencies] +pgt_console = { workspace = true } +pgt_diagnostics = { workspace = true } +pgt_query = { workspace = true } +pgt_query_ext = { workspace = true } +pgt_schema_cache = { workspace = true } +pgt_text_size = { workspace = true } +regex = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +sqlx = { workspace = true } +tree-sitter = { workspace = true } + +[dev-dependencies] +pgt_test_utils = { workspace = true } + +[lib] diff --git a/crates/pgt_plpgsql_check/src/diagnostics.rs b/crates/pgt_plpgsql_check/src/diagnostics.rs new file mode 100644 index 00000000..a0daec13 --- /dev/null +++ b/crates/pgt_plpgsql_check/src/diagnostics.rs @@ -0,0 +1,245 @@ +use std::io; + +use pgt_console::markup; +use pgt_diagnostics::{Advices, Diagnostic, LogCategory, MessageAndDescription, Severity, Visit}; +use pgt_text_size::TextRange; + +use crate::{PlpgSqlCheckIssue, PlpgSqlCheckResult}; + +/// Find the first occurrence of target text that is not within string literals +fn find_text_outside_strings(text: &str, target: &str) -> Option { + let text_lower = text.to_lowercase(); + let target_lower = target.to_lowercase(); + let mut in_string = false; + let mut quote_char = '\0'; + let bytes = text_lower.as_bytes(); + let mut i = 0; + + while i < bytes.len() { + let ch = bytes[i] as char; + + if !in_string { + // Check if we're starting a string literal + if ch == '\'' || ch == '"' { + in_string = true; + quote_char = ch; + } else { + // Check if we found our target at this position + if text_lower[i..].starts_with(&target_lower) { + // Check if this is a complete word (not part of another identifier) + let is_word_start = + i == 0 || !bytes[i - 1].is_ascii_alphanumeric() && bytes[i - 1] != b'_'; + let target_end = i + target_lower.len(); + let is_word_end = target_end >= bytes.len() + || (!bytes[target_end].is_ascii_alphanumeric() + && bytes[target_end] != b'_'); + + if is_word_start && is_word_end { + return Some(i); + } + } + } + } else { + // We're inside a string literal + if ch == quote_char { + // Check if it's escaped (look for double quotes/apostrophes) + if i + 1 < bytes.len() && bytes[i + 1] as char == quote_char { + // Skip the escaped quote + i += 1; + } else { + // End of string literal + in_string = false; + quote_char = '\0'; + } + } + } + + i += 1; + } + + None +} + +/// A specialized diagnostic for plpgsql_check. 
+#[derive(Clone, Debug, Diagnostic)] +#[diagnostic(category = "plpgsql_check")] +pub struct PlPgSqlCheckDiagnostic { + #[location(span)] + pub span: Option<TextRange>, + #[description] + #[message] + pub message: MessageAndDescription, + #[advice] + pub advices: PlPgSqlCheckAdvices, + #[severity] + pub severity: Severity, +} + +#[derive(Debug, Clone)] +pub struct PlPgSqlCheckAdvices { + pub code: Option<String>, + /// the relation (table or view) where the issue was found, if applicable + /// only applicable for trigger functions + pub relation: Option<String>, +} + +impl Advices for PlPgSqlCheckAdvices { + fn record(&self, visitor: &mut dyn Visit) -> io::Result<()> { + // Show the error code if available + if let Some(code) = &self.code { + visitor.record_log( + LogCategory::Error, + &markup! { "SQL State: " {code} }, + )?; + } + + // Show relation information if available + if let Some(relation) = &self.relation { + visitor.record_log( + LogCategory::Info, + &markup! { "Relation: " {relation} }, + )?; + } + + Ok(()) + } +} + +/// Convert plpgsql_check results into diagnostics with optional relation info for triggers +pub fn create_diagnostics_from_check_result( + result: &PlpgSqlCheckResult, + fn_body: &str, + offset: usize, + relation: Option<String>, +) -> Vec<PlPgSqlCheckDiagnostic> { + result + .issues + .iter() + .map(|issue| { + let severity = match issue.level.as_str() { + "error" => Severity::Error, + "warning" => Severity::Warning, + "notice" => Severity::Hint, + _ => Severity::Information, + }; + + PlPgSqlCheckDiagnostic { + message: issue.message.clone().into(), + severity, + span: resolve_span(issue, fn_body, offset), + advices: PlPgSqlCheckAdvices { + code: issue.sql_state.clone(), + relation: relation.clone(), + }, + } + }) + .collect() +} + +fn resolve_span(issue: &PlpgSqlCheckIssue, fn_body: &str, offset: usize) -> Option<TextRange> { + let stmt = match issue.statement.as_ref() { + Some(s) => s, + None => { + return Some(TextRange::new( + (offset as u32).into(), + ((offset + fn_body.len()) as u32).into(), + )); + } + }; + + let line_number = stmt + .line_number + .parse::<usize>() + .expect("Expected line number to be a valid usize"); + + let text = &stmt.text; + + // calculate the offset to the target line + let line_offset: usize = fn_body + .lines() + .take(line_number - 1) + .map(|line| line.len() + 1) // +1 for newline + .sum(); + + // find the position within the target line + let line = fn_body.lines().nth(line_number - 1)?; + let start = line + .to_lowercase() + .find(&text.to_lowercase()) + .unwrap_or_else(|| { + line.char_indices() + .find_map(|(i, c)| if !c.is_whitespace() { Some(i) } else { None }) + .unwrap_or(0) + }); + + let stmt_offset = line_offset + start; + + if let Some(q) = &issue.query { + // first find the query within the fn body *after* stmt_offset, ignoring string literals + let query_start = find_text_outside_strings(&fn_body[stmt_offset..], &q.text) + .map(|pos| pos + stmt_offset); + + // the position is *within* the query text + let pos = q + .position + .parse::<usize>() + .expect("Expected query position to be a valid usize") + - 1; // -1 because the position is 1-based + + let start = query_start? 
+ pos; + + // the range of the diagnostics is the token that `pos` is on + // Find the end of the current token by looking for whitespace or SQL delimiters + let remaining = &fn_body[start..]; + let end = remaining + .char_indices() + .find(|(_, c)| { + c.is_whitespace() || matches!(c, ',' | ';' | ')' | '(' | '=' | '<' | '>') + }) + .map(|(i, _c)| { + i // just the token end, don't include delimiters + }) + .unwrap_or(remaining.len()); + + return Some(TextRange::new( + ((offset + start) as u32).into(), + ((offset + start + end) as u32).into(), + )); + } + + // if no query is present, the end range covers + // - if text is "IF" or "ELSIF", then until the next "THEN" + // - TODO: check "LOOP", "CASE", "WHILE", "EXCEPTION" and others + // - else: until the next semicolon or end of line + + if text.to_uppercase() == "IF" || text.to_uppercase() == "ELSIF" { + // Find the position of the next "THEN" after the statement + let remaining = &fn_body[stmt_offset..]; + if let Some(then_pos) = remaining.to_uppercase().find("THEN") { + let end = then_pos + "THEN".len(); + return Some(TextRange::new( + ((offset + stmt_offset) as u32).into(), + ((offset + stmt_offset + end) as u32).into(), + )); + } + } + + // if no specific end is found, use the next semicolon or the end of the line + let remaining = &fn_body[stmt_offset..]; + let end = remaining + .char_indices() + .find(|(_, c)| matches!(c, ';' | '\n' | '\r')) + .map(|(i, c)| { + if c == ';' { + i + 1 // include the semicolon + } else { + i // just the end of the line + } + }) + .unwrap_or(remaining.len()); + + Some(TextRange::new( + ((offset + stmt_offset) as u32).into(), + ((offset + stmt_offset + end) as u32).into(), + )) +} diff --git a/crates/pgt_plpgsql_check/src/lib.rs b/crates/pgt_plpgsql_check/src/lib.rs new file mode 100644 index 00000000..05e2f570 --- /dev/null +++ b/crates/pgt_plpgsql_check/src/lib.rs @@ -0,0 +1,794 @@ +mod diagnostics; + +pub use diagnostics::PlPgSqlCheckDiagnostic; +use diagnostics::create_diagnostics_from_check_result; +use pgt_query::protobuf::CreateFunctionStmt; +use regex::Regex; +use serde::Deserialize; +pub use sqlx::postgres::PgSeverity; +use sqlx::{Acquire, PgPool, Postgres, Transaction}; + +#[derive(Debug)] +pub struct PlPgSqlCheckParams<'a> { + pub conn: &'a PgPool, + pub sql: &'a str, + pub ast: &'a pgt_query::NodeEnum, + pub schema_cache: &'a pgt_schema_cache::SchemaCache, +} + +#[derive(Debug, Deserialize)] +pub struct PlpgSqlCheckResult { + pub function: String, + pub issues: Vec<PlpgSqlCheckIssue>, +} + +#[derive(Debug, Deserialize)] +pub struct PlpgSqlCheckIssue { + pub level: String, + pub message: String, + pub statement: Option<Statement>, + pub query: Option<Query>, + #[serde(rename = "sqlState")] + pub sql_state: Option<String>, +} + +#[derive(Debug, Deserialize)] +pub struct Statement { + #[serde(rename = "lineNumber")] + pub line_number: String, + pub text: String, +} + +#[derive(Debug, Deserialize)] +pub struct Query { + pub position: String, + pub text: String, +} + +/// check if the given node is a plpgsql function that should be checked +fn should_check_function<'a>( + ast: &'a pgt_query::NodeEnum, + schema_cache: &pgt_schema_cache::SchemaCache, +) -> Option<&'a CreateFunctionStmt> { + let create_fn = match ast { + pgt_query::NodeEnum::CreateFunctionStmt(stmt) => stmt, + _ => return None, + }; + + if pgt_query_ext::utils::find_option_value(create_fn, "language") != Some("plpgsql".to_string()) + { + return None; + } + + if !schema_cache + .extensions + .iter() + .any(|e| e.name == "plpgsql_check") + { + return None; + } + + Some(create_fn) 
+} + +/// check if a function is a trigger function +fn is_trigger_function(create_fn: &CreateFunctionStmt) -> bool { + create_fn + .return_type + .as_ref() + .map(|n| { + matches!( + pgt_query_ext::utils::parse_name(&n.names), + Some((None, name)) if name == "trigger" + ) + }) + .unwrap_or(false) +} + +/// build the function identifier string used by plpgsql_check +fn build_function_identifier( + create_fn: &CreateFunctionStmt, + fn_schema: &Option<String>, + fn_name: &str, +) -> String { + let args = create_fn + .parameters + .iter() + .filter_map(|arg| { + let node = match &arg.node { + Some(pgt_query::NodeEnum::FunctionParameter(n)) => n, + _ => return None, + }; + let type_name_node = node.arg_type.as_ref()?; + let type_name = match pgt_query_ext::utils::parse_name(&type_name_node.names) { + Some((schema, name)) => match schema { + Some(s) => format!("{}.{}", s, name), + None => name, + }, + None => return None, + }; + + if !type_name_node.array_bounds.is_empty() { + Some(format!("{}[]", type_name)) + } else { + Some(type_name) + } + }) + .collect::<Vec<_>>(); + + let fn_qualified_name = match fn_schema { + Some(schema) => format!("{}.{}", schema, fn_name), + None => fn_name.to_string(), + }; + + if args.is_empty() { + fn_qualified_name + } else { + format!("{}({})", fn_qualified_name, args.join(", ")) + } +} + +pub async fn check_plpgsql( + params: PlPgSqlCheckParams<'_>, +) -> Result<Vec<PlPgSqlCheckDiagnostic>, sqlx::Error> { + let create_fn = match should_check_function(params.ast, params.schema_cache) { + Some(stmt) => stmt, + None => return Ok(vec![]), + }; + + let (fn_schema, fn_name) = match pgt_query_ext::utils::parse_name(&create_fn.funcname) { + Some(n) => n, + None => return Ok(vec![]), + }; + + let fn_identifier = build_function_identifier(create_fn, &fn_schema, &fn_name); + + let fn_body = pgt_query_ext::utils::find_option_value(create_fn, "as") + .ok_or_else(|| sqlx::Error::Protocol("Failed to find function body".to_string()))?; + let offset = params + .sql + .find(&fn_body) + .ok_or_else(|| sqlx::Error::Protocol("Failed to find function body in SQL".to_string()))?; + let is_trigger = is_trigger_function(create_fn); + + let mut conn = params.conn.acquire().await?; + conn.close_on_drop(); + + let mut tx: Transaction<'_, Postgres> = conn.begin().await?; + + // disable function body checking to rely on plpgsql_check + sqlx::query("SET LOCAL check_function_bodies = off") + .execute(&mut *tx) + .await?; + + // make sure we run with "or replace" + let sql_with_replace = if !create_fn.replace { + let re = Regex::new(r"(?i)\bCREATE\s+FUNCTION\b").unwrap(); + re.replace(params.sql, "CREATE OR REPLACE FUNCTION") + .to_string() + } else { + params.sql.to_string() + }; + + // create the function - this should always succeed + sqlx::query(&sql_with_replace).execute(&mut *tx).await?; + + // run plpgsql_check and collect results with their relations + let results_with_relations: Vec<(String, Option<String>)> = if is_trigger { + let mut results = Vec::new(); + + for trigger in params.schema_cache.triggers.iter() { + if trigger.proc_name == fn_name + && (fn_schema.is_none() || fn_schema.as_deref() == Some(&trigger.proc_schema)) + { + let relation = format!("{}.{}", trigger.table_schema, trigger.table_name); + + let result: Option<String> = sqlx::query_scalar(&format!( + "select plpgsql_check_function('{}', '{}', format := 'json')", + fn_identifier, relation + )) + .fetch_optional(&mut *tx) + .await? 
+ .flatten(); + + if let Some(result) = result { + results.push((result, Some(relation))); + } + } + } + + results + } else { + let result: Option<String> = sqlx::query_scalar(&format!( + "select plpgsql_check_function('{}', format := 'json')", + fn_identifier + )) + .fetch_optional(&mut *tx) + .await? + .flatten(); + + if let Some(result) = result { + vec![(result, None)] + } else { + vec![] + } + }; + + tx.rollback().await?; + + // Parse results and create diagnostics + let mut diagnostics = Vec::new(); + for (result_json, relation) in results_with_relations { + let check_result: PlpgSqlCheckResult = serde_json::from_str(&result_json).map_err(|e| { + sqlx::Error::Protocol(format!("Failed to parse plpgsql_check result: {}", e)) + })?; + + let mut result_diagnostics = + create_diagnostics_from_check_result(&check_result, &fn_body, offset, relation); + diagnostics.append(&mut result_diagnostics); + } + + Ok(diagnostics) +} + +#[cfg(all(test, not(target_os = "windows")))] +mod tests { + use sqlx::{Executor, PgPool}; + + /// Test helper to run plpgsql_check and return diagnostics with span text + async fn run_plpgsql_check_test( + test_db: &PgPool, + setup_sql: &str, + create_fn_sql: &str, + ) -> Result<(Vec<super::PlPgSqlCheckDiagnostic>, Vec<Option<String>>), Box<dyn std::error::Error>> + { + test_db.execute(setup_sql).await?; + + let ast = pgt_query::parse(create_fn_sql)? + .into_root() + .ok_or("Failed to parse SQL root")?; + let schema_cache = pgt_schema_cache::SchemaCache::load(test_db).await?; + + let diagnostics = super::check_plpgsql(super::PlPgSqlCheckParams { + conn: test_db, + sql: create_fn_sql, + ast: &ast, + schema_cache: &schema_cache, + }) + .await?; + + let span_texts = diagnostics + .iter() + .map(|diag| { + diag.span.as_ref().map(|s| { + let start = usize::from(s.start()); + let end = usize::from(s.end()); + create_fn_sql[start..end].to_string() + }) + }) + .collect(); + + Ok((diagnostics, span_texts)) + } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn test_plpgsql_check_if_expr(test_db: PgPool) { + let setup = r#" + create extension if not exists plpgsql_check; + + CREATE TABLE t1(a int, b int); + "#; + + let create_fn_sql = r#" + CREATE OR REPLACE FUNCTION public.f1() + RETURNS void + LANGUAGE plpgsql + AS $function$ + declare r t1 := (select t1 from t1 where a = 1); + BEGIN + if r.c is null or + true is false + then -- there is bug - table t1 missing "c" column + RAISE NOTICE 'c is null'; + end if; + END; + $function$; + "#; + + let (diagnostics, span_texts) = run_plpgsql_check_test(&test_db, setup, create_fn_sql) + .await + .expect("Failed to run plpgsql_check test"); + + assert_eq!(diagnostics.len(), 1); + assert!(matches!( + diagnostics[0].severity, + pgt_diagnostics::Severity::Error + )); + assert_eq!( + span_texts[0].as_deref(), + Some("if r.c is null or\n true is false\n then") + ); + } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn test_plpgsql_check_missing_var(test_db: PgPool) { + let setup = r#" + create extension if not exists plpgsql_check; + + CREATE TABLE t1(a int, b int); + "#; + + let create_fn_sql = r#" + CREATE OR REPLACE FUNCTION public.f1() + RETURNS void + LANGUAGE plpgsql + AS $function$ + BEGIN + SELECT 1 from t1 where a = v_c; + END; + $function$; + "#; + + let (diagnostics, span_texts) = run_plpgsql_check_test(&test_db, setup, create_fn_sql) + .await + .expect("Failed to run plpgsql_check test"); + assert_eq!(diagnostics.len(), 1); + assert!(matches!( + diagnostics[0].severity, + pgt_diagnostics::Severity::Error + )); + assert_eq!(span_texts[0].as_deref(), Some("v_c")); + } + 
+ #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn test_plpgsql_check_missing_col_if_stmt(test_db: PgPool) { + let setup = r#" + create extension if not exists plpgsql_check; + + CREATE TABLE t1(a int, b int); + "#; + + let create_fn_sql = r#" + CREATE OR REPLACE FUNCTION public.f1() + RETURNS void + LANGUAGE plpgsql + AS $function$ + BEGIN + if (select c from t1 where id = 1) is null then -- there is bug - table t1 missing "c" column + RAISE NOTICE 'c is null'; + end if; + END; + $function$; + "#; + + let (diagnostics, span_texts) = run_plpgsql_check_test(&test_db, setup, create_fn_sql) + .await + .expect("Failed to run plpgsql_check test"); + assert_eq!(diagnostics.len(), 1); + assert!(matches!( + diagnostics[0].severity, + pgt_diagnostics::Severity::Error + )); + assert_eq!(span_texts[0].as_deref(), Some("c")); + } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn test_plpgsql_check(test_db: PgPool) { + let setup = r#" + create extension if not exists plpgsql_check; + + CREATE TABLE t1(a int, b int); + "#; + + let create_fn_sql = r#" + CREATE OR REPLACE FUNCTION public.f1() + RETURNS void + LANGUAGE plpgsql + AS $function$ + DECLARE r record; + BEGIN + FOR r IN SELECT * FROM t1 + LOOP + RAISE NOTICE '%', r.c; -- there is bug - table t1 missing "c" column + END LOOP; + END; + $function$; + "#; + + let (diagnostics, span_texts) = run_plpgsql_check_test(&test_db, setup, create_fn_sql) + .await + .expect("Failed to run plpgsql_check test"); + + assert_eq!(diagnostics.len(), 1); + assert!(matches!( + diagnostics[0].severity, + pgt_diagnostics::Severity::Error + )); + assert_eq!(span_texts[0].as_deref(), Some("RAISE NOTICE '%', r.c;")); + } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn test_plpgsql_check_stacked_diagnostics(test_db: PgPool) { + let setup = r#" + create extension if not exists plpgsql_check; + "#; + + let create_fn_sql = r#" + create or replace function fxtest() + returns void as $$ + declare + v_sqlstate text; + v_message text; + v_context text; + begin + get stacked diagnostics + v_sqlstate = returned_sqlstate, + v_message = message_text, + v_context = pg_exception_context; + end; + $$ language plpgsql; + "#; + + let (diagnostics, span_texts) = run_plpgsql_check_test(&test_db, setup, create_fn_sql) + .await + .expect("Failed to run plpgsql_check test"); + + assert!(!diagnostics.is_empty()); + assert!(matches!( + diagnostics[0].severity, + pgt_diagnostics::Severity::Error + )); + assert_eq!(span_texts[0].as_deref(), Some("get stacked diagnostics")); + } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn test_plpgsql_check_constant_refcursor(test_db: PgPool) { + let setup = r#" + create extension if not exists plpgsql_check; + create table rc_test(a int); + "#; + + let create_fn_sql = r#" + create function return_constant_refcursor() returns refcursor as $$ + declare + rc constant refcursor; + begin + open rc for select a from rc_test; + return rc; + end + $$ language plpgsql; + "#; + + let (diagnostics, span_texts) = run_plpgsql_check_test(&test_db, setup, create_fn_sql) + .await + .expect("Failed to run plpgsql_check test"); + + assert!(!diagnostics.is_empty()); + assert!(matches!( + diagnostics[0].severity, + pgt_diagnostics::Severity::Error + )); + assert_eq!( + span_texts[0].as_deref(), + Some("open rc for select a from rc_test;") + ); + } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn test_plpgsql_check_constant_assignment(test_db: PgPool) { + let setup = r#" + create 
extension if not exists plpgsql_check; + + create procedure p1(a int, out b int) + as $$ + begin + b := a + 10; + end; + $$ language plpgsql; + "#; + + let create_fn_sql = r#" + create function f1() + returns void as $$ + declare b constant int; + begin + call p1(10, b); + end; + $$ language plpgsql; + "#; + + let (diagnostics, span_texts) = run_plpgsql_check_test(&test_db, setup, create_fn_sql) + .await + .expect("Failed to run plpgsql_check test"); + + assert!(!diagnostics.is_empty()); + assert!(matches!( + diagnostics[0].severity, + pgt_diagnostics::Severity::Error + )); + assert_eq!(span_texts[0].as_deref(), Some("call p1(10, b);")); + } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn test_plpgsql_check_missing_procedure(test_db: PgPool) { + let setup = r#" + create extension if not exists plpgsql_check; + "#; + + let create_fn_sql = r#" + create function f1() + returns void as $$ + declare b constant int; + begin + call p1(10, b); + end; + $$ language plpgsql; + "#; + + let (diagnostics, span_texts) = run_plpgsql_check_test(&test_db, setup, create_fn_sql) + .await + .expect("Failed to run plpgsql_check test"); + + assert!(!diagnostics.is_empty()); + assert!(matches!( + diagnostics[0].severity, + pgt_diagnostics::Severity::Error + )); + assert_eq!(span_texts[0].as_deref(), Some("p1")); + } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn test_plpgsql_check_dml_in_stable_function(test_db: PgPool) { + let setup = r#" + create extension if not exists plpgsql_check; + create table t1(a int, b int); + "#; + + let create_fn_sql = r#" + create function f1() + returns void as $$ + begin + if false then + insert into t1 values(10,20); + update t1 set a = 10; + delete from t1; + end if; + end; + $$ language plpgsql stable; + "#; + + let (diagnostics, span_texts) = run_plpgsql_check_test(&test_db, setup, create_fn_sql) + .await + .expect("Failed to run plpgsql_check test"); + + assert_eq!(diagnostics.len(), 1); + assert!(span_texts[0].is_some()); + + assert_eq!(diagnostics[0].advices.code.as_deref(), Some("0A000")); + } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn test_plpgsql_check_record_field_assignment(test_db: PgPool) { + let setup = r#" + create extension if not exists plpgsql_check; + + create function g1() returns table(a int, b int) as $$ + begin + return query select 1, 2; + end; + $$ language plpgsql; + "#; + + let create_fn_sql = r#" + create or replace function f1() + returns void as $$ + declare r record; + begin + for r in select * from g1() + loop + r.c := 20; + end loop; + end; + $$ language plpgsql; + "#; + + let (diagnostics, span_texts) = run_plpgsql_check_test(&test_db, setup, create_fn_sql) + .await + .expect("Failed to run plpgsql_check test"); + + assert!(!diagnostics.is_empty()); + assert!(matches!( + diagnostics[0].severity, + pgt_diagnostics::Severity::Error + )); + assert!(span_texts[0].is_some()); + } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn test_plpgsql_check_trigger_basic(test_db: PgPool) { + let setup = r#" + create extension if not exists plpgsql_check; + + CREATE TABLE users( + id serial primary key, + name text not null, + email text + ); + + CREATE OR REPLACE FUNCTION public.log_user_changes() + RETURNS trigger + LANGUAGE plpgsql + AS $function$ + BEGIN + -- Intentional error: referencing non-existent column + INSERT INTO audit_log(table_name, changed_id, old_email, new_email) + VALUES ('users', NEW.id, OLD.email, NEW.email); + RETURN NEW; + END; + $function$; + + 
CREATE TRIGGER trg_users_audit + AFTER UPDATE ON users + FOR EACH ROW + EXECUTE FUNCTION public.log_user_changes(); + "#; + + let create_fn_sql = r#" + CREATE OR REPLACE FUNCTION public.log_user_changes() + RETURNS trigger + LANGUAGE plpgsql + AS $function$ + BEGIN + -- Intentional error: referencing non-existent column + INSERT INTO audit_log(table_name, changed_id, old_email, new_email) + VALUES ('users', NEW.id, OLD.email, NEW.email); + RETURN NEW; + END; + $function$; + "#; + + let (diagnostics, span_texts) = run_plpgsql_check_test(&test_db, setup, create_fn_sql) + .await + .expect("Failed to run plpgsql_check test"); + + assert!(!diagnostics.is_empty()); + assert!(matches!( + diagnostics[0].severity, + pgt_diagnostics::Severity::Error + )); + assert!(diagnostics[0].advices.relation.is_some()); + assert_eq!( + diagnostics[0].advices.relation.as_deref(), + Some("public.users") + ); + assert_eq!(span_texts[0].as_deref(), Some("audit_log")); + } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn test_plpgsql_check_trigger_missing_column(test_db: PgPool) { + let setup = r#" + create extension if not exists plpgsql_check; + + CREATE TABLE products( + id serial primary key, + name text not null, + price numeric(10,2) + ); + + CREATE OR REPLACE FUNCTION public.validate_product() + RETURNS trigger + LANGUAGE plpgsql + AS $function$ + BEGIN + -- Error: referencing non-existent column + IF NEW.category IS NULL THEN + RAISE EXCEPTION 'Category is required'; + END IF; + RETURN NEW; + END; + $function$; + + CREATE TRIGGER trg_product_validation + BEFORE INSERT OR UPDATE ON products + FOR EACH ROW + EXECUTE FUNCTION public.validate_product(); + "#; + + let create_fn_sql = r#" + CREATE OR REPLACE FUNCTION public.validate_product() + RETURNS trigger + LANGUAGE plpgsql + AS $function$ + BEGIN + -- Error: referencing non-existent column + IF NEW.category IS NULL THEN + RAISE EXCEPTION 'Category is required'; + END IF; + RETURN NEW; + END; + $function$; + "#; + + let (diagnostics, span_texts) = run_plpgsql_check_test(&test_db, setup, create_fn_sql) + .await + .expect("Failed to run plpgsql_check test"); + + assert!(!diagnostics.is_empty()); + assert!(matches!( + diagnostics[0].severity, + pgt_diagnostics::Severity::Error + )); + assert!(span_texts[0].as_deref().unwrap().contains("category")); + assert_eq!( + diagnostics[0].advices.relation.as_deref(), + Some("public.products") + ); + } + + #[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] + async fn test_plpgsql_check_trigger_multiple_tables(test_db: PgPool) { + let setup = r#" + create extension if not exists plpgsql_check; + + CREATE TABLE table_a( + id serial primary key, + name text + ); + + CREATE TABLE table_b( + id serial primary key, + description text + ); + + CREATE OR REPLACE FUNCTION public.generic_audit() + RETURNS trigger + LANGUAGE plpgsql + AS $function$ + BEGIN + -- Error: referencing column that doesn't exist in both tables + INSERT INTO audit_log(table_name, record_id, old_status) + VALUES (TG_TABLE_NAME, NEW.id, OLD.status); + RETURN NEW; + END; + $function$; + + CREATE TRIGGER trg_audit_a + AFTER UPDATE ON table_a + FOR EACH ROW + EXECUTE FUNCTION public.generic_audit(); + + CREATE TRIGGER trg_audit_b + AFTER UPDATE ON table_b + FOR EACH ROW + EXECUTE FUNCTION public.generic_audit(); + "#; + + let create_fn_sql = r#" + CREATE OR REPLACE FUNCTION public.generic_audit() + RETURNS trigger + LANGUAGE plpgsql + AS $function$ + BEGIN + -- Error: referencing column that doesn't exist in both tables + INSERT INTO 
audit_log(table_name, record_id, old_status) + VALUES (TG_TABLE_NAME, NEW.id, OLD.status); + RETURN NEW; + END; + $function$; + "#; + + let (diagnostics, _span_texts) = run_plpgsql_check_test(&test_db, setup, create_fn_sql) + .await + .expect("Failed to run plpgsql_check test"); + + assert!(!diagnostics.is_empty()); + assert!(diagnostics.len() >= 2); + + let relations: Vec<_> = diagnostics + .iter() + .filter_map(|d| d.advices.relation.as_ref()) + .collect(); + assert!(relations.contains(&&"public.table_a".to_string())); + assert!(relations.contains(&&"public.table_b".to_string())); + } +} diff --git a/crates/pgt_query_ext/src/lib.rs b/crates/pgt_query_ext/src/lib.rs index 4c630487..b0288da8 100644 --- a/crates/pgt_query_ext/src/lib.rs +++ b/crates/pgt_query_ext/src/lib.rs @@ -1 +1,2 @@ pub mod diagnostics; +pub mod utils; diff --git a/crates/pgt_workspace/src/workspace/server/function_utils.rs b/crates/pgt_query_ext/src/utils.rs similarity index 63% rename from crates/pgt_workspace/src/workspace/server/function_utils.rs rename to crates/pgt_query_ext/src/utils.rs index 74e76ff2..6dedebea 100644 --- a/crates/pgt_workspace/src/workspace/server/function_utils.rs +++ b/crates/pgt_query_ext/src/utils.rs @@ -55,3 +55,46 @@ pub fn parse_name(nodes: &[pgt_query::protobuf::Node]) -> Option<(Option<String>, String)> { _ => None, } } + +#[cfg(test)] +mod tests { + use crate::utils::{find_option_value, parse_name}; + + #[test] + fn test_find_option_value() { + let input = " + CREATE OR REPLACE FUNCTION public.f1() + RETURNS boolean + LANGUAGE plpgsql + AS $function$ + declare r t1 := (select t1 from t1 where a = 1); + BEGIN + if r.c is null or + true is false + then -- there is bug - table t1 missing \"c\" column + RAISE NOTICE 'c is null'; + end if; + END; + $function$; +" + .trim(); + + let ast = pgt_query::parse(input).unwrap().into_root().unwrap(); + let create_fn = match &ast { + pgt_query::NodeEnum::CreateFunctionStmt(stmt) => stmt, + _ => panic!("Expected CreateFunctionStmt"), + }; + + assert_eq!( + find_option_value(create_fn, "language"), + Some("plpgsql".to_string()) + ); + + assert!(find_option_value(create_fn, "as").is_some(),); + + assert_eq!( + parse_name(&create_fn.return_type.as_ref().unwrap().names), + Some((Some("pg_catalog".to_string()), "bool".to_string())) + ); + } +} diff --git a/crates/pgt_schema_cache/src/queries/triggers.sql b/crates/pgt_schema_cache/src/queries/triggers.sql index c28cc39f..895d1be0 100644 --- a/crates/pgt_schema_cache/src/queries/triggers.sql +++ b/crates/pgt_schema_cache/src/queries/triggers.sql @@ -1,17 +1,18 @@ --- we need to join tables from the pg_catalog since "TRUNCATE" triggers are +-- we need to join tables from the pg_catalog since "TRUNCATE" triggers are -- not available in the information_schema.trigger table. -select - t.tgname as "name!", - c.relname as "table_name!", - p.proname as "proc_name!", - n.nspname as "schema_name!", - t.tgtype as "details_bitmask!" -from - pg_catalog.pg_trigger t - left join pg_catalog.pg_proc p on t.tgfoid = p.oid - left join pg_catalog.pg_class c on t.tgrelid = c.oid - left join pg_catalog.pg_namespace n on c.relnamespace = n.oid -where - -- triggers enforcing constraints (e.g. unique fields) should not be included. - t.tgisinternal = false and - t.tgconstraint = 0; +select + t.tgname as "name!", + c.relname as "table_name!", + p.proname as "proc_name!", + proc_ns.nspname as "proc_schema!", + table_ns.nspname as "table_schema!", + t.tgtype as "details_bitmask!" 
+from + pg_catalog.pg_trigger t +left join pg_catalog.pg_proc p on t.tgfoid = p.oid +left join pg_catalog.pg_class c on t.tgrelid = c.oid +left join pg_catalog.pg_namespace table_ns on c.relnamespace = table_ns.oid +left join pg_catalog.pg_namespace proc_ns on p.pronamespace = proc_ns.oid +where + t.tgisinternal = false and + t.tgconstraint = 0; diff --git a/crates/pgt_schema_cache/src/schema_cache.rs b/crates/pgt_schema_cache/src/schema_cache.rs index df7239ea..84bcd77c 100644 --- a/crates/pgt_schema_cache/src/schema_cache.rs +++ b/crates/pgt_schema_cache/src/schema_cache.rs @@ -7,7 +7,7 @@ use crate::schemas::Schema; use crate::tables::Table; use crate::types::PostgresType; use crate::versions::Version; -use crate::{Role, Trigger}; +use crate::{Extension, Role, Trigger}; #[derive(Debug, Default)] pub struct SchemaCache { @@ -18,13 +18,25 @@ pub struct SchemaCache { pub versions: Vec<Version>, pub columns: Vec<Column>, pub policies: Vec<Policy>, + pub extensions: Vec<Extension>, pub triggers: Vec<Trigger>, pub roles: Vec<Role>, } impl SchemaCache { pub async fn load(pool: &PgPool) -> Result<SchemaCache, sqlx::Error> { - let (schemas, tables, functions, types, versions, columns, policies, triggers, roles) = futures_util::try_join!( + let ( + schemas, + tables, + functions, + types, + versions, + columns, + policies, + triggers, + roles, + extensions, + ) = futures_util::try_join!( Schema::load(pool), Table::load(pool), Function::load(pool), @@ -33,7 +45,8 @@ impl SchemaCache { Column::load(pool), Policy::load(pool), Trigger::load(pool), - Role::load(pool) + Role::load(pool), + Extension::load(pool), )?; Ok(SchemaCache { @@ -46,6 +59,7 @@ impl SchemaCache { policies, triggers, roles, + extensions, }) } diff --git a/crates/pgt_schema_cache/src/triggers.rs b/crates/pgt_schema_cache/src/triggers.rs index 2b2a3aff..d0a4788a 100644 --- a/crates/pgt_schema_cache/src/triggers.rs +++ b/crates/pgt_schema_cache/src/triggers.rs @@ -82,20 +82,22 @@ impl TryFrom<i16> for TriggerTiming { pub struct TriggerQueried { name: String, table_name: String, - schema_name: String, + table_schema: String, proc_name: String, + proc_schema: String, details_bitmask: i16, } #[derive(Debug, PartialEq, Eq)] pub struct Trigger { - name: String, - table_name: String, - schema_name: String, - proc_name: String, - affected: TriggerAffected, - timing: TriggerTiming, - events: Vec<TriggerEvent>, + pub name: String, + pub table_name: String, + pub table_schema: String, + pub proc_name: String, + pub proc_schema: String, + pub affected: TriggerAffected, + pub timing: TriggerTiming, + pub events: Vec<TriggerEvent>, } impl From<TriggerQueried> for Trigger { @@ -104,7 +106,8 @@ impl From<TriggerQueried> for Trigger { name: value.name, table_name: value.table_name, proc_name: value.proc_name, - schema_name: value.schema_name, + proc_schema: value.proc_schema, + table_schema: value.table_schema, affected: value.details_bitmask.into(), timing: value.details_bitmask.try_into().unwrap(), events: TriggerEvents::from(value.details_bitmask).0, @@ -141,7 +144,7 @@ mod tests { id serial primary key, name text ); - + create or replace function public.log_user_insert() returns trigger as $$ begin @@ -149,17 +152,17 @@ mod tests { return new; end; $$ language plpgsql; - + create trigger trg_users_insert before insert on public.users for each row execute function public.log_user_insert(); - + create trigger trg_users_update after update or insert on public.users for each statement execute function public.log_user_insert(); - + create trigger trg_users_delete before delete on public.users for each row @@ -186,7 +189,7 @@ mod tests { .iter() .find(|t| t.name == "trg_users_insert") 
.unwrap(); - assert_eq!(insert_trigger.schema_name, "public"); + assert_eq!(insert_trigger.table_schema, "public"); assert_eq!(insert_trigger.table_name, "users"); assert_eq!(insert_trigger.timing, TriggerTiming::Before); assert_eq!(insert_trigger.affected, TriggerAffected::Row); @@ -197,7 +200,7 @@ mod tests { .iter() .find(|t| t.name == "trg_users_update") .unwrap(); - assert_eq!(insert_trigger.schema_name, "public"); + assert_eq!(insert_trigger.table_schema, "public"); assert_eq!(insert_trigger.table_name, "users"); assert_eq!(update_trigger.timing, TriggerTiming::After); assert_eq!(update_trigger.affected, TriggerAffected::Statement); @@ -209,7 +212,7 @@ mod tests { .iter() .find(|t| t.name == "trg_users_delete") .unwrap(); - assert_eq!(insert_trigger.schema_name, "public"); + assert_eq!(insert_trigger.table_schema, "public"); assert_eq!(insert_trigger.table_name, "users"); assert_eq!(delete_trigger.timing, TriggerTiming::Before); assert_eq!(delete_trigger.affected, TriggerAffected::Row); @@ -275,7 +278,7 @@ mod tests { .iter() .find(|t| t.name == "trg_docs_instead_update") .unwrap(); - assert_eq!(instead_trigger.schema_name, "public"); + assert_eq!(instead_trigger.table_schema, "public"); assert_eq!(instead_trigger.table_name, "docs_view"); assert_eq!(instead_trigger.timing, TriggerTiming::Instead); assert_eq!(instead_trigger.affected, TriggerAffected::Row); @@ -286,7 +289,7 @@ mod tests { .iter() .find(|t| t.name == "trg_docs_truncate") .unwrap(); - assert_eq!(truncate_trigger.schema_name, "public"); + assert_eq!(truncate_trigger.table_schema, "public"); assert_eq!(truncate_trigger.table_name, "docs"); assert_eq!(truncate_trigger.timing, TriggerTiming::After); assert_eq!(truncate_trigger.affected, TriggerAffected::Statement); diff --git a/crates/pgt_typecheck/src/lib.rs b/crates/pgt_typecheck/src/lib.rs index a3dde01d..ceb36b94 100644 --- a/crates/pgt_typecheck/src/lib.rs +++ b/crates/pgt_typecheck/src/lib.rs @@ -3,7 +3,6 @@ mod typed_identifier; pub use diagnostics::TypecheckDiagnostic; use diagnostics::create_type_error; -use pgt_text_size::TextRange; use sqlx::postgres::PgDatabaseError; pub use sqlx::postgres::PgSeverity; use sqlx::{Executor, PgPool}; @@ -20,19 +19,6 @@ pub struct TypecheckParams<'a> { pub identifiers: Vec<TypedIdentifier>, } -#[derive(Debug, Clone)] -pub struct TypeError { - pub message: String, - pub code: String, - pub severity: PgSeverity, - pub position: Option<usize>, - pub range: Option<TextRange>, - pub table: Option<String>, - pub column: Option<String>, - pub data_type: Option<String>, - pub constraint: Option<String>, -} - pub async fn check_sql( params: TypecheckParams<'_>, ) -> Result<Option<TypecheckDiagnostic>, sqlx::Error> { diff --git a/crates/pgt_workspace/Cargo.toml b/crates/pgt_workspace/Cargo.toml index 4acc0600..efded47c 100644 --- a/crates/pgt_workspace/Cargo.toml +++ b/crates/pgt_workspace/Cargo.toml @@ -26,6 +26,7 @@ pgt_console = { workspace = true } pgt_diagnostics = { workspace = true } pgt_fs = { workspace = true, features = ["serde"] } pgt_lexer = { workspace = true } +pgt_plpgsql_check = { workspace = true } pgt_query = { workspace = true } pgt_query_ext = { workspace = true } pgt_schema_cache = { workspace = true } diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs index c6ed0827..f4a9561f 100644 --- a/crates/pgt_workspace/src/workspace/server.rs +++ b/crates/pgt_workspace/src/workspace/server.rs @@ -55,7 +55,6 @@ mod async_helper; mod connection_key; mod connection_manager; pub(crate) mod document; -mod function_utils; mod migration; mod pg_query; mod 
schema_cache_manager; @@ -454,7 +453,8 @@ impl Workspace for WorkspaceServer { let path_clone = params.path.clone(); let schema_cache = self.schema_cache.load(pool.clone())?; let input = doc.iter(TypecheckDiagnosticsMapper).collect::<Vec<_>>(); - // sorry for the ugly code :( + + // Combined async context for both typecheck and plpgsql_check let async_results = run_async(async move { stream::iter(input) .map(|(id, range, ast, cst, sign)| { @@ -462,8 +462,11 @@ impl Workspace for WorkspaceServer { let path = path_clone.clone(); let schema_cache = Arc::clone(&schema_cache); async move { + let mut diagnostics = Vec::new(); + if let Some(ast) = ast { - pgt_typecheck::check_sql(TypecheckParams { + // Type checking + let typecheck_result = pgt_typecheck::check_sql(TypecheckParams { conn: &pool, sql: id.content(), ast: &ast, @@ -486,18 +489,40 @@ impl Workspace for WorkspaceServer { }) .unwrap_or_default(), }) + .await; + + if let Ok(Some(diag)) = typecheck_result { + let r = diag.location().span.map(|span| span + range.start()); + diagnostics.push( + diag.with_file_path(path.as_path().display().to_string()) + .with_file_span(r.unwrap_or(range)), + ); + } + + // plpgsql_check + let plpgsql_check_results = pgt_plpgsql_check::check_plpgsql( + pgt_plpgsql_check::PlPgSqlCheckParams { + conn: &pool, + sql: id.content(), + ast: &ast, + schema_cache: schema_cache.as_ref(), + }, + ) .await - .map(|d| { - d.map(|d| { - let r = d.location().span.map(|span| span + range.start()); + .unwrap_or_else(|_| vec![]); + println!("{:#?}", plpgsql_check_results); + + for d in plpgsql_check_results { + let r = d.span.map(|span| span + range.start()); + diagnostics.push( d.with_file_path(path.as_path().display().to_string()) - .with_file_span(r.unwrap_or(range)) - }) - }) - } else { - Ok(None) + .with_file_span(r.unwrap_or(range)), + ); + } } + + Ok::<Vec<_>, sqlx::Error>(diagnostics) + } }) .buffer_unordered(10) @@ -506,8 +531,8 @@ impl Workspace for WorkspaceServer { for result in async_results.into_iter() { - let result = result?; - if let Some(diag) = result { + let diagnostics_batch = result?; + for diag in diagnostics_batch { diagnostics.push(SDiagnostic::new(diag)); } } @@ -548,6 +573,20 @@ impl Workspace for WorkspaceServer { analysable_stmts.push(node); } if let Some(diag) = diagnostic { + // ignore the syntax error if we already have more specialized diagnostics for the + // same statement. + // this is important for create function statements, where we might already have detailed + // diagnostics from plpgsql_check. 
+ if diagnostics.iter().any(|d| { + d.location().span.is_some_and(|async_loc| { + diag.location() + .span + .is_some_and(|syntax_loc| syntax_loc.contains_range(async_loc)) + }) + }) { + continue; + } + diagnostics.push(SDiagnostic::new( diag.with_file_path(path.clone()) .with_severity(Severity::Error), diff --git a/crates/pgt_workspace/src/workspace/server.tests.rs b/crates/pgt_workspace/src/workspace/server.tests.rs index 0578f90f..ef5ba267 100644 --- a/crates/pgt_workspace/src/workspace/server.tests.rs +++ b/crates/pgt_workspace/src/workspace/server.tests.rs @@ -8,7 +8,7 @@ use pgt_configuration::{ use pgt_diagnostics::Diagnostic; use pgt_fs::PgTPath; use pgt_text_size::TextRange; -use sqlx::PgPool; +use sqlx::{Executor, PgPool}; use crate::{ Workspace, WorkspaceError, @@ -206,3 +206,74 @@ async fn correctly_ignores_files() { assert!(execute_statement_result.is_ok_and(|res| res == ExecuteStatementResult::default())); } + +#[cfg(all(test, not(target_os = "windows")))] +#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] +async fn test_dedupe_diagnostics(test_db: PgPool) { + let mut conf = PartialConfiguration::init(); + conf.merge_with(PartialConfiguration { + db: Some(PartialDatabaseConfiguration { + database: Some( + test_db + .connect_options() + .get_database() + .unwrap() + .to_string(), + ), + ..Default::default() + }), + ..Default::default() + }); + + let workspace = get_test_workspace(Some(conf)).expect("Unable to create test workspace"); + + let path = PgTPath::new("test.sql"); + + let setup_sql = "CREATE EXTENSION IF NOT EXISTS plpgsql_check;"; + test_db.execute(setup_sql).await.expect("setup sql failed"); + + let content = r#" + CREATE OR REPLACE FUNCTION public.f1() + RETURNS void + LANGUAGE plpgsql + AS $function$ + decare r text; + BEGIN + select '1' into into r; + END; + $function$; + "#; + + workspace + .open_file(OpenFileParams { + path: path.clone(), + content: content.into(), + version: 1, + }) + .expect("Unable to open test file"); + + let diagnostics = workspace + .pull_diagnostics(crate::workspace::PullDiagnosticsParams { + path: path.clone(), + categories: RuleCategories::all(), + max_diagnostics: 100, + only: vec![], + skip: vec![], + }) + .expect("Unable to pull diagnostics") + .diagnostics; + + assert_eq!(diagnostics.len(), 1, "Expected one diagnostic"); + + let diagnostic = &diagnostics[0]; + + assert_eq!( + diagnostic.category().map(|c| c.name()), + Some("plpgsql_check") + ); + + assert_eq!( + diagnostic.location().span, + Some(TextRange::new(115.into(), 210.into())) + ); +} diff --git a/crates/pgt_workspace/src/workspace/server/pg_query.rs b/crates/pgt_workspace/src/workspace/server/pg_query.rs index e90dd41b..05f1425d 100644 --- a/crates/pgt_workspace/src/workspace/server/pg_query.rs +++ b/crates/pgt_workspace/src/workspace/server/pg_query.rs @@ -5,7 +5,6 @@ use lru::LruCache; use pgt_query_ext::diagnostics::*; use pgt_text_size::TextRange; -use super::function_utils::find_option_value; use super::statement_identifier::StatementId; const DEFAULT_CACHE_SIZE: usize = 1000; @@ -61,7 +60,7 @@ impl PgQueryStore { _ => return None, }; - let language = find_option_value(create_fn, "language")?; + let language = pgt_query_ext::utils::find_option_value(create_fn, "language")?; if language != "plpgsql" { return None; @@ -73,7 +72,7 @@ impl PgQueryStore { return Some(existing.clone()); } - let sql_body = find_option_value(create_fn, "as")?; + let sql_body = pgt_query_ext::utils::find_option_value(create_fn, "as")?; let start = statement.content().find(&sql_body)?; 
let end = start + sql_body.len(); diff --git a/crates/pgt_workspace/src/workspace/server/sql_function.rs b/crates/pgt_workspace/src/workspace/server/sql_function.rs index 0b230edc..4a1463b7 100644 --- a/crates/pgt_workspace/src/workspace/server/sql_function.rs +++ b/crates/pgt_workspace/src/workspace/server/sql_function.rs @@ -1,7 +1,5 @@ use pgt_text_size::TextRange; -use super::function_utils::{find_option_value, parse_name}; - #[derive(Debug, Clone)] pub struct ArgType { pub schema: Option<String>, @@ -37,14 +35,14 @@ pub fn get_sql_fn_signature(ast: &pgt_query::NodeEnum) -> Option Option Option Option Date: Thu, 14 Aug 2025 12:37:34 +0200 Subject: [PATCH 113/114] fix: add support for named param type `$name` (#475) --- crates/pgt_tokenizer/src/lib.rs | 47 ++++++++++++------- ...enizer__tests__named_param_dollar_raw.snap | 23 +++++++++ crates/pgt_tokenizer/src/token.rs | 3 ++ crates/pgt_workspace/src/workspace/server.rs | 2 - 4 files changed, 55 insertions(+), 20 deletions(-) create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_dollar_raw.snap diff --git a/crates/pgt_tokenizer/src/lib.rs b/crates/pgt_tokenizer/src/lib.rs index 80b66363..83b9ba44 100644 --- a/crates/pgt_tokenizer/src/lib.rs +++ b/crates/pgt_tokenizer/src/lib.rs @@ -186,9 +186,29 @@ impl Cursor<'_> { '$' => { // Dollar quoted strings if is_ident_start(self.first()) || self.first() == '$' { - self.dollar_quoted_string() + // Get the start sequence of the dollar quote, i.e., 'foo' in $foo$hello$foo$ + // if ident does not continue and there is no terminating dollar + // sign, we have a named param `$name` + let mut start = vec![]; + loop { + match self.first() { + '$' => { + self.bump(); + break self.dollar_quoted_string(start); + } + c if is_ident_cont(c) => { + self.bump(); + start.push(c); + } + _ => { + break TokenKind::NamedParam { + kind: NamedParamKind::DollarRaw, + }; + } + } + } } else { - // Parameters + // positional parameter, e.g. 
`$1` while self.first().is_ascii_digit() { self.bump(); } @@ -490,22 +510,7 @@ impl Cursor<'_> { } // https://www.postgresql.org/docs/16/sql-syntax-lexical.html#SQL-SYNTAX-DOLLAR-QUOTING - fn dollar_quoted_string(&mut self) -> TokenKind { - // Get the start sequence of the dollar quote, i.e., 'foo' in - // $foo$hello$foo$ - let mut start = vec![]; - while let Some(c) = self.bump() { - match c { - '$' => { - self.bump(); - break; - } - _ => { - start.push(c); - } - } - } - + fn dollar_quoted_string(&mut self, start: Vec<char>) -> TokenKind { // we have a dollar quoted string deliminated with `$$` if start.is_empty() { loop { @@ -658,6 +663,12 @@ mod tests { assert_debug_snapshot!(result); } + #[test] + fn named_param_dollar_raw() { + let result = lex("select 1 from c where id = $id;"); + assert_debug_snapshot!(result); + } + #[test] fn named_param_colon_raw() { let result = lex("select 1 from c where id = :id;"); diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_dollar_raw.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_dollar_raw.snap new file mode 100644 index 00000000..db0f9412 --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_dollar_raw.snap @@ -0,0 +1,23 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: result +snapshot_kind: text +--- +[ + "select" @ Ident, + " " @ Space, + "1" @ Literal { kind: Int { base: Decimal, empty_int: false } }, + " " @ Space, + "from" @ Ident, + " " @ Space, + "c" @ Ident, + " " @ Space, + "where" @ Ident, + " " @ Space, + "id" @ Ident, + " " @ Space, + "=" @ Eq, + " " @ Space, + "$id" @ NamedParam { kind: DollarRaw }, + ";" @ Semi, +] diff --git a/crates/pgt_tokenizer/src/token.rs b/crates/pgt_tokenizer/src/token.rs index e3dbaee2..da98a229 100644 --- a/crates/pgt_tokenizer/src/token.rs +++ b/crates/pgt_tokenizer/src/token.rs @@ -132,6 +132,9 @@ pub enum NamedParamKind { /// /// Used in: psql ColonIdentifier { terminated: bool }, + + /// e.g. `$name` + DollarRaw, } /// Parsed token. diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs index f4a9561f..f0a39dbf 100644 --- a/crates/pgt_workspace/src/workspace/server.rs +++ b/crates/pgt_workspace/src/workspace/server.rs @@ -511,8 +511,6 @@ impl Workspace for WorkspaceServer { .await .unwrap_or_else(|_| vec![]); - println!("{:#?}", plpgsql_check_results); - for d in plpgsql_check_results { let r = d.span.map(|span| span + range.start()); diagnostics.push( From 6b0a9f864468852e714d875d15128a7d45efc7a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Steinr=C3=B6tter?= Date: Sun, 17 Aug 2025 15:10:15 +0200 Subject: [PATCH 114/114] fix: positional params (#473) pre-processes the query string to replace named params like `@name` with positional params like `$1`, because only the latter is supported by the postgres parser. not super happy with the way it is used for the type checker but also did not want to put that function in another crate. also removed a `println!` statement leftover from a previous pr of mine. also fixed a bug in the lexer that tokenized a cast as a named parameter. 
fixes #405, #454 --- Cargo.lock | 1 + crates/pgt_lexer/src/lexer.rs | 1 + crates/pgt_lexer_codegen/src/syntax_kind.rs | 1 + crates/pgt_tokenizer/src/lib.rs | 70 ++++++++++++------ ..._tests__named_param_colon_raw_vs_cast.snap | 25 +++++++ crates/pgt_tokenizer/src/token.rs | 2 + crates/pgt_workspace/Cargo.toml | 1 + crates/pgt_workspace/src/workspace/server.rs | 3 +- .../src/workspace/server.tests.rs | 54 ++++++++++++++ .../src/workspace/server/pg_query.rs | 73 ++++++++++++++++++- 10 files changed, 205 insertions(+), 26 deletions(-) create mode 100644 crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_colon_raw_vs_cast.snap diff --git a/Cargo.lock b/Cargo.lock index 49143908..94b591f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3189,6 +3189,7 @@ dependencies = [ "pgt_suppressions", "pgt_test_utils", "pgt_text_size", + "pgt_tokenizer", "pgt_typecheck", "pgt_workspace_macros", "rustc-hash 2.1.0", diff --git a/crates/pgt_lexer/src/lexer.rs b/crates/pgt_lexer/src/lexer.rs index ad6db297..3e691229 100644 --- a/crates/pgt_lexer/src/lexer.rs +++ b/crates/pgt_lexer/src/lexer.rs @@ -111,6 +111,7 @@ impl<'a> Lexer<'a> { pgt_tokenizer::TokenKind::Tilde => SyntaxKind::TILDE, pgt_tokenizer::TokenKind::Question => SyntaxKind::QUESTION, pgt_tokenizer::TokenKind::Colon => SyntaxKind::COLON, + pgt_tokenizer::TokenKind::DoubleColon => SyntaxKind::DOUBLE_COLON, pgt_tokenizer::TokenKind::Eq => SyntaxKind::EQ, pgt_tokenizer::TokenKind::Bang => SyntaxKind::BANG, pgt_tokenizer::TokenKind::Lt => SyntaxKind::L_ANGLE, diff --git a/crates/pgt_lexer_codegen/src/syntax_kind.rs b/crates/pgt_lexer_codegen/src/syntax_kind.rs index c671e451..3a005437 100644 --- a/crates/pgt_lexer_codegen/src/syntax_kind.rs +++ b/crates/pgt_lexer_codegen/src/syntax_kind.rs @@ -37,6 +37,7 @@ const PUNCT: &[(&str, &str)] = &[ ("_", "UNDERSCORE"), (".", "DOT"), (":", "COLON"), + ("::", "DOUBLE_COLON"), ("=", "EQ"), ("!", "BANG"), ("-", "MINUS"), diff --git a/crates/pgt_tokenizer/src/lib.rs b/crates/pgt_tokenizer/src/lib.rs index 83b9ba44..16093db8 100644 --- a/crates/pgt_tokenizer/src/lib.rs +++ b/crates/pgt_tokenizer/src/lib.rs @@ -144,32 +144,37 @@ impl Cursor<'_> { } } ':' => { - // Named parameters in psql with different substitution styles. - // - // https://www.postgresql.org/docs/current/app-psql.html#APP-PSQL-INTERPOLATION - match self.first() { - '\'' => { - // Named parameter with colon prefix and single quotes. - self.bump(); - let terminated = self.single_quoted_string(); - let kind = NamedParamKind::ColonString { terminated }; - TokenKind::NamedParam { kind } - } - '"' => { - // Named parameter with colon prefix and double quotes. - self.bump(); - let terminated = self.double_quoted_string(); - let kind = NamedParamKind::ColonIdentifier { terminated }; - TokenKind::NamedParam { kind } - } - c if is_ident_start(c) => { - // Named parameter with colon prefix. - self.eat_while(is_ident_cont); - TokenKind::NamedParam { - kind: NamedParamKind::ColonRaw, + if self.first() == ':' { + self.bump(); + TokenKind::DoubleColon + } else { + // Named parameters in psql with different substitution styles. + // + // https://www.postgresql.org/docs/current/app-psql.html#APP-PSQL-INTERPOLATION + match self.first() { + '\'' => { + // Named parameter with colon prefix and single quotes. + self.bump(); + let terminated = self.single_quoted_string(); + let kind = NamedParamKind::ColonString { terminated }; + TokenKind::NamedParam { kind } + } + '"' => { + // Named parameter with colon prefix and double quotes. 
+ self.bump(); + let terminated = self.double_quoted_string(); + let kind = NamedParamKind::ColonIdentifier { terminated }; + TokenKind::NamedParam { kind } + } + c if is_ident_start(c) => { + // Named parameter with colon prefix. + self.eat_while(is_ident_cont); + TokenKind::NamedParam { + kind: NamedParamKind::ColonRaw, + } } + _ => TokenKind::Colon, } - _ => TokenKind::Colon, } } // One-symbol tokens. @@ -675,6 +680,23 @@ mod tests { assert_debug_snapshot!(result); } + #[test] + fn debug_simple_cast() { + let result = lex("::test"); + assert_debug_snapshot!(result, @r###" + [ + "::" @ DoubleColon, + "test" @ Ident, + ] + "###); + } + + #[test] + fn named_param_colon_raw_vs_cast() { + let result = lex("select 1 from c where id::test = :id;"); + assert_debug_snapshot!(result); + } + #[test] fn named_param_colon_string() { let result = lex("select 1 from c where id = :'id';"); diff --git a/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_colon_raw_vs_cast.snap b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_colon_raw_vs_cast.snap new file mode 100644 index 00000000..ecfd4821 --- /dev/null +++ b/crates/pgt_tokenizer/src/snapshots/pgt_tokenizer__tests__named_param_colon_raw_vs_cast.snap @@ -0,0 +1,25 @@ +--- +source: crates/pgt_tokenizer/src/lib.rs +expression: result +snapshot_kind: text +--- +[ + "select" @ Ident, + " " @ Space, + "1" @ Literal { kind: Int { base: Decimal, empty_int: false } }, + " " @ Space, + "from" @ Ident, + " " @ Space, + "c" @ Ident, + " " @ Space, + "where" @ Ident, + " " @ Space, + "id" @ Ident, + "::" @ DoubleColon, + "test" @ Ident, + " " @ Space, + "=" @ Eq, + " " @ Space, + ":id" @ NamedParam { kind: ColonRaw }, + ";" @ Semi, +] diff --git a/crates/pgt_tokenizer/src/token.rs b/crates/pgt_tokenizer/src/token.rs index da98a229..1312773d 100644 --- a/crates/pgt_tokenizer/src/token.rs +++ b/crates/pgt_tokenizer/src/token.rs @@ -46,6 +46,8 @@ pub enum TokenKind { Minus, /// `:` Colon, + /// `::` + DoubleColon, /// `.` Dot, /// `=` diff --git a/crates/pgt_workspace/Cargo.toml b/crates/pgt_workspace/Cargo.toml index efded47c..860b5133 100644 --- a/crates/pgt_workspace/Cargo.toml +++ b/crates/pgt_workspace/Cargo.toml @@ -33,6 +33,7 @@ pgt_schema_cache = { workspace = true } pgt_statement_splitter = { workspace = true } pgt_suppressions = { workspace = true } pgt_text_size.workspace = true +pgt_tokenizer = { workspace = true } pgt_typecheck = { workspace = true } pgt_workspace_macros = { workspace = true } rustc-hash = { workspace = true } diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs index f0a39dbf..49c306f2 100644 --- a/crates/pgt_workspace/src/workspace/server.rs +++ b/crates/pgt_workspace/src/workspace/server.rs @@ -14,6 +14,7 @@ use document::{ TypecheckDiagnosticsMapper, }; use futures::{StreamExt, stream}; +use pg_query::convert_to_positional_params; use pgt_analyse::{AnalyserOptions, AnalysisFilter}; use pgt_analyser::{Analyser, AnalyserConfig, AnalyserParams}; use pgt_diagnostics::{ @@ -468,7 +469,7 @@ impl Workspace for WorkspaceServer { // Type checking let typecheck_result = pgt_typecheck::check_sql(TypecheckParams { conn: &pool, - sql: id.content(), + sql: convert_to_positional_params(id.content()).as_str(), ast: &ast, tree: &cst, schema_cache: schema_cache.as_ref(), diff --git a/crates/pgt_workspace/src/workspace/server.tests.rs b/crates/pgt_workspace/src/workspace/server.tests.rs index ef5ba267..894d1042 100644 --- 
a/crates/pgt_workspace/src/workspace/server.tests.rs
+++ b/crates/pgt_workspace/src/workspace/server.tests.rs
@@ -277,3 +277,57 @@ async fn test_dedupe_diagnostics(test_db: PgPool) {
         Some(TextRange::new(115.into(), 210.into()))
     );
 }
+
+#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")]
+async fn test_positional_params(test_db: PgPool) {
+    let mut conf = PartialConfiguration::init();
+    conf.merge_with(PartialConfiguration {
+        db: Some(PartialDatabaseConfiguration {
+            database: Some(
+                test_db
+                    .connect_options()
+                    .get_database()
+                    .unwrap()
+                    .to_string(),
+            ),
+            ..Default::default()
+        }),
+        ..Default::default()
+    });
+
+    let workspace = get_test_workspace(Some(conf)).expect("Unable to create test workspace");
+
+    let path = PgTPath::new("test.sql");
+
+    let setup_sql = r"
+        create table users (
+            id serial primary key,
+            name text not null,
+            email text not null
+        );
+    ";
+    test_db.execute(setup_sql).await.expect("setup sql failed");
+
+    let content = r#"select * from users where id = @one and name = :two and email = :'three';"#;
+
+    workspace
+        .open_file(OpenFileParams {
+            path: path.clone(),
+            content: content.into(),
+            version: 1,
+        })
+        .expect("Unable to open test file");
+
+    let diagnostics = workspace
+        .pull_diagnostics(crate::workspace::PullDiagnosticsParams {
+            path: path.clone(),
+            categories: RuleCategories::all(),
+            max_diagnostics: 100,
+            only: vec![],
+            skip: vec![],
+        })
+        .expect("Unable to pull diagnostics")
+        .diagnostics;
+
+    assert_eq!(diagnostics.len(), 0, "Expected no diagnostics");
+}
diff --git a/crates/pgt_workspace/src/workspace/server/pg_query.rs b/crates/pgt_workspace/src/workspace/server/pg_query.rs
index 05f1425d..bd9ffdfc 100644
--- a/crates/pgt_workspace/src/workspace/server/pg_query.rs
+++ b/crates/pgt_workspace/src/workspace/server/pg_query.rs
@@ -1,9 +1,11 @@
+use std::collections::HashMap;
 use std::num::NonZeroUsize;
 use std::sync::{Arc, Mutex};
 
 use lru::LruCache;
 use pgt_query_ext::diagnostics::*;
 use pgt_text_size::TextRange;
+use pgt_tokenizer::tokenize;
 
 use super::statement_identifier::StatementId;
 
@@ -37,7 +39,7 @@ impl PgQueryStore {
         }
 
         let r = Arc::new(
-            pgt_query::parse(statement.content())
+            pgt_query::parse(&convert_to_positional_params(statement.content()))
                 .map_err(SyntaxDiagnostic::from)
                 .and_then(|ast| {
                     ast.into_root().ok_or_else(|| {
@@ -87,10 +89,79 @@ impl PgQueryStore {
     }
 }
 
+/// Converts named parameters in a SQL query string to positional parameters.
+///
+/// This function scans the input SQL string for named parameters (e.g., `@param`, `:param`, `:'param'`)
+/// and replaces them with positional parameters (e.g., `$1`, `$2`, etc.).
+///
+/// It maintains the original spacing by padding each replacement to the length of the parameter it replaces.
+///
+/// Useful for preparing SQL queries for parsing or execution where named parameters are not supported.
+pub fn convert_to_positional_params(text: &str) -> String {
+    let mut result = String::with_capacity(text.len());
+    let mut param_mapping: HashMap<&str, usize> = HashMap::new();
+    let mut param_index = 1;
+    let mut position = 0;
+
+    for token in tokenize(text) {
+        let token_len = token.len as usize;
+        let token_text = &text[position..position + token_len];
+
+        if matches!(token.kind, pgt_tokenizer::TokenKind::NamedParam { .. }) {
+            let idx = match param_mapping.get(token_text) {
+                Some(&index) => index,
+                None => {
+                    let index = param_index;
+                    param_mapping.insert(token_text, index);
+                    param_index += 1;
+                    index
+                }
+            };
+
+            let replacement = format!("${}", idx);
+            let original_len = token_text.len();
+            let replacement_len = replacement.len();
+
+            result.push_str(&replacement);
+
+            // maintain original spacing
+            if replacement_len < original_len {
+                result.push_str(&" ".repeat(original_len - replacement_len));
+            }
+        } else {
+            result.push_str(token_text);
+        }
+
+        position += token_len;
+    }
+
+    result
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
 
+    #[test]
+    fn test_convert_to_positional_params() {
+        let input = "select * from users where id = @one and name = :two and email = :'three';";
+        let result = convert_to_positional_params(input);
+        assert_eq!(
+            result,
+            "select * from users where id = $1   and name = $2   and email = $3      ;"
+        );
+    }
+
+    #[test]
+    fn test_convert_to_positional_params_with_duplicates() {
+        let input = "select * from users where first_name = @one and starts_with(email, @one) and created_at > @two;";
+        let result = convert_to_positional_params(input);
+        assert_eq!(
+            result,
+            "select * from users where first_name = $1   and starts_with(email, $1  ) and created_at > $2  ;"
+        );
+    }
+
     #[test]
     fn test_plpgsql_syntax_error() {
         let input = "