diff --git a/.cirrus.star b/.cirrus.star index 36233872d1e50..e9bb672b95936 100644 --- a/.cirrus.star +++ b/.cirrus.star @@ -7,7 +7,7 @@ https://github.com/bazelbuild/starlark/blob/master/spec.md See also .cirrus.yml and src/tools/ci/README """ -load("cirrus", "env", "fs") +load("cirrus", "env", "fs", "re", "yaml") def main(): @@ -18,19 +18,36 @@ def main(): 1) the contents of .cirrus.yml - 2) if defined, the contents of the file referenced by the, repository + 2) computed environment variables + + 3) if defined, the contents of the file referenced by the, repository level, REPO_CI_CONFIG_GIT_URL variable (see https://cirrus-ci.org/guide/programming-tasks/#fs for the accepted format) - 3) .cirrus.tasks.yml + 4) .cirrus.tasks.yml """ output = "" # 1) is evaluated implicitly + # Add 2) + additional_env = compute_environment_vars() + env_fmt = """ +### +# Computed environment variables start here +### +{0} +### +# Computed environment variables end here +### +""" + output += env_fmt.format(yaml.dumps({'env': additional_env})) + + + # Add 3) repo_config_url = env.get("REPO_CI_CONFIG_GIT_URL") if repo_config_url != None: print("loading additional configuration from \"{}\"".format(repo_config_url)) @@ -38,12 +55,75 @@ def main(): else: output += "\n# REPO_CI_CONFIG_URL was not set\n" - # Add 3) + + # Add 4) output += config_from(".cirrus.tasks.yml") + return output +def compute_environment_vars(): + cenv = {} + + ### + # Some tasks are manually triggered by default because they might use too + # many resources for users of free Cirrus credits, but they can be + # triggered automatically by naming them in an environment variable e.g. + # REPO_CI_AUTOMATIC_TRIGGER_TASKS="task_name other_task" under "Repository + # Settings" on Cirrus CI's website. + + default_manual_trigger_tasks = ['mingw', 'netbsd', 'openbsd'] + + repo_ci_automatic_trigger_tasks = env.get('REPO_CI_AUTOMATIC_TRIGGER_TASKS', '') + for task in default_manual_trigger_tasks: + name = 'CI_TRIGGER_TYPE_' + task.upper() + if repo_ci_automatic_trigger_tasks.find(task) != -1: + value = 'automatic' + else: + value = 'manual' + cenv[name] = value + ### + + ### + # Parse "ci-os-only:" tag in commit message and set + # CI_{$OS}_ENABLED variable for each OS + + # We want to disable SanityCheck if testing just a specific OS. This + # shortens push-wait-for-ci cycle time a bit when debugging operating + # system specific failures. Just treating it as an OS in that case + # suffices. + + operating_systems = [ + 'compilerwarnings', + 'freebsd', + 'linux', + 'macos', + 'mingw', + 'netbsd', + 'openbsd', + 'sanitycheck', + 'windows', + ] + commit_message = env.get('CIRRUS_CHANGE_MESSAGE') + match_re = r"(^|.*\n)ci-os-only: ([^\n]+)($|\n.*)" + + # re.match() returns an array with a tuple of (matched-string, match_1, ...) 
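+    # For example (illustrative value, not from this patch): with
+    # CIRRUS_CHANGE_MESSAGE = "Fix a bug\nci-os-only: freebsd, macos",
+    # m[0][2] below is "freebsd, macos", os_only_list becomes
+    # ["freebsd", "macos"], and every other CI_*_ENABLED variable ends up false.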
+ m = re.match(match_re, commit_message) + if m and len(m) > 0: + os_only = m[0][2] + os_only_list = re.split(r'[, ]+', os_only) + else: + os_only_list = operating_systems + + for os in operating_systems: + os_enabled = os in os_only_list + cenv['CI_{0}_ENABLED'.format(os.upper())] = os_enabled + ### + + return cenv + + def config_from(config_src): """return contents of config file `config_src`, surrounded by markers indicating start / end of the included file diff --git a/.cirrus.tasks.yml b/.cirrus.tasks.yml index 1a366975d824f..eca9d62fc2297 100644 --- a/.cirrus.tasks.yml +++ b/.cirrus.tasks.yml @@ -31,6 +31,31 @@ env: TEMP_CONFIG: ${CIRRUS_WORKING_DIR}/src/tools/ci/pg_ci_base.conf PG_TEST_EXTRA: kerberos ldap ssl libpq_encryption load_balance oauth + # Postgres config args for the meson builds, shared between all meson tasks + # except the 'SanityCheck' task + MESON_COMMON_PG_CONFIG_ARGS: -Dcassert=true -Dinjection_points=true + + # Meson feature flags shared by all meson tasks, except: + # SanityCheck: uses almost no dependencies. + # Windows - VS: has fewer dependencies than listed here, so defines its own. + # Linux: uses the 'auto' feature option to test meson feature autodetection. + MESON_COMMON_FEATURES: >- + -Dauto_features=disabled + -Dldap=enabled + -Dssl=openssl + -Dtap_tests=enabled + -Dplperl=enabled + -Dplpython=enabled + -Ddocs=enabled + -Dicu=enabled + -Dlibxml=enabled + -Dlibxslt=enabled + -Dlz4=enabled + -Dpltcl=enabled + -Dreadline=enabled + -Dzlib=enabled + -Dzstd=enabled + # What files to preserve in case tests fail on_failure_ac: &on_failure_ac @@ -72,7 +97,7 @@ task: # push-wait-for-ci cycle time a bit when debugging operating system specific # failures. Uses skip instead of only_if, as cirrus otherwise warns about # only_if conditions not matching. - skip: $CIRRUS_CHANGE_MESSAGE =~ '.*\nci-os-only:.*' + skip: $CI_SANITYCHECK_ENABLED == false env: CPUS: 4 @@ -164,10 +189,19 @@ task: -c debug_parallel_query=regress PG_TEST_PG_UPGRADE_MODE: --link + MESON_FEATURES: >- + -Ddtrace=enabled + -Dgssapi=enabled + -Dlibcurl=enabled + -Dnls=enabled + -Dpam=enabled + -Dtcl_version=tcl86 + -Duuid=bsd + <<: *freebsd_task_template depends_on: SanityCheck - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\nci-os-only:.*' || $CIRRUS_CHANGE_MESSAGE =~ '.*\nci-os-only:[^\n]*freebsd.*' + only_if: $CI_FREEBSD_ENABLED sysinfo_script: | id @@ -196,10 +230,10 @@ task: configure_script: | su postgres <<-EOF meson setup \ + ${MESON_COMMON_PG_CONFIG_ARGS} \ --buildtype=debug \ - -Dcassert=true -Dinjection_points=true \ - -Duuid=bsd -Dtcl_version=tcl86 -Ddtrace=auto \ -Dextra_lib_dirs=/usr/local/lib -Dextra_include_dirs=/usr/local/include/ \ + ${MESON_COMMON_FEATURES} ${MESON_FEATURES} \ build EOF build_script: su postgres -c 'ninja -C build -j${BUILD_JOBS} ${MBUILD_TARGET}' @@ -239,7 +273,6 @@ task: task: depends_on: SanityCheck - trigger_type: manual env: # Below are experimentally derived to be a decent choice. @@ -257,7 +290,9 @@ task: matrix: - name: NetBSD - Meson - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\nci-os-only:.*' || $CIRRUS_CHANGE_MESSAGE =~ '.*\nci-os-only:[^\n]*netbsd.*' + # See REPO_CI_AUTOMATIC_TRIGGER_TASKS in .cirrus.star + trigger_type: $CI_TRIGGER_TYPE_NETBSD + only_if: $CI_NETBSD_ENABLED env: OS_NAME: netbsd IMAGE_FAMILY: pg-ci-netbsd-postgres @@ -269,18 +304,31 @@ task: LC_ALL: "C" # -Duuid is not set for the NetBSD, see the comment below, above # configure_script, for more information. 
+ MESON_FEATURES: >- + -Dgssapi=enabled + -Dlibcurl=enabled + -Dnls=enabled + -Dpam=enabled + setup_additional_packages_script: | #pkgin -y install ... <<: *netbsd_task_template - name: OpenBSD - Meson - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\nci-os-only:.*' || $CIRRUS_CHANGE_MESSAGE =~ '.*\nci-os-only:[^\n]*openbsd.*' + # See REPO_CI_AUTOMATIC_TRIGGER_TASKS in .cirrus.star + trigger_type: $CI_TRIGGER_TYPE_OPENBSD + only_if: $CI_OPENBSD_ENABLED env: OS_NAME: openbsd IMAGE_FAMILY: pg-ci-openbsd-postgres PKGCONFIG_PATH: '/usr/lib/pkgconfig:/usr/local/lib/pkgconfig' - UUID: -Duuid=e2fs - TCL: -Dtcl_version=tcl86 + + MESON_FEATURES: >- + -Dbsd_auth=enabled + -Dlibcurl=enabled + -Dtcl_version=tcl86 + -Duuid=e2fs + setup_additional_packages_script: | #pkg_add -I ... # Always core dump to ${CORE_DUMP_DIR} @@ -314,11 +362,10 @@ task: configure_script: | su postgres <<-EOF meson setup \ + ${MESON_COMMON_PG_CONFIG_ARGS} \ --buildtype=debugoptimized \ --pkg-config-path ${PKGCONFIG_PATH} \ - -Dcassert=true -Dinjection_points=true \ - -Dssl=openssl ${UUID} ${TCL} \ - -DPG_TEST_EXTRA="$PG_TEST_EXTRA" \ + ${MESON_COMMON_FEATURES} ${MESON_FEATURES} \ build EOF @@ -365,10 +412,6 @@ LINUX_CONFIGURE_FEATURES: &LINUX_CONFIGURE_FEATURES >- --with-uuid=ossp --with-zstd -LINUX_MESON_FEATURES: &LINUX_MESON_FEATURES >- - -Dllvm=enabled - -Duuid=e2fs - # Check SPECIAL in the matrix: below task: @@ -409,12 +452,13 @@ task: LLVM_CONFIG: llvm-config-16 LINUX_CONFIGURE_FEATURES: *LINUX_CONFIGURE_FEATURES - LINUX_MESON_FEATURES: *LINUX_MESON_FEATURES + LINUX_MESON_FEATURES: >- + -Duuid=e2fs <<: *linux_task_template depends_on: SanityCheck - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\nci-os-only:.*' || $CIRRUS_CHANGE_MESSAGE =~ '.*\nci-os-only:[^\n]*linux.*' + only_if: $CI_LINUX_ENABLED ccache_cache: folder: ${CCACHE_DIR} @@ -495,6 +539,7 @@ task: # are typically printed in the server log # - Test both 64bit and 32 bit builds # - uses io_method=io_uring + # - Uses meson feature autodetection - name: Linux - Debian Bookworm - Meson env: @@ -506,9 +551,9 @@ task: configure_script: | su postgres <<-EOF meson setup \ + ${MESON_COMMON_PG_CONFIG_ARGS} \ --buildtype=debug \ - -Dcassert=true -Dinjection_points=true \ - ${LINUX_MESON_FEATURES} \ + ${LINUX_MESON_FEATURES} -Dllvm=enabled \ build EOF @@ -518,13 +563,11 @@ task: su postgres <<-EOF export CC='ccache gcc -m32' meson setup \ + ${MESON_COMMON_PG_CONFIG_ARGS} \ --buildtype=debug \ - -Dcassert=true -Dinjection_points=true \ - ${LINUX_MESON_FEATURES} \ - -Dllvm=disabled \ --pkg-config-path /usr/lib/i386-linux-gnu/pkgconfig/ \ -DPERL=perl5.36-i386-linux-gnu \ - -Dlibnuma=disabled \ + ${LINUX_MESON_FEATURES} -Dlibnuma=disabled \ build-32 EOF @@ -588,6 +631,14 @@ task: CCACHE_DIR: ${HOME}/ccache MACPORTS_CACHE: ${HOME}/macports-cache + MESON_FEATURES: >- + -Dbonjour=enabled + -Ddtrace=enabled + -Dgssapi=enabled + -Dlibcurl=enabled + -Dnls=enabled + -Duuid=e2fs + MACOS_PACKAGE_LIST: >- ccache icu @@ -613,7 +664,7 @@ task: <<: *macos_task_template depends_on: SanityCheck - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\nci-os-only:.*' || $CIRRUS_CHANGE_MESSAGE =~ '.*\nci-os-only:[^\n]*(macos|darwin|osx).*' + only_if: $CI_MACOS_ENABLED sysinfo_script: | id @@ -657,11 +708,11 @@ task: configure_script: | export PKG_CONFIG_PATH="/opt/local/lib/pkgconfig/" meson setup \ + ${MESON_COMMON_PG_CONFIG_ARGS} \ --buildtype=debug \ -Dextra_include_dirs=/opt/local/include \ -Dextra_lib_dirs=/opt/local/lib \ - -Dcassert=true -Dinjection_points=true \ - -Duuid=e2fs -Ddtrace=auto \ + 
${MESON_COMMON_FEATURES} ${MESON_FEATURES} \ build build_script: ninja -C build -j${BUILD_JOBS} ${MBUILD_TARGET} @@ -716,10 +767,18 @@ task: # 0x8001 is SEM_FAILCRITICALERRORS | SEM_NOOPENFILEERRORBOX CIRRUS_WINDOWS_ERROR_MODE: 0x8001 + MESON_FEATURES: + -Dauto_features=disabled + -Dldap=enabled + -Dssl=openssl + -Dtap_tests=enabled + -Dplperl=enabled + -Dplpython=enabled + <<: *windows_task_template depends_on: SanityCheck - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\nci-os-only:.*' || $CIRRUS_CHANGE_MESSAGE =~ '.*\nci-os-only:[^\n]*windows.*' + only_if: $CI_WINDOWS_ENABLED setup_additional_packages_script: | REM choco install -y --no-progress ... @@ -730,10 +789,9 @@ task: echo 127.0.0.3 pg-loadbalancetest >> c:\Windows\System32\Drivers\etc\hosts type c:\Windows\System32\Drivers\etc\hosts - # Use /DEBUG:FASTLINK to avoid high memory usage during linking configure_script: | vcvarsall x64 - meson setup --backend ninja --buildtype debug -Dc_link_args=/DEBUG:FASTLINK -Dcassert=true -Dinjection_points=true -Db_pch=true -Dextra_lib_dirs=c:\openssl\1.1\lib -Dextra_include_dirs=c:\openssl\1.1\include -DTAR=%TAR% build + meson setup --backend ninja %MESON_COMMON_PG_CONFIG_ARGS% --buildtype debug -Db_pch=true -Dextra_lib_dirs=c:\openssl\1.1\lib -Dextra_include_dirs=c:\openssl\1.1\include -DTAR=%TAR% %MESON_FEATURES% build build_script: | vcvarsall x64 @@ -755,13 +813,11 @@ task: << : *WINDOWS_ENVIRONMENT_BASE name: Windows - Server 2019, MinGW64 - Meson - # due to resource constraints we don't run this task by default for now - trigger_type: manual - # worth using only_if despite being manual, otherwise this task will show up - # when e.g. ci-os-only: linux is used. - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\nci-os-only:.*' || $CIRRUS_CHANGE_MESSAGE =~ '.*\nci-os-only:[^\n]*mingw.*' - # otherwise it'll be sorted before other tasks + # See REPO_CI_AUTOMATIC_TRIGGER_TASKS in .cirrus.star. + trigger_type: $CI_TRIGGER_TYPE_MINGW + depends_on: SanityCheck + only_if: $CI_MINGW_ENABLED env: TEST_JOBS: 4 # higher concurrency causes occasional failures @@ -777,6 +833,11 @@ task: CHERE_INVOKING: 1 BASH: C:\msys64\usr\bin\bash.exe -l + # Keep -Dnls explicitly disabled, as the number of files it creates causes a + # noticeable slowdown. + MESON_FEATURES: >- + -Dnls=disabled + <<: *windows_task_template ccache_cache: @@ -791,9 +852,8 @@ task: %BASH% -c "where perl" %BASH% -c "perl --version" - # disable -Dnls as the number of files it creates cause a noticable slowdown configure_script: | - %BASH% -c "meson setup -Ddebug=true -Doptimization=g -Dcassert=true -Dinjection_points=true -Db_pch=true -Dnls=disabled -DTAR=%TAR% build" + %BASH% -c "meson setup %MESON_COMMON_PG_CONFIG_ARGS% -Ddebug=true -Doptimization=g -Db_pch=true %MESON_COMMON_FEATURES% %MESON_FEATURES% -DTAR=%TAR% build" build_script: | %BASH% -c "ninja -C build ${MBUILD_TARGET}" @@ -815,10 +875,9 @@ task: # To limit unnecessary work only run this once the SanityCheck # succeeds. This is particularly important for this task as we intentionally - # use always: to continue after failures. Task that did not run count as a - # success, so we need to recheck SanityChecks's condition here ... + # use always: to continue after failures. 
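+  # For example (illustrative): a commit message tagged "ci-os-only: freebsd"
+  # makes compute_environment_vars() set CI_COMPILERWARNINGS_ENABLED to false,
+  # so this task is skipped, while "ci-os-only: compilerwarnings" runs it alone.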
depends_on: SanityCheck - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\nci-os-only:.*' + only_if: $CI_COMPILERWARNINGS_ENABLED env: CPUS: 4 @@ -831,7 +890,6 @@ task: CCACHE_DIR: "/tmp/ccache_dir" LINUX_CONFIGURE_FEATURES: *LINUX_CONFIGURE_FEATURES - LINUX_MESON_FEATURES: *LINUX_MESON_FEATURES # GCC emits a warning for llvm-14, so switch to a newer one. LLVM_CONFIG: llvm-config-16 diff --git a/.cirrus.yml b/.cirrus.yml index 33c6e481d746a..3f75852e84ecb 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -10,12 +10,20 @@ # # 1) the contents of this file # -# 2) if defined, the contents of the file referenced by the, repository +# 2) computed environment variables +# +# Used to enable/disable tasks based on the execution environment. See +# .cirrus.star: compute_environment_vars() +# +# 3) if defined, the contents of the file referenced by the, repository # level, REPO_CI_CONFIG_GIT_URL variable (see # https://cirrus-ci.org/guide/programming-tasks/#fs for the accepted # format) # -# 3) .cirrus.tasks.yml +# This allows running tasks in a different execution environment than the +# default, e.g. to have sufficient resources for cfbot. +# +# 4) .cirrus.tasks.yml # # This composition is done by .cirrus.star diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index f8526d4d1a9c2..f83e2fc658664 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -14,6 +14,9 @@ # # $ git log --pretty=format:"%H # %cd%n# %s" $PGINDENTGITHASH -1 --date=iso +1d1612aec7688139e1a5506df1366b4b6a69605d # 2025-07-29 09:10:41 -0400 +# Run pgindent. + 73873805fb3627cb23937c750fa83ffd8f16fc6c # 2025-07-25 16:36:44 -0400 # Run pgindent on the changes of the previous patch. diff --git a/.gitattributes b/.gitattributes index 8df6b75e653b8..4e26bbfb14552 100644 --- a/.gitattributes +++ b/.gitattributes @@ -12,8 +12,8 @@ *.xsl whitespace=space-before-tab,trailing-space,tab-in-indent # Avoid confusing ASCII underlines with leftover merge conflict markers -README conflict-marker-size=32 -README.* conflict-marker-size=32 +README conflict-marker-size=48 +README.* conflict-marker-size=48 # Certain data files that contain special whitespace, and other special cases *.data -whitespace diff --git a/config/llvm.m4 b/config/llvm.m4 index fa4bedd9370fc..9d6fe8199e364 100644 --- a/config/llvm.m4 +++ b/config/llvm.m4 @@ -4,7 +4,7 @@ # ----------------- # # Look for the LLVM installation, check that it's new enough, set the -# corresponding LLVM_{CFLAGS,CXXFLAGS,BINPATH} and LDFLAGS +# corresponding LLVM_{CFLAGS,CXXFLAGS,BINPATH,LIBS} # variables. Also verify that CLANG is available, to transform C # into bitcode. # @@ -55,7 +55,7 @@ AC_DEFUN([PGAC_LLVM_SUPPORT], for pgac_option in `$LLVM_CONFIG --ldflags`; do case $pgac_option in - -L*) LDFLAGS="$LDFLAGS $pgac_option";; + -L*) LLVM_LIBS="$LLVM_LIBS $pgac_option";; esac done diff --git a/config/prep_buildtree b/config/prep_buildtree index a0eabd3dee288..e148535ac112e 100644 --- a/config/prep_buildtree +++ b/config/prep_buildtree @@ -22,18 +22,14 @@ sourcetree=`cd $1 && pwd` buildtree=`cd ${2:-'.'} && pwd` -# We must not auto-create the subdirectories holding built documentation. -# If we did, it would interfere with installation of prebuilt docs from -# the source tree, if a VPATH build is done from a distribution tarball. -# See bug #5595. 
-for item in `find "$sourcetree" -type d \( \( -name CVS -prune \) -o \( -name .git -prune \) -o -print \) | grep -v "$sourcetree/doc/src/sgml/\+"`; do +for item in `find "$sourcetree"/config "$sourcetree"/contrib "$sourcetree"/doc "$sourcetree"/src -type d -print`; do subdir=`expr "$item" : "$sourcetree\(.*\)"` if test ! -d "$buildtree/$subdir"; then mkdir -p "$buildtree/$subdir" || exit 1 fi done -for item in `find "$sourcetree" -name Makefile -print -o -name GNUmakefile -print | grep -v "$sourcetree/doc/src/sgml/images/"`; do +for item in "$sourcetree"/Makefile `find "$sourcetree"/config "$sourcetree"/contrib "$sourcetree"/doc "$sourcetree"/src -name Makefile -print -o -name GNUmakefile -print`; do filename=`expr "$item" : "$sourcetree\(.*\)"` if test ! -f "${item}.in"; then if cmp "$item" "$buildtree/$filename" >/dev/null 2>&1; then : ; else diff --git a/config/programs.m4 b/config/programs.m4 index c73d9307ea8a9..e57fe4907b844 100644 --- a/config/programs.m4 +++ b/config/programs.m4 @@ -290,8 +290,8 @@ AC_DEFUN([PGAC_CHECK_LIBCURL], pgac_save_LDFLAGS=$LDFLAGS pgac_save_LIBS=$LIBS - CPPFLAGS="$LIBCURL_CPPFLAGS $CPPFLAGS" - LDFLAGS="$LIBCURL_LDFLAGS $LDFLAGS" + CPPFLAGS="$CPPFLAGS $LIBCURL_CPPFLAGS" + LDFLAGS="$LDFLAGS $LIBCURL_LDFLAGS" AC_CHECK_HEADER(curl/curl.h, [], [AC_MSG_ERROR([header file <curl/curl.h> is required for --with-libcurl])]) diff --git a/configure b/configure index 6d7c22e153fea..39c68161ceced 100755 --- a/configure +++ b/configure @@ -4475,190 +4475,49 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C99" >&5 -$as_echo_n "checking for $CC option to accept ISO C99... " >&6; } -if ${ac_cv_prog_cc_c99+:} false; then : + +# Detect option needed for C11 +# loosely modeled after code in later Autoconf versions +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C11" >&5 +$as_echo_n "checking for $CC option to accept ISO C11... " >&6; } + +if ${pgac_cv_prog_cc_c11+:} false; then : $as_echo_n "(cached) " >&6 else - ac_cv_prog_cc_c99=no -ac_save_CC=$CC -cat confdefs.h - <<_ACEOF >conftest.$ac_ext + pgac_cv_prog_cc_c11=no +pgac_save_CC=$CC +for pgac_arg in '' '-std=gnu11' '-std=c11'; do + CC="$pgac_save_CC $pgac_arg" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include <stdarg.h> -#include <stdbool.h> -#include <stdlib.h> -#include <wchar.h> -#include <stdio.h> - -// Check varargs macros. These examples are taken from C99 6.10.3.5. -#define debug(...) fprintf (stderr, __VA_ARGS__) -#define showlist(...) puts (#__VA_ARGS__) -#define report(test,...) ((test) ? puts (#test) : printf (__VA_ARGS__)) -static void -test_varargs_macros (void) -{ - int x = 1234; - int y = 5678; - debug ("Flag"); - debug ("X = %d\n", x); - showlist (The first, second, and third items.); - report (x>y, "x is %d but y is %d", x, y); -} - -// Check long long types. 
-#define BIG64 18446744073709551615ull -#define BIG32 4294967295ul -#define BIG_OK (BIG64 / BIG32 == 4294967297ull && BIG64 % BIG32 == 0) -#if !BIG_OK - your preprocessor is broken; -#endif -#if BIG_OK -#else - your preprocessor is broken; +#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 201112L +# error "Compiler does not advertise C11 conformance" #endif -static long long int bignum = -9223372036854775807LL; -static unsigned long long int ubignum = BIG64; - -struct incomplete_array -{ - int datasize; - double data[]; -}; - -struct named_init { - int number; - const wchar_t *name; - double average; -}; - -typedef const char *ccp; - -static inline int -test_restrict (ccp restrict text) -{ - // See if C++-style comments work. - // Iterate through items via the restricted pointer. - // Also check for declarations in for loops. - for (unsigned int i = 0; *(text+i) != '\0'; ++i) - continue; - return 0; -} - -// Check varargs and va_copy. -static void -test_varargs (const char *format, ...) -{ - va_list args; - va_start (args, format); - va_list args_copy; - va_copy (args_copy, args); - - const char *str; - int number; - float fnumber; - - while (*format) - { - switch (*format++) - { - case 's': // string - str = va_arg (args_copy, const char *); - break; - case 'd': // int - number = va_arg (args_copy, int); - break; - case 'f': // float - fnumber = va_arg (args_copy, double); - break; - default: - break; - } - } - va_end (args_copy); - va_end (args); -} - -int -main () -{ - - // Check bool. - _Bool success = false; - - // Check restrict. - if (test_restrict ("String literal") == 0) - success = true; - char *restrict newvar = "Another string"; - - // Check varargs. - test_varargs ("s, d' f .", "string", 65, 34.234); - test_varargs_macros (); - - // Check flexible array members. - struct incomplete_array *ia = - malloc (sizeof (struct incomplete_array) + (sizeof (double) * 10)); - ia->datasize = 10; - for (int i = 0; i < ia->datasize; ++i) - ia->data[i] = i * 1.234; - - // Check named initializers. - struct named_init ni = { - .number = 34, - .name = L"Test wide string", - .average = 543.34343, - }; - - ni.number = 58; - - int dynamic_array[ni.number]; - dynamic_array[ni.number - 1] = 543; - - // work around unused variable warnings - return (!success || bignum == 0LL || ubignum == 0uLL || newvar[0] == 'x' - || dynamic_array[ni.number - 1] != 543); - - ; - return 0; -} _ACEOF -for ac_arg in '' -std=gnu99 -std=c99 -c99 -AC99 -D_STDC_C99= -qlanglvl=extc99 -do - CC="$ac_save_CC $ac_arg" - if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_c99=$ac_arg +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_prog_cc_c11=$pgac_arg fi -rm -f core conftest.err conftest.$ac_objext - test "x$ac_cv_prog_cc_c99" != "xno" && break +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + test x"$pgac_cv_prog_cc_c11" != x"no" && break done -rm -f conftest.$ac_ext -CC=$ac_save_CC - +CC=$pgac_save_CC fi -# AC_CACHE_VAL -case "x$ac_cv_prog_cc_c99" in - x) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 -$as_echo "none needed" >&6; } ;; - xno) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 -$as_echo "unsupported" >&6; } ;; - *) - CC="$CC $ac_cv_prog_cc_c99" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c99" >&5 -$as_echo "$ac_cv_prog_cc_c99" >&6; } ;; -esac -if test "x$ac_cv_prog_cc_c99" != xno; then : -fi - - -# Error out if the compiler does not support C99, as the codebase -# relies on that. 
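A standalone sketch of the conformance test the new C11 probe compiles (illustrative, not part of the patch): configure writes out a file like the one below and tries to build it once per candidate option, '' first, then -std=gnu11, then -std=c11, recording the first one that works in pgac_cv_prog_cc_c11 (an empty string meaning no option was needed). Only __STDC_VERSION__ is consulted.

/* c11probe.c - minimal equivalent of the conftest program above */
#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 201112L
# error "Compiler does not advertise C11 conformance"
#endif

int
main(void)
{
	return 0;
}

Running "cc -c c11probe.c", then "cc -std=gnu11 -c c11probe.c", and so on, mirrors the pgac_arg loop above.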
-if test "$ac_cv_prog_cc_c99" = no; then - as_fn_error $? "C compiler \"$CC\" does not support C99" "$LINENO" 5 +if test x"$pgac_cv_prog_cc_c11" = x"no"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } + as_fn_error $? "C compiler \"$CC\" does not support C11" "$LINENO" 5 +elif test x"$pgac_cv_prog_cc_c11" = x""; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_c11" >&5 +$as_echo "$pgac_cv_prog_cc_c11" >&6; } + CC="$CC $pgac_cv_prog_cc_c11" fi + ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' @@ -4920,7 +4779,6 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu # Check if it's Intel's compiler, which (usually) pretends to be gcc, # but has idiosyncrasies of its own. We assume icc will define # __INTEL_COMPILER regardless of CFLAGS. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -5194,7 +5052,7 @@ fi for pgac_option in `$LLVM_CONFIG --ldflags`; do case $pgac_option in - -L*) LDFLAGS="$LDFLAGS $pgac_option";; + -L*) LLVM_LIBS="$LLVM_LIBS $pgac_option";; esac done @@ -9436,12 +9294,12 @@ fi # Note the user could also set XML2_CFLAGS/XML2_LIBS directly for pgac_option in $XML2_CFLAGS; do case $pgac_option in - -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";; + -I*|-D*) INCLUDES="$INCLUDES $pgac_option";; esac done for pgac_option in $XML2_LIBS; do case $pgac_option in - -L*) LDFLAGS="$LDFLAGS $pgac_option";; + -L*) LIBDIRS="$LIBDIRS $pgac_option";; esac done fi @@ -9666,12 +9524,12 @@ fi # note that -llz4 will be added by AC_CHECK_LIB below. for pgac_option in $LZ4_CFLAGS; do case $pgac_option in - -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";; + -I*|-D*) INCLUDES="$INCLUDES $pgac_option";; esac done for pgac_option in $LZ4_LIBS; do case $pgac_option in - -L*) LDFLAGS="$LDFLAGS $pgac_option";; + -L*) LIBDIRS="$LIBDIRS $pgac_option";; esac done fi @@ -9807,12 +9665,12 @@ fi # note that -lzstd will be added by AC_CHECK_LIB below. for pgac_option in $ZSTD_CFLAGS; do case $pgac_option in - -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";; + -I*|-D*) INCLUDES="$INCLUDES $pgac_option";; esac done for pgac_option in $ZSTD_LIBS; do case $pgac_option in - -L*) LDFLAGS="$LDFLAGS $pgac_option";; + -L*) LIBDIRS="$LIBDIRS $pgac_option";; esac done fi @@ -12723,8 +12581,8 @@ if test "$with_libcurl" = yes ; then pgac_save_LDFLAGS=$LDFLAGS pgac_save_LIBS=$LIBS - CPPFLAGS="$LIBCURL_CPPFLAGS $CPPFLAGS" - LDFLAGS="$LIBCURL_LDFLAGS $LDFLAGS" + CPPFLAGS="$CPPFLAGS $LIBCURL_CPPFLAGS" + LDFLAGS="$LDFLAGS $LIBCURL_LDFLAGS" ac_fn_c_check_header_mongrel "$LINENO" "curl/curl.h" "ac_cv_header_curl_curl_h" "$ac_includes_default" if test "x$ac_cv_header_curl_curl_h" = xyes; then : @@ -16658,7 +16516,7 @@ fi if test "$with_icu" = yes; then ac_save_CPPFLAGS=$CPPFLAGS - CPPFLAGS="$ICU_CFLAGS $CPPFLAGS" + CPPFLAGS="$CPPFLAGS $ICU_CFLAGS" # Verify we have ICU's header files ac_fn_c_check_header_mongrel "$LINENO" "unicode/ucol.h" "ac_cv_header_unicode_ucol_h" "$ac_includes_default" @@ -17565,7 +17423,7 @@ $as_echo "#define HAVE_GCC__ATOMIC_INT64_CAS 1" >>confdefs.h fi -# Check for x86 cpuid instruction +# Check for __get_cpuid() and __cpuid() { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __get_cpuid" >&5 $as_echo_n "checking for __get_cpuid... 
" >&6; } if ${pgac_cv__get_cpuid+:} false; then : @@ -17598,77 +17456,79 @@ if test x"$pgac_cv__get_cpuid" = x"yes"; then $as_echo "#define HAVE__GET_CPUID 1" >>confdefs.h -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __get_cpuid_count" >&5 -$as_echo_n "checking for __get_cpuid_count... " >&6; } -if ${pgac_cv__get_cpuid_count+:} false; then : +else + # __cpuid() + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __cpuid" >&5 +$as_echo_n "checking for __cpuid... " >&6; } +if ${pgac_cv__cpuid+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include <cpuid.h> +#include <intrin.h> int main () { unsigned int exx[4] = {0, 0, 0, 0}; - __get_cpuid_count(7, 0, &exx[0], &exx[1], &exx[2], &exx[3]); + __cpuid(exx, 1); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - pgac_cv__get_cpuid_count="yes" + pgac_cv__cpuid="yes" else - pgac_cv__get_cpuid_count="no" + pgac_cv__cpuid="no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__get_cpuid_count" >&5 -$as_echo "$pgac_cv__get_cpuid_count" >&6; } -if test x"$pgac_cv__get_cpuid_count" = x"yes"; then +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__cpuid" >&5 +$as_echo "$pgac_cv__cpuid" >&6; } + if test x"$pgac_cv__cpuid" = x"yes"; then -$as_echo "#define HAVE__GET_CPUID_COUNT 1" >>confdefs.h +$as_echo "#define HAVE__CPUID 1" >>confdefs.h + fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __cpuid" >&5 -$as_echo_n "checking for __cpuid... " >&6; } -if ${pgac_cv__cpuid+:} false; then : +# Check for __get_cpuid_count() and __cpuidex() in a similar fashion. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __get_cpuid_count" >&5 +$as_echo_n "checking for __get_cpuid_count... " >&6; } +if ${pgac_cv__get_cpuid_count+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include <intrin.h> +#include <cpuid.h> int main () { unsigned int exx[4] = {0, 0, 0, 0}; - __get_cpuid(exx[0], 1); + __get_cpuid_count(7, 0, &exx[0], &exx[1], &exx[2], &exx[3]); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - pgac_cv__cpuid="yes" + pgac_cv__get_cpuid_count="yes" else - pgac_cv__cpuid="no" + pgac_cv__get_cpuid_count="no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__cpuid" >&5 -$as_echo "$pgac_cv__cpuid" >&6; } -if test x"$pgac_cv__cpuid" = x"yes"; then - -$as_echo "#define HAVE__CPUID 1" >>confdefs.h +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__get_cpuid_count" >&5 +$as_echo "$pgac_cv__get_cpuid_count" >&6; } +if test x"$pgac_cv__get_cpuid_count" = x"yes"; then -fi +$as_echo "#define HAVE__GET_CPUID_COUNT 1" >>confdefs.h -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __cpuidex" >&5 +else + # __cpuidex() + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __cpuidex" >&5 $as_echo_n "checking for __cpuidex... 
" >&6; } if ${pgac_cv__cpuidex+:} false; then : $as_echo_n "(cached) " >&6 @@ -17680,7 +17540,7 @@ int main () { unsigned int exx[4] = {0, 0, 0, 0}; - __get_cpuidex(exx[0], 7, 0); + __cpuidex(exx, 7, 0); ; return 0; @@ -17696,10 +17556,11 @@ rm -f core conftest.err conftest.$ac_objext \ fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__cpuidex" >&5 $as_echo "$pgac_cv__cpuidex" >&6; } -if test x"$pgac_cv__cpuidex" = x"yes"; then + if test x"$pgac_cv__cpuidex" = x"yes"; then $as_echo "#define HAVE__CPUIDEX 1" >>confdefs.h + fi fi # Check for XSAVE intrinsics @@ -18876,7 +18737,7 @@ Use --without-tcl to disable building PL/Tcl." "$LINENO" 5 fi # now that we have TCL_INCLUDE_SPEC, we can check for <tcl.h> ac_save_CPPFLAGS=$CPPFLAGS - CPPFLAGS="$TCL_INCLUDE_SPEC $CPPFLAGS" + CPPFLAGS="$CPPFLAGS $TCL_INCLUDE_SPEC" ac_fn_c_check_header_mongrel "$LINENO" "tcl.h" "ac_cv_header_tcl_h" "$ac_includes_default" if test "x$ac_cv_header_tcl_h" = xyes; then : @@ -18945,7 +18806,7 @@ fi # check for <Python.h> if test "$with_python" = yes; then ac_save_CPPFLAGS=$CPPFLAGS - CPPFLAGS="$python_includespec $CPPFLAGS" + CPPFLAGS="$CPPFLAGS $python_includespec" ac_fn_c_check_header_mongrel "$LINENO" "Python.h" "ac_cv_header_Python_h" "$ac_includes_default" if test "x$ac_cv_header_Python_h" = xyes; then : diff --git a/configure.ac b/configure.ac index c2877e369350e..066e3976c0aac 100644 --- a/configure.ac +++ b/configure.ac @@ -364,14 +364,33 @@ pgac_cc_list="gcc cc" pgac_cxx_list="g++ c++" AC_PROG_CC([$pgac_cc_list]) -AC_PROG_CC_C99() -# Error out if the compiler does not support C99, as the codebase -# relies on that. -if test "$ac_cv_prog_cc_c99" = no; then - AC_MSG_ERROR([C compiler "$CC" does not support C99]) +# Detect option needed for C11 +# loosely modeled after code in later Autoconf versions +AC_MSG_CHECKING([for $CC option to accept ISO C11]) +AC_CACHE_VAL([pgac_cv_prog_cc_c11], +[pgac_cv_prog_cc_c11=no +pgac_save_CC=$CC +for pgac_arg in '' '-std=gnu11' '-std=c11'; do + CC="$pgac_save_CC $pgac_arg" + AC_COMPILE_IFELSE([AC_LANG_SOURCE([[#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 201112L +# error "Compiler does not advertise C11 conformance" +#endif]])], [[pgac_cv_prog_cc_c11=$pgac_arg]]) + test x"$pgac_cv_prog_cc_c11" != x"no" && break +done +CC=$pgac_save_CC]) + +if test x"$pgac_cv_prog_cc_c11" = x"no"; then + AC_MSG_RESULT([unsupported]) + AC_MSG_ERROR([C compiler "$CC" does not support C11]) +elif test x"$pgac_cv_prog_cc_c11" = x""; then + AC_MSG_RESULT([none needed]) +else + AC_MSG_RESULT([$pgac_cv_prog_cc_c11]) + CC="$CC $pgac_cv_prog_cc_c11" fi + AC_PROG_CXX([$pgac_cxx_list]) # Check if it's Intel's compiler, which (usually) pretends to be gcc, @@ -1103,12 +1122,12 @@ if test "$with_libxml" = yes ; then # Note the user could also set XML2_CFLAGS/XML2_LIBS directly for pgac_option in $XML2_CFLAGS; do case $pgac_option in - -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";; + -I*|-D*) INCLUDES="$INCLUDES $pgac_option";; esac done for pgac_option in $XML2_LIBS; do case $pgac_option in - -L*) LDFLAGS="$LDFLAGS $pgac_option";; + -L*) LIBDIRS="$LIBDIRS $pgac_option";; esac done fi @@ -1152,12 +1171,12 @@ if test "$with_lz4" = yes; then # note that -llz4 will be added by AC_CHECK_LIB below. 
for pgac_option in $LZ4_CFLAGS; do case $pgac_option in - -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";; + -I*|-D*) INCLUDES="$INCLUDES $pgac_option";; esac done for pgac_option in $LZ4_LIBS; do case $pgac_option in - -L*) LDFLAGS="$LDFLAGS $pgac_option";; + -L*) LIBDIRS="$LIBDIRS $pgac_option";; esac done fi @@ -1177,12 +1196,12 @@ if test "$with_zstd" = yes; then # note that -lzstd will be added by AC_CHECK_LIB below. for pgac_option in $ZSTD_CFLAGS; do case $pgac_option in - -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";; + -I*|-D*) INCLUDES="$INCLUDES $pgac_option";; esac done for pgac_option in $ZSTD_LIBS; do case $pgac_option in - -L*) LDFLAGS="$LDFLAGS $pgac_option";; + -L*) LIBDIRS="$LIBDIRS $pgac_option";; esac done fi @@ -1944,7 +1963,7 @@ fi if test "$with_icu" = yes; then ac_save_CPPFLAGS=$CPPFLAGS - CPPFLAGS="$ICU_CFLAGS $CPPFLAGS" + CPPFLAGS="$CPPFLAGS $ICU_CFLAGS" # Verify we have ICU's header files AC_CHECK_HEADER(unicode/ucol.h, [], @@ -2044,7 +2063,7 @@ PGAC_HAVE_GCC__ATOMIC_INT32_CAS PGAC_HAVE_GCC__ATOMIC_INT64_CAS -# Check for x86 cpuid instruction +# Check for __get_cpuid() and __cpuid() AC_CACHE_CHECK([for __get_cpuid], [pgac_cv__get_cpuid], [AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <cpuid.h>], [[unsigned int exx[4] = {0, 0, 0, 0}; @@ -2054,8 +2073,21 @@ AC_CACHE_CHECK([for __get_cpuid], [pgac_cv__get_cpuid], [pgac_cv__get_cpuid="no"])]) if test x"$pgac_cv__get_cpuid" = x"yes"; then AC_DEFINE(HAVE__GET_CPUID, 1, [Define to 1 if you have __get_cpuid.]) +else + # __cpuid() + AC_CACHE_CHECK([for __cpuid], [pgac_cv__cpuid], + [AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <intrin.h>], + [[unsigned int exx[4] = {0, 0, 0, 0}; + __cpuid(exx, 1); + ]])], + [pgac_cv__cpuid="yes"], + [pgac_cv__cpuid="no"])]) + if test x"$pgac_cv__cpuid" = x"yes"; then + AC_DEFINE(HAVE__CPUID, 1, [Define to 1 if you have __cpuid.]) + fi fi +# Check for __get_cpuid_count() and __cpuidex() in a similar fashion. 
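The probes form fallback pairs because GCC and Clang declare __get_cpuid() and __get_cpuid_count() in <cpuid.h>, while MSVC declares __cpuid() and __cpuidex() in <intrin.h>, so at most one probe of each pair can succeed. A hedged sketch of how the resulting HAVE__* defines are typically consumed, loosely modeled on PostgreSQL's CPU-feature dispatch code (the consuming files are outside this diff):

/* cpuid_sketch.c - illustrative only; HAVE__* would come from pg_config.h */
#include <stdio.h>

#if defined(HAVE__GET_CPUID)
#include <cpuid.h>
#elif defined(HAVE__CPUID)
#include <intrin.h>
#endif

static int
have_sse42(void)
{
	unsigned int exx[4] = {0, 0, 0, 0};

#if defined(HAVE__GET_CPUID)
	__get_cpuid(1, &exx[0], &exx[1], &exx[2], &exx[3]);
#elif defined(HAVE__CPUID)
	__cpuid(exx, 1);		/* same call shape as the probe above */
#endif
	return (exx[2] & (1 << 20)) != 0;	/* CPUID leaf 1, ECX bit 20 = SSE4.2 */
}

int
main(void)
{
	printf("sse4.2 available: %d\n", have_sse42());
	return 0;
}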
AC_CACHE_CHECK([for __get_cpuid_count], [pgac_cv__get_cpuid_count], [AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <cpuid.h>], [[unsigned int exx[4] = {0, 0, 0, 0}; @@ -2065,28 +2097,18 @@ AC_CACHE_CHECK([for __get_cpuid_count], [pgac_cv__get_cpuid_count], [pgac_cv__get_cpuid_count="no"])]) if test x"$pgac_cv__get_cpuid_count" = x"yes"; then AC_DEFINE(HAVE__GET_CPUID_COUNT, 1, [Define to 1 if you have __get_cpuid_count.]) -fi - -AC_CACHE_CHECK([for __cpuid], [pgac_cv__cpuid], -[AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <intrin.h>], - [[unsigned int exx[4] = {0, 0, 0, 0}; - __get_cpuid(exx[0], 1); - ]])], - [pgac_cv__cpuid="yes"], - [pgac_cv__cpuid="no"])]) -if test x"$pgac_cv__cpuid" = x"yes"; then - AC_DEFINE(HAVE__CPUID, 1, [Define to 1 if you have __cpuid.]) -fi - -AC_CACHE_CHECK([for __cpuidex], [pgac_cv__cpuidex], -[AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <intrin.h>], - [[unsigned int exx[4] = {0, 0, 0, 0}; - __get_cpuidex(exx[0], 7, 0); - ]])], - [pgac_cv__cpuidex="yes"], - [pgac_cv__cpuidex="no"])]) -if test x"$pgac_cv__cpuidex" = x"yes"; then - AC_DEFINE(HAVE__CPUIDEX, 1, [Define to 1 if you have __cpuidex.]) +else + # __cpuidex() + AC_CACHE_CHECK([for __cpuidex], [pgac_cv__cpuidex], + [AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <intrin.h>], + [[unsigned int exx[4] = {0, 0, 0, 0}; + __cpuidex(exx, 7, 0); + ]])], + [pgac_cv__cpuidex="yes"], + [pgac_cv__cpuidex="no"])]) + if test x"$pgac_cv__cpuidex" = x"yes"; then + AC_DEFINE(HAVE__CPUIDEX, 1, [Define to 1 if you have __cpuidex.]) + fi fi # Check for XSAVE intrinsics @@ -2344,7 +2366,7 @@ Use --without-tcl to disable building PL/Tcl.]) fi # now that we have TCL_INCLUDE_SPEC, we can check for <tcl.h> ac_save_CPPFLAGS=$CPPFLAGS - CPPFLAGS="$TCL_INCLUDE_SPEC $CPPFLAGS" + CPPFLAGS="$CPPFLAGS $TCL_INCLUDE_SPEC" AC_CHECK_HEADER(tcl.h, [], [AC_MSG_ERROR([header file <tcl.h> is required for Tcl])]) CPPFLAGS=$ac_save_CPPFLAGS fi @@ -2381,7 +2403,7 @@ fi # check for <Python.h> if test "$with_python" = yes; then ac_save_CPPFLAGS=$CPPFLAGS - CPPFLAGS="$python_includespec $CPPFLAGS" + CPPFLAGS="$CPPFLAGS $python_includespec" AC_CHECK_HEADER(Python.h, [], [AC_MSG_ERROR([header file <Python.h> is required for Python])]) CPPFLAGS=$ac_save_CPPFLAGS fi diff --git a/contrib/amcheck/verify_gin.c b/contrib/amcheck/verify_gin.c index c615d950736f6..5c3eb4d0fd4f4 100644 --- a/contrib/amcheck/verify_gin.c +++ b/contrib/amcheck/verify_gin.c @@ -174,7 +174,7 @@ gin_check_posting_tree_parent_keys_consistency(Relation rel, BlockNumber posting buffer = ReadBufferExtended(rel, MAIN_FORKNUM, stack->blkno, RBM_NORMAL, strategy); LockBuffer(buffer, GIN_SHARE); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); Assert(GinPageIsData(page)); @@ -434,7 +434,7 @@ gin_check_parent_keys_consistency(Relation rel, buffer = ReadBufferExtended(rel, MAIN_FORKNUM, stack->blkno, RBM_NORMAL, strategy); LockBuffer(buffer, GIN_SHARE); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); maxoff = PageGetMaxOffsetNumber(page); rightlink = GinPageGetOpaque(page)->rightlink; diff --git a/contrib/auto_explain/Makefile b/contrib/auto_explain/Makefile index efd127d3cae64..94ab28e7c06b9 100644 --- a/contrib/auto_explain/Makefile +++ b/contrib/auto_explain/Makefile @@ -6,6 +6,8 @@ OBJS = \ auto_explain.o PGFILEDESC = "auto_explain - logging facility for execution plans" +REGRESS = alter_reset + TAP_TESTS = 1 ifdef USE_PGXS diff --git a/contrib/auto_explain/expected/alter_reset.out b/contrib/auto_explain/expected/alter_reset.out new file mode 100644 index 0000000000000..ec355189806ae --- /dev/null +++ 
b/contrib/auto_explain/expected/alter_reset.out @@ -0,0 +1,19 @@ +-- +-- This tests resetting unknown custom GUCs with reserved prefixes. There's +-- nothing specific to auto_explain; this is just a convenient place to put +-- this test. +-- +SELECT current_database() AS datname \gset +CREATE ROLE regress_ae_role; +ALTER DATABASE :"datname" SET auto_explain.bogus = 1; +ALTER ROLE regress_ae_role SET auto_explain.bogus = 1; +ALTER ROLE regress_ae_role IN DATABASE :"datname" SET auto_explain.bogus = 1; +ALTER SYSTEM SET auto_explain.bogus = 1; +LOAD 'auto_explain'; +WARNING: invalid configuration parameter name "auto_explain.bogus", removing it +DETAIL: "auto_explain" is now a reserved prefix. +ALTER DATABASE :"datname" RESET auto_explain.bogus; +ALTER ROLE regress_ae_role RESET auto_explain.bogus; +ALTER ROLE regress_ae_role IN DATABASE :"datname" RESET auto_explain.bogus; +ALTER SYSTEM RESET auto_explain.bogus; +DROP ROLE regress_ae_role; diff --git a/contrib/auto_explain/meson.build b/contrib/auto_explain/meson.build index 92dc9df6f7cac..a9b45cc235f12 100644 --- a/contrib/auto_explain/meson.build +++ b/contrib/auto_explain/meson.build @@ -20,6 +20,11 @@ tests += { 'name': 'auto_explain', 'sd': meson.current_source_dir(), 'bd': meson.current_build_dir(), + 'regress': { + 'sql': [ + 'alter_reset', + ], + }, 'tap': { 'tests': [ 't/001_auto_explain.pl', diff --git a/contrib/auto_explain/sql/alter_reset.sql b/contrib/auto_explain/sql/alter_reset.sql new file mode 100644 index 0000000000000..bf621454ec24a --- /dev/null +++ b/contrib/auto_explain/sql/alter_reset.sql @@ -0,0 +1,22 @@ +-- +-- This tests resetting unknown custom GUCs with reserved prefixes. There's +-- nothing specific to auto_explain; this is just a convenient place to put +-- this test. +-- + +SELECT current_database() AS datname \gset +CREATE ROLE regress_ae_role; + +ALTER DATABASE :"datname" SET auto_explain.bogus = 1; +ALTER ROLE regress_ae_role SET auto_explain.bogus = 1; +ALTER ROLE regress_ae_role IN DATABASE :"datname" SET auto_explain.bogus = 1; +ALTER SYSTEM SET auto_explain.bogus = 1; + +LOAD 'auto_explain'; + +ALTER DATABASE :"datname" RESET auto_explain.bogus; +ALTER ROLE regress_ae_role RESET auto_explain.bogus; +ALTER ROLE regress_ae_role IN DATABASE :"datname" RESET auto_explain.bogus; +ALTER SYSTEM RESET auto_explain.bogus; + +DROP ROLE regress_ae_role; diff --git a/contrib/bloom/blvacuum.c b/contrib/bloom/blvacuum.c index 86b15a75f6fb9..9e5f0574fad2e 100644 --- a/contrib/bloom/blvacuum.c +++ b/contrib/bloom/blvacuum.c @@ -192,7 +192,7 @@ blvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno, RBM_NORMAL, info->strategy); LockBuffer(buffer, BUFFER_LOCK_SHARE); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); if (PageIsNew(page) || BloomPageIsDeleted(page)) { diff --git a/contrib/btree_gist/btree_bool.c b/contrib/btree_gist/btree_bool.c index 1127597bb6017..344f059c78fde 100644 --- a/contrib/btree_gist/btree_bool.c +++ b/contrib/btree_gist/btree_bool.c @@ -5,6 +5,7 @@ #include "btree_gist.h" #include "btree_utils_num.h" +#include "utils/rel.h" #include "utils/sortsupport.h" typedef struct boolkey diff --git a/contrib/btree_gist/btree_cash.c b/contrib/btree_gist/btree_cash.c index 01c8d5a5f4074..282d5c5731fef 100644 --- a/contrib/btree_gist/btree_cash.c +++ b/contrib/btree_gist/btree_cash.c @@ -7,6 +7,7 @@ #include "btree_utils_num.h" #include "common/int.h" #include "utils/cash.h" +#include "utils/rel.h" #include 
"utils/sortsupport.h" typedef struct diff --git a/contrib/btree_gist/btree_date.c b/contrib/btree_gist/btree_date.c index c008dc61ba5f5..1f1a3f32b56a9 100644 --- a/contrib/btree_gist/btree_date.c +++ b/contrib/btree_gist/btree_date.c @@ -7,6 +7,7 @@ #include "btree_utils_num.h" #include "utils/fmgrprotos.h" #include "utils/date.h" +#include "utils/rel.h" #include "utils/sortsupport.h" typedef struct diff --git a/contrib/btree_gist/btree_enum.c b/contrib/btree_gist/btree_enum.c index 83c95c7bb0401..8f1ffff46965f 100644 --- a/contrib/btree_gist/btree_enum.c +++ b/contrib/btree_gist/btree_enum.c @@ -8,6 +8,7 @@ #include "fmgr.h" #include "utils/fmgrprotos.h" #include "utils/fmgroids.h" +#include "utils/rel.h" #include "utils/sortsupport.h" /* enums are really Oids, so we just use the same structure */ @@ -193,8 +194,8 @@ gbt_enum_ssup_cmp(Datum x, Datum y, SortSupport ssup) return DatumGetInt32(CallerFInfoFunctionCall2(enum_cmp, ssup->ssup_extra, InvalidOid, - arg1->lower, - arg2->lower)); + ObjectIdGetDatum(arg1->lower), + ObjectIdGetDatum(arg2->lower))); } Datum diff --git a/contrib/btree_gist/btree_float4.c b/contrib/btree_gist/btree_float4.c index bec026a923a18..d9c859835dacc 100644 --- a/contrib/btree_gist/btree_float4.c +++ b/contrib/btree_gist/btree_float4.c @@ -6,6 +6,7 @@ #include "btree_gist.h" #include "btree_utils_num.h" #include "utils/float.h" +#include "utils/rel.h" #include "utils/sortsupport.h" typedef struct float4key diff --git a/contrib/btree_gist/btree_float8.c b/contrib/btree_gist/btree_float8.c index 43e7cde2b6958..567beede178ad 100644 --- a/contrib/btree_gist/btree_float8.c +++ b/contrib/btree_gist/btree_float8.c @@ -6,6 +6,7 @@ #include "btree_gist.h" #include "btree_utils_num.h" #include "utils/float.h" +#include "utils/rel.h" #include "utils/sortsupport.h" typedef struct float8key diff --git a/contrib/btree_gist/btree_inet.c b/contrib/btree_gist/btree_inet.c index 8b23853bafbb7..52bf3e2446e95 100644 --- a/contrib/btree_gist/btree_inet.c +++ b/contrib/btree_gist/btree_inet.c @@ -7,6 +7,7 @@ #include "btree_utils_num.h" #include "catalog/pg_type.h" #include "utils/builtins.h" +#include "utils/rel.h" #include "utils/sortsupport.h" typedef struct inetkey diff --git a/contrib/btree_gist/btree_int2.c b/contrib/btree_gist/btree_int2.c index 33eccdedd7049..faf456997bbf1 100644 --- a/contrib/btree_gist/btree_int2.c +++ b/contrib/btree_gist/btree_int2.c @@ -6,6 +6,7 @@ #include "btree_gist.h" #include "btree_utils_num.h" #include "common/int.h" +#include "utils/rel.h" #include "utils/sortsupport.h" typedef struct int16key diff --git a/contrib/btree_gist/btree_int4.c b/contrib/btree_gist/btree_int4.c index a82cee9a58a8c..0bdb9e58c5601 100644 --- a/contrib/btree_gist/btree_int4.c +++ b/contrib/btree_gist/btree_int4.c @@ -5,6 +5,7 @@ #include "btree_gist.h" #include "btree_utils_num.h" #include "common/int.h" +#include "utils/rel.h" #include "utils/sortsupport.h" typedef struct int32key diff --git a/contrib/btree_gist/btree_int8.c b/contrib/btree_gist/btree_int8.c index f0c56e017269a..a9a7b56927847 100644 --- a/contrib/btree_gist/btree_int8.c +++ b/contrib/btree_gist/btree_int8.c @@ -6,6 +6,7 @@ #include "btree_gist.h" #include "btree_utils_num.h" #include "common/int.h" +#include "utils/rel.h" #include "utils/sortsupport.h" typedef struct int64key diff --git a/contrib/btree_gist/btree_interval.c b/contrib/btree_gist/btree_interval.c index b5e365c6e09b4..19eefc60cde3f 100644 --- a/contrib/btree_gist/btree_interval.c +++ b/contrib/btree_gist/btree_interval.c @@ -6,6 +6,7 @@ 
#include "btree_gist.h" #include "btree_utils_num.h" #include "utils/fmgrprotos.h" +#include "utils/rel.h" #include "utils/sortsupport.h" #include "utils/timestamp.h" diff --git a/contrib/btree_gist/btree_macaddr.c b/contrib/btree_gist/btree_macaddr.c index 3b2f26719d5dc..c444a709853a7 100644 --- a/contrib/btree_gist/btree_macaddr.c +++ b/contrib/btree_gist/btree_macaddr.c @@ -7,6 +7,7 @@ #include "btree_utils_num.h" #include "utils/fmgrprotos.h" #include "utils/inet.h" +#include "utils/rel.h" #include "utils/sortsupport.h" typedef struct diff --git a/contrib/btree_gist/btree_macaddr8.c b/contrib/btree_gist/btree_macaddr8.c index f2b104617e680..6d9837d90a340 100644 --- a/contrib/btree_gist/btree_macaddr8.c +++ b/contrib/btree_gist/btree_macaddr8.c @@ -7,6 +7,7 @@ #include "btree_utils_num.h" #include "utils/fmgrprotos.h" #include "utils/inet.h" +#include "utils/rel.h" #include "utils/sortsupport.h" typedef struct diff --git a/contrib/btree_gist/btree_numeric.c b/contrib/btree_gist/btree_numeric.c index a39c05d9da1cf..052f27b07949e 100644 --- a/contrib/btree_gist/btree_numeric.c +++ b/contrib/btree_gist/btree_numeric.c @@ -192,7 +192,7 @@ gbt_numeric_penalty(PG_FUNCTION_ARGS) *result = 0.0; - if (DirectFunctionCall2(numeric_gt, NumericGetDatum(ds), NumericGetDatum(nul))) + if (DatumGetBool(DirectFunctionCall2(numeric_gt, NumericGetDatum(ds), NumericGetDatum(nul)))) { *result += FLT_MIN; os = DatumGetNumeric(DirectFunctionCall2(numeric_div, diff --git a/contrib/btree_gist/btree_oid.c b/contrib/btree_gist/btree_oid.c index ffe0d7983e40f..b8f2f661076c6 100644 --- a/contrib/btree_gist/btree_oid.c +++ b/contrib/btree_gist/btree_oid.c @@ -5,6 +5,7 @@ #include "btree_gist.h" #include "btree_utils_num.h" +#include "utils/rel.h" #include "utils/sortsupport.h" typedef struct diff --git a/contrib/btree_gist/btree_time.c b/contrib/btree_gist/btree_time.c index 1dba95057ba9f..9483846c4738b 100644 --- a/contrib/btree_gist/btree_time.c +++ b/contrib/btree_gist/btree_time.c @@ -7,6 +7,7 @@ #include "btree_utils_num.h" #include "utils/fmgrprotos.h" #include "utils/date.h" +#include "utils/rel.h" #include "utils/sortsupport.h" #include "utils/timestamp.h" @@ -31,13 +32,6 @@ PG_FUNCTION_INFO_V1(gbt_time_sortsupport); PG_FUNCTION_INFO_V1(gbt_timetz_sortsupport); -#ifdef USE_FLOAT8_BYVAL -#define TimeADTGetDatumFast(X) TimeADTGetDatum(X) -#else -#define TimeADTGetDatumFast(X) PointerGetDatum(&(X)) -#endif - - static bool gbt_timegt(const void *a, const void *b, FmgrInfo *flinfo) { @@ -45,8 +39,8 @@ gbt_timegt(const void *a, const void *b, FmgrInfo *flinfo) const TimeADT *bb = (const TimeADT *) b; return DatumGetBool(DirectFunctionCall2(time_gt, - TimeADTGetDatumFast(*aa), - TimeADTGetDatumFast(*bb))); + TimeADTGetDatum(*aa), + TimeADTGetDatum(*bb))); } static bool @@ -56,8 +50,8 @@ gbt_timege(const void *a, const void *b, FmgrInfo *flinfo) const TimeADT *bb = (const TimeADT *) b; return DatumGetBool(DirectFunctionCall2(time_ge, - TimeADTGetDatumFast(*aa), - TimeADTGetDatumFast(*bb))); + TimeADTGetDatum(*aa), + TimeADTGetDatum(*bb))); } static bool @@ -67,8 +61,8 @@ gbt_timeeq(const void *a, const void *b, FmgrInfo *flinfo) const TimeADT *bb = (const TimeADT *) b; return DatumGetBool(DirectFunctionCall2(time_eq, - TimeADTGetDatumFast(*aa), - TimeADTGetDatumFast(*bb))); + TimeADTGetDatum(*aa), + TimeADTGetDatum(*bb))); } static bool @@ -78,8 +72,8 @@ gbt_timele(const void *a, const void *b, FmgrInfo *flinfo) const TimeADT *bb = (const TimeADT *) b; return DatumGetBool(DirectFunctionCall2(time_le, - 
TimeADTGetDatumFast(*aa), - TimeADTGetDatumFast(*bb))); + TimeADTGetDatum(*aa), + TimeADTGetDatum(*bb))); } static bool @@ -89,8 +83,8 @@ gbt_timelt(const void *a, const void *b, FmgrInfo *flinfo) const TimeADT *bb = (const TimeADT *) b; return DatumGetBool(DirectFunctionCall2(time_lt, - TimeADTGetDatumFast(*aa), - TimeADTGetDatumFast(*bb))); + TimeADTGetDatum(*aa), + TimeADTGetDatum(*bb))); } static int @@ -100,9 +94,9 @@ gbt_timekey_cmp(const void *a, const void *b, FmgrInfo *flinfo) timeKEY *ib = (timeKEY *) (((const Nsrt *) b)->t); int res; - res = DatumGetInt32(DirectFunctionCall2(time_cmp, TimeADTGetDatumFast(ia->lower), TimeADTGetDatumFast(ib->lower))); + res = DatumGetInt32(DirectFunctionCall2(time_cmp, TimeADTGetDatum(ia->lower), TimeADTGetDatum(ib->lower))); if (res == 0) - return DatumGetInt32(DirectFunctionCall2(time_cmp, TimeADTGetDatumFast(ia->upper), TimeADTGetDatumFast(ib->upper))); + return DatumGetInt32(DirectFunctionCall2(time_cmp, TimeADTGetDatum(ia->upper), TimeADTGetDatum(ib->upper))); return res; } @@ -115,8 +109,8 @@ gbt_time_dist(const void *a, const void *b, FmgrInfo *flinfo) Interval *i; i = DatumGetIntervalP(DirectFunctionCall2(time_mi_time, - TimeADTGetDatumFast(*aa), - TimeADTGetDatumFast(*bb))); + TimeADTGetDatum(*aa), + TimeADTGetDatum(*bb))); return fabs(INTERVAL_TO_SEC(i)); } @@ -279,14 +273,14 @@ gbt_time_penalty(PG_FUNCTION_ARGS) double res2; intr = DatumGetIntervalP(DirectFunctionCall2(time_mi_time, - TimeADTGetDatumFast(newentry->upper), - TimeADTGetDatumFast(origentry->upper))); + TimeADTGetDatum(newentry->upper), + TimeADTGetDatum(origentry->upper))); res = INTERVAL_TO_SEC(intr); res = Max(res, 0); intr = DatumGetIntervalP(DirectFunctionCall2(time_mi_time, - TimeADTGetDatumFast(origentry->lower), - TimeADTGetDatumFast(newentry->lower))); + TimeADTGetDatum(origentry->lower), + TimeADTGetDatum(newentry->lower))); res2 = INTERVAL_TO_SEC(intr); res2 = Max(res2, 0); @@ -297,8 +291,8 @@ gbt_time_penalty(PG_FUNCTION_ARGS) if (res > 0) { intr = DatumGetIntervalP(DirectFunctionCall2(time_mi_time, - TimeADTGetDatumFast(origentry->upper), - TimeADTGetDatumFast(origentry->lower))); + TimeADTGetDatum(origentry->upper), + TimeADTGetDatum(origentry->lower))); *result += FLT_MIN; *result += (float) (res / (res + INTERVAL_TO_SEC(intr))); *result *= (FLT_MAX / (((GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1)); @@ -334,8 +328,8 @@ gbt_timekey_ssup_cmp(Datum x, Datum y, SortSupport ssup) /* for leaf items we expect lower == upper, so only compare lower */ return DatumGetInt32(DirectFunctionCall2(time_cmp, - TimeADTGetDatumFast(arg1->lower), - TimeADTGetDatumFast(arg2->lower))); + TimeADTGetDatum(arg1->lower), + TimeADTGetDatum(arg2->lower))); } Datum diff --git a/contrib/btree_gist/btree_ts.c b/contrib/btree_gist/btree_ts.c index eb899c4d21363..b7bbae2f4d6fb 100644 --- a/contrib/btree_gist/btree_ts.c +++ b/contrib/btree_gist/btree_ts.c @@ -10,6 +10,7 @@ #include "utils/fmgrprotos.h" #include "utils/timestamp.h" #include "utils/float.h" +#include "utils/rel.h" #include "utils/sortsupport.h" typedef struct @@ -33,13 +34,6 @@ PG_FUNCTION_INFO_V1(gbt_ts_same); PG_FUNCTION_INFO_V1(gbt_ts_sortsupport); -#ifdef USE_FLOAT8_BYVAL -#define TimestampGetDatumFast(X) TimestampGetDatum(X) -#else -#define TimestampGetDatumFast(X) PointerGetDatum(&(X)) -#endif - - /* define for comparison */ static bool @@ -49,8 +43,8 @@ gbt_tsgt(const void *a, const void *b, FmgrInfo *flinfo) const Timestamp *bb = (const Timestamp *) b; return 
DatumGetBool(DirectFunctionCall2(timestamp_gt, - TimestampGetDatumFast(*aa), - TimestampGetDatumFast(*bb))); + TimestampGetDatum(*aa), + TimestampGetDatum(*bb))); } static bool @@ -60,8 +54,8 @@ gbt_tsge(const void *a, const void *b, FmgrInfo *flinfo) const Timestamp *bb = (const Timestamp *) b; return DatumGetBool(DirectFunctionCall2(timestamp_ge, - TimestampGetDatumFast(*aa), - TimestampGetDatumFast(*bb))); + TimestampGetDatum(*aa), + TimestampGetDatum(*bb))); } static bool @@ -71,8 +65,8 @@ gbt_tseq(const void *a, const void *b, FmgrInfo *flinfo) const Timestamp *bb = (const Timestamp *) b; return DatumGetBool(DirectFunctionCall2(timestamp_eq, - TimestampGetDatumFast(*aa), - TimestampGetDatumFast(*bb))); + TimestampGetDatum(*aa), + TimestampGetDatum(*bb))); } static bool @@ -82,8 +76,8 @@ gbt_tsle(const void *a, const void *b, FmgrInfo *flinfo) const Timestamp *bb = (const Timestamp *) b; return DatumGetBool(DirectFunctionCall2(timestamp_le, - TimestampGetDatumFast(*aa), - TimestampGetDatumFast(*bb))); + TimestampGetDatum(*aa), + TimestampGetDatum(*bb))); } static bool @@ -93,8 +87,8 @@ gbt_tslt(const void *a, const void *b, FmgrInfo *flinfo) const Timestamp *bb = (const Timestamp *) b; return DatumGetBool(DirectFunctionCall2(timestamp_lt, - TimestampGetDatumFast(*aa), - TimestampGetDatumFast(*bb))); + TimestampGetDatum(*aa), + TimestampGetDatum(*bb))); } static int @@ -104,9 +98,9 @@ gbt_tskey_cmp(const void *a, const void *b, FmgrInfo *flinfo) tsKEY *ib = (tsKEY *) (((const Nsrt *) b)->t); int res; - res = DatumGetInt32(DirectFunctionCall2(timestamp_cmp, TimestampGetDatumFast(ia->lower), TimestampGetDatumFast(ib->lower))); + res = DatumGetInt32(DirectFunctionCall2(timestamp_cmp, TimestampGetDatum(ia->lower), TimestampGetDatum(ib->lower))); if (res == 0) - return DatumGetInt32(DirectFunctionCall2(timestamp_cmp, TimestampGetDatumFast(ia->upper), TimestampGetDatumFast(ib->upper))); + return DatumGetInt32(DirectFunctionCall2(timestamp_cmp, TimestampGetDatum(ia->upper), TimestampGetDatum(ib->upper))); return res; } @@ -122,8 +116,8 @@ gbt_ts_dist(const void *a, const void *b, FmgrInfo *flinfo) return get_float8_infinity(); i = DatumGetIntervalP(DirectFunctionCall2(timestamp_mi, - TimestampGetDatumFast(*aa), - TimestampGetDatumFast(*bb))); + TimestampGetDatum(*aa), + TimestampGetDatum(*bb))); return fabs(INTERVAL_TO_SEC(i)); } @@ -404,8 +398,8 @@ gbt_ts_ssup_cmp(Datum x, Datum y, SortSupport ssup) /* for leaf items we expect lower == upper, so only compare lower */ return DatumGetInt32(DirectFunctionCall2(timestamp_cmp, - TimestampGetDatumFast(arg1->lower), - TimestampGetDatumFast(arg2->lower))); + TimestampGetDatum(arg1->lower), + TimestampGetDatum(arg2->lower))); } Datum diff --git a/contrib/btree_gist/btree_utils_num.c b/contrib/btree_gist/btree_utils_num.c index 346ee837d75f4..446fa930b92c4 100644 --- a/contrib/btree_gist/btree_utils_num.c +++ b/contrib/btree_gist/btree_utils_num.c @@ -119,38 +119,38 @@ gbt_num_fetch(GISTENTRY *entry, const gbtree_ninfo *tinfo) switch (tinfo->t) { case gbt_t_bool: - datum = BoolGetDatum(*(bool *) entry->key); + datum = BoolGetDatum(*(bool *) DatumGetPointer(entry->key)); break; case gbt_t_int2: - datum = Int16GetDatum(*(int16 *) entry->key); + datum = Int16GetDatum(*(int16 *) DatumGetPointer(entry->key)); break; case gbt_t_int4: - datum = Int32GetDatum(*(int32 *) entry->key); + datum = Int32GetDatum(*(int32 *) DatumGetPointer(entry->key)); break; case gbt_t_int8: - datum = Int64GetDatum(*(int64 *) entry->key); + datum = Int64GetDatum(*(int64 *) 
DatumGetPointer(entry->key)); break; case gbt_t_oid: case gbt_t_enum: - datum = ObjectIdGetDatum(*(Oid *) entry->key); + datum = ObjectIdGetDatum(*(Oid *) DatumGetPointer(entry->key)); break; case gbt_t_float4: - datum = Float4GetDatum(*(float4 *) entry->key); + datum = Float4GetDatum(*(float4 *) DatumGetPointer(entry->key)); break; case gbt_t_float8: - datum = Float8GetDatum(*(float8 *) entry->key); + datum = Float8GetDatum(*(float8 *) DatumGetPointer(entry->key)); break; case gbt_t_date: - datum = DateADTGetDatum(*(DateADT *) entry->key); + datum = DateADTGetDatum(*(DateADT *) DatumGetPointer(entry->key)); break; case gbt_t_time: - datum = TimeADTGetDatum(*(TimeADT *) entry->key); + datum = TimeADTGetDatum(*(TimeADT *) DatumGetPointer(entry->key)); break; case gbt_t_ts: - datum = TimestampGetDatum(*(Timestamp *) entry->key); + datum = TimestampGetDatum(*(Timestamp *) DatumGetPointer(entry->key)); break; case gbt_t_cash: - datum = CashGetDatum(*(Cash *) entry->key); + datum = CashGetDatum(*(Cash *) DatumGetPointer(entry->key)); break; default: datum = entry->key; diff --git a/contrib/btree_gist/btree_utils_var.c b/contrib/btree_gist/btree_utils_var.c index d9df2356cd1e4..fb466e5aa32ba 100644 --- a/contrib/btree_gist/btree_utils_var.c +++ b/contrib/btree_gist/btree_utils_var.c @@ -11,6 +11,7 @@ #include "btree_utils_var.h" #include "mb/pg_wchar.h" #include "utils/rel.h" +#include "varatt.h" /* used for key sorting */ typedef struct diff --git a/contrib/btree_gist/btree_uuid.c b/contrib/btree_gist/btree_uuid.c index 23a307a6a71d5..07f304f39f14c 100644 --- a/contrib/btree_gist/btree_uuid.c +++ b/contrib/btree_gist/btree_uuid.c @@ -6,6 +6,7 @@ #include "btree_gist.h" #include "btree_utils_num.h" #include "port/pg_bswap.h" +#include "utils/rel.h" #include "utils/sortsupport.h" #include "utils/uuid.h" diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c index f98805fb5f735..0cf4c27f2e967 100644 --- a/contrib/dblink/dblink.c +++ b/contrib/dblink/dblink.c @@ -2643,7 +2643,7 @@ dblink_connstr_has_required_scram_options(const char *connstr) PQconninfoFree(options); } - has_scram_keys = has_scram_client_key && has_scram_server_key && MyProcPort->has_scram_keys; + has_scram_keys = has_scram_client_key && has_scram_server_key && MyProcPort != NULL && MyProcPort->has_scram_keys; return (has_scram_keys && has_require_auth); } @@ -2676,7 +2676,7 @@ dblink_security_check(PGconn *conn, const char *connname, const char *connstr) * only added if UseScramPassthrough is set, and the user is not allowed * to add the SCRAM keys on fdw and user mapping options. */ - if (MyProcPort->has_scram_keys && dblink_connstr_has_required_scram_options(connstr)) + if (MyProcPort != NULL && MyProcPort->has_scram_keys && dblink_connstr_has_required_scram_options(connstr)) return; #ifdef ENABLE_GSS @@ -2749,7 +2749,7 @@ dblink_connstr_check(const char *connstr) if (dblink_connstr_has_pw(connstr)) return; - if (MyProcPort->has_scram_keys && dblink_connstr_has_required_scram_options(connstr)) + if (MyProcPort != NULL && MyProcPort->has_scram_keys && dblink_connstr_has_required_scram_options(connstr)) return; #ifdef ENABLE_GSS @@ -2896,7 +2896,7 @@ get_connect_string(const char *servername) * the user overwrites these options we can ereport on * dblink_connstr_check and dblink_security_check. 
*/ - if (MyProcPort->has_scram_keys && UseScramPassthrough(foreign_server, user_mapping)) + if (MyProcPort != NULL && MyProcPort->has_scram_keys && UseScramPassthrough(foreign_server, user_mapping)) appendSCRAMKeysInfo(&buf); foreach(cell, fdw->options) diff --git a/contrib/hstore/hstore_gin.c b/contrib/hstore/hstore_gin.c index 766c00bb6a735..2e5fa115924ba 100644 --- a/contrib/hstore/hstore_gin.c +++ b/contrib/hstore/hstore_gin.c @@ -127,7 +127,7 @@ gin_extract_hstore_query(PG_FUNCTION_ARGS) /* Nulls in the array are ignored, cf hstoreArrayToPairs */ if (key_nulls[i]) continue; - item = makeitem(VARDATA(key_datums[i]), VARSIZE(key_datums[i]) - VARHDRSZ, KEYFLAG); + item = makeitem(VARDATA(DatumGetPointer(key_datums[i])), VARSIZE(DatumGetPointer(key_datums[i])) - VARHDRSZ, KEYFLAG); entries[j++] = PointerGetDatum(item); } diff --git a/contrib/hstore/hstore_gist.c b/contrib/hstore/hstore_gist.c index a3b08af385016..69515dc3d3fbd 100644 --- a/contrib/hstore/hstore_gist.c +++ b/contrib/hstore/hstore_gist.c @@ -576,7 +576,7 @@ ghstore_consistent(PG_FUNCTION_ARGS) if (key_nulls[i]) continue; - crc = crc32_sz(VARDATA(key_datums[i]), VARSIZE(key_datums[i]) - VARHDRSZ); + crc = crc32_sz(VARDATA(DatumGetPointer(key_datums[i])), VARSIZE(DatumGetPointer(key_datums[i])) - VARHDRSZ); if (!(GETBIT(sign, HASHVAL(crc, siglen)))) res = false; } @@ -599,7 +599,7 @@ ghstore_consistent(PG_FUNCTION_ARGS) if (key_nulls[i]) continue; - crc = crc32_sz(VARDATA(key_datums[i]), VARSIZE(key_datums[i]) - VARHDRSZ); + crc = crc32_sz(VARDATA(DatumGetPointer(key_datums[i])), VARSIZE(DatumGetPointer(key_datums[i])) - VARHDRSZ); if (GETBIT(sign, HASHVAL(crc, siglen))) res = true; } diff --git a/contrib/hstore/hstore_io.c b/contrib/hstore/hstore_io.c index 4f867e4bd1f1c..9c53877c4a582 100644 --- a/contrib/hstore/hstore_io.c +++ b/contrib/hstore/hstore_io.c @@ -684,22 +684,22 @@ hstore_from_arrays(PG_FUNCTION_ARGS) if (!value_nulls || value_nulls[i]) { - pairs[i].key = VARDATA(key_datums[i]); + pairs[i].key = VARDATA(DatumGetPointer(key_datums[i])); pairs[i].val = NULL; pairs[i].keylen = - hstoreCheckKeyLen(VARSIZE(key_datums[i]) - VARHDRSZ); + hstoreCheckKeyLen(VARSIZE(DatumGetPointer(key_datums[i])) - VARHDRSZ); pairs[i].vallen = 4; pairs[i].isnull = true; pairs[i].needfree = false; } else { - pairs[i].key = VARDATA(key_datums[i]); - pairs[i].val = VARDATA(value_datums[i]); + pairs[i].key = VARDATA(DatumGetPointer(key_datums[i])); + pairs[i].val = VARDATA(DatumGetPointer(value_datums[i])); pairs[i].keylen = - hstoreCheckKeyLen(VARSIZE(key_datums[i]) - VARHDRSZ); + hstoreCheckKeyLen(VARSIZE(DatumGetPointer(key_datums[i])) - VARHDRSZ); pairs[i].vallen = - hstoreCheckValLen(VARSIZE(value_datums[i]) - VARHDRSZ); + hstoreCheckValLen(VARSIZE(DatumGetPointer(value_datums[i])) - VARHDRSZ); pairs[i].isnull = false; pairs[i].needfree = false; } @@ -778,22 +778,22 @@ hstore_from_array(PG_FUNCTION_ARGS) if (in_nulls[i * 2 + 1]) { - pairs[i].key = VARDATA(in_datums[i * 2]); + pairs[i].key = VARDATA(DatumGetPointer(in_datums[i * 2])); pairs[i].val = NULL; pairs[i].keylen = - hstoreCheckKeyLen(VARSIZE(in_datums[i * 2]) - VARHDRSZ); + hstoreCheckKeyLen(VARSIZE(DatumGetPointer(in_datums[i * 2])) - VARHDRSZ); pairs[i].vallen = 4; pairs[i].isnull = true; pairs[i].needfree = false; } else { - pairs[i].key = VARDATA(in_datums[i * 2]); - pairs[i].val = VARDATA(in_datums[i * 2 + 1]); + pairs[i].key = VARDATA(DatumGetPointer(in_datums[i * 2])); + pairs[i].val = VARDATA(DatumGetPointer(in_datums[i * 2 + 1])); pairs[i].keylen = - 
hstoreCheckKeyLen(VARSIZE(in_datums[i * 2]) - VARHDRSZ); + hstoreCheckKeyLen(VARSIZE(DatumGetPointer(in_datums[i * 2])) - VARHDRSZ); pairs[i].vallen = - hstoreCheckValLen(VARSIZE(in_datums[i * 2 + 1]) - VARHDRSZ); + hstoreCheckValLen(VARSIZE(DatumGetPointer(in_datums[i * 2 + 1])) - VARHDRSZ); pairs[i].isnull = false; pairs[i].needfree = false; } diff --git a/contrib/hstore/hstore_op.c b/contrib/hstore/hstore_op.c index 5e57eceffc817..bcba75f925808 100644 --- a/contrib/hstore/hstore_op.c +++ b/contrib/hstore/hstore_op.c @@ -107,8 +107,8 @@ hstoreArrayToPairs(ArrayType *a, int *npairs) { if (!key_nulls[i]) { - key_pairs[j].key = VARDATA(key_datums[i]); - key_pairs[j].keylen = VARSIZE(key_datums[i]) - VARHDRSZ; + key_pairs[j].key = VARDATA(DatumGetPointer(key_datums[i])); + key_pairs[j].keylen = VARSIZE(DatumGetPointer(key_datums[i])) - VARHDRSZ; key_pairs[j].val = NULL; key_pairs[j].vallen = 0; key_pairs[j].needfree = 0; diff --git a/contrib/intarray/_int_op.c b/contrib/intarray/_int_op.c index ba6d0a99995ed..a706e353c6f94 100644 --- a/contrib/intarray/_int_op.c +++ b/contrib/intarray/_int_op.c @@ -108,7 +108,7 @@ _int_overlap(PG_FUNCTION_ARGS) CHECKARRVALID(a); CHECKARRVALID(b); if (ARRISEMPTY(a) || ARRISEMPTY(b)) - return false; + PG_RETURN_BOOL(false); SORT(a); SORT(b); diff --git a/contrib/intarray/_int_selfuncs.c b/contrib/intarray/_int_selfuncs.c index 6c3b7ace146aa..9bf6448624254 100644 --- a/contrib/intarray/_int_selfuncs.c +++ b/contrib/intarray/_int_selfuncs.c @@ -177,7 +177,7 @@ _int_matchsel(PG_FUNCTION_ARGS) if (query->size == 0) { ReleaseVariableStats(vardata); - return (Selectivity) 0.0; + PG_RETURN_FLOAT8(0.0); } /* diff --git a/contrib/ltree/_ltree_gist.c b/contrib/ltree/_ltree_gist.c index 286ad24fbe847..30d516e60bc21 100644 --- a/contrib/ltree/_ltree_gist.c +++ b/contrib/ltree/_ltree_gist.c @@ -84,7 +84,7 @@ _ltree_compress(PG_FUNCTION_ARGS) entry->rel, entry->page, entry->offset, false); } - else if (!LTG_ISALLTRUE(entry->key)) + else if (!LTG_ISALLTRUE(DatumGetPointer(entry->key))) { int32 i; ltree_gist *key; diff --git a/contrib/pageinspect/btreefuncs.c b/contrib/pageinspect/btreefuncs.c index 294821231fc3b..4e2e8891cddfd 100644 --- a/contrib/pageinspect/btreefuncs.c +++ b/contrib/pageinspect/btreefuncs.c @@ -506,7 +506,7 @@ bt_page_print_tuples(ua_page_items *uargs) j = 0; memset(nulls, 0, sizeof(nulls)); - values[j++] = DatumGetInt16(offset); + values[j++] = Int16GetDatum(offset); values[j++] = ItemPointerGetDatum(&itup->t_tid); values[j++] = Int32GetDatum((int) IndexTupleSize(itup)); values[j++] = BoolGetDatum(IndexTupleHasNulls(itup)); diff --git a/contrib/pageinspect/gistfuncs.c b/contrib/pageinspect/gistfuncs.c index 7b16e2a1ef33c..1b299374890b0 100644 --- a/contrib/pageinspect/gistfuncs.c +++ b/contrib/pageinspect/gistfuncs.c @@ -174,7 +174,7 @@ gist_page_items_bytea(PG_FUNCTION_ARGS) memset(nulls, 0, sizeof(nulls)); - values[0] = DatumGetInt16(offset); + values[0] = Int16GetDatum(offset); values[1] = ItemPointerGetDatum(&itup->t_tid); values[2] = Int32GetDatum((int) IndexTupleSize(itup)); @@ -281,7 +281,7 @@ gist_page_items(PG_FUNCTION_ARGS) memset(nulls, 0, sizeof(nulls)); - values[0] = DatumGetInt16(offset); + values[0] = Int16GetDatum(offset); values[1] = ItemPointerGetDatum(&itup->t_tid); values[2] = Int32GetDatum((int) IndexTupleSize(itup)); values[3] = BoolGetDatum(ItemIdIsDead(id)); diff --git a/contrib/pageinspect/heapfuncs.c b/contrib/pageinspect/heapfuncs.c index 377ae30d1fa71..c13f07655c441 100644 --- a/contrib/pageinspect/heapfuncs.c +++ 
b/contrib/pageinspect/heapfuncs.c @@ -256,7 +256,7 @@ heap_page_items(PG_FUNCTION_ARGS) nulls[11] = true; if (tuphdr->t_infomask & HEAP_HASOID_OLD) - values[12] = HeapTupleHeaderGetOidOld(tuphdr); + values[12] = ObjectIdGetDatum(HeapTupleHeaderGetOidOld(tuphdr)); else nulls[12] = true; diff --git a/contrib/pg_buffercache/pg_buffercache_pages.c b/contrib/pg_buffercache/pg_buffercache_pages.c index ae0291e6e96df..3df04c98959e1 100644 --- a/contrib/pg_buffercache/pg_buffercache_pages.c +++ b/contrib/pg_buffercache/pg_buffercache_pages.c @@ -194,6 +194,8 @@ pg_buffercache_pages(PG_FUNCTION_ARGS) BufferDesc *bufHdr; uint32 buf_state; + CHECK_FOR_INTERRUPTS(); + bufHdr = GetBufferDescriptor(i); /* Lock each buffer header before inspecting. */ buf_state = LockBufHdr(bufHdr); @@ -560,6 +562,8 @@ pg_buffercache_summary(PG_FUNCTION_ARGS) BufferDesc *bufHdr; uint32 buf_state; + CHECK_FOR_INTERRUPTS(); + /* * This function summarizes the state of all headers. Locking the * buffer headers wouldn't provide an improved result as the state of @@ -620,6 +624,8 @@ pg_buffercache_usage_counts(PG_FUNCTION_ARGS) uint32 buf_state = pg_atomic_read_u32(&bufHdr->state); int usage_count; + CHECK_FOR_INTERRUPTS(); + usage_count = BUF_STATE_GET_USAGECOUNT(buf_state); usage_counts[usage_count]++; diff --git a/contrib/pg_prewarm/autoprewarm.c b/contrib/pg_prewarm/autoprewarm.c index c01b9c7e6a4d6..8b68dafc2611c 100644 --- a/contrib/pg_prewarm/autoprewarm.c +++ b/contrib/pg_prewarm/autoprewarm.c @@ -370,6 +370,15 @@ apw_load_buffers(void) apw_state->prewarm_start_idx = apw_state->prewarm_stop_idx = 0; apw_state->prewarmed_blocks = 0; + /* Don't prewarm more than we can fit. */ + if (num_elements > NBuffers) + { + num_elements = NBuffers; + ereport(LOG, + (errmsg("autoprewarm capping prewarmed blocks to %d (shared_buffers size)", + NBuffers))); + } + /* Get the info position of the first block of the next database. */ while (apw_state->prewarm_start_idx < num_elements) { @@ -410,10 +419,6 @@ apw_load_buffers(void) apw_state->database = current_db; Assert(apw_state->prewarm_start_idx < apw_state->prewarm_stop_idx); - /* If we've run out of free buffers, don't launch another worker. */ - if (!have_free_buffer()) - break; - /* * Likewise, don't launch if we've already been told to shut down. * (The launch would fail anyway, but we might as well skip it.) @@ -462,12 +467,6 @@ apw_read_stream_next_block(ReadStream *stream, { BlockInfoRecord blk = p->block_info[p->pos]; - if (!have_free_buffer()) - { - p->pos = apw_state->prewarm_stop_idx; - return InvalidBlockNumber; - } - if (blk.tablespace != p->tablespace) return InvalidBlockNumber; @@ -523,10 +522,10 @@ autoprewarm_database_main(Datum main_arg) blk = block_info[i]; /* - * Loop until we run out of blocks to prewarm or until we run out of free + * Loop until we run out of blocks to prewarm or until we run out of * buffers. */ - while (i < apw_state->prewarm_stop_idx && have_free_buffer()) + while (i < apw_state->prewarm_stop_idx) { Oid tablespace = blk.tablespace; RelFileNumber filenumber = blk.filenumber; @@ -568,14 +567,13 @@ autoprewarm_database_main(Datum main_arg) /* * We have a relation; now let's loop until we find a valid fork of - * the relation or we run out of free buffers. Once we've read from - * all valid forks or run out of options, we'll close the relation and + * the relation or we run out of buffers. Once we've read from all + * valid forks or run out of options, we'll close the relation and * move on. 
*/ while (i < apw_state->prewarm_stop_idx && blk.tablespace == tablespace && - blk.filenumber == filenumber && - have_free_buffer()) + blk.filenumber == filenumber) { ForkNumber forknum = blk.forknum; BlockNumber nblocks; @@ -864,7 +862,7 @@ apw_init_state(void *ptr) { AutoPrewarmSharedState *state = (AutoPrewarmSharedState *) ptr; - LWLockInitialize(&state->lock, LWLockNewTrancheId()); + LWLockInitialize(&state->lock, LWLockNewTrancheId("autoprewarm")); state->bgworker_pid = InvalidPid; state->pid_using_dumpfile = InvalidPid; } @@ -883,7 +881,6 @@ apw_init_shmem(void) sizeof(AutoPrewarmSharedState), apw_init_state, &found); - LWLockRegisterTranche(apw_state->lock.tranche, "autoprewarm"); return found; } diff --git a/contrib/pg_stat_statements/Makefile b/contrib/pg_stat_statements/Makefile index b2bd8794d2a14..fe0478ac55266 100644 --- a/contrib/pg_stat_statements/Makefile +++ b/contrib/pg_stat_statements/Makefile @@ -7,6 +7,7 @@ OBJS = \ EXTENSION = pg_stat_statements DATA = pg_stat_statements--1.4.sql \ + pg_stat_statements--1.12--1.13.sql \ pg_stat_statements--1.11--1.12.sql pg_stat_statements--1.10--1.11.sql \ pg_stat_statements--1.9--1.10.sql pg_stat_statements--1.8--1.9.sql \ pg_stat_statements--1.7--1.8.sql pg_stat_statements--1.6--1.7.sql \ @@ -20,7 +21,7 @@ LDFLAGS_SL += $(filter -lm, $(LIBS)) REGRESS_OPTS = --temp-config $(top_srcdir)/contrib/pg_stat_statements/pg_stat_statements.conf REGRESS = select dml cursors utility level_tracking planning \ user_activity wal entry_timestamp privileges extended \ - parallel cleanup oldextversions squashing + parallel plancache cleanup oldextversions squashing # Disabled because these tests require "shared_preload_libraries=pg_stat_statements", # which typical installcheck users do not have (e.g. buildfarm clients). 
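The autoprewarm hunks above also switch to the one-step tranche API: LWLockNewTrancheId() now takes the tranche name and registers it at allocation time, which is why the separate LWLockRegisterTranche() call disappears from apw_init_shmem(). A minimal sketch of the resulting idiom, using illustrative names rather than anything from the tree:

/* Shared state for a hypothetical module using a named LWLock tranche. */
typedef struct MyModuleState
{
	LWLock		lock;
} MyModuleState;

static void
my_init_state(void *ptr)
{
	MyModuleState *state = (MyModuleState *) ptr;

	/*
	 * Allocate a tranche ID and register its display name in one call;
	 * no per-backend LWLockRegisterTranche() follow-up is needed anymore.
	 */
	LWLockInitialize(&state->lock, LWLockNewTrancheId("my_module"));
}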
NO_INSTALLCHECK = 1 diff --git a/contrib/pg_stat_statements/expected/oldextversions.out b/contrib/pg_stat_statements/expected/oldextversions.out index de679b19711ab..726383a99d7c1 100644 --- a/contrib/pg_stat_statements/expected/oldextversions.out +++ b/contrib/pg_stat_statements/expected/oldextversions.out @@ -407,4 +407,71 @@ SELECT count(*) > 0 AS has_data FROM pg_stat_statements; t (1 row) +-- New functions and views for pg_stat_statements in 1.13 +AlTER EXTENSION pg_stat_statements UPDATE TO '1.13'; +\d pg_stat_statements + View "public.pg_stat_statements" + Column | Type | Collation | Nullable | Default +----------------------------+--------------------------+-----------+----------+--------- + userid | oid | | | + dbid | oid | | | + toplevel | boolean | | | + queryid | bigint | | | + query | text | | | + plans | bigint | | | + total_plan_time | double precision | | | + min_plan_time | double precision | | | + max_plan_time | double precision | | | + mean_plan_time | double precision | | | + stddev_plan_time | double precision | | | + calls | bigint | | | + total_exec_time | double precision | | | + min_exec_time | double precision | | | + max_exec_time | double precision | | | + mean_exec_time | double precision | | | + stddev_exec_time | double precision | | | + rows | bigint | | | + shared_blks_hit | bigint | | | + shared_blks_read | bigint | | | + shared_blks_dirtied | bigint | | | + shared_blks_written | bigint | | | + local_blks_hit | bigint | | | + local_blks_read | bigint | | | + local_blks_dirtied | bigint | | | + local_blks_written | bigint | | | + temp_blks_read | bigint | | | + temp_blks_written | bigint | | | + shared_blk_read_time | double precision | | | + shared_blk_write_time | double precision | | | + local_blk_read_time | double precision | | | + local_blk_write_time | double precision | | | + temp_blk_read_time | double precision | | | + temp_blk_write_time | double precision | | | + wal_records | bigint | | | + wal_fpi | bigint | | | + wal_bytes | numeric | | | + wal_buffers_full | bigint | | | + jit_functions | bigint | | | + jit_generation_time | double precision | | | + jit_inlining_count | bigint | | | + jit_inlining_time | double precision | | | + jit_optimization_count | bigint | | | + jit_optimization_time | double precision | | | + jit_emission_count | bigint | | | + jit_emission_time | double precision | | | + jit_deform_count | bigint | | | + jit_deform_time | double precision | | | + parallel_workers_to_launch | bigint | | | + parallel_workers_launched | bigint | | | + generic_plan_calls | bigint | | | + custom_plan_calls | bigint | | | + stats_since | timestamp with time zone | | | + minmax_stats_since | timestamp with time zone | | | + +SELECT count(*) > 0 AS has_data FROM pg_stat_statements; + has_data +---------- + t +(1 row) + DROP EXTENSION pg_stat_statements; diff --git a/contrib/pg_stat_statements/expected/plancache.out b/contrib/pg_stat_statements/expected/plancache.out new file mode 100644 index 0000000000000..e152de9f55130 --- /dev/null +++ b/contrib/pg_stat_statements/expected/plancache.out @@ -0,0 +1,224 @@ +-- +-- Tests with plan cache +-- +-- Setup +CREATE OR REPLACE FUNCTION select_one_func(int) RETURNS VOID AS $$ +DECLARE + ret INT; +BEGIN + SELECT $1 INTO ret; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE PROCEDURE select_one_proc(int) AS $$ +DECLARE + ret INT; +BEGIN + SELECT $1 INTO ret; +END; +$$ LANGUAGE plpgsql; +-- Prepared statements +SELECT pg_stat_statements_reset() IS NOT NULL AS t; + t +--- + t +(1 row) + +PREPARE p1 AS 
SELECT $1 AS a; +SET plan_cache_mode TO force_generic_plan; +EXECUTE p1(1); + a +--- + 1 +(1 row) + +SET plan_cache_mode TO force_custom_plan; +EXECUTE p1(1); + a +--- + 1 +(1 row) + +SELECT calls, generic_plan_calls, custom_plan_calls, query FROM pg_stat_statements + ORDER BY query COLLATE "C"; + calls | generic_plan_calls | custom_plan_calls | query +-------+--------------------+-------------------+---------------------------------------------------- + 2 | 1 | 1 | PREPARE p1 AS SELECT $1 AS a + 1 | 0 | 0 | SELECT pg_stat_statements_reset() IS NOT NULL AS t + 2 | 0 | 0 | SET plan_cache_mode TO $1 +(3 rows) + +DEALLOCATE p1; +-- Extended query protocol +SELECT pg_stat_statements_reset() IS NOT NULL AS t; + t +--- + t +(1 row) + +SELECT $1 AS a \parse p1 +SET plan_cache_mode TO force_generic_plan; +\bind_named p1 1 +; + a +--- + 1 +(1 row) + +SET plan_cache_mode TO force_custom_plan; +\bind_named p1 1 +; + a +--- + 1 +(1 row) + +SELECT calls, generic_plan_calls, custom_plan_calls, query FROM pg_stat_statements + ORDER BY query COLLATE "C"; + calls | generic_plan_calls | custom_plan_calls | query +-------+--------------------+-------------------+---------------------------------------------------- + 2 | 1 | 1 | SELECT $1 AS a + 1 | 0 | 0 | SELECT pg_stat_statements_reset() IS NOT NULL AS t + 2 | 0 | 0 | SET plan_cache_mode TO $1 +(3 rows) + +\close_prepared p1 +-- EXPLAIN [ANALYZE] EXECUTE +SET pg_stat_statements.track = 'all'; +SELECT pg_stat_statements_reset() IS NOT NULL AS t; + t +--- + t +(1 row) + +PREPARE p1 AS SELECT $1; +SET plan_cache_mode TO force_generic_plan; +EXPLAIN (COSTS OFF) EXECUTE p1(1); + QUERY PLAN +------------ + Result +(1 row) + +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) EXECUTE p1(1); + QUERY PLAN +----------------------------------- + Result (actual rows=1.00 loops=1) +(1 row) + +SET plan_cache_mode TO force_custom_plan; +EXPLAIN (COSTS OFF) EXECUTE p1(1); + QUERY PLAN +------------ + Result +(1 row) + +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) EXECUTE p1(1); + QUERY PLAN +----------------------------------- + Result (actual rows=1.00 loops=1) +(1 row) + +SELECT calls, generic_plan_calls, custom_plan_calls, toplevel, query FROM pg_stat_statements + ORDER BY query COLLATE "C"; + calls | generic_plan_calls | custom_plan_calls | toplevel | query +-------+--------------------+-------------------+----------+---------------------------------------------------------------------------------- + 2 | 0 | 0 | t | EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) EXECUTE p1(1) + 2 | 0 | 0 | t | EXPLAIN (COSTS OFF) EXECUTE p1(1) + 4 | 2 | 2 | f | PREPARE p1 AS SELECT $1 + 1 | 0 | 0 | t | SELECT pg_stat_statements_reset() IS NOT NULL AS t + 2 | 0 | 0 | t | SET plan_cache_mode TO $1 +(5 rows) + +RESET pg_stat_statements.track; +DEALLOCATE p1; +-- Functions/procedures +SET pg_stat_statements.track = 'all'; +SELECT pg_stat_statements_reset() IS NOT NULL AS t; + t +--- + t +(1 row) + +SET plan_cache_mode TO force_generic_plan; +SELECT select_one_func(1); + select_one_func +----------------- + +(1 row) + +CALL select_one_proc(1); +SET plan_cache_mode TO force_custom_plan; +SELECT select_one_func(1); + select_one_func +----------------- + +(1 row) + +CALL select_one_proc(1); +SELECT calls, generic_plan_calls, custom_plan_calls, toplevel, query FROM pg_stat_statements + ORDER BY query COLLATE "C"; + calls | generic_plan_calls | custom_plan_calls | toplevel | query 
+-------+--------------------+-------------------+----------+---------------------------------------------------- + 2 | 0 | 0 | t | CALL select_one_proc($1) + 4 | 2 | 2 | f | SELECT $1 + 1 | 0 | 0 | t | SELECT pg_stat_statements_reset() IS NOT NULL AS t + 2 | 0 | 0 | t | SELECT select_one_func($1) + 2 | 0 | 0 | t | SET plan_cache_mode TO $1 +(5 rows) + +-- +-- EXPLAIN [ANALYZE] EXECUTE + functions/procedures +-- +SET pg_stat_statements.track = 'all'; +SELECT pg_stat_statements_reset() IS NOT NULL AS t; + t +--- + t +(1 row) + +SET plan_cache_mode TO force_generic_plan; +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT select_one_func(1); + QUERY PLAN +----------------------------------- + Result (actual rows=1.00 loops=1) +(1 row) + +EXPLAIN (COSTS OFF) SELECT select_one_func(1); + QUERY PLAN +------------ + Result +(1 row) + +CALL select_one_proc(1); +SET plan_cache_mode TO force_custom_plan; +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT select_one_func(1); + QUERY PLAN +----------------------------------- + Result (actual rows=1.00 loops=1) +(1 row) + +EXPLAIN (COSTS OFF) SELECT select_one_func(1); + QUERY PLAN +------------ + Result +(1 row) + +CALL select_one_proc(1); +SELECT calls, generic_plan_calls, custom_plan_calls, toplevel, query FROM pg_stat_statements + ORDER BY query COLLATE "C", toplevel; + calls | generic_plan_calls | custom_plan_calls | toplevel | query +-------+--------------------+-------------------+----------+------------------------------------------------------------------------------------------------ + 2 | 0 | 0 | t | CALL select_one_proc($1) + 2 | 0 | 0 | t | EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT select_one_func($1) + 4 | 0 | 0 | f | EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT select_one_func($1); + 2 | 0 | 0 | t | EXPLAIN (COSTS OFF) SELECT select_one_func($1) + 4 | 2 | 2 | f | SELECT $1 + 1 | 0 | 0 | t | SELECT pg_stat_statements_reset() IS NOT NULL AS t + 2 | 0 | 0 | t | SET plan_cache_mode TO $1 +(7 rows) + +RESET pg_stat_statements.track; +-- +-- Cleanup +-- +DROP FUNCTION select_one_func(int); +DROP PROCEDURE select_one_proc(int); diff --git a/contrib/pg_stat_statements/meson.build b/contrib/pg_stat_statements/meson.build index 01a6cbdcf6139..7b8bfbb1de78c 100644 --- a/contrib/pg_stat_statements/meson.build +++ b/contrib/pg_stat_statements/meson.build @@ -21,6 +21,7 @@ contrib_targets += pg_stat_statements install_data( 'pg_stat_statements.control', 'pg_stat_statements--1.4.sql', + 'pg_stat_statements--1.12--1.13.sql', 'pg_stat_statements--1.11--1.12.sql', 'pg_stat_statements--1.10--1.11.sql', 'pg_stat_statements--1.9--1.10.sql', @@ -54,6 +55,7 @@ tests += { 'privileges', 'extended', 'parallel', + 'plancache', 'cleanup', 'oldextversions', 'squashing', diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.12--1.13.sql b/contrib/pg_stat_statements/pg_stat_statements--1.12--1.13.sql new file mode 100644 index 0000000000000..2f0eaf14ec34d --- /dev/null +++ b/contrib/pg_stat_statements/pg_stat_statements--1.12--1.13.sql @@ -0,0 +1,78 @@ +/* contrib/pg_stat_statements/pg_stat_statements--1.12--1.13.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION pg_stat_statements UPDATE TO '1.13'" to load this file. 
\quit + +/* First we have to remove them from the extension */ +ALTER EXTENSION pg_stat_statements DROP VIEW pg_stat_statements; +ALTER EXTENSION pg_stat_statements DROP FUNCTION pg_stat_statements(boolean); + +/* Then we can drop them */ +DROP VIEW pg_stat_statements; +DROP FUNCTION pg_stat_statements(boolean); + +/* Now redefine */ +CREATE FUNCTION pg_stat_statements(IN showtext boolean, + OUT userid oid, + OUT dbid oid, + OUT toplevel bool, + OUT queryid bigint, + OUT query text, + OUT plans int8, + OUT total_plan_time float8, + OUT min_plan_time float8, + OUT max_plan_time float8, + OUT mean_plan_time float8, + OUT stddev_plan_time float8, + OUT calls int8, + OUT total_exec_time float8, + OUT min_exec_time float8, + OUT max_exec_time float8, + OUT mean_exec_time float8, + OUT stddev_exec_time float8, + OUT rows int8, + OUT shared_blks_hit int8, + OUT shared_blks_read int8, + OUT shared_blks_dirtied int8, + OUT shared_blks_written int8, + OUT local_blks_hit int8, + OUT local_blks_read int8, + OUT local_blks_dirtied int8, + OUT local_blks_written int8, + OUT temp_blks_read int8, + OUT temp_blks_written int8, + OUT shared_blk_read_time float8, + OUT shared_blk_write_time float8, + OUT local_blk_read_time float8, + OUT local_blk_write_time float8, + OUT temp_blk_read_time float8, + OUT temp_blk_write_time float8, + OUT wal_records int8, + OUT wal_fpi int8, + OUT wal_bytes numeric, + OUT wal_buffers_full int8, + OUT jit_functions int8, + OUT jit_generation_time float8, + OUT jit_inlining_count int8, + OUT jit_inlining_time float8, + OUT jit_optimization_count int8, + OUT jit_optimization_time float8, + OUT jit_emission_count int8, + OUT jit_emission_time float8, + OUT jit_deform_count int8, + OUT jit_deform_time float8, + OUT parallel_workers_to_launch int8, + OUT parallel_workers_launched int8, + OUT generic_plan_calls int8, + OUT custom_plan_calls int8, + OUT stats_since timestamp with time zone, + OUT minmax_stats_since timestamp with time zone +) +RETURNS SETOF record +AS 'MODULE_PATHNAME', 'pg_stat_statements_1_13' +LANGUAGE C STRICT VOLATILE PARALLEL SAFE; + +CREATE VIEW pg_stat_statements AS + SELECT * FROM pg_stat_statements(true); + +GRANT SELECT ON pg_stat_statements TO PUBLIC; diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index e7857f81ec057..1cb368c8590ba 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -85,7 +85,7 @@ PG_MODULE_MAGIC_EXT( #define PGSS_TEXT_FILE PG_STAT_TMP_DIR "/pgss_query_texts.stat" /* Magic number identifying the stats file format */ -static const uint32 PGSS_FILE_HEADER = 0x20220408; +static const uint32 PGSS_FILE_HEADER = 0x20250731; /* PostgreSQL major version number, changes in which invalidate all entries */ static const uint32 PGSS_PG_MAJOR_VERSION = PG_VERSION_NUM / 100; @@ -114,6 +114,7 @@ typedef enum pgssVersion PGSS_V1_10, PGSS_V1_11, PGSS_V1_12, + PGSS_V1_13, } pgssVersion; typedef enum pgssStoreKind @@ -210,6 +211,8 @@ typedef struct Counters * to be launched */ int64 parallel_workers_launched; /* # of parallel workers actually * launched */ + int64 generic_plan_calls; /* number of calls using a generic plan */ + int64 custom_plan_calls; /* number of calls using a custom plan */ } Counters; /* @@ -323,6 +326,7 @@ PG_FUNCTION_INFO_V1(pg_stat_statements_1_9); PG_FUNCTION_INFO_V1(pg_stat_statements_1_10); PG_FUNCTION_INFO_V1(pg_stat_statements_1_11); PG_FUNCTION_INFO_V1(pg_stat_statements_1_12); 
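A note on the PGSS_FILE_HEADER bump a few hunks above (0x20220408 to 0x20250731): the magic is date-stamped, and changing it is what invalidates stats files written with the old Counters layout, which has now grown the two plan-cache fields. A sketch of the header validation this relies on, modeled loosely on pgss_shmem_startup() with the error paths reduced to a boolean; the function name is illustrative:

/* Returns true only if the saved stats file matches this binary's format. */
static bool
stats_file_is_current(FILE *file)
{
	uint32		header;
	uint32		pgver;

	if (fread(&header, sizeof(uint32), 1, file) != 1 ||
		fread(&pgver, sizeof(uint32), 1, file) != 1)
		return false;			/* short read: treat the file as unusable */

	/* Any layout change gets a fresh magic, so stale files are discarded. */
	return header == PGSS_FILE_HEADER &&
		pgver == PGSS_PG_MAJOR_VERSION;
}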
+PG_FUNCTION_INFO_V1(pg_stat_statements_1_13); PG_FUNCTION_INFO_V1(pg_stat_statements); PG_FUNCTION_INFO_V1(pg_stat_statements_info); @@ -355,7 +359,8 @@ static void pgss_store(const char *query, int64 queryId, const struct JitInstrumentation *jitusage, JumbleState *jstate, int parallel_workers_to_launch, - int parallel_workers_launched); + int parallel_workers_launched, + PlannedStmtOrigin planOrigin); static void pg_stat_statements_internal(FunctionCallInfo fcinfo, pgssVersion api_version, bool showtext); @@ -877,7 +882,8 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate) NULL, jstate, 0, - 0); + 0, + PLAN_STMT_UNKNOWN); } /* @@ -957,7 +963,8 @@ pgss_planner(Query *parse, NULL, NULL, 0, - 0); + 0, + result->planOrigin); } else { @@ -1091,7 +1098,8 @@ pgss_ExecutorEnd(QueryDesc *queryDesc) queryDesc->estate->es_jit ? &queryDesc->estate->es_jit->instr : NULL, NULL, queryDesc->estate->es_parallel_workers_to_launch, - queryDesc->estate->es_parallel_workers_launched); + queryDesc->estate->es_parallel_workers_launched, + queryDesc->plannedstmt->planOrigin); } if (prev_ExecutorEnd) @@ -1224,7 +1232,8 @@ pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, NULL, NULL, 0, - 0); + 0, + pstmt->planOrigin); } else { @@ -1287,7 +1296,8 @@ pgss_store(const char *query, int64 queryId, const struct JitInstrumentation *jitusage, JumbleState *jstate, int parallel_workers_to_launch, - int parallel_workers_launched) + int parallel_workers_launched, + PlannedStmtOrigin planOrigin) { pgssHashKey key; pgssEntry *entry; @@ -1495,6 +1505,12 @@ pgss_store(const char *query, int64 queryId, entry->counters.parallel_workers_to_launch += parallel_workers_to_launch; entry->counters.parallel_workers_launched += parallel_workers_launched; + /* plan cache counters */ + if (planOrigin == PLAN_STMT_CACHE_GENERIC) + entry->counters.generic_plan_calls++; + else if (planOrigin == PLAN_STMT_CACHE_CUSTOM) + entry->counters.custom_plan_calls++; + SpinLockRelease(&entry->mutex); } @@ -1562,7 +1578,8 @@ pg_stat_statements_reset(PG_FUNCTION_ARGS) #define PG_STAT_STATEMENTS_COLS_V1_10 43 #define PG_STAT_STATEMENTS_COLS_V1_11 49 #define PG_STAT_STATEMENTS_COLS_V1_12 52 -#define PG_STAT_STATEMENTS_COLS 52 /* maximum of above */ +#define PG_STAT_STATEMENTS_COLS_V1_13 54 +#define PG_STAT_STATEMENTS_COLS 54 /* maximum of above */ /* * Retrieve statement statistics. @@ -1574,6 +1591,16 @@ pg_stat_statements_reset(PG_FUNCTION_ARGS) * expected API version is identified by embedding it in the C name of the * function. Unfortunately we weren't bright enough to do that for 1.1. 
*/ +Datum +pg_stat_statements_1_13(PG_FUNCTION_ARGS) +{ + bool showtext = PG_GETARG_BOOL(0); + + pg_stat_statements_internal(fcinfo, PGSS_V1_13, showtext); + + return (Datum) 0; +} + Datum pg_stat_statements_1_12(PG_FUNCTION_ARGS) { @@ -1732,6 +1759,10 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo, if (api_version != PGSS_V1_12) elog(ERROR, "incorrect number of output arguments"); break; + case PG_STAT_STATEMENTS_COLS_V1_13: + if (api_version != PGSS_V1_13) + elog(ERROR, "incorrect number of output arguments"); + break; default: elog(ERROR, "incorrect number of output arguments"); } @@ -1984,6 +2015,11 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo, values[i++] = Int64GetDatumFast(tmp.parallel_workers_to_launch); values[i++] = Int64GetDatumFast(tmp.parallel_workers_launched); } + if (api_version >= PGSS_V1_13) + { + values[i++] = Int64GetDatumFast(tmp.generic_plan_calls); + values[i++] = Int64GetDatumFast(tmp.custom_plan_calls); + } if (api_version >= PGSS_V1_11) { values[i++] = TimestampTzGetDatum(stats_since); @@ -1999,6 +2035,7 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo, api_version == PGSS_V1_10 ? PG_STAT_STATEMENTS_COLS_V1_10 : api_version == PGSS_V1_11 ? PG_STAT_STATEMENTS_COLS_V1_11 : api_version == PGSS_V1_12 ? PG_STAT_STATEMENTS_COLS_V1_12 : + api_version == PGSS_V1_13 ? PG_STAT_STATEMENTS_COLS_V1_13 : -1 /* fail if you forget to update this assert */ )); tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); @@ -2676,8 +2713,8 @@ entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only) HASH_SEQ_STATUS hash_seq; pgssEntry *entry; FILE *qfile; - long num_entries; - long num_remove = 0; + int64 num_entries; + int64 num_remove = 0; pgssHashKey key; TimestampTz stats_reset; diff --git a/contrib/pg_stat_statements/pg_stat_statements.control b/contrib/pg_stat_statements/pg_stat_statements.control index d45ebc12e3605..2eee0ceffa894 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.control +++ b/contrib/pg_stat_statements/pg_stat_statements.control @@ -1,5 +1,5 @@ # pg_stat_statements extension comment = 'track planning and execution statistics of all SQL statements executed' -default_version = '1.12' +default_version = '1.13' module_pathname = '$libdir/pg_stat_statements' relocatable = true diff --git a/contrib/pg_stat_statements/sql/oldextversions.sql b/contrib/pg_stat_statements/sql/oldextversions.sql index 13b8ca28586d1..e416efe9ffbee 100644 --- a/contrib/pg_stat_statements/sql/oldextversions.sql +++ b/contrib/pg_stat_statements/sql/oldextversions.sql @@ -63,4 +63,9 @@ AlTER EXTENSION pg_stat_statements UPDATE TO '1.12'; \d pg_stat_statements SELECT count(*) > 0 AS has_data FROM pg_stat_statements; +-- New functions and views for pg_stat_statements in 1.13 +AlTER EXTENSION pg_stat_statements UPDATE TO '1.13'; +\d pg_stat_statements +SELECT count(*) > 0 AS has_data FROM pg_stat_statements; + DROP EXTENSION pg_stat_statements; diff --git a/contrib/pg_stat_statements/sql/plancache.sql b/contrib/pg_stat_statements/sql/plancache.sql new file mode 100644 index 0000000000000..160ced7add368 --- /dev/null +++ b/contrib/pg_stat_statements/sql/plancache.sql @@ -0,0 +1,94 @@ +-- +-- Tests with plan cache +-- + +-- Setup +CREATE OR REPLACE FUNCTION select_one_func(int) RETURNS VOID AS $$ +DECLARE + ret INT; +BEGIN + SELECT $1 INTO ret; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE PROCEDURE select_one_proc(int) AS $$ +DECLARE + ret INT; +BEGIN + SELECT $1 INTO ret; +END; +$$ LANGUAGE plpgsql; + +-- Prepared statements +SELECT 
pg_stat_statements_reset() IS NOT NULL AS t; +PREPARE p1 AS SELECT $1 AS a; +SET plan_cache_mode TO force_generic_plan; +EXECUTE p1(1); +SET plan_cache_mode TO force_custom_plan; +EXECUTE p1(1); +SELECT calls, generic_plan_calls, custom_plan_calls, query FROM pg_stat_statements + ORDER BY query COLLATE "C"; +DEALLOCATE p1; + +-- Extended query protocol +SELECT pg_stat_statements_reset() IS NOT NULL AS t; +SELECT $1 AS a \parse p1 +SET plan_cache_mode TO force_generic_plan; +\bind_named p1 1 +; +SET plan_cache_mode TO force_custom_plan; +\bind_named p1 1 +; +SELECT calls, generic_plan_calls, custom_plan_calls, query FROM pg_stat_statements + ORDER BY query COLLATE "C"; +\close_prepared p1 + +-- EXPLAIN [ANALYZE] EXECUTE +SET pg_stat_statements.track = 'all'; +SELECT pg_stat_statements_reset() IS NOT NULL AS t; +PREPARE p1 AS SELECT $1; +SET plan_cache_mode TO force_generic_plan; +EXPLAIN (COSTS OFF) EXECUTE p1(1); +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) EXECUTE p1(1); +SET plan_cache_mode TO force_custom_plan; +EXPLAIN (COSTS OFF) EXECUTE p1(1); +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) EXECUTE p1(1); +SELECT calls, generic_plan_calls, custom_plan_calls, toplevel, query FROM pg_stat_statements + ORDER BY query COLLATE "C"; +RESET pg_stat_statements.track; +DEALLOCATE p1; + +-- Functions/procedures +SET pg_stat_statements.track = 'all'; +SELECT pg_stat_statements_reset() IS NOT NULL AS t; +SET plan_cache_mode TO force_generic_plan; +SELECT select_one_func(1); +CALL select_one_proc(1); +SET plan_cache_mode TO force_custom_plan; +SELECT select_one_func(1); +CALL select_one_proc(1); +SELECT calls, generic_plan_calls, custom_plan_calls, toplevel, query FROM pg_stat_statements + ORDER BY query COLLATE "C"; + +-- +-- EXPLAIN [ANALYZE] EXECUTE + functions/procedures +-- +SET pg_stat_statements.track = 'all'; +SELECT pg_stat_statements_reset() IS NOT NULL AS t; +SET plan_cache_mode TO force_generic_plan; +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT select_one_func(1); +EXPLAIN (COSTS OFF) SELECT select_one_func(1); +CALL select_one_proc(1); +SET plan_cache_mode TO force_custom_plan; +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT select_one_func(1); +EXPLAIN (COSTS OFF) SELECT select_one_func(1); +CALL select_one_proc(1); +SELECT calls, generic_plan_calls, custom_plan_calls, toplevel, query FROM pg_stat_statements + ORDER BY query COLLATE "C", toplevel; + +RESET pg_stat_statements.track; + +-- +-- Cleanup +-- +DROP FUNCTION select_one_func(int); +DROP PROCEDURE select_one_proc(int); diff --git a/contrib/pg_trgm/expected/pg_trgm.out b/contrib/pg_trgm/expected/pg_trgm.out index 0b70d9de25624..04da98170ab15 100644 --- a/contrib/pg_trgm/expected/pg_trgm.out +++ b/contrib/pg_trgm/expected/pg_trgm.out @@ -4693,6 +4693,23 @@ select count(*) from test_trgm where t like '%99%' and t like '%qw%'; 19 (1 row) +explain (costs off) +select count(*) from test_trgm where t %> '' and t %> '%qwerty%'; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on test_trgm + Recheck Cond: ((t %> ''::text) AND (t %> '%qwerty%'::text)) + -> Bitmap Index Scan on trgm_idx + Index Cond: ((t %> ''::text) AND (t %> '%qwerty%'::text)) +(5 rows) + +select count(*) from test_trgm where t %> '' and t %> '%qwerty%'; + count +------- + 0 +(1 row) + -- ensure that pending-list items are handled correctly, too create temp table t_test_trgm(t text COLLATE "C"); 
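The extended query protocol tests above drive the plan cache through psql's \parse and \bind_named metacommands; over libpq the same exchange is a named prepare followed by an execute, which is what gives the statement a cached plan for the new generic_plan_calls/custom_plan_calls counters to classify. A hedged sketch, assuming conn is an established PGconn:

/* libpq equivalent of: SELECT $1 AS a \parse p1 ... \bind_named p1 1 */
const char *params[1] = {"1"};
PGresult   *res;

res = PQprepare(conn, "p1", "SELECT $1::int AS a", 1, NULL);
PQclear(res);

res = PQexecPrepared(conn, "p1", 1, params, NULL, NULL, 0);
/* ... check PQresultStatus(res) and read the returned value ... */
PQclear(res);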
create index t_trgm_idx on t_test_trgm using gin (t gin_trgm_ops); @@ -4731,6 +4748,23 @@ select count(*) from t_test_trgm where t like '%99%' and t like '%qw%'; 1 (1 row) +explain (costs off) +select count(*) from t_test_trgm where t %> '' and t %> '%qwerty%'; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on t_test_trgm + Recheck Cond: ((t %> ''::text) AND (t %> '%qwerty%'::text)) + -> Bitmap Index Scan on t_trgm_idx + Index Cond: ((t %> ''::text) AND (t %> '%qwerty%'::text)) +(5 rows) + +select count(*) from t_test_trgm where t %> '' and t %> '%qwerty%'; + count +------- + 0 +(1 row) + -- run the same queries with sequential scan to check the results set enable_bitmapscan=off; set enable_seqscan=on; @@ -4746,6 +4780,12 @@ select count(*) from test_trgm where t like '%99%' and t like '%qw%'; 19 (1 row) +select count(*) from test_trgm where t %> '' and t %> '%qwerty%'; + count +------- + 0 +(1 row) + select count(*) from t_test_trgm where t like '%99%' and t like '%qwerty%'; count ------- @@ -4758,6 +4798,12 @@ select count(*) from t_test_trgm where t like '%99%' and t like '%qw%'; 1 (1 row) +select count(*) from t_test_trgm where t %> '' and t %> '%qwerty%'; + count +------- + 0 +(1 row) + reset enable_bitmapscan; create table test2(t text COLLATE "C"); insert into test2 values ('abcdef'); diff --git a/contrib/pg_trgm/sql/pg_trgm.sql b/contrib/pg_trgm/sql/pg_trgm.sql index 340c9891899f0..44debced6d581 100644 --- a/contrib/pg_trgm/sql/pg_trgm.sql +++ b/contrib/pg_trgm/sql/pg_trgm.sql @@ -80,6 +80,9 @@ select count(*) from test_trgm where t like '%99%' and t like '%qwerty%'; explain (costs off) select count(*) from test_trgm where t like '%99%' and t like '%qw%'; select count(*) from test_trgm where t like '%99%' and t like '%qw%'; +explain (costs off) +select count(*) from test_trgm where t %> '' and t %> '%qwerty%'; +select count(*) from test_trgm where t %> '' and t %> '%qwerty%'; -- ensure that pending-list items are handled correctly, too create temp table t_test_trgm(t text COLLATE "C"); create index t_trgm_idx on t_test_trgm using gin (t gin_trgm_ops); @@ -90,14 +93,19 @@ select count(*) from t_test_trgm where t like '%99%' and t like '%qwerty%'; explain (costs off) select count(*) from t_test_trgm where t like '%99%' and t like '%qw%'; select count(*) from t_test_trgm where t like '%99%' and t like '%qw%'; +explain (costs off) +select count(*) from t_test_trgm where t %> '' and t %> '%qwerty%'; +select count(*) from t_test_trgm where t %> '' and t %> '%qwerty%'; -- run the same queries with sequential scan to check the results set enable_bitmapscan=off; set enable_seqscan=on; select count(*) from test_trgm where t like '%99%' and t like '%qwerty%'; select count(*) from test_trgm where t like '%99%' and t like '%qw%'; +select count(*) from test_trgm where t %> '' and t %> '%qwerty%'; select count(*) from t_test_trgm where t like '%99%' and t like '%qwerty%'; select count(*) from t_test_trgm where t like '%99%' and t like '%qw%'; +select count(*) from t_test_trgm where t %> '' and t %> '%qwerty%'; reset enable_bitmapscan; create table test2(t text COLLATE "C"); diff --git a/contrib/pgrowlocks/pgrowlocks.c b/contrib/pgrowlocks/pgrowlocks.c index b75d80fa7a9c2..f88269332b6be 100644 --- a/contrib/pgrowlocks/pgrowlocks.c +++ b/contrib/pgrowlocks/pgrowlocks.c @@ -141,8 +141,8 @@ pgrowlocks(PG_FUNCTION_ARGS) */ if (htsu == TM_BeingModified) { - values[Atnum_tid] = (char *) DirectFunctionCall1(tidout, - 
PointerGetDatum(&tuple->t_self)); + values[Atnum_tid] = DatumGetCString(DirectFunctionCall1(tidout, + PointerGetDatum(&tuple->t_self))); values[Atnum_xmax] = palloc(NCHARS * sizeof(char)); snprintf(values[Atnum_xmax], NCHARS, "%u", xmax); diff --git a/contrib/pgstattuple/pgstatindex.c b/contrib/pgstattuple/pgstatindex.c index 4b9d76ec4e4df..40823d54fcac0 100644 --- a/contrib/pgstattuple/pgstatindex.c +++ b/contrib/pgstattuple/pgstatindex.c @@ -647,7 +647,7 @@ pgstathashindex(PG_FUNCTION_ARGS) buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy); LockBuffer(buf, BUFFER_LOCK_SHARE); - page = (Page) BufferGetPage(buf); + page = BufferGetPage(buf); if (PageIsNew(page)) stats.unused_pages++; diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c index 0d9c2b0b65369..b5de68b7232d2 100644 --- a/contrib/pgstattuple/pgstattuple.c +++ b/contrib/pgstattuple/pgstattuple.c @@ -378,7 +378,7 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo) buffer = ReadBufferExtended(rel, MAIN_FORKNUM, block, RBM_NORMAL, hscan->rs_strategy); LockBuffer(buffer, BUFFER_LOCK_SHARE); - stat.free_space += PageGetExactFreeSpace((Page) BufferGetPage(buffer)); + stat.free_space += PageGetExactFreeSpace(BufferGetPage(buffer)); UnlockReleaseBuffer(buffer); block++; } @@ -391,7 +391,7 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo) buffer = ReadBufferExtended(rel, MAIN_FORKNUM, block, RBM_NORMAL, hscan->rs_strategy); LockBuffer(buffer, BUFFER_LOCK_SHARE); - stat.free_space += PageGetExactFreeSpace((Page) BufferGetPage(buffer)); + stat.free_space += PageGetExactFreeSpace(BufferGetPage(buffer)); UnlockReleaseBuffer(buffer); block++; } diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c index a33843fcf8531..4fbb6c182b82c 100644 --- a/contrib/postgres_fdw/connection.c +++ b/contrib/postgres_fdw/connection.c @@ -142,6 +142,8 @@ static void do_sql_command_begin(PGconn *conn, const char *sql); static void do_sql_command_end(PGconn *conn, const char *sql, bool consume_input); static void begin_remote_xact(ConnCacheEntry *entry); +static void pgfdw_report_internal(int elevel, PGresult *res, PGconn *conn, + const char *sql); static void pgfdw_xact_callback(XactEvent event, void *arg); static void pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid, @@ -462,7 +464,7 @@ pgfdw_security_check(const char **keywords, const char **values, UserMapping *us * assume that UseScramPassthrough is also true since SCRAM options are * only set when UseScramPassthrough is enabled. */ - if (MyProcPort->has_scram_keys && pgfdw_has_required_scram_options(keywords, values)) + if (MyProcPort != NULL && MyProcPort->has_scram_keys && pgfdw_has_required_scram_options(keywords, values)) return; ereport(ERROR, @@ -568,7 +570,7 @@ connect_pg_server(ForeignServer *server, UserMapping *user) n++; /* Add required SCRAM pass-through connection options if it's enabled. */ - if (MyProcPort->has_scram_keys && UseScramPassthrough(server, user)) + if (MyProcPort != NULL && MyProcPort->has_scram_keys && UseScramPassthrough(server, user)) { int len; int encoded_len; @@ -746,7 +748,7 @@ check_conn_params(const char **keywords, const char **values, UserMapping *user) * assume that UseScramPassthrough is also true since SCRAM options are * only set when UseScramPassthrough is enabled. 
*/ - if (MyProcPort->has_scram_keys && pgfdw_has_required_scram_options(keywords, values)) + if (MyProcPort != NULL && MyProcPort->has_scram_keys && pgfdw_has_required_scram_options(keywords, values)) return; ereport(ERROR, @@ -815,7 +817,7 @@ static void do_sql_command_begin(PGconn *conn, const char *sql) { if (!PQsendQuery(conn, sql)) - pgfdw_report_error(ERROR, NULL, conn, sql); + pgfdw_report_error(NULL, conn, sql); } static void @@ -830,10 +832,10 @@ do_sql_command_end(PGconn *conn, const char *sql, bool consume_input) * would be large compared to the overhead of PQconsumeInput.) */ if (consume_input && !PQconsumeInput(conn)) - pgfdw_report_error(ERROR, NULL, conn, sql); + pgfdw_report_error(NULL, conn, sql); res = pgfdw_get_result(conn); if (PQresultStatus(res) != PGRES_COMMAND_OK) - pgfdw_report_error(ERROR, res, conn, sql); + pgfdw_report_error(res, conn, sql); PQclear(res); } @@ -966,7 +968,10 @@ pgfdw_get_result(PGconn *conn) /* * Report an error we got from the remote server. * - * elevel: error level to use (typically ERROR, but might be less) + * Callers should use pgfdw_report_error() to throw an error, or use + * pgfdw_report() for lesser message levels. (We make this distinction + * so that pgfdw_report_error() can be marked noreturn.) + * * res: PGresult containing the error (might be NULL) * conn: connection we did the query on * sql: NULL, or text of remote command we tried to execute @@ -979,8 +984,22 @@ pgfdw_get_result(PGconn *conn) * marked with have_error = true. */ void -pgfdw_report_error(int elevel, PGresult *res, PGconn *conn, - const char *sql) +pgfdw_report_error(PGresult *res, PGconn *conn, const char *sql) +{ + pgfdw_report_internal(ERROR, res, conn, sql); + pg_unreachable(); +} + +void +pgfdw_report(int elevel, PGresult *res, PGconn *conn, const char *sql) +{ + Assert(elevel < ERROR); /* use pgfdw_report_error for that */ + pgfdw_report_internal(elevel, res, conn, sql); +} + +static void +pgfdw_report_internal(int elevel, PGresult *res, PGconn *conn, + const char *sql) { char *diag_sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE); char *message_primary = PQresultErrorField(res, PG_DIAG_MESSAGE_PRIMARY); @@ -1538,7 +1557,7 @@ pgfdw_exec_cleanup_query_begin(PGconn *conn, const char *query) */ if (!PQsendQuery(conn, query)) { - pgfdw_report_error(WARNING, NULL, conn, query); + pgfdw_report(WARNING, NULL, conn, query); return false; } @@ -1563,7 +1582,7 @@ pgfdw_exec_cleanup_query_end(PGconn *conn, const char *query, */ if (consume_input && !PQconsumeInput(conn)) { - pgfdw_report_error(WARNING, NULL, conn, query); + pgfdw_report(WARNING, NULL, conn, query); return false; } @@ -1575,7 +1594,7 @@ pgfdw_exec_cleanup_query_end(PGconn *conn, const char *query, (errmsg("could not get query result due to timeout"), errcontext("remote SQL command: %s", query))); else - pgfdw_report_error(WARNING, NULL, conn, query); + pgfdw_report(WARNING, NULL, conn, query); return false; } @@ -1583,7 +1602,7 @@ pgfdw_exec_cleanup_query_end(PGconn *conn, const char *query, /* Issue a warning if not successful. 
*/ if (PQresultStatus(result) != PGRES_COMMAND_OK) { - pgfdw_report_error(WARNING, result, conn, query); + pgfdw_report(WARNING, result, conn, query); return ignore_errors; } PQclear(result); @@ -2540,7 +2559,7 @@ pgfdw_has_required_scram_options(const char **keywords, const char **values) } } - has_scram_keys = has_scram_client_key && has_scram_server_key && MyProcPort->has_scram_keys; + has_scram_keys = has_scram_client_key && has_scram_server_key && MyProcPort != NULL && MyProcPort->has_scram_keys; return (has_scram_keys && has_require_auth); } diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out index 4b6e49a5d950d..18d727d77907a 100644 --- a/contrib/postgres_fdw/expected/postgres_fdw.out +++ b/contrib/postgres_fdw/expected/postgres_fdw.out @@ -2,23 +2,16 @@ -- create FDW objects -- =================================================================== CREATE EXTENSION postgres_fdw; +SELECT current_database() AS current_database, + current_setting('port') AS current_port +\gset CREATE SERVER testserver1 FOREIGN DATA WRAPPER postgres_fdw; -DO $d$ - BEGIN - EXECUTE $$CREATE SERVER loopback FOREIGN DATA WRAPPER postgres_fdw - OPTIONS (dbname '$$||current_database()||$$', - port '$$||current_setting('port')||$$' - )$$; - EXECUTE $$CREATE SERVER loopback2 FOREIGN DATA WRAPPER postgres_fdw - OPTIONS (dbname '$$||current_database()||$$', - port '$$||current_setting('port')||$$' - )$$; - EXECUTE $$CREATE SERVER loopback3 FOREIGN DATA WRAPPER postgres_fdw - OPTIONS (dbname '$$||current_database()||$$', - port '$$||current_setting('port')||$$' - )$$; - END; -$d$; +CREATE SERVER loopback FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (dbname :'current_database', port :'current_port'); +CREATE SERVER loopback2 FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (dbname :'current_database', port :'current_port'); +CREATE SERVER loopback3 FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (dbname :'current_database', port :'current_port'); CREATE USER MAPPING FOR public SERVER testserver1 OPTIONS (user 'value', password 'value'); CREATE USER MAPPING FOR CURRENT_USER SERVER loopback; @@ -235,12 +228,7 @@ SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work ALTER SERVER loopback OPTIONS (SET dbname 'no such database'); SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail ERROR: could not connect to server "loopback" -DO $d$ - BEGIN - EXECUTE $$ALTER SERVER loopback - OPTIONS (SET dbname '$$||current_database()||$$')$$; - END; -$d$; +ALTER SERVER loopback OPTIONS (SET dbname :'current_database'); SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work again c3 | c4 -------+------------------------------ @@ -5098,13 +5086,13 @@ SELECT ft1.c1 FROM ft1 JOIN ft2 on ft1.c1 = ft2.c1 WHERE -- =================================================================== EXPLAIN (verbose, costs off) INSERT INTO ft2 (c1,c2,c3) SELECT c1+1000,c2+100, c3 || c3 FROM ft2 LIMIT 20; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Insert on public.ft2 Remote SQL: INSERT INTO "S 1"."T 
1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) Batch Size: 1 - -> Subquery Scan on "*SELECT*" - Output: "*SELECT*"."?column?", "*SELECT*"."?column?_1", NULL::integer, "*SELECT*"."?column?_2", NULL::timestamp with time zone, NULL::timestamp without time zone, NULL::character varying(10), 'ft2 '::character(10), NULL::user_enum + -> Subquery Scan on unnamed_subquery + Output: unnamed_subquery."?column?", unnamed_subquery."?column?_1", NULL::integer, unnamed_subquery."?column?_2", NULL::timestamp with time zone, NULL::timestamp without time zone, NULL::character varying(10), 'ft2 '::character(10), NULL::user_enum -> Foreign Scan on public.ft2 ft2_1 Output: (ft2_1.c1 + 1000), (ft2_1.c2 + 100), (ft2_1.c3 || ft2_1.c3) Remote SQL: SELECT "C 1", c2, c3 FROM "S 1"."T 1" LIMIT 20::bigint @@ -8233,6 +8221,119 @@ DELETE FROM rem1; -- can't be pushed down (5 rows) DROP TRIGGER trig_row_after_delete ON rem1; +-- We are allowed to create transition-table triggers on both kinds of +-- inheritance even if they contain foreign tables as children, but currently +-- collecting transition tuples from such foreign tables is not supported. +CREATE TABLE local_tbl (a text, b int); +CREATE FOREIGN TABLE foreign_tbl (a text, b int) + SERVER loopback OPTIONS (table_name 'local_tbl'); +INSERT INTO foreign_tbl VALUES ('AAA', 42); +-- Test case for partition hierarchy +CREATE TABLE parent_tbl (a text, b int) PARTITION BY LIST (a); +ALTER TABLE parent_tbl ATTACH PARTITION foreign_tbl FOR VALUES IN ('AAA'); +CREATE TRIGGER parent_tbl_insert_trig + AFTER INSERT ON parent_tbl REFERENCING NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); +CREATE TRIGGER parent_tbl_update_trig + AFTER UPDATE ON parent_tbl REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); +CREATE TRIGGER parent_tbl_delete_trig + AFTER DELETE ON parent_tbl REFERENCING OLD TABLE AS old_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); +INSERT INTO parent_tbl VALUES ('AAA', 42); +ERROR: cannot collect transition tuples from child foreign tables +COPY parent_tbl (a, b) FROM stdin; +ERROR: cannot collect transition tuples from child foreign tables +CONTEXT: COPY parent_tbl, line 1: "AAA 42" +ALTER SERVER loopback OPTIONS (ADD batch_size '10'); +INSERT INTO parent_tbl VALUES ('AAA', 42); +ERROR: cannot collect transition tuples from child foreign tables +COPY parent_tbl (a, b) FROM stdin; +ERROR: cannot collect transition tuples from child foreign tables +CONTEXT: COPY parent_tbl, line 1: "AAA 42" +ALTER SERVER loopback OPTIONS (DROP batch_size); +EXPLAIN (VERBOSE, COSTS OFF) +UPDATE parent_tbl SET b = b + 1; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Update on public.parent_tbl + Foreign Update on public.foreign_tbl parent_tbl_1 + Remote SQL: UPDATE public.local_tbl SET b = $2 WHERE ctid = $1 + -> Foreign Scan on public.foreign_tbl parent_tbl_1 + Output: (parent_tbl_1.b + 1), parent_tbl_1.tableoid, parent_tbl_1.ctid, parent_tbl_1.* + Remote SQL: SELECT a, b, ctid FROM public.local_tbl FOR UPDATE +(6 rows) + +UPDATE parent_tbl SET b = b + 1; +ERROR: cannot collect transition tuples from child foreign tables +EXPLAIN (VERBOSE, COSTS OFF) +DELETE FROM parent_tbl; + QUERY PLAN +------------------------------------------------------------------ + Delete on public.parent_tbl + Foreign Delete on public.foreign_tbl parent_tbl_1 + Remote SQL: DELETE FROM public.local_tbl 
WHERE ctid = $1 + -> Foreign Scan on public.foreign_tbl parent_tbl_1 + Output: parent_tbl_1.tableoid, parent_tbl_1.ctid + Remote SQL: SELECT ctid FROM public.local_tbl FOR UPDATE +(6 rows) + +DELETE FROM parent_tbl; +ERROR: cannot collect transition tuples from child foreign tables +ALTER TABLE parent_tbl DETACH PARTITION foreign_tbl; +DROP TABLE parent_tbl; +-- Test case for non-partition hierarchy +CREATE TABLE parent_tbl (a text, b int); +ALTER FOREIGN TABLE foreign_tbl INHERIT parent_tbl; +CREATE TRIGGER parent_tbl_update_trig + AFTER UPDATE ON parent_tbl REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); +CREATE TRIGGER parent_tbl_delete_trig + AFTER DELETE ON parent_tbl REFERENCING OLD TABLE AS old_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); +EXPLAIN (VERBOSE, COSTS OFF) +UPDATE parent_tbl SET b = b + 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Update on public.parent_tbl + Update on public.parent_tbl parent_tbl_1 + Foreign Update on public.foreign_tbl parent_tbl_2 + Remote SQL: UPDATE public.local_tbl SET b = $2 WHERE ctid = $1 + -> Result + Output: (parent_tbl.b + 1), parent_tbl.tableoid, parent_tbl.ctid, (NULL::record) + -> Append + -> Seq Scan on public.parent_tbl parent_tbl_1 + Output: parent_tbl_1.b, parent_tbl_1.tableoid, parent_tbl_1.ctid, NULL::record + -> Foreign Scan on public.foreign_tbl parent_tbl_2 + Output: parent_tbl_2.b, parent_tbl_2.tableoid, parent_tbl_2.ctid, parent_tbl_2.* + Remote SQL: SELECT a, b, ctid FROM public.local_tbl FOR UPDATE +(12 rows) + +UPDATE parent_tbl SET b = b + 1; +ERROR: cannot collect transition tuples from child foreign tables +EXPLAIN (VERBOSE, COSTS OFF) +DELETE FROM parent_tbl; + QUERY PLAN +------------------------------------------------------------------------ + Delete on public.parent_tbl + Delete on public.parent_tbl parent_tbl_1 + Foreign Delete on public.foreign_tbl parent_tbl_2 + Remote SQL: DELETE FROM public.local_tbl WHERE ctid = $1 + -> Append + -> Seq Scan on public.parent_tbl parent_tbl_1 + Output: parent_tbl_1.tableoid, parent_tbl_1.ctid + -> Foreign Scan on public.foreign_tbl parent_tbl_2 + Output: parent_tbl_2.tableoid, parent_tbl_2.ctid + Remote SQL: SELECT ctid FROM public.local_tbl FOR UPDATE +(10 rows) + +DELETE FROM parent_tbl; +ERROR: cannot collect transition tuples from child foreign tables +ALTER FOREIGN TABLE foreign_tbl NO INHERIT parent_tbl; +DROP TABLE parent_tbl; +-- Cleanup +DROP FOREIGN TABLE foreign_tbl; +DROP TABLE local_tbl; -- =================================================================== -- test inheritance features -- =================================================================== @@ -10530,14 +10631,8 @@ SHOW is_superuser; (1 row) -- This will be OK, we can create the FDW -DO $d$ - BEGIN - EXECUTE $$CREATE SERVER loopback_nopw FOREIGN DATA WRAPPER postgres_fdw - OPTIONS (dbname '$$||current_database()||$$', - port '$$||current_setting('port')||$$' - )$$; - END; -$d$; +CREATE SERVER loopback_nopw FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (dbname :'current_database', port :'current_port'); -- But creation of user mappings for non-superusers should fail CREATE USER MAPPING FOR public SERVER loopback_nopw; CREATE USER MAPPING FOR CURRENT_USER SERVER loopback_nopw; @@ -12536,7 +12631,7 @@ ALTER SERVER loopback2 OPTIONS (DROP parallel_abort); -- =================================================================== CREATE TABLE analyze_table 
(id int, a text, b bigint); CREATE FOREIGN TABLE analyze_ftable (id int, a text, b bigint) - SERVER loopback OPTIONS (table_name 'analyze_rtable1'); + SERVER loopback OPTIONS (table_name 'analyze_table'); INSERT INTO analyze_table (SELECT x FROM generate_series(1,1000) x); ANALYZE analyze_table; SET default_statistics_target = 10; @@ -12544,15 +12639,15 @@ ANALYZE analyze_table; ALTER SERVER loopback OPTIONS (analyze_sampling 'invalid'); ERROR: invalid value for string option "analyze_sampling": invalid ALTER SERVER loopback OPTIONS (analyze_sampling 'auto'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; ALTER SERVER loopback OPTIONS (SET analyze_sampling 'system'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; ALTER SERVER loopback OPTIONS (SET analyze_sampling 'bernoulli'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; ALTER SERVER loopback OPTIONS (SET analyze_sampling 'random'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; ALTER SERVER loopback OPTIONS (SET analyze_sampling 'off'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; -- cleanup DROP FOREIGN TABLE analyze_ftable; DROP TABLE analyze_table; diff --git a/contrib/postgres_fdw/option.c b/contrib/postgres_fdw/option.c index d6fa89bad9399..04788b7e8b35f 100644 --- a/contrib/postgres_fdw/option.c +++ b/contrib/postgres_fdw/option.c @@ -522,7 +522,7 @@ process_pgfdw_appname(const char *appname) appendStringInfoString(&buf, application_name); break; case 'c': - appendStringInfo(&buf, INT64_HEX_FORMAT ".%x", MyStartTime, MyProcPid); + appendStringInfo(&buf, "%" PRIx64 ".%x", MyStartTime, MyProcPid); break; case 'C': appendStringInfoString(&buf, cluster_name); diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c index 25b287be069fa..456b267f70b5b 100644 --- a/contrib/postgres_fdw/postgres_fdw.c +++ b/contrib/postgres_fdw/postgres_fdw.c @@ -1704,7 +1704,7 @@ postgresReScanForeignScan(ForeignScanState *node) res = pgfdw_exec_query(fsstate->conn, sql, fsstate->conn_state); if (PQresultStatus(res) != PGRES_COMMAND_OK) - pgfdw_report_error(ERROR, res, fsstate->conn, sql); + pgfdw_report_error(res, fsstate->conn, sql); PQclear(res); /* Now force a fresh FETCH. */ @@ -3614,7 +3614,7 @@ get_remote_estimate(const char *sql, PGconn *conn, */ res = pgfdw_exec_query(conn, sql, NULL); if (PQresultStatus(res) != PGRES_TUPLES_OK) - pgfdw_report_error(ERROR, res, conn, sql); + pgfdw_report_error(res, conn, sql); /* * Extract cost numbers for topmost plan node. Note we search for a left @@ -3769,14 +3769,14 @@ create_cursor(ForeignScanState *node) */ if (!PQsendQueryParams(conn, buf.data, numParams, NULL, values, NULL, NULL, 0)) - pgfdw_report_error(ERROR, NULL, conn, buf.data); + pgfdw_report_error(NULL, conn, buf.data); /* * Get the result, and check for success. */ res = pgfdw_get_result(conn); if (PQresultStatus(res) != PGRES_COMMAND_OK) - pgfdw_report_error(ERROR, res, conn, fsstate->query); + pgfdw_report_error(res, conn, fsstate->query); PQclear(res); /* Mark the cursor as created, and show no tuples have been retrieved */ @@ -3823,7 +3823,7 @@ fetch_more_data(ForeignScanState *node) res = pgfdw_get_result(conn); /* On error, report the original query, not the FETCH. 
*/ if (PQresultStatus(res) != PGRES_TUPLES_OK) - pgfdw_report_error(ERROR, res, conn, fsstate->query); + pgfdw_report_error(res, conn, fsstate->query); /* Reset per-connection state */ fsstate->conn_state->pendingAreq = NULL; @@ -3839,7 +3839,7 @@ fetch_more_data(ForeignScanState *node) res = pgfdw_exec_query(conn, sql, fsstate->conn_state); /* On error, report the original query, not the FETCH. */ if (PQresultStatus(res) != PGRES_TUPLES_OK) - pgfdw_report_error(ERROR, res, conn, fsstate->query); + pgfdw_report_error(res, conn, fsstate->query); } /* Convert the data into HeapTuples */ @@ -3944,7 +3944,7 @@ close_cursor(PGconn *conn, unsigned int cursor_number, snprintf(sql, sizeof(sql), "CLOSE c%u", cursor_number); res = pgfdw_exec_query(conn, sql, conn_state); if (PQresultStatus(res) != PGRES_COMMAND_OK) - pgfdw_report_error(ERROR, res, conn, sql); + pgfdw_report_error(res, conn, sql); PQclear(res); } @@ -4152,7 +4152,7 @@ execute_foreign_modify(EState *estate, NULL, NULL, 0)) - pgfdw_report_error(ERROR, NULL, fmstate->conn, fmstate->query); + pgfdw_report_error(NULL, fmstate->conn, fmstate->query); /* * Get the result, and check for success. @@ -4160,7 +4160,7 @@ execute_foreign_modify(EState *estate, res = pgfdw_get_result(fmstate->conn); if (PQresultStatus(res) != (fmstate->has_returning ? PGRES_TUPLES_OK : PGRES_COMMAND_OK)) - pgfdw_report_error(ERROR, res, fmstate->conn, fmstate->query); + pgfdw_report_error(res, fmstate->conn, fmstate->query); /* Check number of rows affected, and fetch RETURNING tuple if any */ if (fmstate->has_returning) @@ -4219,14 +4219,14 @@ prepare_foreign_modify(PgFdwModifyState *fmstate) fmstate->query, 0, NULL)) - pgfdw_report_error(ERROR, NULL, fmstate->conn, fmstate->query); + pgfdw_report_error(NULL, fmstate->conn, fmstate->query); /* * Get the result, and check for success. */ res = pgfdw_get_result(fmstate->conn); if (PQresultStatus(res) != PGRES_COMMAND_OK) - pgfdw_report_error(ERROR, res, fmstate->conn, fmstate->query); + pgfdw_report_error(res, fmstate->conn, fmstate->query); PQclear(res); /* This action shows that the prepare has been done. */ @@ -4373,7 +4373,7 @@ deallocate_query(PgFdwModifyState *fmstate) snprintf(sql, sizeof(sql), "DEALLOCATE %s", fmstate->p_name); res = pgfdw_exec_query(fmstate->conn, sql, fmstate->conn_state); if (PQresultStatus(res) != PGRES_COMMAND_OK) - pgfdw_report_error(ERROR, res, fmstate->conn, sql); + pgfdw_report_error(res, fmstate->conn, sql); PQclear(res); pfree(fmstate->p_name); fmstate->p_name = NULL; @@ -4541,7 +4541,7 @@ execute_dml_stmt(ForeignScanState *node) */ if (!PQsendQueryParams(dmstate->conn, dmstate->query, numParams, NULL, values, NULL, NULL, 0)) - pgfdw_report_error(ERROR, NULL, dmstate->conn, dmstate->query); + pgfdw_report_error(NULL, dmstate->conn, dmstate->query); /* * Get the result, and check for success. @@ -4549,7 +4549,7 @@ execute_dml_stmt(ForeignScanState *node) dmstate->result = pgfdw_get_result(dmstate->conn); if (PQresultStatus(dmstate->result) != (dmstate->has_returning ? 
PGRES_TUPLES_OK : PGRES_COMMAND_OK)) - pgfdw_report_error(ERROR, dmstate->result, dmstate->conn, + pgfdw_report_error(dmstate->result, dmstate->conn, dmstate->query); /* @@ -4923,7 +4923,7 @@ postgresAnalyzeForeignTable(Relation relation, res = pgfdw_exec_query(conn, sql.data, NULL); if (PQresultStatus(res) != PGRES_TUPLES_OK) - pgfdw_report_error(ERROR, res, conn, sql.data); + pgfdw_report_error(res, conn, sql.data); if (PQntuples(res) != 1 || PQnfields(res) != 1) elog(ERROR, "unexpected result from deparseAnalyzeSizeSql query"); @@ -4972,7 +4972,7 @@ postgresGetAnalyzeInfoForForeignTable(Relation relation, bool *can_tablesample) res = pgfdw_exec_query(conn, sql.data, NULL); if (PQresultStatus(res) != PGRES_TUPLES_OK) - pgfdw_report_error(ERROR, res, conn, sql.data); + pgfdw_report_error(res, conn, sql.data); if (PQntuples(res) != 1 || PQnfields(res) != 2) elog(ERROR, "unexpected result from deparseAnalyzeInfoSql query"); @@ -5018,7 +5018,7 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel, int server_version_num; PgFdwSamplingMethod method = ANALYZE_SAMPLE_AUTO; /* auto is default */ double sample_frac = -1.0; - double reltuples; + double reltuples = -1.0; unsigned int cursor_number; StringInfoData sql; PGresult *res; @@ -5202,7 +5202,7 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel, res = pgfdw_exec_query(conn, sql.data, NULL); if (PQresultStatus(res) != PGRES_COMMAND_OK) - pgfdw_report_error(ERROR, res, conn, sql.data); + pgfdw_report_error(res, conn, sql.data); PQclear(res); /* @@ -5254,7 +5254,7 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel, res = pgfdw_exec_query(conn, fetch_sql, NULL); /* On error, report the original query, not the FETCH. */ if (PQresultStatus(res) != PGRES_TUPLES_OK) - pgfdw_report_error(ERROR, res, conn, sql.data); + pgfdw_report_error(res, conn, sql.data); /* Process whatever we got. */ numrows = PQntuples(res); @@ -5426,7 +5426,7 @@ postgresImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid) res = pgfdw_exec_query(conn, buf.data, NULL); if (PQresultStatus(res) != PGRES_TUPLES_OK) - pgfdw_report_error(ERROR, res, conn, buf.data); + pgfdw_report_error(res, conn, buf.data); if (PQntuples(res) != 1) ereport(ERROR, @@ -5540,7 +5540,7 @@ postgresImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid) /* Fetch the data */ res = pgfdw_exec_query(conn, buf.data, NULL); if (PQresultStatus(res) != PGRES_TUPLES_OK) - pgfdw_report_error(ERROR, res, conn, buf.data); + pgfdw_report_error(res, conn, buf.data); /* Process results */ numrows = PQntuples(res); @@ -7312,7 +7312,7 @@ postgresForeignAsyncNotify(AsyncRequest *areq) /* On error, report the original query, not the FETCH. 
*/ if (!PQconsumeInput(fsstate->conn)) - pgfdw_report_error(ERROR, NULL, fsstate->conn, fsstate->query); + pgfdw_report_error(NULL, fsstate->conn, fsstate->query); fetch_more_data(node); @@ -7411,7 +7411,7 @@ fetch_more_data_begin(AsyncRequest *areq) fsstate->fetch_size, fsstate->cursor_number); if (!PQsendQuery(fsstate->conn, sql)) - pgfdw_report_error(ERROR, NULL, fsstate->conn, fsstate->query); + pgfdw_report_error(NULL, fsstate->conn, fsstate->query); /* Remember that the request is in process */ fsstate->conn_state->pendingAreq = areq; diff --git a/contrib/postgres_fdw/postgres_fdw.h b/contrib/postgres_fdw/postgres_fdw.h index 38e1a88594131..e69735298d78f 100644 --- a/contrib/postgres_fdw/postgres_fdw.h +++ b/contrib/postgres_fdw/postgres_fdw.h @@ -166,8 +166,10 @@ extern void do_sql_command(PGconn *conn, const char *sql); extern PGresult *pgfdw_get_result(PGconn *conn); extern PGresult *pgfdw_exec_query(PGconn *conn, const char *query, PgFdwConnState *state); -extern void pgfdw_report_error(int elevel, PGresult *res, PGconn *conn, - const char *sql); +pg_noreturn extern void pgfdw_report_error(PGresult *res, PGconn *conn, + const char *sql); +extern void pgfdw_report(int elevel, PGresult *res, PGconn *conn, + const char *sql); /* in option.c */ extern int ExtractConnectionOptions(List *defelems, diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql index 31b6c685b551b..3b7da1285192c 100644 --- a/contrib/postgres_fdw/sql/postgres_fdw.sql +++ b/contrib/postgres_fdw/sql/postgres_fdw.sql @@ -4,24 +4,17 @@ CREATE EXTENSION postgres_fdw; -CREATE SERVER testserver1 FOREIGN DATA WRAPPER postgres_fdw; -DO $d$ - BEGIN - EXECUTE $$CREATE SERVER loopback FOREIGN DATA WRAPPER postgres_fdw - OPTIONS (dbname '$$||current_database()||$$', - port '$$||current_setting('port')||$$' - )$$; - EXECUTE $$CREATE SERVER loopback2 FOREIGN DATA WRAPPER postgres_fdw - OPTIONS (dbname '$$||current_database()||$$', - port '$$||current_setting('port')||$$' - )$$; - EXECUTE $$CREATE SERVER loopback3 FOREIGN DATA WRAPPER postgres_fdw - OPTIONS (dbname '$$||current_database()||$$', - port '$$||current_setting('port')||$$' - )$$; - END; -$d$; +SELECT current_database() AS current_database, + current_setting('port') AS current_port +\gset +CREATE SERVER testserver1 FOREIGN DATA WRAPPER postgres_fdw; +CREATE SERVER loopback FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (dbname :'current_database', port :'current_port'); +CREATE SERVER loopback2 FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (dbname :'current_database', port :'current_port'); +CREATE SERVER loopback3 FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (dbname :'current_database', port :'current_port'); CREATE USER MAPPING FOR public SERVER testserver1 OPTIONS (user 'value', password 'value'); CREATE USER MAPPING FOR CURRENT_USER SERVER loopback; @@ -233,12 +226,7 @@ ALTER FOREIGN TABLE ft2 ALTER COLUMN c1 OPTIONS (column_name 'C 1'); SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work ALTER SERVER loopback OPTIONS (SET dbname 'no such database'); SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail -DO $d$ - BEGIN - EXECUTE $$ALTER SERVER loopback - OPTIONS (SET dbname '$$||current_database()||$$')$$; - END; -$d$; +ALTER SERVER loopback OPTIONS (SET dbname :'current_database'); SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work again -- Test that alteration of user mapping options causes reconnection @@ -2281,6 +2269,84 @@ EXPLAIN (verbose, costs off) DELETE FROM rem1; -- can't be pushed 
down DROP TRIGGER trig_row_after_delete ON rem1; + +-- We are allowed to create transition-table triggers on both kinds of +-- inheritance even if they contain foreign tables as children, but currently +-- collecting transition tuples from such foreign tables is not supported. + +CREATE TABLE local_tbl (a text, b int); +CREATE FOREIGN TABLE foreign_tbl (a text, b int) + SERVER loopback OPTIONS (table_name 'local_tbl'); + +INSERT INTO foreign_tbl VALUES ('AAA', 42); + +-- Test case for partition hierarchy +CREATE TABLE parent_tbl (a text, b int) PARTITION BY LIST (a); +ALTER TABLE parent_tbl ATTACH PARTITION foreign_tbl FOR VALUES IN ('AAA'); + +CREATE TRIGGER parent_tbl_insert_trig + AFTER INSERT ON parent_tbl REFERENCING NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); +CREATE TRIGGER parent_tbl_update_trig + AFTER UPDATE ON parent_tbl REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); +CREATE TRIGGER parent_tbl_delete_trig + AFTER DELETE ON parent_tbl REFERENCING OLD TABLE AS old_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); + +INSERT INTO parent_tbl VALUES ('AAA', 42); + +COPY parent_tbl (a, b) FROM stdin; +AAA 42 +\. + +ALTER SERVER loopback OPTIONS (ADD batch_size '10'); + +INSERT INTO parent_tbl VALUES ('AAA', 42); + +COPY parent_tbl (a, b) FROM stdin; +AAA 42 +\. + +ALTER SERVER loopback OPTIONS (DROP batch_size); + +EXPLAIN (VERBOSE, COSTS OFF) +UPDATE parent_tbl SET b = b + 1; +UPDATE parent_tbl SET b = b + 1; + +EXPLAIN (VERBOSE, COSTS OFF) +DELETE FROM parent_tbl; +DELETE FROM parent_tbl; + +ALTER TABLE parent_tbl DETACH PARTITION foreign_tbl; +DROP TABLE parent_tbl; + +-- Test case for non-partition hierarchy +CREATE TABLE parent_tbl (a text, b int); +ALTER FOREIGN TABLE foreign_tbl INHERIT parent_tbl; + +CREATE TRIGGER parent_tbl_update_trig + AFTER UPDATE ON parent_tbl REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); +CREATE TRIGGER parent_tbl_delete_trig + AFTER DELETE ON parent_tbl REFERENCING OLD TABLE AS old_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); + +EXPLAIN (VERBOSE, COSTS OFF) +UPDATE parent_tbl SET b = b + 1; +UPDATE parent_tbl SET b = b + 1; + +EXPLAIN (VERBOSE, COSTS OFF) +DELETE FROM parent_tbl; +DELETE FROM parent_tbl; + +ALTER FOREIGN TABLE foreign_tbl NO INHERIT parent_tbl; +DROP TABLE parent_tbl; + +-- Cleanup +DROP FOREIGN TABLE foreign_tbl; +DROP TABLE local_tbl; + -- =================================================================== -- test inheritance features -- =================================================================== @@ -3297,14 +3363,8 @@ SET ROLE regress_nosuper; SHOW is_superuser; -- This will be OK, we can create the FDW -DO $d$ - BEGIN - EXECUTE $$CREATE SERVER loopback_nopw FOREIGN DATA WRAPPER postgres_fdw - OPTIONS (dbname '$$||current_database()||$$', - port '$$||current_setting('port')||$$' - )$$; - END; -$d$; +CREATE SERVER loopback_nopw FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (dbname :'current_database', port :'current_port'); -- But creation of user mappings for non-superusers should fail CREATE USER MAPPING FOR public SERVER loopback_nopw; @@ -4287,7 +4347,7 @@ ALTER SERVER loopback2 OPTIONS (DROP parallel_abort); CREATE TABLE analyze_table (id int, a text, b bigint); CREATE FOREIGN TABLE analyze_ftable (id int, a text, b bigint) - SERVER loopback OPTIONS (table_name 'analyze_rtable1'); + SERVER loopback OPTIONS (table_name 
'analyze_table'); INSERT INTO analyze_table (SELECT x FROM generate_series(1,1000) x); ANALYZE analyze_table; @@ -4298,19 +4358,19 @@ ANALYZE analyze_table; ALTER SERVER loopback OPTIONS (analyze_sampling 'invalid'); ALTER SERVER loopback OPTIONS (analyze_sampling 'auto'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; ALTER SERVER loopback OPTIONS (SET analyze_sampling 'system'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; ALTER SERVER loopback OPTIONS (SET analyze_sampling 'bernoulli'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; ALTER SERVER loopback OPTIONS (SET analyze_sampling 'random'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; ALTER SERVER loopback OPTIONS (SET analyze_sampling 'off'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; -- cleanup DROP FOREIGN TABLE analyze_ftable; diff --git a/contrib/seg/seg.c b/contrib/seg/seg.c index 151cbb954b9a1..b5de2a5e1be3f 100644 --- a/contrib/seg/seg.c +++ b/contrib/seg/seg.c @@ -417,7 +417,7 @@ gseg_same(PG_FUNCTION_ARGS) { bool *result = (bool *) PG_GETARG_POINTER(2); - if (DirectFunctionCall2(seg_same, PG_GETARG_DATUM(0), PG_GETARG_DATUM(1))) + if (DatumGetBool(DirectFunctionCall2(seg_same, PG_GETARG_DATUM(0), PG_GETARG_DATUM(1)))) *result = true; else *result = false; @@ -470,7 +470,7 @@ gseg_leaf_consistent(Datum key, Datum query, StrategyNumber strategy) retval = DirectFunctionCall2(seg_contained, key, query); break; default: - retval = false; + retval = BoolGetDatum(false); } PG_RETURN_DATUM(retval); diff --git a/contrib/sepgsql/database.c b/contrib/sepgsql/database.c index 6eeb429a28c08..c4ed646436990 100644 --- a/contrib/sepgsql/database.c +++ b/contrib/sepgsql/database.c @@ -16,7 +16,6 @@ #include "access/table.h" #include "catalog/dependency.h" #include "catalog/pg_database.h" -#include "commands/dbcommands.h" #include "commands/seclabel.h" #include "sepgsql.h" #include "utils/builtins.h" diff --git a/contrib/sepgsql/label.c b/contrib/sepgsql/label.c index 996ce174454dc..a37d89a3f1cc0 100644 --- a/contrib/sepgsql/label.c +++ b/contrib/sepgsql/label.c @@ -23,7 +23,6 @@ #include "catalog/pg_database.h" #include "catalog/pg_namespace.h" #include "catalog/pg_proc.h" -#include "commands/dbcommands.h" #include "commands/seclabel.h" #include "libpq/auth.h" #include "libpq/libpq-be.h" diff --git a/contrib/sepgsql/uavc.c b/contrib/sepgsql/uavc.c index 65ea8e7946a6e..d9ccbc38bc538 100644 --- a/contrib/sepgsql/uavc.c +++ b/contrib/sepgsql/uavc.c @@ -66,8 +66,8 @@ static char *avc_unlabeled; /* system 'unlabeled' label */ static uint32 sepgsql_avc_hash(const char *scontext, const char *tcontext, uint16 tclass) { - return hash_any((const unsigned char *) scontext, strlen(scontext)) - ^ hash_any((const unsigned char *) tcontext, strlen(tcontext)) + return hash_bytes((const unsigned char *) scontext, strlen(scontext)) + ^ hash_bytes((const unsigned char *) tcontext, strlen(tcontext)) ^ tclass; } diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c index bb495563200c3..f671a7d4b3125 100644 --- a/contrib/test_decoding/test_decoding.c +++ b/contrib/test_decoding/test_decoding.c @@ -581,7 +581,7 @@ tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skip_ /* print data */ if (isnull) appendStringInfoString(s, "null"); - else if (typisvarlena && VARATT_IS_EXTERNAL_ONDISK(origval)) + else if (typisvarlena && VARATT_IS_EXTERNAL_ONDISK(DatumGetPointer(origval))) appendStringInfoString(s, "unchanged-toast-datum"); else if (!typisvarlena) print_literal(s, typid, diff --git 
a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml index 97f547b3cc4b2..e9095bedf21e0 100644 --- a/doc/src/sgml/catalogs.sgml +++ b/doc/src/sgml/catalogs.sgml @@ -8087,12 +8087,38 @@ SCRAM-SHA-256$<iteration count>:&l subretaindeadtuples bool - If true, the information (e.g., dead tuples, commit timestamps, and + If true, the detection of update_deleted is + enabled and the information (e.g., dead tuples, commit timestamps, and origins) on the subscriber that is useful for conflict detection is retained. + + + submaxretention int4 + + + The maximum duration (in milliseconds) for which information (e.g., dead + tuples, commit timestamps, and origins) useful for conflict detection can + be retained. + + + + + + subretentionactive bool + + + The retention status of information (e.g., dead tuples, commit + timestamps, and origins) useful for conflict detection. True if + retain_dead_tuples + is enabled, and the retention duration has not exceeded + max_retention_duration, + when defined. + + + subconninfo text diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 20ccb2d6b5447..2a3685f474a96 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -3399,8 +3399,9 @@ include_dir 'conf.d' This parameter enables compression of WAL using the specified compression method. When enabled, the PostgreSQL - server compresses full page images written to WAL when - full_page_writes is on or during a base backup. + server compresses full page images written to WAL (e.g. when + full_page_writes is on, during a base backup, + etc.). A compressed page image will be decompressed during WAL replay. The supported methods are pglz, lz4 (if PostgreSQL @@ -7382,6 +7383,11 @@ local0.* /var/log/postgresql + debug_print_raw_parse (boolean) + + debug_print_raw_parse configuration parameter + + debug_print_parse (boolean) debug_print_parse configuration parameter @@ -7400,8 +7406,8 @@ local0.* /var/log/postgresql These parameters enable various debugging output to be emitted. - When set, they print the resulting parse tree, the query rewriter - output, or the execution plan for each executed query. + When set, they print the resulting raw parse tree, the parse tree, the query + rewriter output, or the execution plan for each executed query. These messages are emitted at LOG message level, so by default they will appear in the server log but will not be sent to the client. You can change that by adjusting @@ -7421,7 +7427,8 @@ local0.* /var/log/postgresql When set, debug_pretty_print indents the messages - produced by debug_print_parse, + produced by debug_print_raw_parse, + debug_print_parse, debug_print_rewritten, or debug_print_plan. This results in more readable but much longer output than the compact format used when diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml index 0994e08931155..b81d89e260809 100644 --- a/doc/src/sgml/datatype.sgml +++ b/doc/src/sgml/datatype.sgml @@ -117,7 +117,7 @@ double precision - float8 + float, float8 double precision floating-point number (8 bytes) @@ -2054,8 +2054,6 @@ MINUTE TO SECOND Time Input - - Example @@ -5245,8 +5243,8 @@ WHERE ...
Pseudo-Types - - + + Name diff --git a/doc/src/sgml/filelist.sgml b/doc/src/sgml/filelist.sgml index bcde3cfd0374a..ac66fcbdb5727 100644 --- a/doc/src/sgml/filelist.sgml +++ b/doc/src/sgml/filelist.sgml @@ -17,7 +17,10 @@ - + + +%allfiles_func; + diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml deleted file mode 100644 index de5b5929ee078..0000000000000 --- a/doc/src/sgml/func.sgml +++ /dev/null @@ -1,32072 +0,0 @@ - - - - Functions and Operators - - - function - - - - operator - - - - PostgreSQL provides a large number of - functions and operators for the built-in data types. This chapter - describes most of them, although additional special-purpose functions - appear in relevant sections of the manual. Users can also - define their own functions and operators, as described in - . The - psql commands \df and - \do can be used to list all - available functions and operators, respectively. - - - - The notation used throughout this chapter to describe the argument and - result data types of a function or operator is like this: - -repeat ( text, integer ) text - - which says that the function repeat takes one text and - one integer argument and returns a result of type text. The right arrow - is also used to indicate the result of an example, thus: - -repeat('Pg', 4) PgPgPgPg - - - - - If you are concerned about portability then note that most of - the functions and operators described in this chapter, with the - exception of the most trivial arithmetic and comparison operators - and some explicitly marked functions, are not specified by the - SQL standard. Some of this extended functionality - is present in other SQL database management - systems, and in many cases this functionality is compatible and - consistent between the various implementations. - - - - - Logical Operators - - - operator - logical - - - - Boolean - operators - operators, logical - - - - The usual logical operators are available: - - - AND (operator) - - - - OR (operator) - - - - NOT (operator) - - - - conjunction - - - - disjunction - - - - negation - - - -boolean AND boolean boolean -boolean OR boolean boolean -NOT boolean boolean - - - SQL uses a three-valued logic system with true, - false, and null, which represents unknown. - Observe the following truth tables: - - - - - - a - b - a AND b - a OR b - - - - - - TRUE - TRUE - TRUE - TRUE - - - - TRUE - FALSE - FALSE - TRUE - - - - TRUE - NULL - NULL - TRUE - - - - FALSE - FALSE - FALSE - FALSE - - - - FALSE - NULL - FALSE - NULL - - - - NULL - NULL - NULL - NULL - - - - - - - - - - a - NOT a - - - - - - TRUE - FALSE - - - - FALSE - TRUE - - - - NULL - NULL - - - - - - - - The operators AND and OR are - commutative, that is, you can switch the left and right operands - without affecting the result. (However, it is not guaranteed that - the left operand is evaluated before the right operand. See for more information about the - order of evaluation of subexpressions.) - - - - - Comparison Functions and Operators - - - comparison - operators - - - - The usual comparison operators are available, as shown in . - - -
- Comparison Operators - - - - Operator - Description - - - - - - - datatype < datatype - boolean - - Less than - - - - - datatype > datatype - boolean - - Greater than - - - - - datatype <= datatype - boolean - - Less than or equal to - - - - - datatype >= datatype - boolean - - Greater than or equal to - - - - - datatype = datatype - boolean - - Equal - - - - - datatype <> datatype - boolean - - Not equal - - - - - datatype != datatype - boolean - - Not equal - - - -
- - - - <> is the standard SQL notation for not - equal. != is an alias, which is converted - to <> at a very early stage of parsing. - Hence, it is not possible to implement != - and <> operators that do different things. - - - - - These comparison operators are available for all built-in data types - that have a natural ordering, including numeric, string, and date/time - types. In addition, arrays, composite types, and ranges can be compared - if their component data types are comparable. - - - - It is usually possible to compare values of related data - types as well; for example integer > - bigint will work. Some cases of this sort are implemented - directly by cross-type comparison operators, but if no - such operator is available, the parser will coerce the less-general type - to the more-general type and apply the latter's comparison operator. - - - - As shown above, all comparison operators are binary operators that - return values of type boolean. Thus, expressions like - 1 < 2 < 3 are not valid (because there is - no < operator to compare a Boolean value with - 3). Use the BETWEEN predicates - shown below to perform range tests. - - - - There are also some comparison predicates, as shown in . These behave much like - operators, but have special syntax mandated by the SQL standard. - - - - Comparison Predicates - - - - - Predicate - - - Description - - - Example(s) - - - - - - - - datatype BETWEEN datatype AND datatype - boolean - - - Between (inclusive of the range endpoints). - - - 2 BETWEEN 1 AND 3 - t - - - 2 BETWEEN 3 AND 1 - f - - - - - - datatype NOT BETWEEN datatype AND datatype - boolean - - - Not between (the negation of BETWEEN). - - - 2 NOT BETWEEN 1 AND 3 - f - - - - - - datatype BETWEEN SYMMETRIC datatype AND datatype - boolean - - - Between, after sorting the two endpoint values. - - - 2 BETWEEN SYMMETRIC 3 AND 1 - t - - - - - - datatype NOT BETWEEN SYMMETRIC datatype AND datatype - boolean - - - Not between, after sorting the two endpoint values. - - - 2 NOT BETWEEN SYMMETRIC 3 AND 1 - f - - - - - - datatype IS DISTINCT FROM datatype - boolean - - - Not equal, treating null as a comparable value. - - - 1 IS DISTINCT FROM NULL - t (rather than NULL) - - - NULL IS DISTINCT FROM NULL - f (rather than NULL) - - - - - - datatype IS NOT DISTINCT FROM datatype - boolean - - - Equal, treating null as a comparable value. - - - 1 IS NOT DISTINCT FROM NULL - f (rather than NULL) - - - NULL IS NOT DISTINCT FROM NULL - t (rather than NULL) - - - - - - datatype IS NULL - boolean - - - Test whether value is null. - - - 1.5 IS NULL - f - - - - - - datatype IS NOT NULL - boolean - - - Test whether value is not null. - - - 'null' IS NOT NULL - t - - - - - - datatype ISNULL - boolean - - - Test whether value is null (nonstandard syntax). - - - - - - datatype NOTNULL - boolean - - - Test whether value is not null (nonstandard syntax). - - - - - - boolean IS TRUE - boolean - - - Test whether boolean expression yields true. - - - true IS TRUE - t - - - NULL::boolean IS TRUE - f (rather than NULL) - - - - - - boolean IS NOT TRUE - boolean - - - Test whether boolean expression yields false or unknown. - - - true IS NOT TRUE - f - - - NULL::boolean IS NOT TRUE - t (rather than NULL) - - - - - - boolean IS FALSE - boolean - - - Test whether boolean expression yields false. - - - true IS FALSE - f - - - NULL::boolean IS FALSE - f (rather than NULL) - - - - - - boolean IS NOT FALSE - boolean - - - Test whether boolean expression yields true or unknown. 
- - - true IS NOT FALSE - t - - - NULL::boolean IS NOT FALSE - t (rather than NULL) - - - - - - boolean IS UNKNOWN - boolean - - - Test whether boolean expression yields unknown. - - - true IS UNKNOWN - f - - - NULL::boolean IS UNKNOWN - t (rather than NULL) - - - - - - boolean IS NOT UNKNOWN - boolean - - - Test whether boolean expression yields true or false. - - - true IS NOT UNKNOWN - t - - - NULL::boolean IS NOT UNKNOWN - f (rather than NULL) - - - - -
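As a compact illustration of these predicates (a minimal psql sketch; results shown in comments):

SELECT 2 BETWEEN 1 AND 3;            -- t  (endpoints are included)
SELECT 2 BETWEEN SYMMETRIC 3 AND 1;  -- t  (endpoints are sorted first)
SELECT 7 = NULL;                     -- null, not false
SELECT 7 IS DISTINCT FROM NULL;      -- t  (null treated as a comparable value)
SELECT (NULL::boolean) IS NOT TRUE;  -- t  (never null, unlike ordinary comparisons)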
- - - - BETWEEN - - - BETWEEN SYMMETRIC - - The BETWEEN predicate simplifies range tests: - -a BETWEEN x AND y - - is equivalent to - -a >= x AND a <= y - - Notice that BETWEEN treats the endpoint values as included - in the range. - BETWEEN SYMMETRIC is like BETWEEN - except there is no requirement that the argument to the left of - AND be less than or equal to the argument on the right. - If it is not, those two arguments are automatically swapped, so that - a nonempty range is always implied. - - - - The various variants of BETWEEN are implemented in - terms of the ordinary comparison operators, and therefore will work for - any data type(s) that can be compared. - - - - - The use of AND in the BETWEEN - syntax creates an ambiguity with the use of AND as a - logical operator. To resolve this, only a limited set of expression - types are allowed as the second argument of a BETWEEN - clause. If you need to write a more complex sub-expression - in BETWEEN, write parentheses around the - sub-expression. - - - - - - IS DISTINCT FROM - - - IS NOT DISTINCT FROM - - Ordinary comparison operators yield null (signifying unknown), - not true or false, when either input is null. For example, - 7 = NULL yields null, as does 7 <> NULL. When - this behavior is not suitable, use the - IS NOT DISTINCT FROM predicates: - -a IS DISTINCT FROM b -a IS NOT DISTINCT FROM b - - For non-null inputs, IS DISTINCT FROM is - the same as the <> operator. However, if both - inputs are null it returns false, and if only one input is - null it returns true. Similarly, IS NOT DISTINCT - FROM is identical to = for non-null - inputs, but it returns true when both inputs are null, and false when only - one input is null. Thus, these predicates effectively act as though null - were a normal data value, rather than unknown. - - - - - IS NULL - - - IS NOT NULL - - - ISNULL - - - NOTNULL - - To check whether a value is or is not null, use the predicates: - -expression IS NULL -expression IS NOT NULL - - or the equivalent, but nonstandard, predicates: - -expression ISNULL -expression NOTNULL - - null valuecomparing - - - - Do not write - expression = NULL - because NULL is not equal to - NULL. (The null value represents an unknown value, - and it is not known whether two unknown values are equal.) - - - - - Some applications might expect that - expression = NULL - returns true if expression evaluates to - the null value. It is highly recommended that these applications - be modified to comply with the SQL standard. However, if that - cannot be done the - configuration variable is available. If it is enabled, - PostgreSQL will convert x = - NULL clauses to x IS NULL. - - - - - If the expression is row-valued, then - IS NULL is true when the row expression itself is null - or when all the row's fields are null, while - IS NOT NULL is true when the row expression itself is non-null - and all the row's fields are non-null. Because of this behavior, - IS NULL and IS NOT NULL do not always return - inverse results for row-valued expressions; in particular, a row-valued - expression that contains both null and non-null fields will return false - for both tests. 
For example: - - -SELECT ROW(1,2.5,'this is a test') = ROW(1, 3, 'not the same'); - -SELECT ROW(table.*) IS NULL FROM table; -- detect all-null rows - -SELECT ROW(table.*) IS NOT NULL FROM table; -- detect all-non-null rows - -SELECT NOT(ROW(table.*) IS NOT NULL) FROM TABLE; -- detect at least one null in rows - - - In some cases, it may be preferable to - write row IS DISTINCT FROM NULL - or row IS NOT DISTINCT FROM NULL, - which will simply check whether the overall row value is null without any - additional tests on the row fields. - - - - - IS TRUE - - - IS NOT TRUE - - - IS FALSE - - - IS NOT FALSE - - - IS UNKNOWN - - - IS NOT UNKNOWN - - Boolean values can also be tested using the predicates - -boolean_expression IS TRUE -boolean_expression IS NOT TRUE -boolean_expression IS FALSE -boolean_expression IS NOT FALSE -boolean_expression IS UNKNOWN -boolean_expression IS NOT UNKNOWN - - These will always return true or false, never a null value, even when the - operand is null. - A null input is treated as the logical value unknown. - Notice that IS UNKNOWN and IS NOT UNKNOWN are - effectively the same as IS NULL and - IS NOT NULL, respectively, except that the input - expression must be of Boolean type. - - - - Some comparison-related functions are also available, as shown in . - - - - Comparison Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - num_nonnulls - - num_nonnulls ( VARIADIC "any" ) - integer - - - Returns the number of non-null arguments. - - - num_nonnulls(1, NULL, 2) - 2 - - - - - - num_nulls - - num_nulls ( VARIADIC "any" ) - integer - - - Returns the number of null arguments. - - - num_nulls(1, NULL, 2) - 1 - - - - -
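Because ordinary comparisons yield null as soon as any input is null, num_nonnulls() is a convenient building block for CHECK constraints; a minimal sketch (the contact table and its columns are hypothetical):

CREATE TABLE contact (
    email text,
    phone text,
    -- require at least one contact method; num_nonnulls() counts non-null arguments
    CHECK (num_nonnulls(email, phone) >= 1)
);
INSERT INTO contact VALUES (NULL, NULL);             -- rejected by the CHECK constraint
INSERT INTO contact VALUES ('a@example.com', NULL);  -- accepted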
- - - - - Mathematical Functions and Operators - - - Mathematical operators are provided for many - PostgreSQL types. For types without - standard mathematical conventions - (e.g., date/time types) we - describe the actual behavior in subsequent sections. - - - - shows the mathematical - operators that are available for the standard numeric types. - Unless otherwise noted, operators shown as - accepting numeric_type are available for all - the types smallint, integer, - bigint, numeric, real, - and double precision. - Operators shown as accepting integral_type - are available for the types smallint, integer, - and bigint. - Except where noted, each form of an operator returns the same data type - as its argument(s). Calls involving multiple argument data types, such - as integer + numeric, - are resolved by using the type appearing later in these lists. - - - - Mathematical Operators - - - - - - Operator - - - Description - - - Example(s) - - - - - - - - numeric_type + numeric_type - numeric_type - - - Addition - - - 2 + 3 - 5 - - - - - - + numeric_type - numeric_type - - - Unary plus (no operation) - - - + 3.5 - 3.5 - - - - - - numeric_type - numeric_type - numeric_type - - - Subtraction - - - 2 - 3 - -1 - - - - - - - numeric_type - numeric_type - - - Negation - - - - (-4) - 4 - - - - - - numeric_type * numeric_type - numeric_type - - - Multiplication - - - 2 * 3 - 6 - - - - - - numeric_type / numeric_type - numeric_type - - - Division (for integral types, division truncates the result towards - zero) - - - 5.0 / 2 - 2.5000000000000000 - - - 5 / 2 - 2 - - - (-5) / 2 - -2 - - - - - - numeric_type % numeric_type - numeric_type - - - Modulo (remainder); available for smallint, - integer, bigint, and numeric - - - 5 % 4 - 1 - - - - - - numeric ^ numeric - numeric - - - double precision ^ double precision - double precision - - - Exponentiation - - - 2 ^ 3 - 8 - - - Unlike typical mathematical practice, multiple uses of - ^ will associate left to right by default: - - - 2 ^ 3 ^ 3 - 512 - - - 2 ^ (3 ^ 3) - 134217728 - - - - - - |/ double precision - double precision - - - Square root - - - |/ 25.0 - 5 - - - - - - ||/ double precision - double precision - - - Cube root - - - ||/ 64.0 - 4 - - - - - - @ numeric_type - numeric_type - - - Absolute value - - - @ -5.0 - 5.0 - - - - - - integral_type & integral_type - integral_type - - - Bitwise AND - - - 91 & 15 - 11 - - - - - - integral_type | integral_type - integral_type - - - Bitwise OR - - - 32 | 3 - 35 - - - - - - integral_type # integral_type - integral_type - - - Bitwise exclusive OR - - - 17 # 5 - 20 - - - - - - ~ integral_type - integral_type - - - Bitwise NOT - - - ~1 - -2 - - - - - - integral_type << integer - integral_type - - - Bitwise shift left - - - 1 << 4 - 16 - - - - - - integral_type >> integer - integral_type - - - Bitwise shift right - - - 8 >> 2 - 2 - - - - - -
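The integral-type behaviors above are easiest to absorb side by side; a minimal sketch (results shown in comments):

SELECT 5 / 2, (-5) / 2;          -- 2, -2: integer division truncates toward zero
SELECT 5 % 4;                    -- 1
SELECT 2 ^ 3 ^ 3;                -- 512: ^ associates left to right, i.e. (2 ^ 3) ^ 3
SELECT 91 & 15, 32 | 3, 1 << 4;  -- 11, 35, 16: bitwise AND, OR, and shift left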
- - - shows the available - mathematical functions. - Many of these functions are provided in multiple forms with different - argument types. - Except where noted, any given form of a function returns the same - data type as its argument(s); cross-type cases are resolved in the - same way as explained above for operators. - The functions working with double precision data are mostly - implemented on top of the host system's C library; accuracy and behavior in - boundary cases can therefore vary depending on the host system. - - - - Mathematical Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - abs - - abs ( numeric_type ) - numeric_type - - - Absolute value - - - abs(-17.4) - 17.4 - - - - - - - cbrt - - cbrt ( double precision ) - double precision - - - Cube root - - - cbrt(64.0) - 4 - - - - - - - ceil - - ceil ( numeric ) - numeric - - - ceil ( double precision ) - double precision - - - Nearest integer greater than or equal to argument - - - ceil(42.2) - 43 - - - ceil(-42.8) - -42 - - - - - - - ceiling - - ceiling ( numeric ) - numeric - - - ceiling ( double precision ) - double precision - - - Nearest integer greater than or equal to argument (same - as ceil) - - - ceiling(95.3) - 96 - - - - - - - degrees - - degrees ( double precision ) - double precision - - - Converts radians to degrees - - - degrees(0.5) - 28.64788975654116 - - - - - - - div - - div ( y numeric, - x numeric ) - numeric - - - Integer quotient of y/x - (truncates towards zero) - - - div(9, 4) - 2 - - - - - - - erf - - erf ( double precision ) - double precision - - - Error function - - - erf(1.0) - 0.8427007929497149 - - - - - - - erfc - - erfc ( double precision ) - double precision - - - Complementary error function (1 - erf(x), without - loss of precision for large inputs) - - - erfc(1.0) - 0.15729920705028513 - - - - - - - exp - - exp ( numeric ) - numeric - - - exp ( double precision ) - double precision - - - Exponential (e raised to the given power) - - - exp(1.0) - 2.7182818284590452 - - - - - - - factorial - - factorial ( bigint ) - numeric - - - Factorial - - - factorial(5) - 120 - - - - - - - floor - - floor ( numeric ) - numeric - - - floor ( double precision ) - double precision - - - Nearest integer less than or equal to argument - - - floor(42.8) - 42 - - - floor(-42.8) - -43 - - - - - - - gamma - - gamma ( double precision ) - double precision - - - Gamma function - - - gamma(0.5) - 1.772453850905516 - - - gamma(6) - 120 - - - - - - - gcd - - gcd ( numeric_type, numeric_type ) - numeric_type - - - Greatest common divisor (the largest positive number that divides both - inputs with no remainder); returns 0 if both inputs - are zero; available for integer, bigint, - and numeric - - - gcd(1071, 462) - 21 - - - - - - - lcm - - lcm ( numeric_type, numeric_type ) - numeric_type - - - Least common multiple (the smallest strictly positive number that is - an integral multiple of both inputs); returns 0 if - either input is zero; available for integer, - bigint, and numeric - - - lcm(1071, 462) - 23562 - - - - - - - lgamma - - lgamma ( double precision ) - double precision - - - Natural logarithm of the absolute value of the gamma function - - - lgamma(1000) - 5905.220423209181 - - - - - - - ln - - ln ( numeric ) - numeric - - - ln ( double precision ) - double precision - - - Natural logarithm - - - ln(2.0) - 0.6931471805599453 - - - - - - - log - - log ( numeric ) - numeric - - - log ( double precision ) - double precision - - - Base 10 logarithm - - - log(100) - 2 - - - - - - 
- log10 - - log10 ( numeric ) - numeric - - - log10 ( double precision ) - double precision - - - Base 10 logarithm (same as log) - - - log10(1000) - 3 - - - - - - log ( b numeric, - x numeric ) - numeric - - - Logarithm of x to base b - - - log(2.0, 64.0) - 6.0000000000000000 - - - - - - - min_scale - - min_scale ( numeric ) - integer - - - Minimum scale (number of fractional decimal digits) needed - to represent the supplied value precisely - - - min_scale(8.4100) - 2 - - - - - - - mod - - mod ( y numeric_type, - x numeric_type ) - numeric_type - - - Remainder of y/x; - available for smallint, integer, - bigint, and numeric - - - mod(9, 4) - 1 - - - - - - - pi - - pi ( ) - double precision - - - Approximate value of π - - - pi() - 3.141592653589793 - - - - - - - power - - power ( a numeric, - b numeric ) - numeric - - - power ( a double precision, - b double precision ) - double precision - - - a raised to the power of b - - - power(9, 3) - 729 - - - - - - - radians - - radians ( double precision ) - double precision - - - Converts degrees to radians - - - radians(45.0) - 0.7853981633974483 - - - - - - - round - - round ( numeric ) - numeric - - - round ( double precision ) - double precision - - - Rounds to nearest integer. For numeric, ties are - broken by rounding away from zero. For double precision, - the tie-breaking behavior is platform dependent, but - round to nearest even is the most common rule. - - - round(42.4) - 42 - - - - - - round ( v numeric, s integer ) - numeric - - - Rounds v to s decimal - places. Ties are broken by rounding away from zero. - - - round(42.4382, 2) - 42.44 - - - round(1234.56, -1) - 1230 - - - - - - - scale - - scale ( numeric ) - integer - - - Scale of the argument (the number of decimal digits in the fractional part) - - - scale(8.4100) - 4 - - - - - - - sign - - sign ( numeric ) - numeric - - - sign ( double precision ) - double precision - - - Sign of the argument (-1, 0, or +1) - - - sign(-8.4) - -1 - - - - - - - sqrt - - sqrt ( numeric ) - numeric - - - sqrt ( double precision ) - double precision - - - Square root - - - sqrt(2) - 1.4142135623730951 - - - - - - - trim_scale - - trim_scale ( numeric ) - numeric - - - Reduces the value's scale (number of fractional decimal digits) by - removing trailing zeroes - - - trim_scale(8.4100) - 8.41 - - - - - - - trunc - - trunc ( numeric ) - numeric - - - trunc ( double precision ) - double precision - - - Truncates to integer (towards zero) - - - trunc(42.8) - 42 - - - trunc(-42.8) - -42 - - - - - - trunc ( v numeric, s integer ) - numeric - - - Truncates v to s - decimal places - - - trunc(42.4382, 2) - 42.43 - - - - - - - width_bucket - - width_bucket ( operand numeric, low numeric, high numeric, count integer ) - integer - - - width_bucket ( operand double precision, low double precision, high double precision, count integer ) - integer - - - Returns the number of the bucket in - which operand falls in a histogram - having count equal-width buckets spanning the - range low to high. - The buckets have inclusive lower bounds and exclusive upper bounds. - Returns 0 for an input less - than low, - or count+1 for an input - greater than or equal to high. - If low > high, - the behavior is mirror-reversed, with bucket 1 - now being the one just below low, and the - inclusive bounds now being on the upper side. 
- - - width_bucket(5.35, 0.024, 10.06, 5) - 3 - - - width_bucket(9, 10, 0, 10) - 2 - - - - - - width_bucket ( operand anycompatible, thresholds anycompatiblearray ) - integer - - - Returns the number of the bucket in - which operand falls given an array listing the - inclusive lower bounds of the buckets. - Returns 0 for an input less than the first lower - bound. operand and the array elements can be - of any type having standard comparison operators. - The thresholds array must be - sorted, smallest first, or unexpected results will be - obtained. - - - width_bucket(now(), array['yesterday', 'today', 'tomorrow']::timestamptz[]) - 2 - - - - -
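width_bucket() is the usual building block for quick histograms; a minimal sketch over uniformly random data (generate_series is the standard set-returning function):

SELECT width_bucket(random(), 0, 1, 10) AS bucket, count(*)
FROM generate_series(1, 10000)
GROUP BY bucket
ORDER BY bucket;  -- ten rows, one per equal-width bucket over [0, 1)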
- - - shows functions for - generating random numbers. - - - - Random Functions - - - - - - Function - - - Description - - - Example(s) - - - - - - - - - random - - random ( ) - double precision - - - Returns a random value in the range 0.0 <= x < 1.0 - - - random() - 0.897124072839091 - - - - - - - random - - random ( min integer, max integer ) - integer - - - random ( min bigint, max bigint ) - bigint - - - random ( min numeric, max numeric ) - numeric - - - Returns a random value in the range - min <= x <= max. - For type numeric, the result will have the same number of - fractional decimal digits as min or - max, whichever has more. - - - random(1, 10) - 7 - - - random(-0.499, 0.499) - 0.347 - - - - - - - random_normal - - - random_normal ( - mean double precision - , stddev double precision ) - double precision - - - Returns a random value from the normal distribution with the given - parameters; mean defaults to 0.0 - and stddev defaults to 1.0 - - - random_normal(0.0, 1.0) - 0.051285419 - - - - - - - setseed - - setseed ( double precision ) - void - - - Sets the seed for subsequent random() and - random_normal() calls; - argument must be between -1.0 and 1.0, inclusive - - - setseed(0.12345) - - - - -
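The deterministic behavior described just below is easy to see in a single session; a minimal sketch:

SELECT setseed(0.42);
SELECT random();       -- some value r
SELECT setseed(0.42);
SELECT random();       -- exactly r again, within the same session
SELECT random(1, 6);   -- an integer from 1 to 6, endpoints included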
- - - The random() and random_normal() - functions listed in use a - deterministic pseudo-random number generator. - It is fast but not suitable for cryptographic - applications; see the module for a more - secure alternative. - If setseed() is called, the series of results of - subsequent calls to these functions in the current session - can be repeated by re-issuing setseed() with the same - argument. - Without any prior setseed() call in the same - session, the first call to any of these functions obtains a seed - from a platform-dependent source of random bits. - - - - shows the - available trigonometric functions. Each of these functions comes in - two variants, one that measures angles in radians and one that - measures angles in degrees. - - - - Trigonometric Functions - - - - - - Function - - - Description - - - Example(s) - - - - - - - - - acos - - acos ( double precision ) - double precision - - - Inverse cosine, result in radians - - - acos(1) - 0 - - - - - - - acosd - - acosd ( double precision ) - double precision - - - Inverse cosine, result in degrees - - - acosd(0.5) - 60 - - - - - - - asin - - asin ( double precision ) - double precision - - - Inverse sine, result in radians - - - asin(1) - 1.5707963267948966 - - - - - - - asind - - asind ( double precision ) - double precision - - - Inverse sine, result in degrees - - - asind(0.5) - 30 - - - - - - - atan - - atan ( double precision ) - double precision - - - Inverse tangent, result in radians - - - atan(1) - 0.7853981633974483 - - - - - - - atand - - atand ( double precision ) - double precision - - - Inverse tangent, result in degrees - - - atand(1) - 45 - - - - - - - atan2 - - atan2 ( y double precision, - x double precision ) - double precision - - - Inverse tangent of - y/x, - result in radians - - - atan2(1, 0) - 1.5707963267948966 - - - - - - - atan2d - - atan2d ( y double precision, - x double precision ) - double precision - - - Inverse tangent of - y/x, - result in degrees - - - atan2d(1, 0) - 90 - - - - - - - cos - - cos ( double precision ) - double precision - - - Cosine, argument in radians - - - cos(0) - 1 - - - - - - - cosd - - cosd ( double precision ) - double precision - - - Cosine, argument in degrees - - - cosd(60) - 0.5 - - - - - - - cot - - cot ( double precision ) - double precision - - - Cotangent, argument in radians - - - cot(0.5) - 1.830487721712452 - - - - - - - cotd - - cotd ( double precision ) - double precision - - - Cotangent, argument in degrees - - - cotd(45) - 1 - - - - - - - sin - - sin ( double precision ) - double precision - - - Sine, argument in radians - - - sin(1) - 0.8414709848078965 - - - - - - - sind - - sind ( double precision ) - double precision - - - Sine, argument in degrees - - - sind(30) - 0.5 - - - - - - - tan - - tan ( double precision ) - double precision - - - Tangent, argument in radians - - - tan(1) - 1.5574077246549023 - - - - - - - tand - - tand ( double precision ) - double precision - - - Tangent, argument in degrees - - - tand(45) - 1 - - - - -
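As the note below explains, the degree-based variants are exact for special angles where the radian-based versions pick up round-off error; a minimal sketch (the second result is typical of IEEE-double platforms):

SELECT sind(30);            -- 0.5, exact
SELECT sin(radians(30));    -- 0.49999999999999994 on most platforms
SELECT cosd(60), tand(45);  -- 0.5, 1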
- - - - Another way to work with angles measured in degrees is to use the unit - transformation functions radians() - and degrees() shown earlier. - However, using the degree-based trigonometric functions is preferred, - as that way avoids round-off error for special cases such - as sind(30). - - - - - shows the - available hyperbolic functions. - - - - Hyperbolic Functions - - - - - - Function - - - Description - - - Example(s) - - - - - - - - - sinh - - sinh ( double precision ) - double precision - - - Hyperbolic sine - - - sinh(1) - 1.1752011936438014 - - - - - - - cosh - - cosh ( double precision ) - double precision - - - Hyperbolic cosine - - - cosh(0) - 1 - - - - - - - tanh - - tanh ( double precision ) - double precision - - - Hyperbolic tangent - - - tanh(1) - 0.7615941559557649 - - - - - - - asinh - - asinh ( double precision ) - double precision - - - Inverse hyperbolic sine - - - asinh(1) - 0.881373587019543 - - - - - - - acosh - - acosh ( double precision ) - double precision - - - Inverse hyperbolic cosine - - - acosh(1) - 0 - - - - - - - atanh - - atanh ( double precision ) - double precision - - - Inverse hyperbolic tangent - - - atanh(0.5) - 0.5493061443340548 - - - - -
- -
- - - - String Functions and Operators - - - This section describes functions and operators for examining and - manipulating string values. Strings in this context include values - of the types character, character varying, - and text. Except where noted, these functions and operators - are declared to accept and return type text. They will - interchangeably accept character varying arguments. - Values of type character will be converted - to text before the function or operator is applied, resulting - in stripping any trailing spaces in the character value. - - - - SQL defines some string functions that use - key words, rather than commas, to separate - arguments. Details are in - . - PostgreSQL also provides versions of these functions - that use the regular function invocation syntax - (see ). - - - - - The string concatenation operator (||) will accept - non-string input, so long as at least one input is of string type, as shown - in . For other cases, inserting an - explicit coercion to text can be used to have non-string input - accepted. - - - - - <acronym>SQL</acronym> String Functions and Operators - - - - - Function/Operator - - - Description - - - Example(s) - - - - - - - - - character string - concatenation - - text || text - text - - - Concatenates the two strings. - - - 'Post' || 'greSQL' - PostgreSQL - - - - - - text || anynonarray - text - - - anynonarray || text - text - - - Converts the non-string input to text, then concatenates the two - strings. (The non-string input cannot be of an array type, because - that would create ambiguity with the array || - operators. If you want to concatenate an array's text equivalent, - cast it to text explicitly.) - - - 'Value: ' || 42 - Value: 42 - - - - - - - btrim - - btrim ( string text - , characters text ) - text - - - Removes the longest string containing only characters - in characters (a space by default) - from the start and end of string. - - - btrim('xyxtrimyyx', 'xyz') - trim - - - - - - - normalized - - - Unicode normalization - - text IS NOT form NORMALIZED - boolean - - - Checks whether the string is in the specified Unicode normalization - form. The optional form key word specifies the - form: NFC (the default), NFD, - NFKC, or NFKD. This expression can - only be used when the server encoding is UTF8. Note - that checking for normalization using this expression is often faster - than normalizing possibly already normalized strings. - - - U&'\0061\0308bc' IS NFD NORMALIZED - t - - - - - - - bit_length - - bit_length ( text ) - integer - - - Returns number of bits in the string (8 - times the octet_length). - - - bit_length('jose') - 32 - - - - - - - char_length - - - character string - length - - - length - of a character string - character string, length - - char_length ( text ) - integer - - - - character_length - - character_length ( text ) - integer - - - Returns number of characters in the string. - - - char_length('josé') - 4 - - - - - - - lower - - lower ( text ) - text - - - Converts the string to all lower case, according to the rules of the - database's locale. - - - lower('TOM') - tom - - - - - - - lpad - - lpad ( string text, - length integer - , fill text ) - text - - - Extends the string to length - length by prepending the characters - fill (a space by default). If the - string is already longer than - length then it is truncated (on the right). 
- - - lpad('hi', 5, 'xy') - xyxhi - - - - - - - ltrim - - ltrim ( string text - , characters text ) - text - - - Removes the longest string containing only characters in - characters (a space by default) from the start of - string. - - - ltrim('zzzytest', 'xyz') - test - - - - - - - normalize - - - Unicode normalization - - normalize ( text - , form ) - text - - - Converts the string to the specified Unicode - normalization form. The optional form key word - specifies the form: NFC (the default), - NFD, NFKC, or - NFKD. This function can only be used when the - server encoding is UTF8. - - - normalize(U&'\0061\0308bc', NFC) - U&'\00E4bc' - - - - - - - octet_length - - octet_length ( text ) - integer - - - Returns number of bytes in the string. - - - octet_length('josé') - 5 (if server encoding is UTF8) - - - - - - - octet_length - - octet_length ( character ) - integer - - - Returns number of bytes in the string. Since this version of the - function accepts type character directly, it will not - strip trailing spaces. - - - octet_length('abc '::character(4)) - 4 - - - - - - - overlay - - overlay ( string text PLACING newsubstring text FROM start integer FOR count integer ) - text - - - Replaces the substring of string that starts at - the start'th character and extends - for count characters - with newsubstring. - If count is omitted, it defaults to the length - of newsubstring. - - - overlay('Txxxxas' placing 'hom' from 2 for 4) - Thomas - - - - - - - position - - position ( substring text IN string text ) - integer - - - Returns first starting index of the specified - substring within - string, or zero if it's not present. - - - position('om' in 'Thomas') - 3 - - - - - - - rpad - - rpad ( string text, - length integer - , fill text ) - text - - - Extends the string to length - length by appending the characters - fill (a space by default). If the - string is already longer than - length then it is truncated. - - - rpad('hi', 5, 'xy') - hixyx - - - - - - - rtrim - - rtrim ( string text - , characters text ) - text - - - Removes the longest string containing only characters in - characters (a space by default) from the end of - string. - - - rtrim('testxxzx', 'xyz') - test - - - - - - - substring - - substring ( string text FROM start integer FOR count integer ) - text - - - Extracts the substring of string starting at - the start'th character if that is specified, - and stopping after count characters if that is - specified. Provide at least one of start - and count. - - - substring('Thomas' from 2 for 3) - hom - - - substring('Thomas' from 3) - omas - - - substring('Thomas' for 2) - Th - - - - - - substring ( string text FROM pattern text ) - text - - - Extracts the first substring matching POSIX regular expression; see - . - - - substring('Thomas' from '...$') - mas - - - - - - substring ( string text SIMILAR pattern text ESCAPE escape text ) - text - - - substring ( string text FROM pattern text FOR escape text ) - text - - - Extracts the first substring matching SQL regular expression; - see . The first form has - been specified since SQL:2003; the second form was only in SQL:1999 - and should be considered obsolete. - - - substring('Thomas' similar '%#"o_a#"_' escape '#') - oma - - - - - - - trim - - trim ( LEADING | TRAILING | BOTH - characters text FROM - string text ) - text - - - Removes the longest string containing only characters in - characters (a space by default) from the - start, end, or both ends (BOTH is the default) - of string. 
- - - trim(both 'xyz' from 'yxTomxx') - Tom - - - - - - trim ( LEADING | TRAILING | BOTH FROM - string text , - characters text ) - text - - - This is a non-standard syntax for trim(). - - - trim(both from 'yxTomxx', 'xyz') - Tom - - - - - - - unicode_assigned - - unicode_assigned ( text ) - boolean - - - Returns true if all characters in the string are - assigned Unicode codepoints; false otherwise. This - function can only be used when the server encoding is - UTF8. - - - - - - - upper - - upper ( text ) - text - - - Converts the string to all upper case, according to the rules of the - database's locale. - - - upper('tom') - TOM - - - - -
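A few of the SQL-standard string functions above, exercised together (a minimal sketch; results shown in comments):

SELECT 'Post' || 'greSQL';                             -- PostgreSQL
SELECT 'Value: ' || 42;                                -- Value: 42 (non-string input is converted)
SELECT overlay('Txxxxas' placing 'hom' from 2 for 4);  -- Thomas
SELECT position('om' in 'Thomas');                     -- 3
SELECT trim(both 'xyz' from 'yxTomxx');                -- Tom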
- - - Additional string manipulation functions and operators are available - and are listed in . (Some of - these are used internally to implement - the SQL-standard string functions listed in - .) - There are also pattern-matching operators, which are described in - , and operators for full-text - search, which are described in . - - - - Other String Functions and Operators - - - - - Function/Operator - - - Description - - - Example(s) - - - - - - - - - character string - prefix test - - text ^@ text - boolean - - - Returns true if the first string starts with the second string - (equivalent to the starts_with() function). - - - 'alphabet' ^@ 'alph' - t - - - - - - - ascii - - ascii ( text ) - integer - - - Returns the numeric code of the first character of the argument. - In UTF8 encoding, returns the Unicode code point - of the character. In other multibyte encodings, the argument must - be an ASCII character. - - - ascii('x') - 120 - - - - - - - chr - - chr ( integer ) - text - - - Returns the character with the given code. In UTF8 - encoding the argument is treated as a Unicode code point. In other - multibyte encodings the argument must designate - an ASCII character. chr(0) is - disallowed because text data types cannot store that character. - - - chr(65) - A - - - - - - - concat - - concat ( val1 "any" - , val2 "any" , ... ) - text - - - Concatenates the text representations of all the arguments. - NULL arguments are ignored. - - - concat('abcde', 2, NULL, 22) - abcde222 - - - - - - - concat_ws - - concat_ws ( sep text, - val1 "any" - , val2 "any" , ... ) - text - - - Concatenates all but the first argument, with separators. The first - argument is used as the separator string, and should not be NULL. - Other NULL arguments are ignored. - - - concat_ws(',', 'abcde', 2, NULL, 22) - abcde,2,22 - - - - - - - format - - format ( formatstr text - , formatarg "any" , ... ) - text - - - Formats arguments according to a format string; - see . - This function is similar to the C function sprintf. - - - format('Hello %s, %1$s', 'World') - Hello World, World - - - - - - - initcap - - initcap ( text ) - text - - - Converts the first letter of each word to upper case and the - rest to lower case. Words are sequences of alphanumeric - characters separated by non-alphanumeric characters. - - - initcap('hi THOMAS') - Hi Thomas - - - - - - - casefold - - casefold ( text ) - text - - - Performs case folding of the input string according to the collation. - Case folding is similar to case conversion, but the purpose of case - folding is to facilitate case-insensitive matching of strings, - whereas the purpose of case conversion is to convert to a particular - cased form. This function can only be used when the server encoding - is UTF8. - - - Ordinarily, case folding simply converts to lowercase, but there may - be exceptions depending on the collation. For instance, some - characters have more than two lowercase variants, or fold to uppercase. - - - Case folding may change the length of the string. For instance, in - the PG_UNICODE_FAST collation, ß - (U+00DF) folds to ss. - - - casefold can be used for Unicode Default Caseless - Matching. It does not always preserve the normalized form of the - input string (see ). - - - The libc provider doesn't support case folding, so - casefold is identical to . - - - - - - - left - - left ( string text, - n integer ) - text - - - Returns first n characters in the - string, or when n is negative, returns - all but last |n| characters. 
- - - left('abcde', 2) - ab - - - - - - - length - - length ( text ) - integer - - - Returns the number of characters in the string. - - - length('jose') - 4 - - - - - - - md5 - - md5 ( text ) - text - - - Computes the MD5 hash of - the argument, with the result written in hexadecimal. - - - md5('abc') - 900150983cd24fb0&zwsp;d6963f7d28e17f72 - - - - - - - parse_ident - - parse_ident ( qualified_identifier text - , strict_mode boolean DEFAULT true ) - text[] - - - Splits qualified_identifier into an array of - identifiers, removing any quoting of individual identifiers. By - default, extra characters after the last identifier are considered an - error; but if the second parameter is false, then such - extra characters are ignored. (This behavior is useful for parsing - names for objects like functions.) Note that this function does not - truncate over-length identifiers. If you want truncation you can cast - the result to name[]. - - - parse_ident('"SomeSchema".someTable') - {SomeSchema,sometable} - - - - - - - pg_client_encoding - - pg_client_encoding ( ) - name - - - Returns current client encoding name. - - - pg_client_encoding() - UTF8 - - - - - - - quote_ident - - quote_ident ( text ) - text - - - Returns the given string suitably quoted to be used as an identifier - in an SQL statement string. - Quotes are added only if necessary (i.e., if the string contains - non-identifier characters or would be case-folded). - Embedded quotes are properly doubled. - See also . - - - quote_ident('Foo bar') - "Foo bar" - - - - - - - quote_literal - - quote_literal ( text ) - text - - - Returns the given string suitably quoted to be used as a string literal - in an SQL statement string. - Embedded single-quotes and backslashes are properly doubled. - Note that quote_literal returns null on null - input; if the argument might be null, - quote_nullable is often more suitable. - See also . - - - quote_literal(E'O\'Reilly') - 'O''Reilly' - - - - - - quote_literal ( anyelement ) - text - - - Converts the given value to text and then quotes it as a literal. - Embedded single-quotes and backslashes are properly doubled. - - - quote_literal(42.5) - '42.5' - - - - - - - quote_nullable - - quote_nullable ( text ) - text - - - Returns the given string suitably quoted to be used as a string literal - in an SQL statement string; or, if the argument - is null, returns NULL. - Embedded single-quotes and backslashes are properly doubled. - See also . - - - quote_nullable(NULL) - NULL - - - - - - quote_nullable ( anyelement ) - text - - - Converts the given value to text and then quotes it as a literal; - or, if the argument is null, returns NULL. - Embedded single-quotes and backslashes are properly doubled. - - - quote_nullable(42.5) - '42.5' - - - - - - - regexp_count - - regexp_count ( string text, pattern text - , start integer - , flags text ) - integer - - - Returns the number of times the POSIX regular - expression pattern matches in - the string; see - . - - - regexp_count('123456789012', '\d\d\d', 2) - 3 - - - - - - - regexp_instr - - regexp_instr ( string text, pattern text - , start integer - , N integer - , endoption integer - , flags text - , subexpr integer ) - integer - - - Returns the position within string where - the N'th match of the POSIX regular - expression pattern occurs, or zero if there is - no such match; see . 
- - - regexp_instr('ABCDEF', 'c(.)(..)', 1, 1, 0, 'i') - 3 - - - regexp_instr('ABCDEF', 'c(.)(..)', 1, 1, 0, 'i', 2) - 5 - - - - - - - regexp_like - - regexp_like ( string text, pattern text - , flags text ) - boolean - - - Checks whether a match of the POSIX regular - expression pattern occurs - within string; see - . - - - regexp_like('Hello World', 'world$', 'i') - t - - - - - - - regexp_match - - regexp_match ( string text, pattern text , flags text ) - text[] - - - Returns substrings within the first match of the POSIX regular - expression pattern to - the string; see - . - - - regexp_match('foobarbequebaz', '(bar)(beque)') - {bar,beque} - - - - - - - regexp_matches - - regexp_matches ( string text, pattern text , flags text ) - setof text[] - - - Returns substrings within the first match of the POSIX regular - expression pattern to - the string, or substrings within all - such matches if the g flag is used; - see . - - - regexp_matches('foobarbequebaz', 'ba.', 'g') - - - {bar} - {baz} - - - - - - - - regexp_replace - - regexp_replace ( string text, pattern text, replacement text - , flags text ) - text - - - Replaces the substring that is the first match to the POSIX - regular expression pattern, or all such - matches if the g flag is used; see - . - - - regexp_replace('Thomas', '.[mN]a.', 'M') - ThM - - - - - - regexp_replace ( string text, pattern text, replacement text, - start integer - , N integer - , flags text ) - text - - - Replaces the substring that is the N'th - match to the POSIX regular expression pattern, - or all such matches if N is zero, with the - search beginning at the start'th character - of string. If N is - omitted, it defaults to 1. See - . - - - regexp_replace('Thomas', '.', 'X', 3, 2) - ThoXas - - - regexp_replace(string=>'hello world', pattern=>'l', replacement=>'XX', start=>1, "N"=>2) - helXXo world - - - - - - - regexp_split_to_array - - regexp_split_to_array ( string text, pattern text , flags text ) - text[] - - - Splits string using a POSIX regular - expression as the delimiter, producing an array of results; see - . - - - regexp_split_to_array('hello world', '\s+') - {hello,world} - - - - - - - regexp_split_to_table - - regexp_split_to_table ( string text, pattern text , flags text ) - setof text - - - Splits string using a POSIX regular - expression as the delimiter, producing a set of results; see - . - - - regexp_split_to_table('hello world', '\s+') - - - hello - world - - - - - - - - regexp_substr - - regexp_substr ( string text, pattern text - , start integer - , N integer - , flags text - , subexpr integer ) - text - - - Returns the substring within string that - matches the N'th occurrence of the POSIX - regular expression pattern, - or NULL if there is no such match; see - . - - - regexp_substr('ABCDEF', 'c(.)(..)', 1, 1, 'i') - CDEF - - - regexp_substr('ABCDEF', 'c(.)(..)', 1, 1, 'i', 2) - EF - - - - - - - repeat - - repeat ( string text, number integer ) - text - - - Repeats string the specified - number of times. - - - repeat('Pg', 4) - PgPgPgPg - - - - - - - replace - - replace ( string text, - from text, - to text ) - text - - - Replaces all occurrences in string of - substring from with - substring to. - - - replace('abcdefabcdef', 'cd', 'XX') - abXXefabXXef - - - - - - - reverse - - reverse ( text ) - text - - - Reverses the order of the characters in the string. 
- - - reverse('abcde') - edcba - - - - - - - right - - right ( string text, - n integer ) - text - - - Returns last n characters in the string, - or when n is negative, returns all but - first |n| characters. - - - right('abcde', 2) - de - - - - - - - split_part - - split_part ( string text, - delimiter text, - n integer ) - text - - - Splits string at occurrences - of delimiter and returns - the n'th field (counting from one), - or when n is negative, returns - the |n|'th-from-last field. - - - split_part('abc~@~def~@~ghi', '~@~', 2) - def - - - split_part('abc,def,ghi,jkl', ',', -2) - ghi - - - - - - - starts_with - - starts_with ( string text, prefix text ) - boolean - - - Returns true if string starts - with prefix. - - - starts_with('alphabet', 'alph') - t - - - - - - - string_to_array - - string_to_array ( string text, delimiter text , null_string text ) - text[] - - - Splits the string at occurrences - of delimiter and forms the resulting fields - into a text array. - If delimiter is NULL, - each character in the string will become a - separate element in the array. - If delimiter is an empty string, then - the string is treated as a single field. - If null_string is supplied and is - not NULL, fields matching that string are - replaced by NULL. - See also array_to_string. - - - string_to_array('xx~~yy~~zz', '~~', 'yy') - {xx,NULL,zz} - - - - - - - string_to_table - - string_to_table ( string text, delimiter text , null_string text ) - setof text - - - Splits the string at occurrences - of delimiter and returns the resulting fields - as a set of text rows. - If delimiter is NULL, - each character in the string will become a - separate row of the result. - If delimiter is an empty string, then - the string is treated as a single field. - If null_string is supplied and is - not NULL, fields matching that string are - replaced by NULL. - - - string_to_table('xx~^~yy~^~zz', '~^~', 'yy') - - - xx - NULL - zz - - - - - - - - strpos - - strpos ( string text, substring text ) - integer - - - Returns first starting index of the specified substring - within string, or zero if it's not present. - (Same as position(substring in - string), but note the reversed - argument order.) - - - strpos('high', 'ig') - 2 - - - - - - - substr - - substr ( string text, start integer , count integer ) - text - - - Extracts the substring of string starting at - the start'th character, - and extending for count characters if that is - specified. (Same - as substring(string - from start - for count).) - - - substr('alphabet', 3) - phabet - - - substr('alphabet', 3, 2) - ph - - - - - - - to_ascii - - to_ascii ( string text ) - text - - - to_ascii ( string text, - encoding name ) - text - - - to_ascii ( string text, - encoding integer ) - text - - - Converts string to ASCII - from another encoding, which may be identified by name or number. - If encoding is omitted the database encoding - is assumed (which in practice is the only useful case). - The conversion consists primarily of dropping accents. - Conversion is only supported - from LATIN1, LATIN2, - LATIN9, and WIN1250 encodings. - (See the module for another, more flexible - solution.) - - - to_ascii('Karél') - Karel - - - - - - - to_bin - - to_bin ( integer ) - text - - - to_bin ( bigint ) - text - - - Converts the number to its equivalent two's complement binary - representation. 
- - - to_bin(2147483647) - 1111111111111111111111111111111 - - - to_bin(-1234) - 11111111111111111111101100101110 - - - - - - - to_hex - - to_hex ( integer ) - text - - - to_hex ( bigint ) - text - - - Converts the number to its equivalent two's complement hexadecimal - representation. - - - to_hex(2147483647) - 7fffffff - - - to_hex(-1234) - fffffb2e - - - - - - - to_oct - - to_oct ( integer ) - text - - - to_oct ( bigint ) - text - - - Converts the number to its equivalent two's complement octal - representation. - - - to_oct(2147483647) - 17777777777 - - - to_oct(-1234) - 37777775456 - - - - - - - translate - - translate ( string text, - from text, - to text ) - text - - - Replaces each character in string that - matches a character in the from set with the - corresponding character in the to - set. If from is longer than - to, occurrences of the extra characters in - from are deleted. - - - translate('12345', '143', 'ax') - a2x5 - - - - - - - unistr - - unistr ( text ) - text - - - Evaluate escaped Unicode characters in the argument. Unicode characters - can be specified as - \XXXX (4 hexadecimal - digits), \+XXXXXX (6 - hexadecimal digits), - \uXXXX (4 hexadecimal - digits), or \UXXXXXXXX - (8 hexadecimal digits). To specify a backslash, write two - backslashes. All other characters are taken literally. - - - - If the server encoding is not UTF-8, the Unicode code point identified - by one of these escape sequences is converted to the actual server - encoding; an error is reported if that's not possible. - - - - This function provides a (non-standard) alternative to string - constants with Unicode escapes (see ). - - - - unistr('d\0061t\+000061') - data - - - unistr('d\u0061t\U00000061') - data - - - - - -
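As a sketch of the casefold behavior described in this table (this assumes a UTF8 database in which the built-in PG_UNICODE_FAST collation is available; with the libc provider, casefold simply behaves like lower):

SELECT casefold('AbC' COLLATE PG_UNICODE_FAST);
Result: abc

SELECT casefold('Straße' COLLATE PG_UNICODE_FAST);
Result: strasse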
- - - The concat, concat_ws and - format functions are variadic, so it is possible to - pass the values to be concatenated or formatted as an array marked with - the VARIADIC keyword (see ). The array's elements are - treated as if they were separate ordinary arguments to the function. - If the variadic array argument is NULL, concat - and concat_ws return NULL, but - format treats a NULL as a zero-element array. - - - - See also the aggregate function string_agg in - , and the functions for - converting between strings and the bytea type in - . - - - - <function>format</function> - - - format - - - - The function format produces output formatted according to - a format string, in a style similar to the C function - sprintf. - - - - -format(formatstr text , formatarg "any" , ... ) - - formatstr is a format string that specifies how the - result should be formatted. Text in the format string is copied - directly to the result, except where format specifiers are - used. Format specifiers act as placeholders in the string, defining how - subsequent function arguments should be formatted and inserted into the - result. Each formatarg argument is converted to text - according to the usual output rules for its data type, and then formatted - and inserted into the result string according to the format specifier(s). - - - - Format specifiers are introduced by a % character and have - the form - -%[position][flags][width]type - - where the component fields are: - - - - position (optional) - - - A string of the form n$ where - n is the index of the argument to print. - Index 1 means the first argument after - formatstr. If the position is - omitted, the default is to use the next argument in sequence. - - - - - - flags (optional) - - - Additional options controlling how the format specifier's output is - formatted. Currently the only supported flag is a minus sign - (-) which will cause the format specifier's output to be - left-justified. This has no effect unless the width - field is also specified. - - - - - - width (optional) - - - Specifies the minimum number of characters to use to - display the format specifier's output. The output is padded on the - left or right (depending on the - flag) with spaces as - needed to fill the width. A too-small width does not cause - truncation of the output, but is simply ignored. The width may be - specified using any of the following: a positive integer; an - asterisk (*) to use the next function argument as the - width; or a string of the form *n$ to - use the nth function argument as the width. - - - - If the width comes from a function argument, that argument is - consumed before the argument that is used for the format specifier's - value. If the width argument is negative, the result is left - aligned (as if the - flag had been specified) within a - field of length abs(width). - - - - - - type (required) - - - The type of format conversion to use to produce the format - specifier's output. The following types are supported: - - - - s formats the argument value as a simple - string. A null value is treated as an empty string. - - - - - I treats the argument value as an SQL - identifier, double-quoting it if necessary. - It is an error for the value to be null (equivalent to - quote_ident). - - - - - L quotes the argument value as an SQL literal. - A null value is displayed as the string NULL, without - quotes (equivalent to quote_nullable). 
- - - - - - - - - - - In addition to the format specifiers described above, the special sequence - %% may be used to output a literal % character. - - - - Here are some examples of the basic format conversions: - - -SELECT format('Hello %s', 'World'); -Result: Hello World - -SELECT format('Testing %s, %s, %s, %%', 'one', 'two', 'three'); -Result: Testing one, two, three, % - -SELECT format('INSERT INTO %I VALUES(%L)', 'Foo bar', E'O\'Reilly'); -Result: INSERT INTO "Foo bar" VALUES('O''Reilly') - -SELECT format('INSERT INTO %I VALUES(%L)', 'locations', 'C:\Program Files'); -Result: INSERT INTO locations VALUES('C:\Program Files') - - - - - Here are examples using width fields - and the - flag: - - -SELECT format('|%10s|', 'foo'); -Result: | foo| - -SELECT format('|%-10s|', 'foo'); -Result: |foo | - -SELECT format('|%*s|', 10, 'foo'); -Result: | foo| - -SELECT format('|%*s|', -10, 'foo'); -Result: |foo | - -SELECT format('|%-*s|', 10, 'foo'); -Result: |foo | - -SELECT format('|%-*s|', -10, 'foo'); -Result: |foo | - - - - - These examples show use of position fields: - - -SELECT format('Testing %3$s, %2$s, %1$s', 'one', 'two', 'three'); -Result: Testing three, two, one - -SELECT format('|%*2$s|', 'foo', 10, 'bar'); -Result: | bar| - -SELECT format('|%1$*2$s|', 'foo', 10, 'bar'); -Result: | foo| - - - - - Unlike the standard C function sprintf, - PostgreSQL's format function allows format - specifiers with and without position fields to be mixed - in the same format string. A format specifier without a - position field always uses the next argument after the - last argument consumed. - In addition, the format function does not require all - function arguments to be used in the format string. - For example: - - -SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); -Result: Testing three, two, three - - - - - The %I and %L format specifiers are particularly - useful for safely constructing dynamic SQL statements. See - . - - - -
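The variadic invocation noted earlier for concat, concat_ws, and format can be sketched as follows:

SELECT concat_ws(',', VARIADIC ARRAY['a', 'b', NULL, 'c']);
Result: a,b,c

SELECT format('%s and %s', VARIADIC ARRAY['one', 'two']);
Result: one and two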
- - - - Binary String Functions and Operators - - - binary data - functions - - - - This section describes functions and operators for examining and - manipulating binary strings, that is values of type bytea. - Many of these are equivalent, in purpose and syntax, to the - text-string functions described in the previous section. - - - - SQL defines some string functions that use - key words, rather than commas, to separate - arguments. Details are in - . - PostgreSQL also provides versions of these functions - that use the regular function invocation syntax - (see ). - - - - <acronym>SQL</acronym> Binary String Functions and Operators - - - - - Function/Operator - - - Description - - - Example(s) - - - - - - - - - binary string - concatenation - - bytea || bytea - bytea - - - Concatenates the two binary strings. - - - '\x123456'::bytea || '\x789a00bcde'::bytea - \x123456789a00bcde - - - - - - - bit_length - - bit_length ( bytea ) - integer - - - Returns number of bits in the binary string (8 - times the octet_length). - - - bit_length('\x123456'::bytea) - 24 - - - - - - - btrim - - btrim ( bytes bytea, - bytesremoved bytea ) - bytea - - - Removes the longest string containing only bytes appearing in - bytesremoved from the start and end of - bytes. - - - btrim('\x1234567890'::bytea, '\x9012'::bytea) - \x345678 - - - - - - - ltrim - - ltrim ( bytes bytea, - bytesremoved bytea ) - bytea - - - Removes the longest string containing only bytes appearing in - bytesremoved from the start of - bytes. - - - ltrim('\x1234567890'::bytea, '\x9012'::bytea) - \x34567890 - - - - - - - octet_length - - octet_length ( bytea ) - integer - - - Returns number of bytes in the binary string. - - - octet_length('\x123456'::bytea) - 3 - - - - - - - overlay - - overlay ( bytes bytea PLACING newsubstring bytea FROM start integer FOR count integer ) - bytea - - - Replaces the substring of bytes that starts at - the start'th byte and extends - for count bytes - with newsubstring. - If count is omitted, it defaults to the length - of newsubstring. - - - overlay('\x1234567890'::bytea placing '\002\003'::bytea from 2 for 3) - \x12020390 - - - - - - - position - - position ( substring bytea IN bytes bytea ) - integer - - - Returns first starting index of the specified - substring within - bytes, or zero if it's not present. - - - position('\x5678'::bytea in '\x1234567890'::bytea) - 3 - - - - - - - rtrim - - rtrim ( bytes bytea, - bytesremoved bytea ) - bytea - - - Removes the longest string containing only bytes appearing in - bytesremoved from the end of - bytes. - - - rtrim('\x1234567890'::bytea, '\x9012'::bytea) - \x12345678 - - - - - - - substring - - substring ( bytes bytea FROM start integer FOR count integer ) - bytea - - - Extracts the substring of bytes starting at - the start'th byte if that is specified, - and stopping after count bytes if that is - specified. Provide at least one of start - and count. - - - substring('\x1234567890'::bytea from 3 for 2) - \x5678 - - - - - - - trim - - trim ( LEADING | TRAILING | BOTH - bytesremoved bytea FROM - bytes bytea ) - bytea - - - Removes the longest string containing only bytes appearing in - bytesremoved from the start, - end, or both ends (BOTH is the default) - of bytes. - - - trim('\x9012'::bytea from '\x1234567890'::bytea) - \x345678 - - - - - - trim ( LEADING | TRAILING | BOTH FROM - bytes bytea, - bytesremoved bytea ) - bytea - - - This is a non-standard syntax for trim(). - - - trim(both from '\x1234567890'::bytea, '\x9012'::bytea) - \x345678 - - - - -
- - - Additional binary string manipulation functions are available and - are listed in . Some - of them are used internally to implement the - SQL-standard string functions listed in . - - - - Other Binary String Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - bit_count - - - popcount - bit_count - - bit_count ( bytes bytea ) - bigint - - - Returns the number of bits set in the binary string (also known as - popcount). - - - bit_count('\x1234567890'::bytea) - 15 - - - - - - - crc32 - - crc32 ( bytea ) - bigint - - - Computes the CRC-32 value of the binary string. - - - crc32('abc'::bytea) - 891568578 - - - - - - - crc32c - - crc32c ( bytea ) - bigint - - - Computes the CRC-32C value of the binary string. - - - crc32c('abc'::bytea) - 910901175 - - - - - - - get_bit - - get_bit ( bytes bytea, - n bigint ) - integer - - - Extracts n'th bit - from binary string. - - - get_bit('\x1234567890'::bytea, 30) - 1 - - - - - - - get_byte - - get_byte ( bytes bytea, - n integer ) - integer - - - Extracts n'th byte - from binary string. - - - get_byte('\x1234567890'::bytea, 4) - 144 - - - - - - - length - - - binary string - length - - - length - of a binary string - binary strings, length - - length ( bytea ) - integer - - - Returns the number of bytes in the binary string. - - - length('\x1234567890'::bytea) - 5 - - - - - - length ( bytes bytea, - encoding name ) - integer - - - Returns the number of characters in the binary string, assuming - that it is text in the given encoding. - - - length('jose'::bytea, 'UTF8') - 4 - - - - - - - md5 - - md5 ( bytea ) - text - - - Computes the MD5 hash of - the binary string, with the result written in hexadecimal. - - - md5('Th\000omas'::bytea) - 8ab2d3c9689aaf18&zwsp;b4958c334c82d8b1 - - - - - - - reverse - - reverse ( bytea ) - bytea - - - Reverses the order of the bytes in the binary string. - - - reverse('\xabcd'::bytea) - \xcdab - - - - - - - set_bit - - set_bit ( bytes bytea, - n bigint, - newvalue integer ) - bytea - - - Sets n'th bit in - binary string to newvalue. - - - set_bit('\x1234567890'::bytea, 30, 0) - \x1234563890 - - - - - - - set_byte - - set_byte ( bytes bytea, - n integer, - newvalue integer ) - bytea - - - Sets n'th byte in - binary string to newvalue. - - - set_byte('\x1234567890'::bytea, 4, 64) - \x1234567840 - - - - - - - sha224 - - sha224 ( bytea ) - bytea - - - Computes the SHA-224 hash - of the binary string. - - - sha224('abc'::bytea) - \x23097d223405d8228642a477bda2&zwsp;55b32aadbce4bda0b3f7e36c9da7 - - - - - - - sha256 - - sha256 ( bytea ) - bytea - - - Computes the SHA-256 hash - of the binary string. - - - sha256('abc'::bytea) - \xba7816bf8f01cfea414140de5dae2223&zwsp;b00361a396177a9cb410ff61f20015ad - - - - - - - sha384 - - sha384 ( bytea ) - bytea - - - Computes the SHA-384 hash - of the binary string. - - - sha384('abc'::bytea) - \xcb00753f45a35e8bb5a03d699ac65007&zwsp;272c32ab0eded1631a8b605a43ff5bed&zwsp;8086072ba1e7cc2358baeca134c825a7 - - - - - - - sha512 - - sha512 ( bytea ) - bytea - - - Computes the SHA-512 hash - of the binary string. - - - sha512('abc'::bytea) - \xddaf35a193617abacc417349ae204131&zwsp;12e6fa4e89a97ea20a9eeee64b55d39a&zwsp;2192992a274fc1a836ba3c23a3feebbd&zwsp;454d4423643ce80e2a9ac94fa54ca49f - - - - - - - substr - - substr ( bytes bytea, start integer , count integer ) - bytea - - - Extracts the substring of bytes starting at - the start'th byte, - and extending for count bytes if that is - specified. (Same - as substring(bytes - from start - for count).) 
- - - substr('\x1234567890'::bytea, 3, 2) - \x5678 - - - - -
- - - Functions get_byte and set_byte - number the first byte of a binary string as byte 0. - Functions get_bit and set_bit - number bits from the right within each byte; for example bit 0 is the least - significant bit of the first byte, and bit 15 is the most significant bit - of the second byte. - - - - For historical reasons, the function md5 - returns a hex-encoded value of type text whereas the SHA-2 - functions return type bytea. Use the functions - encode - and decode to - convert between the two. For example write encode(sha256('abc'), - 'hex') to get a hex-encoded text representation, - or decode(md5('abc'), 'hex') to get - a bytea value. - - - - - character string - converting to binary string - - - binary string - converting to character string - - Functions for converting strings between different character sets - (encodings), and for representing arbitrary binary data in textual - form, are shown in - . For these - functions, an argument or result of type text is expressed - in the database's default encoding, while arguments or results of - type bytea are in an encoding named by another argument. - - - - Text/Binary String Conversion Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - convert - - convert ( bytes bytea, - src_encoding name, - dest_encoding name ) - bytea - - - Converts a binary string representing text in - encoding src_encoding - to a binary string in encoding dest_encoding - (see for - available conversions). - - - convert('text_in_utf8', 'UTF8', 'LATIN1') - \x746578745f696e5f75746638 - - - - - - - convert_from - - convert_from ( bytes bytea, - src_encoding name ) - text - - - Converts a binary string representing text in - encoding src_encoding - to text in the database encoding - (see for - available conversions). - - - convert_from('text_in_utf8', 'UTF8') - text_in_utf8 - - - - - - - convert_to - - convert_to ( string text, - dest_encoding name ) - bytea - - - Converts a text string (in the database encoding) to a - binary string encoded in encoding dest_encoding - (see for - available conversions). - - - convert_to('some_text', 'UTF8') - \x736f6d655f74657874 - - - - - - - encode - - encode ( bytes bytea, - format text ) - text - - - Encodes binary data into a textual representation; supported - format values are: - base64, - escape, - hex. - - - encode('123\000\001', 'base64') - MTIzAAE= - - - - - - - decode - - decode ( string text, - format text ) - bytea - - - Decodes binary data from a textual representation; supported - format values are the same as - for encode. - - - decode('MTIzAAE=', 'base64') - \x3132330001 - - - - -
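For example, text can be round-tripped through another encoding with the conversion functions above (a sketch, assuming all characters are representable in LATIN1):

SELECT convert_to('Grüße', 'LATIN1');
Result: \x4772fcdf65

SELECT convert_from(convert_to('Grüße', 'LATIN1'), 'LATIN1');
Result: Grüße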
- - - The encode and decode - functions support the following textual formats: - - - - base64 - - base64 format - - - - The base64 format is that - of RFC - 2045 Section 6.8. As per the RFC, encoded lines are - broken at 76 characters. However instead of the MIME CRLF - end-of-line marker, only a newline is used for end-of-line. - The decode function ignores carriage-return, - newline, space, and tab characters. Otherwise, an error is - raised when decode is supplied invalid - base64 data — including when trailing padding is incorrect. - - - - - - escape - - escape format - - - - The escape format converts zero bytes and - bytes with the high bit set into octal escape sequences - (\nnn), and it doubles - backslashes. Other byte values are represented literally. - The decode function will raise an error if a - backslash is not followed by either a second backslash or three - octal digits; it accepts other byte values unchanged. - - - - - - hex - - hex format - - - - The hex format represents each 4 bits of - data as one hexadecimal digit, 0 - through f, writing the higher-order digit of - each byte first. The encode function outputs - the a-f hex digits in lower - case. Because the smallest unit of data is 8 bits, there are - always an even number of characters returned - by encode. - The decode function - accepts the a-f characters in - either upper or lower case. An error is raised - when decode is given invalid hex data - — including when given an odd number of characters. - - - - - - - - In addition, it is possible to cast integral values to and from type - bytea. Casting an integer to bytea produces - 2, 4, or 8 bytes, depending on the width of the integer type. The result - is the two's complement representation of the integer, with the most - significant byte first. Some examples: - -1234::smallint::bytea \x04d2 -cast(1234 as bytea) \x000004d2 -cast(-1234 as bytea) \xfffffb2e -'\x8000'::bytea::smallint -32768 -'\x8000'::bytea::integer 32768 - - Casting a bytea to an integer will raise an error if the - length of the bytea exceeds the width of the integer type. - - - - See also the aggregate function string_agg in - and the large object functions - in . - -
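To make the escape format described above concrete, a short sketch (with standard_conforming_strings on): the zero byte and the byte with the high bit set become octal escape sequences, while plain ASCII bytes pass through unchanged.

SELECT encode('\x00616263ff'::bytea, 'escape');
Result: \000abc\377

SELECT decode('\000abc\377', 'escape');
Result: \x00616263ff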
- - - - Bit String Functions and Operators - - - bit strings - functions - - - - This section describes functions and operators for examining and - manipulating bit strings, that is values of the types - bit and bit varying. (While only - type bit is mentioned in these tables, values of - type bit varying can be used interchangeably.) - Bit strings support the usual comparison operators shown in - , as well as the - operators shown in . - - - - Bit String Operators - - - - - Operator - - - Description - - - Example(s) - - - - - - - - bit || bit - bit - - - Concatenation - - - B'10001' || B'011' - 10001011 - - - - - - bit & bit - bit - - - Bitwise AND (inputs must be of equal length) - - - B'10001' & B'01101' - 00001 - - - - - - bit | bit - bit - - - Bitwise OR (inputs must be of equal length) - - - B'10001' | B'01101' - 11101 - - - - - - bit # bit - bit - - - Bitwise exclusive OR (inputs must be of equal length) - - - B'10001' # B'01101' - 11100 - - - - - - ~ bit - bit - - - Bitwise NOT - - - ~ B'10001' - 01110 - - - - - - bit << integer - bit - - - Bitwise shift left - (string length is preserved) - - - B'10001' << 3 - 01000 - - - - - - bit >> integer - bit - - - Bitwise shift right - (string length is preserved) - - - B'10001' >> 2 - 00100 - - - - -
- - - Some of the functions available for binary strings are also available - for bit strings, as shown in . - - - - Bit String Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - bit_count - - bit_count ( bit ) - bigint - - - Returns the number of bits set in the bit string (also known as - popcount). - - - bit_count(B'10111') - 4 - - - - - - - bit_length - - bit_length ( bit ) - integer - - - Returns number of bits in the bit string. - - - bit_length(B'10111') - 5 - - - - - - - length - - - bit string - length - - length ( bit ) - integer - - - Returns number of bits in the bit string. - - - length(B'10111') - 5 - - - - - - - octet_length - - octet_length ( bit ) - integer - - - Returns number of bytes in the bit string. - - - octet_length(B'1011111011') - 2 - - - - - - - overlay - - overlay ( bits bit PLACING newsubstring bit FROM start integer FOR count integer ) - bit - - - Replaces the substring of bits that starts at - the start'th bit and extends - for count bits - with newsubstring. - If count is omitted, it defaults to the length - of newsubstring. - - - overlay(B'01010101010101010' placing B'11111' from 2 for 3) - 0111110101010101010 - - - - - - - position - - position ( substring bit IN bits bit ) - integer - - - Returns first starting index of the specified substring - within bits, or zero if it's not present. - - - position(B'010' in B'000001101011') - 8 - - - - - - - substring - - substring ( bits bit FROM start integer FOR count integer ) - bit - - - Extracts the substring of bits starting at - the start'th bit if that is specified, - and stopping after count bits if that is - specified. Provide at least one of start - and count. - - - substring(B'110010111111' from 3 for 2) - 00 - - - - - - - get_bit - - get_bit ( bits bit, - n integer ) - integer - - - Extracts n'th bit - from bit string; the first (leftmost) bit is bit 0. - - - get_bit(B'101010101010101010', 6) - 1 - - - - - - - set_bit - - set_bit ( bits bit, - n integer, - newvalue integer ) - bit - - - Sets n'th bit in - bit string to newvalue; - the first (leftmost) bit is bit 0. - - - set_bit(B'101010101010101010', 6, 0) - 101010001010101010 - - - - -
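Note that get_bit and set_bit on type bit count from the leftmost bit, unlike their bytea counterparts in the previous section, which number bits from the right within each byte. An illustrative comparison:

SELECT get_bit(B'10000000', 0);
Result: 1

SELECT get_bit('\x80'::bytea, 0);
Result: 0

SELECT get_bit('\x80'::bytea, 7);
Result: 1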
In addition, it is possible to cast integral values to and from type bit. Casting an integer to bit(n) copies the rightmost n bits. Casting an integer to a bit string width wider than the integer itself will sign-extend on the left. Some examples:

44::bit(10)                   0000101100
44::bit(3)                    100
cast(-44 as bit(12))          111111010100
'1110'::bit(4)::integer       14

Note that casting to just bit means casting to bit(1), and so will deliver only the least significant bit of the integer.
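These casts also make it convenient to apply the bit-string operators shown above to integers; an illustrative sketch:

SELECT (44::bit(8) | 3::bit(8))::integer;
Result: 47

SELECT (~ 44::bit(8))::integer;
Result: 211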
- - - - Pattern Matching - - - pattern matching - - - - There are three separate approaches to pattern matching provided - by PostgreSQL: the traditional - SQL LIKE operator, the - more recent SIMILAR TO operator (added in - SQL:1999), and POSIX-style regular - expressions. Aside from the basic does this string match - this pattern? operators, functions are available to extract - or replace matching substrings and to split a string at matching - locations. - - - - - If you have pattern matching needs that go beyond this, - consider writing a user-defined function in Perl or Tcl. - - - - - - While most regular-expression searches can be executed very quickly, - regular expressions can be contrived that take arbitrary amounts of - time and memory to process. Be wary of accepting regular-expression - search patterns from hostile sources. If you must do so, it is - advisable to impose a statement timeout. - - - - Searches using SIMILAR TO patterns have the same - security hazards, since SIMILAR TO provides many - of the same capabilities as POSIX-style regular - expressions. - - - - LIKE searches, being much simpler than the other - two options, are safer to use with possibly-hostile pattern sources. - - - - - SIMILAR TO and POSIX-style regular - expressions do not support nondeterministic collations. If required, use - LIKE or apply a different collation to the expression - to work around this limitation. - - - - <function>LIKE</function> - - - LIKE - - - -string LIKE pattern ESCAPE escape-character -string NOT LIKE pattern ESCAPE escape-character - - - - The LIKE expression returns true if the - string matches the supplied - pattern. (As - expected, the NOT LIKE expression returns - false if LIKE returns true, and vice versa. - An equivalent expression is - NOT (string LIKE - pattern).) - - - - If pattern does not contain percent - signs or underscores, then the pattern only represents the string - itself; in that case LIKE acts like the - equals operator. An underscore (_) in - pattern stands for (matches) any single - character; a percent sign (%) matches any sequence - of zero or more characters. - - - - Some examples: - -'abc' LIKE 'abc' true -'abc' LIKE 'a%' true -'abc' LIKE '_b_' true -'abc' LIKE 'c' false - - - - - LIKE pattern matching supports nondeterministic - collations (see ), such as - case-insensitive collations or collations that, say, ignore punctuation. - So with a case-insensitive collation, one could have: - -'AbC' LIKE 'abc' COLLATE case_insensitive true -'AbC' LIKE 'a%' COLLATE case_insensitive true - - With collations that ignore certain characters or in general that consider - strings of different lengths equal, the semantics can become a bit more - complicated. Consider these examples: - -'.foo.' LIKE 'foo' COLLATE ign_punct true -'.foo.' LIKE 'f_o' COLLATE ign_punct true -'.foo.' LIKE '_oo' COLLATE ign_punct false - - The way the matching works is that the pattern is partitioned into - sequences of wildcards and non-wildcard strings (wildcards being - _ and %). For example, the pattern - f_o is partitioned into f, _, o, the - pattern _oo is partitioned into _, - oo. The input string matches the pattern if it can be - partitioned in such a way that the wildcards match one character or any - number of characters respectively and the non-wildcard partitions are - equal under the applicable collation. So for example, '.foo.' - LIKE 'f_o' COLLATE ign_punct is true because one can partition - .foo. 
into .f, o, o., and then - '.f' = 'f' COLLATE ign_punct, 'o' - matches the _ wildcard, and 'o.' = 'o' COLLATE - ign_punct. But '.foo.' LIKE '_oo' COLLATE - ign_punct is false because .foo. cannot be - partitioned in a way that the first character is any character and the - rest of the string compares equal to oo. (Note that - the single-character wildcard always matches exactly one character, - independent of the collation. So in this example, the - _ would match ., but then the rest - of the input string won't match the rest of the pattern.) - - - - LIKE pattern matching always covers the entire - string. Therefore, if it's desired to match a sequence anywhere within - a string, the pattern must start and end with a percent sign. - - - - To match a literal underscore or percent sign without matching - other characters, the respective character in - pattern must be - preceded by the escape character. The default escape - character is the backslash but a different one can be selected by - using the ESCAPE clause. To match the escape - character itself, write two escape characters. - - - - - If you have turned off, - any backslashes you write in literal string constants will need to be - doubled. See for more information. - - - - - It's also possible to select no escape character by writing - ESCAPE ''. This effectively disables the - escape mechanism, which makes it impossible to turn off the - special meaning of underscore and percent signs in the pattern. - - - - According to the SQL standard, omitting ESCAPE - means there is no escape character (rather than defaulting to a - backslash), and a zero-length ESCAPE value is - disallowed. PostgreSQL's behavior in - this regard is therefore slightly nonstandard. - - - - The key word ILIKE can be used instead of - LIKE to make the match case-insensitive according to the - active locale. (But this does not support nondeterministic collations.) - This is not in the SQL standard but is a - PostgreSQL extension. - - - - The operator ~~ is equivalent to - LIKE, and ~~* corresponds to - ILIKE. There are also - !~~ and !~~* operators that - represent NOT LIKE and NOT - ILIKE, respectively. All of these operators are - PostgreSQL-specific. You may see these - operator names in EXPLAIN output and similar - places, since the parser actually translates LIKE - et al. to these operators. - - - - The phrases LIKE, ILIKE, - NOT LIKE, and NOT ILIKE are - generally treated as operators - in PostgreSQL syntax; for example they can - be used in expression - operator ANY - (subquery) constructs, although - an ESCAPE clause cannot be included there. In some - obscure cases it may be necessary to use the underlying operator names - instead. - - - - Also see the starts-with operator ^@ and the - corresponding starts_with() function, which are - useful in cases where simply matching the beginning of a string is - needed. - - - - - - <function>SIMILAR TO</function> Regular Expressions - - - regular expression - - - - - SIMILAR TO - - - substring - - - -string SIMILAR TO pattern ESCAPE escape-character -string NOT SIMILAR TO pattern ESCAPE escape-character - - - - The SIMILAR TO operator returns true or - false depending on whether its pattern matches the given string. - It is similar to LIKE, except that it - interprets the pattern using the SQL standard's definition of a - regular expression. SQL regular expressions are a curious cross - between LIKE notation and common (POSIX) regular - expression notation. 
- - - - Like LIKE, the SIMILAR TO - operator succeeds only if its pattern matches the entire string; - this is unlike common regular expression behavior where the pattern - can match any part of the string. - Also like - LIKE, SIMILAR TO uses - _ and % as wildcard characters denoting - any single character and any string, respectively (these are - comparable to . and .* in POSIX regular - expressions). - - - - In addition to these facilities borrowed from LIKE, - SIMILAR TO supports these pattern-matching - metacharacters borrowed from POSIX regular expressions: - - - - - | denotes alternation (either of two alternatives). - - - - - * denotes repetition of the previous item zero - or more times. - - - - - + denotes repetition of the previous item one - or more times. - - - - - ? denotes repetition of the previous item zero - or one time. - - - - - {m} denotes repetition - of the previous item exactly m times. - - - - - {m,} denotes repetition - of the previous item m or more times. - - - - - {m,n} - denotes repetition of the previous item at least m and - not more than n times. - - - - - Parentheses () can be used to group items into - a single logical item. - - - - - A bracket expression [...] specifies a character - class, just as in POSIX regular expressions. - - - - - Notice that the period (.) is not a metacharacter - for SIMILAR TO. - - - - As with LIKE, a backslash disables the special - meaning of any of these metacharacters. A different escape character - can be specified with ESCAPE, or the escape - capability can be disabled by writing ESCAPE ''. - - - - According to the SQL standard, omitting ESCAPE - means there is no escape character (rather than defaulting to a - backslash), and a zero-length ESCAPE value is - disallowed. PostgreSQL's behavior in - this regard is therefore slightly nonstandard. - - - - Another nonstandard extension is that following the escape character - with a letter or digit provides access to the escape sequences - defined for POSIX regular expressions; see - , - , and - below. - - - - Some examples: - -'abc' SIMILAR TO 'abc' true -'abc' SIMILAR TO 'a' false -'abc' SIMILAR TO '%(b|d)%' true -'abc' SIMILAR TO '(b|c)%' false -'-abc-' SIMILAR TO '%\mabc\M%' true -'xabcy' SIMILAR TO '%\mabc\M%' false - - - - - The substring function with three parameters - provides extraction of a substring that matches an SQL - regular expression pattern. The function can be written according - to standard SQL syntax: - -substring(string similar pattern escape escape-character) - - or using the now obsolete SQL:1999 syntax: - -substring(string from pattern for escape-character) - - or as a plain three-argument function: - -substring(string, pattern, escape-character) - - As with SIMILAR TO, the - specified pattern must match the entire data string, or else the - function fails and returns null. To indicate the part of the - pattern for which the matching data sub-string is of interest, - the pattern should contain - two occurrences of the escape character followed by a double quote - ("). - The text matching the portion of the pattern - between these separators is returned when the match is successful. - - - - The escape-double-quote separators actually - divide substring's pattern into three independent - regular expressions; for example, a vertical bar (|) - in any of the three sections affects only that section. 
Also, the first - and third of these regular expressions are defined to match the smallest - possible amount of text, not the largest, when there is any ambiguity - about how much of the data string matches which pattern. (In POSIX - parlance, the first and third regular expressions are forced to be - non-greedy.) - - - - As an extension to the SQL standard, PostgreSQL - allows there to be just one escape-double-quote separator, in which case - the third regular expression is taken as empty; or no separators, in which - case the first and third regular expressions are taken as empty. - - - - Some examples, with #" delimiting the return string: - -substring('foobar' similar '%#"o_b#"%' escape '#') oob -substring('foobar' similar '#"o_b#"%' escape '#') NULL - - - - - - <acronym>POSIX</acronym> Regular Expressions - - - regular expression - pattern matching - - - substring - - - regexp_count - - - regexp_instr - - - regexp_like - - - regexp_match - - - regexp_matches - - - regexp_replace - - - regexp_split_to_table - - - regexp_split_to_array - - - regexp_substr - - - - lists the available - operators for pattern matching using POSIX regular expressions. - - - - Regular Expression Match Operators - - - - - - Operator - - - Description - - - Example(s) - - - - - - - - text ~ text - boolean - - - String matches regular expression, case sensitively - - - 'thomas' ~ 't.*ma' - t - - - - - - text ~* text - boolean - - - String matches regular expression, case-insensitively - - - 'thomas' ~* 'T.*ma' - t - - - - - - text !~ text - boolean - - - String does not match regular expression, case sensitively - - - 'thomas' !~ 't.*max' - t - - - - - - text !~* text - boolean - - - String does not match regular expression, case-insensitively - - - 'thomas' !~* 'T.*ma' - f - - - - -
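For comparison with LIKE and SIMILAR TO, which must match the entire string, a POSIX regular expression may match any part of it:

'abcd' LIKE 'bc'          false
'abcd' SIMILAR TO 'bc'    false
'abcd' ~ 'bc'             true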
- - - POSIX regular expressions provide a more - powerful means for pattern matching than the LIKE and - SIMILAR TO operators. - Many Unix tools such as egrep, - sed, or awk use a pattern - matching language that is similar to the one described here. - - - - A regular expression is a character sequence that is an - abbreviated definition of a set of strings (a regular - set). A string is said to match a regular expression - if it is a member of the regular set described by the regular - expression. As with LIKE, pattern characters - match string characters exactly unless they are special characters - in the regular expression language — but regular expressions use - different special characters than LIKE does. - Unlike LIKE patterns, a - regular expression is allowed to match anywhere within a string, unless - the regular expression is explicitly anchored to the beginning or - end of the string. - - - - Some examples: - -'abcd' ~ 'bc' true -'abcd' ~ 'a.c' true — dot matches any character -'abcd' ~ 'a.*d' true — * repeats the preceding pattern item -'abcd' ~ '(b|x)' true — | means OR, parentheses group -'abcd' ~ '^a' true — ^ anchors to start of string -'abcd' ~ '^(b|c)' false — would match except for anchoring - - - - - The POSIX pattern language is described in much - greater detail below. - - - - The substring function with two parameters, - substring(string from - pattern), provides extraction of a - substring - that matches a POSIX regular expression pattern. It returns null if - there is no match, otherwise the first portion of the text that matched the - pattern. But if the pattern contains any parentheses, the portion - of the text that matched the first parenthesized subexpression (the - one whose left parenthesis comes first) is - returned. You can put parentheses around the whole expression - if you want to use parentheses within it without triggering this - exception. If you need parentheses in the pattern before the - subexpression you want to extract, see the non-capturing parentheses - described below. - - - - Some examples: - -substring('foobar' from 'o.b') oob -substring('foobar' from 'o(.)b') o - - - - - The regexp_count function counts the number of - places where a POSIX regular expression pattern matches a string. - It has the syntax - regexp_count(string, - pattern - , start - , flags - ). - pattern is searched for - in string, normally from the beginning of - the string, but if the start parameter is - provided then beginning from that character index. - The flags parameter is an optional text - string containing zero or more single-letter flags that change the - function's behavior. For example, including i in - flags specifies case-insensitive matching. - Supported flags are described in - . - - - - Some examples: - -regexp_count('ABCABCAXYaxy', 'A.') 3 -regexp_count('ABCABCAXYaxy', 'A.', 1, 'i') 4 - - - - - The regexp_instr function returns the starting or - ending position of the N'th match of a - POSIX regular expression pattern to a string, or zero if there is no - such match. It has the syntax - regexp_instr(string, - pattern - , start - , N - , endoption - , flags - , subexpr - ). - pattern is searched for - in string, normally from the beginning of - the string, but if the start parameter is - provided then beginning from that character index. - If N is specified - then the N'th match of the pattern - is located, otherwise the first match is located. 
- If the endoption parameter is omitted or - specified as zero, the function returns the position of the first - character of the match. Otherwise, endoption - must be one, and the function returns the position of the character - following the match. - The flags parameter is an optional text - string containing zero or more single-letter flags that change the - function's behavior. Supported flags are described - in . - For a pattern containing parenthesized - subexpressions, subexpr is an integer - indicating which subexpression is of interest: the result identifies - the position of the substring matching that subexpression. - Subexpressions are numbered in the order of their leading parentheses. - When subexpr is omitted or zero, the result - identifies the position of the whole match regardless of - parenthesized subexpressions. - - - - Some examples: - -regexp_instr('number of your street, town zip, FR', '[^,]+', 1, 2) - 23 -regexp_instr(string=>'ABCDEFGHI', pattern=>'(c..)(...)', start=>1, "N"=>1, endoption=>0, flags=>'i', subexpr=>2) - 6 - - - - - The regexp_like function checks whether a match - of a POSIX regular expression pattern occurs within a string, - returning boolean true or false. It has the syntax - regexp_like(string, - pattern - , flags ). - The flags parameter is an optional text - string containing zero or more single-letter flags that change the - function's behavior. Supported flags are described - in . - This function has the same results as the ~ - operator if no flags are specified. If only the i - flag is specified, it has the same results as - the ~* operator. - - - - Some examples: - -regexp_like('Hello World', 'world') false -regexp_like('Hello World', 'world', 'i') true - - - - - The regexp_match function returns a text array of - matching substring(s) within the first match of a POSIX - regular expression pattern to a string. It has the syntax - regexp_match(string, - pattern , flags ). - If there is no match, the result is NULL. - If a match is found, and the pattern contains no - parenthesized subexpressions, then the result is a single-element text - array containing the substring matching the whole pattern. - If a match is found, and the pattern contains - parenthesized subexpressions, then the result is a text array - whose n'th element is the substring matching - the n'th parenthesized subexpression of - the pattern (not counting non-capturing - parentheses; see below for details). - The flags parameter is an optional text string - containing zero or more single-letter flags that change the function's - behavior. Supported flags are described - in . - - - - Some examples: - -SELECT regexp_match('foobarbequebaz', 'bar.*que'); - regexp_match --------------- - {barbeque} -(1 row) - -SELECT regexp_match('foobarbequebaz', '(bar)(beque)'); - regexp_match --------------- - {bar,beque} -(1 row) - - - - - - In the common case where you just want the whole matching substring - or NULL for no match, the best solution is to - use regexp_substr(). - However, regexp_substr() only exists - in PostgreSQL version 15 and up. When - working in older versions, you can extract the first element - of regexp_match()'s result, for example: - -SELECT (regexp_match('foobarbequebaz', 'bar.*que'))[1]; - regexp_match --------------- - barbeque -(1 row) - - - - - - The regexp_matches function returns a set of text arrays - of matching substring(s) within matches of a POSIX regular - expression pattern to a string. It has the same syntax as - regexp_match. 
- This function returns no rows if there is no match, one row if there is - a match and the g flag is not given, or N - rows if there are N matches and the g flag - is given. Each returned row is a text array containing the whole - matched substring or the substrings matching parenthesized - subexpressions of the pattern, just as described above - for regexp_match. - regexp_matches accepts all the flags shown - in , plus - the g flag which commands it to return all matches, not - just the first one. - - - - Some examples: - -SELECT regexp_matches('foo', 'not there'); - regexp_matches ----------------- -(0 rows) - -SELECT regexp_matches('foobarbequebazilbarfbonk', '(b[^b]+)(b[^b]+)', 'g'); - regexp_matches ----------------- - {bar,beque} - {bazil,barf} -(2 rows) - - - - - - In most cases regexp_matches() should be used with - the g flag, since if you only want the first match, it's - easier and more efficient to use regexp_match(). - However, regexp_match() only exists - in PostgreSQL version 10 and up. When working in older - versions, a common trick is to place a regexp_matches() - call in a sub-select, for example: - -SELECT col1, (SELECT regexp_matches(col2, '(bar)(beque)')) FROM tab; - - This produces a text array if there's a match, or NULL if - not, the same as regexp_match() would do. Without the - sub-select, this query would produce no output at all for table rows - without a match, which is typically not the desired behavior. - - - - - The regexp_replace function provides substitution of - new text for substrings that match POSIX regular expression patterns. - It has the syntax - regexp_replace(string, - pattern, replacement - , flags ) - or - regexp_replace(string, - pattern, replacement, - start - , N - , flags ). - The source string is returned unchanged if - there is no match to the pattern. If there is a - match, the string is returned with the - replacement string substituted for the matching - substring. The replacement string can contain - \n, where n is 1 - through 9, to indicate that the source substring matching the - n'th parenthesized subexpression of the pattern should be - inserted, and it can contain \& to indicate that the - substring matching the entire pattern should be inserted. Write - \\ if you need to put a literal backslash in the replacement - text. - pattern is searched for - in string, normally from the beginning of - the string, but if the start parameter is - provided then beginning from that character index. - By default, only the first match of the pattern is replaced. - If N is specified and is greater than zero, - then the N'th match of the pattern - is replaced. - If the g flag is given, or - if N is specified and is zero, then all - matches at or after the start position are - replaced. (The g flag is ignored - when N is specified.) - The flags parameter is an optional text - string containing zero or more single-letter flags that change the - function's behavior. Supported flags (though - not g) are - described in . 
- - - - Some examples: - -regexp_replace('foobarbaz', 'b..', 'X') - fooXbaz -regexp_replace('foobarbaz', 'b..', 'X', 'g') - fooXX -regexp_replace('foobarbaz', 'b(..)', 'X\1Y', 'g') - fooXarYXazY -regexp_replace('A PostgreSQL function', 'a|e|i|o|u', 'X', 1, 0, 'i') - X PXstgrXSQL fXnctXXn -regexp_replace(string=>'A PostgreSQL function', pattern=>'a|e|i|o|u', replacement=>'X', start=>1, "N"=>3, flags=>'i') - A PostgrXSQL function - - - - - The regexp_split_to_table function splits a string using a POSIX - regular expression pattern as a delimiter. It has the syntax - regexp_split_to_table(string, pattern - , flags ). - If there is no match to the pattern, the function returns the - string. If there is at least one match, for each match it returns - the text from the end of the last match (or the beginning of the string) - to the beginning of the match. When there are no more matches, it - returns the text from the end of the last match to the end of the string. - The flags parameter is an optional text string containing - zero or more single-letter flags that change the function's behavior. - regexp_split_to_table supports the flags described in - . - - - - The regexp_split_to_array function behaves the same as - regexp_split_to_table, except that regexp_split_to_array - returns its result as an array of text. It has the syntax - regexp_split_to_array(string, pattern - , flags ). - The parameters are the same as for regexp_split_to_table. - - - - Some examples: - -SELECT foo FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', '\s+') AS foo; - foo -------- - the - quick - brown - fox - jumps - over - the - lazy - dog -(9 rows) - -SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', '\s+'); - regexp_split_to_array ------------------------------------------------ - {the,quick,brown,fox,jumps,over,the,lazy,dog} -(1 row) - -SELECT foo FROM regexp_split_to_table('the quick brown fox', '\s*') AS foo; - foo ------ - t - h - e - q - u - i - c - k - b - r - o - w - n - f - o - x -(16 rows) - - - - - As the last example demonstrates, the regexp split functions ignore - zero-length matches that occur at the start or end of the string - or immediately after a previous match. This is contrary to the strict - definition of regexp matching that is implemented by - the other regexp functions, but is usually the most convenient behavior - in practice. Other software systems such as Perl use similar definitions. - - - - The regexp_substr function returns the substring - that matches a POSIX regular expression pattern, - or NULL if there is no match. It has the syntax - regexp_substr(string, - pattern - , start - , N - , flags - , subexpr - ). - pattern is searched for - in string, normally from the beginning of - the string, but if the start parameter is - provided then beginning from that character index. - If N is specified - then the N'th match of the pattern - is returned, otherwise the first match is returned. - The flags parameter is an optional text - string containing zero or more single-letter flags that change the - function's behavior. Supported flags are described - in . - For a pattern containing parenthesized - subexpressions, subexpr is an integer - indicating which subexpression is of interest: the result is the - substring matching that subexpression. - Subexpressions are numbered in the order of their leading parentheses. - When subexpr is omitted or zero, the result - is the whole match regardless of parenthesized subexpressions. 
- - - - Some examples: - -regexp_substr('number of your street, town zip, FR', '[^,]+', 1, 2) - town zip -regexp_substr('ABCDEFGHI', '(c..)(...)', 1, 1, 'i', 2) - FGH - - - - - - - Regular Expression Details - - - PostgreSQL's regular expressions are implemented - using a software package written by Henry Spencer. Much of - the description of regular expressions below is copied verbatim from his - manual. - - - - Regular expressions (REs), as defined in - POSIX 1003.2, come in two forms: - extended REs or EREs - (roughly those of egrep), and - basic REs or BREs - (roughly those of ed). - PostgreSQL supports both forms, and - also implements some extensions - that are not in the POSIX standard, but have become widely used - due to their availability in programming languages such as Perl and Tcl. - REs using these non-POSIX extensions are called - advanced REs or AREs - in this documentation. AREs are almost an exact superset of EREs, - but BREs have several notational incompatibilities (as well as being - much more limited). - We first describe the ARE and ERE forms, noting features that apply - only to AREs, and then describe how BREs differ. - - - - - PostgreSQL always initially presumes that a regular - expression follows the ARE rules. However, the more limited ERE or - BRE rules can be chosen by prepending an embedded option - to the RE pattern, as described in . - This can be useful for compatibility with applications that expect - exactly the POSIX 1003.2 rules. - - - - - A regular expression is defined as one or more - branches, separated by - |. It matches anything that matches one of the - branches. - - - - A branch is zero or more quantified atoms or - constraints, concatenated. - It matches a match for the first, followed by a match for the second, etc.; - an empty branch matches the empty string. - - - - A quantified atom is an atom possibly followed - by a single quantifier. - Without a quantifier, it matches a match for the atom. - With a quantifier, it can match some number of matches of the atom. - An atom can be any of the possibilities - shown in . - The possible quantifiers and their meanings are shown in - . - - - - A constraint matches an empty string, but matches only when - specific conditions are met. A constraint can be used where an atom - could be used, except it cannot be followed by a quantifier. - The simple constraints are shown in - ; - some more constraints are described later. - - - - - Regular Expression Atoms - - - - - Atom - Description - - - - - - (re) - (where re is any regular expression) - matches a match for - re, with the match noted for possible reporting - - - - (?:re) - as above, but the match is not noted for reporting - (a non-capturing set of parentheses) - (AREs only) - - - - . - matches any single character - - - - [chars] - a bracket expression, - matching any one of the chars (see - for more detail) - - - - \k - (where k is a non-alphanumeric character) - matches that character taken as an ordinary character, - e.g., \\ matches a backslash character - - - - \c - where c is alphanumeric - (possibly followed by other characters) - is an escape, see - (AREs only; in EREs and BREs, this matches c) - - - - { - when followed by a character other than a digit, - matches the left-brace character {; - when followed by a digit, it is the beginning of a - bound (see below) - - - - x - where x is a single character with no other - significance, matches that character - - - -
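As a quick illustration of capturing versus non-capturing parentheses (results shown as expected under the default ARE rules):

SELECT regexp_match('abc', '(a)(b)');
Result: {a,b}
SELECT regexp_match('abc', '(?:a)(b)');
Result: {b}

The non-capturing group participates in the match but is not reported.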
- - - An RE cannot end with a backslash (\). - - - - - If you have turned off, - any backslashes you write in literal string constants will need to be - doubled. See for more information. - - - - - Regular Expression Quantifiers - - - - - Quantifier - Matches - - - - - - * - a sequence of 0 or more matches of the atom - - - - + - a sequence of 1 or more matches of the atom - - - - ? - a sequence of 0 or 1 matches of the atom - - - - {m} - a sequence of exactly m matches of the atom - - - - {m,} - a sequence of m or more matches of the atom - - - - - {m,n} - a sequence of m through n - (inclusive) matches of the atom; m cannot exceed - n - - - - *? - non-greedy version of * - - - - +? - non-greedy version of + - - - - ?? - non-greedy version of ? - - - - {m}? - non-greedy version of {m} - - - - {m,}? - non-greedy version of {m,} - - - - - {m,n}? - non-greedy version of {m,n} - - - -
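For instance, a greedy quantifier prefers the longest match while its non-greedy counterpart prefers the shortest (a minimal illustration; the matching rules below give the full story):

SELECT regexp_match('abbbc', 'ab+');
Result: {abbb}
SELECT regexp_match('abbbc', 'ab+?');
Result: {ab}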
- - - The forms using {...} - are known as bounds. - The numbers m and n within a bound are - unsigned decimal integers with permissible values from 0 to 255 inclusive. - - - - Non-greedy quantifiers (available in AREs only) match the - same possibilities as their corresponding normal (greedy) - counterparts, but prefer the smallest number rather than the largest - number of matches. - See for more detail. - - - - - A quantifier cannot immediately follow another quantifier, e.g., - ** is invalid. - A quantifier cannot - begin an expression or subexpression or follow - ^ or |. - - - - - Regular Expression Constraints - - - - - Constraint - Description - - - - - - ^ - matches at the beginning of the string - - - - $ - matches at the end of the string - - - - (?=re) - positive lookahead matches at any point - where a substring matching re begins - (AREs only) - - - - (?!re) - negative lookahead matches at any point - where no substring matching re begins - (AREs only) - - - - (?<=re) - positive lookbehind matches at any point - where a substring matching re ends - (AREs only) - - - - (?<!re) - negative lookbehind matches at any point - where no substring matching re ends - (AREs only) - - - -
- - - Lookahead and lookbehind constraints cannot contain back - references (see ), - and all parentheses within them are considered non-capturing. - -
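To illustrate, a lookahead or lookbehind constrains the match without consuming the constrained text (results as one would expect on any recent PostgreSQL version):

SELECT regexp_match('foobar', 'foo(?=bar)');
Result: {foo}
SELECT regexp_match('foobar', '(?<=foo)bar');
Result: {bar}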
- - - Bracket Expressions - - - A bracket expression is a list of - characters enclosed in []. It normally matches - any single character from the list (but see below). If the list - begins with ^, it matches any single character - not from the rest of the list. - If two characters - in the list are separated by -, this is - shorthand for the full range of characters between those two - (inclusive) in the collating sequence, - e.g., [0-9] in ASCII matches - any decimal digit. It is illegal for two ranges to share an - endpoint, e.g., a-c-e. Ranges are very - collating-sequence-dependent, so portable programs should avoid - relying on them. - - - - To include a literal ] in the list, make it the - first character (after ^, if that is used). To - include a literal -, make it the first or last - character, or the second endpoint of a range. To use a literal - - as the first endpoint of a range, enclose it - in [. and .] to make it a - collating element (see below). With the exception of these characters, - some combinations using [ - (see next paragraphs), and escapes (AREs only), all other special - characters lose their special significance within a bracket expression. - In particular, \ is not special when following - ERE or BRE rules, though it is special (as introducing an escape) - in AREs. - - - - Within a bracket expression, a collating element (a character, a - multiple-character sequence that collates as if it were a single - character, or a collating-sequence name for either) enclosed in - [. and .] stands for the - sequence of characters of that collating element. The sequence is - treated as a single element of the bracket expression's list. This - allows a bracket - expression containing a multiple-character collating element to - match more than one character, e.g., if the collating sequence - includes a ch collating element, then the RE - [[.ch.]]*c matches the first five characters of - chchcc. - - - - - PostgreSQL currently does not support multi-character collating - elements. This information describes possible future behavior. - - - - - Within a bracket expression, a collating element enclosed in - [= and =] is an equivalence - class, standing for the sequences of characters of all collating - elements equivalent to that one, including itself. (If there are - no other equivalent collating elements, the treatment is as if the - enclosing delimiters were [. and - .].) For example, if o and - ^ are the members of an equivalence class, then - [[=o=]], [[=^=]], and - [o^] are all synonymous. An equivalence class - cannot be an endpoint of a range. - - - - Within a bracket expression, the name of a character class - enclosed in [: and :] stands - for the list of all characters belonging to that class. A character - class cannot be used as an endpoint of a range. - The POSIX standard defines these character class - names: - alnum (letters and numeric digits), - alpha (letters), - blank (space and tab), - cntrl (control characters), - digit (numeric digits), - graph (printable characters except space), - lower (lower-case letters), - print (printable characters including space), - punct (punctuation), - space (any white space), - upper (upper-case letters), - and xdigit (hexadecimal digits). - The behavior of these standard character classes is generally - consistent across platforms for characters in the 7-bit ASCII set. 
- Whether a given non-ASCII character is considered to belong to one - of these classes depends on the collation - that is used for the regular-expression function or operator - (see ), or by default on the - database's LC_CTYPE locale setting (see - ). The classification of non-ASCII - characters can vary across platforms even in similarly-named - locales. (But the C locale never considers any - non-ASCII characters to belong to any of these classes.) - In addition to these standard character - classes, PostgreSQL defines - the word character class, which is the same as - alnum plus the underscore (_) - character, and - the ascii character class, which contains exactly - the 7-bit ASCII set. - - - - There are two special cases of bracket expressions: the bracket - expressions [[:<:]] and - [[:>:]] are constraints, - matching empty strings at the beginning - and end of a word respectively. A word is defined as a sequence - of word characters that is neither preceded nor followed by word - characters. A word character is any character belonging to the - word character class, that is, any letter, digit, - or underscore. This is an extension, compatible with but not - specified by POSIX 1003.2, and should be used with - caution in software intended to be portable to other systems. - The constraint escapes described below are usually preferable; they - are no more standard, but are easier to type. - - - - - Regular Expression Escapes - - - Escapes are special sequences beginning with \ - followed by an alphanumeric character. Escapes come in several varieties: - character entry, class shorthands, constraint escapes, and back references. - A \ followed by an alphanumeric character but not constituting - a valid escape is illegal in AREs. - In EREs, there are no escapes: outside a bracket expression, - a \ followed by an alphanumeric character merely stands for - that character as an ordinary character, and inside a bracket expression, - \ is an ordinary character. - (The latter is the one actual incompatibility between EREs and AREs.) - - - - Character-entry escapes exist to make it easier to specify - non-printing and other inconvenient characters in REs. They are - shown in . - - - - Class-shorthand escapes provide shorthands for certain - commonly-used character classes. They are - shown in . - - - - A constraint escape is a constraint, - matching the empty string if specific conditions are met, - written as an escape. They are - shown in . - - - - A back reference (\n) matches the - same string matched by the previous parenthesized subexpression specified - by the number n - (see ). For example, - ([bc])\1 matches bb or cc - but not bc or cb. - The subexpression must entirely precede the back reference in the RE. - Subexpressions are numbered in the order of their leading parentheses. - Non-capturing parentheses do not define subexpressions. - The back reference considers only the string characters matched by the - referenced subexpression, not any constraints contained in it. For - example, (^\d)\1 will match 22. 
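For example, the ([bc])\1 pattern mentioned above behaves as follows (illustrative results):

SELECT 'bb' ~ '([bc])\1';
Result: t
SELECT 'bc' ~ '([bc])\1';
Result: f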
- - - - Regular Expression Character-Entry Escapes - - - - - Escape - Description - - - - - - \a - alert (bell) character, as in C - - - - \b - backspace, as in C - - - - \B - synonym for backslash (\) to help reduce the need for backslash - doubling - - - - \cX - (where X is any character) the character whose - low-order 5 bits are the same as those of - X, and whose other bits are all zero - - - - \e - the character whose collating-sequence name - is ESC, - or failing that, the character with octal value 033 - - - - \f - form feed, as in C - - - - \n - newline, as in C - - - - \r - carriage return, as in C - - - - \t - horizontal tab, as in C - - - - \uwxyz - (where wxyz is exactly four hexadecimal digits) - the character whose hexadecimal value is - 0xwxyz - - - - - \Ustuvwxyz - (where stuvwxyz is exactly eight hexadecimal - digits) - the character whose hexadecimal value is - 0xstuvwxyz - - - - - \v - vertical tab, as in C - - - - \xhhh - (where hhh is any sequence of hexadecimal - digits) - the character whose hexadecimal value is - 0xhhh - (a single character no matter how many hexadecimal digits are used) - - - - - \0 - the character whose value is 0 (the null byte) - - - - \xy - (where xy is exactly two octal digits, - and is not a back reference) - the character whose octal value is - 0xy - - - - \xyz - (where xyz is exactly three octal digits, - and is not a back reference) - the character whose octal value is - 0xyz - - - -
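As a small example of a character-entry escape in use (the E'...' escape string syntax is used here only to embed a real newline in the subject string):

SELECT regexp_replace(E'foo\nbar', '\n', ' ');
Result: foo bar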
- - - Hexadecimal digits are 0-9, - a-f, and A-F. - Octal digits are 0-7. - - - - Numeric character-entry escapes specifying values outside the ASCII range - (0–127) have meanings dependent on the database encoding. When the - encoding is UTF-8, escape values are equivalent to Unicode code points, - for example \u1234 means the character U+1234. - For other multibyte encodings, character-entry escapes usually just - specify the concatenation of the byte values for the character. If the - escape value does not correspond to any legal character in the database - encoding, no error will be raised, but it will never match any data. - - - - The character-entry escapes are always taken as ordinary characters. - For example, \135 is ] in ASCII, but - \135 does not terminate a bracket expression. - - - - Regular Expression Class-Shorthand Escapes - - - - - Escape - Description - - - - - - \d - matches any digit, like - [[:digit:]] - - - - \s - matches any whitespace character, like - [[:space:]] - - - - \w - matches any word character, like - [[:word:]] - - - - \D - matches any non-digit, like - [^[:digit:]] - - - - \S - matches any non-whitespace character, like - [^[:space:]] - - - - \W - matches any non-word character, like - [^[:word:]] - - - -
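For instance (expected output):

SELECT regexp_matches('a1 b2 c3', '\w\d', 'g');
 regexp_matches
----------------
 {a1}
 {b2}
 {c3}
(3 rows)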
- - - The class-shorthand escapes also work within bracket expressions, - although the definitions shown above are not quite syntactically - valid in that context. - For example, [a-c\d] is equivalent to - [a-c[:digit:]]. - - - - Regular Expression Constraint Escapes - - - - - Escape - Description - - - - - - \A - matches only at the beginning of the string - (see for how this differs from - ^) - - - - \m - matches only at the beginning of a word - - - - \M - matches only at the end of a word - - - - \y - matches only at the beginning or end of a word - - - - \Y - matches only at a point that is not the beginning or end of a - word - - - - \Z - matches only at the end of the string - (see for how this differs from - $) - - - -
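For example, \m can be used to anchor a match to the starts of words (illustrative results):

SELECT regexp_matches('one two three', '\mt\w*', 'g');
 regexp_matches
----------------
 {two}
 {three}
(2 rows)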
- - - A word is defined as in the specification of - [[:<:]] and [[:>:]] above. - Constraint escapes are illegal within bracket expressions. - - - - Regular Expression Back References - - - - - Escape - Description - - - - - - \m - (where m is a nonzero digit) - a back reference to the m'th subexpression - - - - \mnn - (where m is a nonzero digit, and - nn is some more digits, and the decimal value - mnn is not greater than the number of closing capturing - parentheses seen so far) - a back reference to the mnn'th subexpression - - - -
- - - - There is an inherent ambiguity between octal character-entry - escapes and back references, which is resolved by the following heuristics, - as hinted at above. - A leading zero always indicates an octal escape. - A single non-zero digit, not followed by another digit, - is always taken as a back reference. - A multi-digit sequence not starting with a zero is taken as a back - reference if it comes after a suitable subexpression - (i.e., the number is in the legal range for a back reference), - and otherwise is taken as octal. - - -
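These heuristics can be seen in action (assuming an ASCII-superset server encoding, which all PostgreSQL encodings are): \101 below is preceded by no subexpression, so it is read as octal 101, that is A, while \1 after a capturing parenthesis is a back reference:

SELECT 'A' ~ '\101';
Result: t
SELECT 'AA' ~ '(A)\1';
Result: t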
- - - Regular Expression Metasyntax - - - In addition to the main syntax described above, there are some special - forms and miscellaneous syntactic facilities available. - - - - An RE can begin with one of two special director prefixes. - If an RE begins with ***:, - the rest of the RE is taken as an ARE. (This normally has no effect in - PostgreSQL, since REs are assumed to be AREs; - but it does have an effect if ERE or BRE mode had been specified by - the flags parameter to a regex function.) - If an RE begins with ***=, - the rest of the RE is taken to be a literal string, - with all characters considered ordinary characters. - - - - An ARE can begin with embedded options: - a sequence (?xyz) - (where xyz is one or more alphabetic characters) - specifies options affecting the rest of the RE. - These options override any previously determined options — - in particular, they can override the case-sensitivity behavior implied by - a regex operator, or the flags parameter to a regex - function. - The available option letters are - shown in . - Note that these same option letters are used in the flags - parameters of regex functions. - - - - ARE Embedded-Option Letters - - - - - Option - Description - - - - - - b - rest of RE is a BRE - - - - c - case-sensitive matching (overrides operator type) - - - - e - rest of RE is an ERE - - - - i - case-insensitive matching (see - ) (overrides operator type) - - - - m - historical synonym for n - - - - n - newline-sensitive matching (see - ) - - - - p - partial newline-sensitive matching (see - ) - - - - q - rest of RE is a literal (quoted) string, all ordinary - characters - - - - s - non-newline-sensitive matching (default) - - - - t - tight syntax (default; see below) - - - - w - inverse partial newline-sensitive (weird) matching - (see ) - - - - x - expanded syntax (see below) - - - -
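For instance (illustrative results), (?i) makes the rest of the RE case-insensitive, and (?q) makes the rest a literal string:

SELECT regexp_match('FOOBAR', '(?i)foo');
Result: {FOO}
SELECT 'axb' ~ '(?q)a.b';
Result: f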
- - - Embedded options take effect at the ) terminating the sequence. - They can appear only at the start of an ARE (after the - ***: director if any). - - - - In addition to the usual (tight) RE syntax, in which all - characters are significant, there is an expanded syntax, - available by specifying the embedded x option. - In the expanded syntax, - white-space characters in the RE are ignored, as are - all characters between a # - and the following newline (or the end of the RE). This - permits paragraphing and commenting a complex RE. - There are three exceptions to that basic rule: - - - - - a white-space character or # preceded by \ is - retained - - - - - white space or # within a bracket expression is retained - - - - - white space and comments cannot appear within multi-character symbols, - such as (?: - - - - - For this purpose, white-space characters are blank, tab, newline, and - any character that belongs to the space character class. - - - - Finally, in an ARE, outside bracket expressions, the sequence - (?#ttt) - (where ttt is any text not containing a )) - is a comment, completely ignored. - Again, this is not allowed between the characters of - multi-character symbols, like (?:. - Such comments are more a historical artifact than a useful facility, - and their use is deprecated; use the expanded syntax instead. - - - - None of these metasyntax extensions is available if - an initial ***= director - has specified that the user's input be treated as a literal string - rather than as an RE. - -
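As an illustration of the expanded syntax (expected result): the white space and the trailing comment below are ignored, so the pattern is equivalent to \d{3}-\d{4}:

SELECT regexp_match('call 555-1234 now', '(?x) \d{3} - \d{4}  # phone-like');
Result: {555-1234}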
- - - Regular Expression Matching Rules - - - In the event that an RE could match more than one substring of a given - string, the RE matches the one starting earliest in the string. - If the RE could match more than one substring starting at that point, - either the longest possible match or the shortest possible match will - be taken, depending on whether the RE is greedy or - non-greedy. - - - - Whether an RE is greedy or not is determined by the following rules: - - - - Most atoms, and all constraints, have no greediness attribute (because - they cannot match variable amounts of text anyway). - - - - - Adding parentheses around an RE does not change its greediness. - - - - - A quantified atom with a fixed-repetition quantifier - ({m} - or - {m}?) - has the same greediness (possibly none) as the atom itself. - - - - - A quantified atom with other normal quantifiers (including - {m,n} - with m equal to n) - is greedy (prefers longest match). - - - - - A quantified atom with a non-greedy quantifier (including - {m,n}? - with m equal to n) - is non-greedy (prefers shortest match). - - - - - A branch — that is, an RE that has no top-level - | operator — has the same greediness as the first - quantified atom in it that has a greediness attribute. - - - - - An RE consisting of two or more branches connected by the - | operator is always greedy. - - - - - - - The above rules associate greediness attributes not only with individual - quantified atoms, but with branches and entire REs that contain quantified - atoms. What that means is that the matching is done in such a way that - the branch, or whole RE, matches the longest or shortest possible - substring as a whole. Once the length of the entire match - is determined, the part of it that matches any particular subexpression - is determined on the basis of the greediness attribute of that - subexpression, with subexpressions starting earlier in the RE taking - priority over ones starting later. - - - - An example of what this means: - -SELECT SUBSTRING('XY1234Z', 'Y*([0-9]{1,3})'); -Result: 123 -SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); -Result: 1 - - In the first case, the RE as a whole is greedy because Y* - is greedy. It can match beginning at the Y, and it matches - the longest possible string starting there, i.e., Y123. - The output is the parenthesized part of that, or 123. - In the second case, the RE as a whole is non-greedy because Y*? - is non-greedy. It can match beginning at the Y, and it matches - the shortest possible string starting there, i.e., Y1. - The subexpression [0-9]{1,3} is greedy but it cannot change - the decision as to the overall match length; so it is forced to match - just 1. - - - - In short, when an RE contains both greedy and non-greedy subexpressions, - the total match length is either as long as possible or as short as - possible, according to the attribute assigned to the whole RE. The - attributes assigned to the subexpressions only affect how much of that - match they are allowed to eat relative to each other. - - - - The quantifiers {1,1} and {1,1}? - can be used to force greediness or non-greediness, respectively, - on a subexpression or a whole RE. - This is useful when you need the whole RE to have a greediness attribute - different from what's deduced from its elements. As an example, - suppose that we are trying to separate a string containing some digits - into the digits and the parts before and after them. 
We might try to - do that like this: - -SELECT regexp_match('abc01234xyz', '(.*)(\d+)(.*)'); -Result: {abc0123,4,xyz} - - That didn't work: the first .* is greedy so - it eats as much as it can, leaving the \d+ to - match at the last possible place, the last digit. We might try to fix - that by making it non-greedy: - -SELECT regexp_match('abc01234xyz', '(.*?)(\d+)(.*)'); -Result: {abc,0,""} - - That didn't work either, because now the RE as a whole is non-greedy - and so it ends the overall match as soon as possible. We can get what - we want by forcing the RE as a whole to be greedy: - -SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); -Result: {abc,01234,xyz} - - Controlling the RE's overall greediness separately from its components' - greediness allows great flexibility in handling variable-length patterns. - - - - When deciding what is a longer or shorter match, - match lengths are measured in characters, not collating elements. - An empty string is considered longer than no match at all. - For example: - bb* - matches the three middle characters of abbbc; - (week|wee)(night|knights) - matches all ten characters of weeknights; - when (.*).* - is matched against abc the parenthesized subexpression - matches all three characters; and when - (a*)* is matched against bc - both the whole RE and the parenthesized - subexpression match an empty string. - - - - If case-independent matching is specified, - the effect is much as if all case distinctions had vanished from the - alphabet. - When an alphabetic that exists in multiple cases appears as an - ordinary character outside a bracket expression, it is effectively - transformed into a bracket expression containing both cases, - e.g., x becomes [xX]. - When it appears inside a bracket expression, all case counterparts - of it are added to the bracket expression, e.g., - [x] becomes [xX] - and [^x] becomes [^xX]. - - - - If newline-sensitive matching is specified, . - and bracket expressions using ^ - will never match the newline character - (so that matches will not cross lines unless the RE - explicitly includes a newline) - and ^ and $ - will match the empty string after and before a newline - respectively, in addition to matching at beginning and end of string - respectively. - But the ARE escapes \A and \Z - continue to match beginning or end of string only. - Also, the character class shorthands \D - and \W will match a newline regardless of this mode. - (Before PostgreSQL 14, they did not match - newlines when in newline-sensitive mode. - Write [^[:digit:]] - or [^[:word:]] to get the old behavior.) - - - - If partial newline-sensitive matching is specified, - this affects . and bracket expressions - as with newline-sensitive matching, but not ^ - and $. - - - - If inverse partial newline-sensitive matching is specified, - this affects ^ and $ - as with newline-sensitive matching, but not . - and bracket expressions. - This isn't very useful but is provided for symmetry. - - - - - Limits and Compatibility - - - No particular limit is imposed on the length of REs in this - implementation. However, - programs intended to be highly portable should not employ REs longer - than 256 bytes, - as a POSIX-compliant implementation can refuse to accept such REs. - - - - The only feature of AREs that is actually incompatible with - POSIX EREs is that \ does not lose its special - significance inside bracket expressions. 
- All other ARE features use syntax which is illegal or has - undefined or unspecified effects in POSIX EREs; - the *** syntax of directors likewise is outside the POSIX - syntax for both BREs and EREs. - - - - Many of the ARE extensions are borrowed from Perl, but some have - been changed to clean them up, and a few Perl extensions are not present. - Incompatibilities of note include \b, \B, - the lack of special treatment for a trailing newline, - the addition of complemented bracket expressions to the things - affected by newline-sensitive matching, - the restrictions on parentheses and back references in lookahead/lookbehind - constraints, and the longest/shortest-match (rather than first-match) - matching semantics. - - - - - Basic Regular Expressions - - - BREs differ from EREs in several respects. - In BREs, |, +, and ? - are ordinary characters and there is no equivalent - for their functionality. - The delimiters for bounds are - \{ and \}, - with { and } - by themselves ordinary characters. - The parentheses for nested subexpressions are - \( and \), - with ( and ) by themselves ordinary characters. - ^ is an ordinary character except at the beginning of the - RE or the beginning of a parenthesized subexpression, - $ is an ordinary character except at the end of the - RE or the end of a parenthesized subexpression, - and * is an ordinary character if it appears at the beginning - of the RE or the beginning of a parenthesized subexpression - (after a possible leading ^). - Finally, single-digit back references are available, and - \< and \> - are synonyms for - [[:<:]] and [[:>:]] - respectively; no other escapes are available in BREs. - - - - - - - Differences from SQL Standard and XQuery - - - LIKE_REGEX - - - - OCCURRENCES_REGEX - - - - POSITION_REGEX - - - - SUBSTRING_REGEX - - - - TRANSLATE_REGEX - - - - XQuery regular expressions - - - - Since SQL:2008, the SQL standard includes regular expression operators - and functions that perform pattern - matching according to the XQuery regular expression - standard: - - LIKE_REGEX - OCCURRENCES_REGEX - POSITION_REGEX - SUBSTRING_REGEX - TRANSLATE_REGEX - - PostgreSQL does not currently implement these - operators and functions. You can get approximately equivalent - functionality in each case as shown in . (Various optional clauses on - both sides have been omitted in this table.) - - - - Regular Expression Functions Equivalencies - - - - - SQL standard - PostgreSQL - - - - - - string LIKE_REGEX pattern - regexp_like(string, pattern) or string ~ pattern - - - - OCCURRENCES_REGEX(pattern IN string) - regexp_count(string, pattern) - - - - POSITION_REGEX(pattern IN string) - regexp_instr(string, pattern) - - - - SUBSTRING_REGEX(pattern IN string) - regexp_substr(string, pattern) - - - - TRANSLATE_REGEX(pattern IN string WITH replacement) - regexp_replace(string, pattern, replacement) - - - -
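For example, the regexp_count equivalent of OCCURRENCES_REGEX can be used like this (regexp_count, regexp_like, and regexp_instr require PostgreSQL 15 or later):

SELECT regexp_count('ABCABCAXYaxy', 'A.');
Result: 3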
- - - Regular expression functions similar to those provided by PostgreSQL are - also available in a number of other SQL implementations, whereas the - SQL-standard functions are not as widely implemented. Some of the - details of the regular expression syntax will likely differ in each - implementation. - - - - The SQL-standard operators and functions use XQuery regular expressions, - which are quite close to the ARE syntax described above. - Notable differences between the existing POSIX-based - regular-expression feature and XQuery regular expressions include: - - - - - XQuery character class subtraction is not supported. An example of - this feature is using the following to match only English - consonants: [a-z-[aeiou]]. - - - - - XQuery character class shorthands \c, - \C, \i, - and \I are not supported. - - - - - XQuery character class elements - using \p{UnicodeProperty} or the - inverse \P{UnicodeProperty} are not supported. - - - - - POSIX interprets character classes such as \w - (see ) - according to the prevailing locale (which you can control by - attaching a COLLATE clause to the operator or - function). XQuery specifies these classes by reference to Unicode - character properties, so equivalent behavior is obtained only with - a locale that follows the Unicode rules. - - - - - The SQL standard (not XQuery itself) attempts to cater for more - variants of newline than POSIX does. The - newline-sensitive matching options described above consider only - ASCII NL (\n) to be a newline, but SQL would have - us treat CR (\r), CRLF (\r\n) - (a Windows-style newline), and some Unicode-only characters like - LINE SEPARATOR (U+2028) as newlines as well. - Notably, . and \s should - count \r\n as one character not two according to - SQL. - - - - - Of the character-entry escapes described in - , - XQuery supports only \n, \r, - and \t. - - - - - XQuery does not support - the [:name:] syntax - for character classes within bracket expressions. - - - - - XQuery does not have lookahead or lookbehind constraints, - nor any of the constraint escapes described in - . - - - - - The metasyntax forms described in - do not exist in XQuery. - - - - - The regular expression flag letters defined by XQuery are - related to but not the same as the option letters for POSIX - (). While the - i and q options behave the - same, others do not: - - - - XQuery's s (allow dot to match newline) - and m (allow ^ - and $ to match at newlines) flags provide - access to the same behaviors as - POSIX's n, p - and w flags, but they - do not match the behavior of - POSIX's s and m flags. - Note in particular that dot-matches-newline is the default - behavior in POSIX but not XQuery. - - - - - XQuery's x (ignore whitespace in pattern) flag - is noticeably different from POSIX's expanded-mode flag. - POSIX's x flag also - allows # to begin a comment in the pattern, - and POSIX will not ignore a whitespace character after a - backslash. - - - - - - - - -
-
-
- - - - Data Type Formatting Functions - - - formatting - - - - The PostgreSQL formatting functions - provide a powerful set of tools for converting various data types - (date/time, integer, floating point, numeric) to formatted strings - and for converting from formatted strings to specific data types. - lists them. - These functions all follow a common calling convention: the first - argument is the value to be formatted and the second argument is a - template that defines the output or input format. - - - - Formatting Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - to_char - - to_char ( timestamp, text ) - text - - - to_char ( timestamp with time zone, text ) - text - - - Converts time stamp to string according to the given format. - - - to_char(timestamp '2002-04-20 17:31:12.66', 'HH12:MI:SS') - 05:31:12 - - - - - - to_char ( interval, text ) - text - - - Converts interval to string according to the given format. - - - to_char(interval '15h 2m 12s', 'HH24:MI:SS') - 15:02:12 - - - - - - to_char ( numeric_type, text ) - text - - - Converts number to string according to the given format; available - for integer, bigint, numeric, - real, double precision. - - - to_char(125, '999') - 125 - - - to_char(125.8::real, '999D9') - 125.8 - - - to_char(-125.8, '999D99S') - 125.80- - - - - - - - to_date - - to_date ( text, text ) - date - - - Converts string to date according to the given format. - - - to_date('05 Dec 2000', 'DD Mon YYYY') - 2000-12-05 - - - - - - - to_number - - to_number ( text, text ) - numeric - - - Converts string to numeric according to the given format. - - - to_number('12,454.8-', '99G999D9S') - -12454.8 - - - - - - - to_timestamp - - to_timestamp ( text, text ) - timestamp with time zone - - - Converts string to time stamp according to the given format. - (See also to_timestamp(double precision) in - .) - - - to_timestamp('05 Dec 2000', 'DD Mon YYYY') - 2000-12-05 00:00:00-05 - - - - -
- - - - to_timestamp and to_date - exist to handle input formats that cannot be converted by - simple casting. For most standard date/time formats, simply casting the - source string to the required data type works, and is much easier. - Similarly, to_number is unnecessary for standard numeric - representations. - - - - - In a to_char output template string, there are certain - patterns that are recognized and replaced with appropriately-formatted - data based on the given value. Any text that is not a template pattern is - simply copied verbatim. Similarly, in an input template string (for the - other functions), template patterns identify the values to be supplied by - the input data string. If there are characters in the template string - that are not template patterns, the corresponding characters in the input - data string are simply skipped over (whether or not they are equal to the - template string characters). - - - - shows the - template patterns available for formatting date and time values. - - - - Template Patterns for Date/Time Formatting - - - - Pattern - Description - - - - - HH - hour of day (01–12) - - - HH12 - hour of day (01–12) - - - HH24 - hour of day (00–23) - - - MI - minute (00–59) - - - SS - second (00–59) - - - MS - millisecond (000–999) - - - US - microsecond (000000–999999) - - - FF1 - tenth of second (0–9) - - - FF2 - hundredth of second (00–99) - - - FF3 - millisecond (000–999) - - - FF4 - tenth of a millisecond (0000–9999) - - - FF5 - hundredth of a millisecond (00000–99999) - - - FF6 - microsecond (000000–999999) - - - SSSS, SSSSS - seconds past midnight (0–86399) - - - AM, am, - PM or pm - meridiem indicator (without periods) - - - A.M., a.m., - P.M. or p.m. - meridiem indicator (with periods) - - - Y,YYY - year (4 or more digits) with comma - - - YYYY - year (4 or more digits) - - - YYY - last 3 digits of year - - - YY - last 2 digits of year - - - Y - last digit of year - - - IYYY - ISO 8601 week-numbering year (4 or more digits) - - - IYY - last 3 digits of ISO 8601 week-numbering year - - - IY - last 2 digits of ISO 8601 week-numbering year - - - I - last digit of ISO 8601 week-numbering year - - - BC, bc, - AD or ad - era indicator (without periods) - - - B.C., b.c., - A.D. or a.d. 
- era indicator (with periods) - - - MONTH - full upper case month name (blank-padded to 9 chars) - - - Month - full capitalized month name (blank-padded to 9 chars) - - - month - full lower case month name (blank-padded to 9 chars) - - - MON - abbreviated upper case month name (3 chars in English, localized lengths vary) - - - Mon - abbreviated capitalized month name (3 chars in English, localized lengths vary) - - - mon - abbreviated lower case month name (3 chars in English, localized lengths vary) - - - MM - month number (01–12) - - - DAY - full upper case day name (blank-padded to 9 chars) - - - Day - full capitalized day name (blank-padded to 9 chars) - - - day - full lower case day name (blank-padded to 9 chars) - - - DY - abbreviated upper case day name (3 chars in English, localized lengths vary) - - - Dy - abbreviated capitalized day name (3 chars in English, localized lengths vary) - - - dy - abbreviated lower case day name (3 chars in English, localized lengths vary) - - - DDD - day of year (001–366) - - - IDDD - day of ISO 8601 week-numbering year (001–371; day 1 of the year is Monday of the first ISO week) - - - DD - day of month (01–31) - - - D - day of the week, Sunday (1) to Saturday (7) - - - ID - ISO 8601 day of the week, Monday (1) to Sunday (7) - - - W - week of month (1–5) (the first week starts on the first day of the month) - - - WW - week number of year (1–53) (the first week starts on the first day of the year) - - - IW - week number of ISO 8601 week-numbering year (01–53; the first Thursday of the year is in week 1) - - - CC - century (2 digits) (the twenty-first century starts on 2001-01-01) - - - J - Julian Date (integer days since November 24, 4714 BC at local - midnight; see ) - - - Q - quarter - - - RM - month in upper case Roman numerals (I–XII; I=January) - - - rm - month in lower case Roman numerals (i–xii; i=January) - - - TZ - upper case time-zone abbreviation - - - tz - lower case time-zone abbreviation - - - TZH - time-zone hours - - - TZM - time-zone minutes - - - OF - time-zone offset from UTC (HH - or HH:MM) - - - -
- - - Modifiers can be applied to any template pattern to alter its - behavior. For example, FMMonth - is the Month pattern with the - FM modifier. - shows the - modifier patterns for date/time formatting. - - - - Template Pattern Modifiers for Date/Time Formatting - - - - Modifier - Description - Example - - - - - FM prefix - fill mode (suppress leading zeroes and padding blanks) - FMMonth - - - TH suffix - upper case ordinal number suffix - DDTH, e.g., 12TH - - - th suffix - lower case ordinal number suffix - DDth, e.g., 12th - - - FX prefix - fixed format global option (see usage notes) - FX Month DD Day - - - TM prefix - translation mode (use localized day and month names based on - ) - TMMonth - - - SP suffix - spell mode (not implemented) - DDSP - - - -
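For example, with the date 2001-05-09, a plain Month DD pattern produces a blank-padded month name and a zero-padded day, while adding FM suppresses both (illustrative result):

SELECT to_char(date '2001-05-09', 'FMMonth FMDD, YYYY');
Result: May 9, 2001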
- - - Usage notes for date/time formatting: - - - - - FM suppresses leading zeroes and trailing blanks - that would otherwise be added to make the output of a pattern be - fixed-width. In PostgreSQL, - FM modifies only the next specification, while in - Oracle FM affects all subsequent - specifications, and repeated FM modifiers - toggle fill mode on and off. - - - - - - TM suppresses trailing blanks whether or - not FM is specified. - - - - - - to_timestamp and to_date - ignore letter case in the input; so for - example MON, Mon, - and mon all accept the same strings. When using - the TM modifier, case-folding is done according to - the rules of the function's input collation (see - ). - - - - - - to_timestamp and to_date - skip multiple blank spaces at the beginning of the input string and - around date and time values unless the FX option is used. For example, - to_timestamp(' 2000    JUN', 'YYYY MON') and - to_timestamp('2000 - JUN', 'YYYY-MON') work, but - to_timestamp('2000    JUN', 'FXYYYY MON') returns an error - because to_timestamp expects only a single space. - FX must be specified as the first item in - the template. - - - - - - A separator (a space or non-letter/non-digit character) in the template string of - to_timestamp and to_date - matches any single separator in the input string or is skipped, - unless the FX option is used. - For example, to_timestamp('2000JUN', 'YYYY///MON') and - to_timestamp('2000/JUN', 'YYYY MON') work, but - to_timestamp('2000//JUN', 'YYYY/MON') - returns an error because the number of separators in the input string - exceeds the number of separators in the template. - - - If FX is specified, a separator in the template string - matches exactly one character in the input string. But note that the - input string character is not required to be the same as the separator from the template string. - For example, to_timestamp('2000/JUN', 'FXYYYY MON') - works, but to_timestamp('2000/JUN', 'FXYYYY  MON') - returns an error because the second space in the template string consumes - the letter J from the input string. - - - - - - A TZH template pattern can match a signed number. - Without the FX option, minus signs may be ambiguous, - and could be interpreted as a separator. - This ambiguity is resolved as follows: If the number of separators before - TZH in the template string is less than the number of - separators before the minus sign in the input string, the minus sign - is interpreted as part of TZH. - Otherwise, the minus sign is considered to be a separator between values. - For example, to_timestamp('2000 -10', 'YYYY TZH') matches - -10 to TZH, but - to_timestamp('2000 -10', 'YYYY  TZH') - matches 10 to TZH. - - - - - - Ordinary text is allowed in to_char - templates and will be output literally. You can put a substring - in double quotes to force it to be interpreted as literal text - even if it contains template patterns. For example, in - '"Hello Year "YYYY', the YYYY - will be replaced by the year data, but the single Y in Year - will not be. - In to_date, to_number, - and to_timestamp, literal text and double-quoted - strings result in skipping the number of characters contained in the - string; for example "XX" skips two input characters - (whether or not they are XX). - - - - Prior to PostgreSQL 12, it was possible to - skip arbitrary text in the input string using non-letter or non-digit - characters. For example, - to_timestamp('2000y6m1d', 'yyyy-MM-DD') used to - work. Now you can only use letter characters for this purpose. 
For example, - to_timestamp('2000y6m1d', 'yyyytMMtDDt') and - to_timestamp('2000y6m1d', 'yyyy"y"MM"m"DD"d"') - skip y, m, and - d. - - - - - - - If you want to have a double quote in the output you must - precede it with a backslash, for example '\"YYYY - Month\"'. - Backslashes are not otherwise special outside of double-quoted - strings. Within a double-quoted string, a backslash causes the - next character to be taken literally, whatever it is (but this - has no special effect unless the next character is a double quote - or another backslash). - - - - - - In to_timestamp and to_date, - if the year format specification is less than four digits, e.g., - YYY, and the supplied year is less than four digits, - the year will be adjusted to be nearest to the year 2020, e.g., - 95 becomes 1995. - - - - - - In to_timestamp and to_date, - negative years are treated as signifying BC. If you write both a - negative year and an explicit BC field, you get AD - again. An input of year zero is treated as 1 BC. - - - - - - In to_timestamp and to_date, - the YYYY conversion has a restriction when - processing years with more than 4 digits. You must - use some non-digit character or template after YYYY, - otherwise the year is always interpreted as 4 digits. For example - (with the year 20000): - to_date('200001130', 'YYYYMMDD') will be - interpreted as a 4-digit year; instead use a non-digit - separator after the year, like - to_date('20000-1130', 'YYYY-MMDD') or - to_date('20000Nov30', 'YYYYMonDD'). - - - - - - In to_timestamp and to_date, - the CC (century) field is accepted but ignored - if there is a YYY, YYYY or - Y,YYY field. If CC is used with - YY or Y then the result is - computed as that year in the specified century. If the century is - specified but the year is not, the first year of the century - is assumed. - - - - - - In to_timestamp and to_date, - weekday names or numbers (DAY, D, - and related field types) are accepted but are ignored for purposes of - computing the result. The same is true for quarter - (Q) fields. - - - - - - In to_timestamp and to_date, - an ISO 8601 week-numbering date (as distinct from a Gregorian date) - can be specified in one of two ways: - - - - Year, week number, and weekday: for - example to_date('2006-42-4', 'IYYY-IW-ID') - returns the date 2006-10-19. - If you omit the weekday it is assumed to be 1 (Monday). - - - - - Year and day of year: for example to_date('2006-291', - 'IYYY-IDDD') also returns 2006-10-19. - - - - - - Attempting to enter a date using a mixture of ISO 8601 week-numbering - fields and Gregorian date fields is nonsensical, and will cause an - error. In the context of an ISO 8601 week-numbering year, the - concept of a month or day of month has no - meaning. In the context of a Gregorian year, the ISO week has no - meaning. - - - - While to_date will reject a mixture of - Gregorian and ISO week-numbering date - fields, to_char will not, since output format - specifications like YYYY-MM-DD (IYYY-IDDD) can be - useful. But avoid writing something like IYYY-MM-DD; - that would yield surprising results near the start of the year. - (See for more - information.) - - - - - - - In to_timestamp, millisecond - (MS) or microsecond (US) - fields are used as the - seconds digits after the decimal point. For example - to_timestamp('12.3', 'SS.MS') is not 3 milliseconds, - but 300, because the conversion treats it as 12 + 0.3 seconds. - So, for the format SS.MS, the input values - 12.3, 12.30, - and 12.300 specify the - same number of milliseconds. 
To get three milliseconds, one must write - 12.003, which the conversion treats as - 12 + 0.003 = 12.003 seconds. - - - - Here is a more - complex example: - to_timestamp('15:12:02.020.001230', 'HH24:MI:SS.MS.US') - is 15 hours, 12 minutes, and 2 seconds + 20 milliseconds + - 1230 microseconds = 2.021230 seconds. - - - - - - to_char(..., 'ID')'s day of the week numbering - matches the extract(isodow from ...) function, but - to_char(..., 'D')'s does not match - extract(dow from ...)'s day numbering. - - - - - - to_char(interval) formats HH and - HH12 as shown on a 12-hour clock, for example zero hours - and 36 hours both output as 12, while HH24 - outputs the full hour value, which can exceed 23 in - an interval value. - - - - - - - - shows the - template patterns available for formatting numeric values. - - - - Template Patterns for Numeric Formatting - - - - Pattern - Description - - - - - 9 - digit position (can be dropped if insignificant) - - - 0 - digit position (will not be dropped, even if insignificant) - - - . (period) - decimal point - - - , (comma) - group (thousands) separator - - - PR - negative value in angle brackets - - - S - sign anchored to number (uses locale) - - - L - currency symbol (uses locale) - - - D - decimal point (uses locale) - - - G - group separator (uses locale) - - - MI - minus sign in specified position (if number < 0) - - - PL - plus sign in specified position (if number > 0) - - - SG - plus/minus sign in specified position - - - RN or rn - Roman numeral (values between 1 and 3999) - - - TH or th - ordinal number suffix - - - V - shift specified number of digits (see notes) - - - EEEE - exponent for scientific notation - - - -
- - - Usage notes for numeric formatting: - - - - - 0 specifies a digit position that will always be printed, - even if it contains a leading/trailing zero. 9 also - specifies a digit position, but if it is a leading zero then it will - be replaced by a space, while if it is a trailing zero and fill mode - is specified then it will be deleted. (For to_number(), - these two pattern characters are equivalent.) - - - - - - If the format provides fewer fractional digits than the number being - formatted, to_char() will round the number to - the specified number of fractional digits. - - - - - - The pattern characters S, L, D, - and G represent the sign, currency symbol, decimal point, - and thousands separator characters defined by the current locale - (see - and ). The pattern characters period - and comma represent those exact characters, with the meanings of - decimal point and thousands separator, regardless of locale. - - - - - - If no explicit provision is made for a sign - in to_char()'s pattern, one column will be reserved for - the sign, and it will be anchored to (appear just left of) the - number. If S appears just left of some 9's, - it will likewise be anchored to the number. - - - - - - A sign formatted using SG, PL, or - MI is not anchored to - the number; for example, - to_char(-12, 'MI9999') produces '-  12' - but to_char(-12, 'S9999') produces '  -12'. - (The Oracle implementation does not allow the use of - MI before 9, but rather - requires that 9 precede - MI.) - - - - - - TH does not convert values less than zero - and does not convert fractional numbers. - - - - - - PL, SG, and - TH are PostgreSQL - extensions. - - - - - - In to_number, if non-data template patterns such - as L or TH are used, the - corresponding number of input characters are skipped, whether or not - they match the template pattern, unless they are data characters - (that is, digits, sign, decimal point, or comma). For - example, TH would skip two non-data characters. - - - - - - V with to_char - multiplies the input values by - 10^n, where - n is the number of digits following - V. V with - to_number divides in a similar manner. - The V can be thought of as marking the position - of an implicit decimal point in the input or output string. - to_char and to_number - do not support the use of - V combined with a decimal point - (e.g., 99.9V99 is not allowed). - - - - - - EEEE (scientific notation) cannot be used in - combination with any of the other formatting patterns or - modifiers other than digit and decimal point patterns, and must be at the end of the format string - (e.g., 9.99EEEE is a valid pattern). - - - - - - In to_number(), the RN - pattern converts Roman numerals (in standard form) to numbers. - Input is case-insensitive, so RN - and rn are equivalent. RN - cannot be used in combination with any other formatting patterns or - modifiers except FM, which is applicable only - in to_char() and is ignored - in to_number(). - - - - - - - Certain modifiers can be applied to any template pattern to alter its - behavior. For example, FM99.99 - is the 99.99 pattern with the - FM modifier. - shows the - modifier patterns for numeric formatting. - - - - Template Pattern Modifiers for Numeric Formatting - - - - Modifier - Description - Example - - - - - FM prefix - fill mode (suppress trailing zeroes and padding blanks) - FM99.99 - - - TH suffix - upper case ordinal number suffix - 999TH - - - th suffix - lower case ordinal number suffix - 999th - - - -
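Two small to_number illustrations of the notes above (expected results; Roman-numeral input is a relatively recent addition): TH skips the two non-data characters nd, and RN converts a Roman numeral:

SELECT to_number('42nd', '99TH');
Result: 42
SELECT to_number('CDLXXXV', 'RN');
Result: 485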
- - - shows some - examples of the use of the to_char function. - - - - <function>to_char</function> Examples - - - - Expression - Result - - - - - to_char(current_timestamp, 'Day, DD  HH12:MI:SS') - 'Tuesday  , 06  05:39:18' - - - to_char(current_timestamp, 'FMDay, FMDD  HH12:MI:SS') - 'Tuesday, 6  05:39:18' - - - to_char(current_timestamp AT TIME ZONE - 'UTC', 'YYYY-MM-DD"T"HH24:MI:SS"Z"') - '2022-12-06T05:39:18Z', - ISO 8601 extended format - - - to_char(-0.1, '99.99') - '  -.10' - - - to_char(-0.1, 'FM9.99') - '-.1' - - - to_char(-0.1, 'FM90.99') - '-0.1' - - - to_char(0.1, '0.9') - ' 0.1' - - - to_char(12, '9990999.9') - '    0012.0' - - - to_char(12, 'FM9990999.9') - '0012.' - - - to_char(485, '999') - ' 485' - - - to_char(-485, '999') - '-485' - - - to_char(485, '9 9 9') - ' 4 8 5' - - - to_char(1485, '9,999') - ' 1,485' - - - to_char(1485, '9G999') - ' 1 485' - - - to_char(148.5, '999.999') - ' 148.500' - - - to_char(148.5, 'FM999.999') - '148.5' - - - to_char(148.5, 'FM999.990') - '148.500' - - - to_char(148.5, '999D999') - ' 148,500' - - - to_char(3148.5, '9G999D999') - ' 3 148,500' - - - to_char(-485, '999S') - '485-' - - - to_char(-485, '999MI') - '485-' - - - to_char(485, '999MI') - '485 ' - - - to_char(485, 'FM999MI') - '485' - - - to_char(485, 'PL999') - '+485' - - - to_char(485, 'SG999') - '+485' - - - to_char(-485, 'SG999') - '-485' - - - to_char(-485, '9SG99') - '4-85' - - - to_char(-485, '999PR') - '<485>' - - - to_char(485, 'L999') - 'DM 485' - - - to_char(485, 'RN') - '        CDLXXXV' - - - to_char(485, 'FMRN') - 'CDLXXXV' - - - to_char(5.2, 'FMRN') - 'V' - - - to_char(482, '999th') - ' 482nd' - - - to_char(485, '"Good number:"999') - 'Good number: 485' - - - to_char(485.8, '"Pre:"999" Post:" .999') - 'Pre: 485 Post: .800' - - - to_char(12, '99V999') - ' 12000' - - - to_char(12.4, '99V999') - ' 12400' - - - to_char(12.45, '99V9') - ' 125' - - - to_char(0.0004859, '9.99EEEE') - ' 4.86e-04' - - - -
- -
- - - - Date/Time Functions and Operators - - - shows the available - functions for date/time value processing, with details appearing in - the following subsections. illustrates the behaviors of - the basic arithmetic operators (+, - *, etc.). For formatting functions, refer to - . You should be familiar with - the background information on date/time data types from . - - - - In addition, the usual comparison operators shown in - are available for the - date/time types. Dates and timestamps (with or without time zone) are - all comparable, while times (with or without time zone) and intervals - can only be compared to other values of the same data type. When - comparing a timestamp without time zone to a timestamp with time zone, - the former value is assumed to be given in the time zone specified by - the configuration parameter, and is - rotated to UTC for comparison to the latter value (which is already - in UTC internally). Similarly, a date value is assumed to represent - midnight in the TimeZone zone when comparing it - to a timestamp. - - - - All the functions and operators described below that take time or timestamp - inputs actually come in two variants: one that takes time with time zone or timestamp - with time zone, and one that takes time without time zone or timestamp without time zone. - For brevity, these variants are not shown separately. Also, the - + and * operators come in commutative pairs (for - example both date + integer - and integer + date); we show - only one of each such pair. - - - - Date/Time Operators - - - - - - Operator - - - Description - - - Example(s) - - - - - - - - date + integer - date - - - Add a number of days to a date - - - date '2001-09-28' + 7 - 2001-10-05 - - - - - - date + interval - timestamp - - - Add an interval to a date - - - date '2001-09-28' + interval '1 hour' - 2001-09-28 01:00:00 - - - - - - date + time - timestamp - - - Add a time-of-day to a date - - - date '2001-09-28' + time '03:00' - 2001-09-28 03:00:00 - - - - - - interval + interval - interval - - - Add intervals - - - interval '1 day' + interval '1 hour' - 1 day 01:00:00 - - - - - - timestamp + interval - timestamp - - - Add an interval to a timestamp - - - timestamp '2001-09-28 01:00' + interval '23 hours' - 2001-09-29 00:00:00 - - - - - - time + interval - time - - - Add an interval to a time - - - time '01:00' + interval '3 hours' - 04:00:00 - - - - - - - interval - interval - - - Negate an interval - - - - interval '23 hours' - -23:00:00 - - - - - - date - date - integer - - - Subtract dates, producing the number of days elapsed - - - date '2001-10-01' - date '2001-09-28' - 3 - - - - - - date - integer - date - - - Subtract a number of days from a date - - - date '2001-10-01' - 7 - 2001-09-24 - - - - - - date - interval - timestamp - - - Subtract an interval from a date - - - date '2001-09-28' - interval '1 hour' - 2001-09-27 23:00:00 - - - - - - time - time - interval - - - Subtract times - - - time '05:00' - time '03:00' - 02:00:00 - - - - - - time - interval - time - - - Subtract an interval from a time - - - time '05:00' - interval '2 hours' - 03:00:00 - - - - - - timestamp - interval - timestamp - - - Subtract an interval from a timestamp - - - timestamp '2001-09-28 23:00' - interval '23 hours' - 2001-09-28 00:00:00 - - - - - - interval - interval - interval - - - Subtract intervals - - - interval '1 day' - interval '1 hour' - 1 day -01:00:00 - - - - - - timestamp - timestamp - interval - - - Subtract timestamps (converting 24-hour intervals into days, - similarly 
to justify_hours()) - - - timestamp '2001-09-29 03:00' - timestamp '2001-07-27 12:00' - 63 days 15:00:00 - - - - - - interval * double precision - interval - - - Multiply an interval by a scalar - - - interval '1 second' * 900 - 00:15:00 - - - interval '1 day' * 21 - 21 days - - - interval '1 hour' * 3.5 - 03:30:00 - - - - - - interval / double precision - interval - - - Divide an interval by a scalar - - - interval '1 hour' / 1.5 - 00:40:00 - - - - -
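As mentioned above, the + and * operators come in commutative pairs, so the operands may be written in either order; a quick illustration:

SELECT date '2001-09-28' + 7;
Result: 2001-10-05
SELECT 7 + date '2001-09-28';
Result: 2001-10-05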
- - - Date/Time Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - age - - age ( timestamp, timestamp ) - interval - - - Subtract arguments, producing a symbolic result that - uses years and months, rather than just days - - - age(timestamp '2001-04-10', timestamp '1957-06-13') - 43 years 9 mons 27 days - - - - - - age ( timestamp ) - interval - - - Subtract argument from current_date (at midnight) - - - age(timestamp '1957-06-13') - 62 years 6 mons 10 days - - - - - - - clock_timestamp - - clock_timestamp ( ) - timestamp with time zone - - - Current date and time (changes during statement execution); - see - - - clock_timestamp() - 2019-12-23 14:39:53.662522-05 - - - - - - - current_date - - current_date - date - - - Current date; see - - - current_date - 2019-12-23 - - - - - - - current_time - - current_time - time with time zone - - - Current time of day; see - - - current_time - 14:39:53.662522-05 - - - - - - current_time ( integer ) - time with time zone - - - Current time of day, with limited precision; - see - - - current_time(2) - 14:39:53.66-05 - - - - - - - current_timestamp - - current_timestamp - timestamp with time zone - - - Current date and time (start of current transaction); - see - - - current_timestamp - 2019-12-23 14:39:53.662522-05 - - - - - - current_timestamp ( integer ) - timestamp with time zone - - - Current date and time (start of current transaction), with limited precision; - see - - - current_timestamp(0) - 2019-12-23 14:39:53-05 - - - - - - - date_add - - date_add ( timestamp with time zone, interval , text ) - timestamp with time zone - - - Add an interval to a timestamp with time - zone, computing times of day and daylight-savings adjustments - according to the time zone named by the third argument, or the - current setting if that is omitted. - The form with two arguments is equivalent to the timestamp with - time zone + interval operator. - - - date_add('2021-10-31 00:00:00+02'::timestamptz, '1 day'::interval, 'Europe/Warsaw') - 2021-10-31 23:00:00+00 - - - - - - date_bin ( interval, timestamp, timestamp ) - timestamp - - - Bin input into specified interval aligned with specified origin; see - - - date_bin('15 minutes', timestamp '2001-02-16 20:38:40', timestamp '2001-02-16 20:05:00') - 2001-02-16 20:35:00 - - - - - - - date_part - - date_part ( text, timestamp ) - double precision - - - Get timestamp subfield (equivalent to extract); - see - - - date_part('hour', timestamp '2001-02-16 20:38:40') - 20 - - - - - - date_part ( text, interval ) - double precision - - - Get interval subfield (equivalent to extract); - see - - - date_part('month', interval '2 years 3 months') - 3 - - - - - - - date_subtract - - date_subtract ( timestamp with time zone, interval , text ) - timestamp with time zone - - - Subtract an interval from a timestamp with time - zone, computing times of day and daylight-savings adjustments - according to the time zone named by the third argument, or the - current setting if that is omitted. - The form with two arguments is equivalent to the timestamp with - time zone - interval operator. 
- - - date_subtract('2021-11-01 00:00:00+01'::timestamptz, '1 day'::interval, 'Europe/Warsaw') - 2021-10-30 22:00:00+00 - - - - - - - date_trunc - - date_trunc ( text, timestamp ) - timestamp - - - Truncate to specified precision; see - - - date_trunc('hour', timestamp '2001-02-16 20:38:40') - 2001-02-16 20:00:00 - - - - - - date_trunc ( text, timestamp with time zone, text ) - timestamp with time zone - - - Truncate to specified precision in the specified time zone; see - - - - date_trunc('day', timestamptz '2001-02-16 20:38:40+00', 'Australia/Sydney') - 2001-02-16 13:00:00+00 - - - - - - date_trunc ( text, interval ) - interval - - - Truncate to specified precision; see - - - - date_trunc('hour', interval '2 days 3 hours 40 minutes') - 2 days 03:00:00 - - - - - - - extract - - extract ( field from timestamp ) - numeric - - - Get timestamp subfield; see - - - extract(hour from timestamp '2001-02-16 20:38:40') - 20 - - - - - - extract ( field from interval ) - numeric - - - Get interval subfield; see - - - extract(month from interval '2 years 3 months') - 3 - - - - - - - isfinite - - isfinite ( date ) - boolean - - - Test for finite date (not +/-infinity) - - - isfinite(date '2001-02-16') - true - - - - - - isfinite ( timestamp ) - boolean - - - Test for finite timestamp (not +/-infinity) - - - isfinite(timestamp 'infinity') - false - - - - - - isfinite ( interval ) - boolean - - - Test for finite interval (not +/-infinity) - - - isfinite(interval '4 hours') - true - - - - - - - justify_days - - justify_days ( interval ) - interval - - - Adjust interval, converting 30-day time periods to months - - - justify_days(interval '1 year 65 days') - 1 year 2 mons 5 days - - - - - - - justify_hours - - justify_hours ( interval ) - interval - - - Adjust interval, converting 24-hour time periods to days - - - justify_hours(interval '50 hours 10 minutes') - 2 days 02:10:00 - - - - - - - justify_interval - - justify_interval ( interval ) - interval - - - Adjust interval using justify_days - and justify_hours, with additional sign - adjustments - - - justify_interval(interval '1 mon -1 hour') - 29 days 23:00:00 - - - - - - - localtime - - localtime - time - - - Current time of day; - see - - - localtime - 14:39:53.662522 - - - - - - localtime ( integer ) - time - - - Current time of day, with limited precision; - see - - - localtime(0) - 14:39:53 - - - - - - - localtimestamp - - localtimestamp - timestamp - - - Current date and time (start of current transaction); - see - - - localtimestamp - 2019-12-23 14:39:53.662522 - - - - - - localtimestamp ( integer ) - timestamp - - - Current date and time (start of current - transaction), with limited precision; - see - - - localtimestamp(2) - 2019-12-23 14:39:53.66 - - - - - - - make_date - - make_date ( year int, - month int, - day int ) - date - - - Create date from year, month and day fields - (negative years signify BC) - - - make_date(2013, 7, 15) - 2013-07-15 - - - - - - make_interval - - make_interval ( years int - , months int - , weeks int - , days int - , hours int - , mins int - , secs double precision - ) - interval - - - Create interval from years, months, weeks, days, hours, minutes and - seconds fields, each of which can default to zero - - - make_interval(days => 10) - 10 days - - - - - - - make_time - - make_time ( hour int, - min int, - sec double precision ) - time - - - Create time from hour, minute and seconds fields - - - make_time(8, 15, 23.5) - 08:15:23.5 - - - - - - - make_timestamp - - make_timestamp ( year int, - month int, - day int, 
- hour int, - min int, - sec double precision ) - timestamp - - - Create timestamp from year, month, day, hour, minute and seconds fields - (negative years signify BC) - - - make_timestamp(2013, 7, 15, 8, 15, 23.5) - 2013-07-15 08:15:23.5 - - - - - - - make_timestamptz - - make_timestamptz ( year int, - month int, - day int, - hour int, - min int, - sec double precision - , timezone text ) - timestamp with time zone - - - Create timestamp with time zone from year, month, day, hour, minute - and seconds fields (negative years signify BC). - If timezone is not - specified, the current time zone is used; the examples assume the - session time zone is Europe/London - - - make_timestamptz(2013, 7, 15, 8, 15, 23.5) - 2013-07-15 08:15:23.5+01 - - - make_timestamptz(2013, 7, 15, 8, 15, 23.5, 'America/New_York') - 2013-07-15 13:15:23.5+01 - - - - - - - now - - now ( ) - timestamp with time zone - - - Current date and time (start of current transaction); - see - - - now() - 2019-12-23 14:39:53.662522-05 - - - - - - - statement_timestamp - - statement_timestamp ( ) - timestamp with time zone - - - Current date and time (start of current statement); - see - - - statement_timestamp() - 2019-12-23 14:39:53.662522-05 - - - - - - - timeofday - - timeofday ( ) - text - - - Current date and time - (like clock_timestamp, but as a text string); - see - - - timeofday() - Mon Dec 23 14:39:53.662522 2019 EST - - - - - - - transaction_timestamp - - transaction_timestamp ( ) - timestamp with time zone - - - Current date and time (start of current transaction); - see - - - transaction_timestamp() - 2019-12-23 14:39:53.662522-05 - - - - - - - to_timestamp - - to_timestamp ( double precision ) - timestamp with time zone - - - Convert Unix epoch (seconds since 1970-01-01 00:00:00+00) to - timestamp with time zone - - - to_timestamp(1284352323) - 2010-09-13 04:32:03+00 - - - - -
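Since every field of make_interval defaults to zero, named-argument notation is usually the clearest way to call it; for example (output shown in the default interval style):

SELECT make_interval(years => 1, weeks => 2, hours => 3);
Result: 1 year 14 days 03:00:00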
- - - - OVERLAPS - - In addition to these functions, the SQL OVERLAPS operator is - supported: - -(start1, end1) OVERLAPS (start2, end2) -(start1, length1) OVERLAPS (start2, length2) - - This expression yields true when two time periods (defined by their - endpoints) overlap, false when they do not overlap. The endpoints - can be specified as pairs of dates, times, or time stamps; or as - a date, time, or time stamp followed by an interval. When a pair - of values is provided, either the start or the end can be written - first; OVERLAPS automatically takes the earlier value - of the pair as the start. Each time period is considered to - represent the half-open interval start <= - time < end, unless - start and end are equal in which case it - represents that single time instant. This means for instance that two - time periods with only an endpoint in common do not overlap. - - - -SELECT (DATE '2001-02-16', DATE '2001-12-21') OVERLAPS - (DATE '2001-10-30', DATE '2002-10-30'); -Result: true -SELECT (DATE '2001-02-16', INTERVAL '100 days') OVERLAPS - (DATE '2001-10-30', DATE '2002-10-30'); -Result: false -SELECT (DATE '2001-10-29', DATE '2001-10-30') OVERLAPS - (DATE '2001-10-30', DATE '2001-10-31'); -Result: false -SELECT (DATE '2001-10-30', DATE '2001-10-30') OVERLAPS - (DATE '2001-10-30', DATE '2001-10-31'); -Result: true - - - - When adding an interval value to (or subtracting an - interval value from) a timestamp - or timestamp with time zone value, the months, days, and - microseconds fields of the interval value are handled in turn. - First, a nonzero months field advances or decrements the date of the - timestamp by the indicated number of months, keeping the day of month the - same unless it would be past the end of the new month, in which case the - last day of that month is used. (For example, March 31 plus 1 month - becomes April 30, but March 31 plus 2 months becomes May 31.) - Then the days field advances or decrements the date of the timestamp by - the indicated number of days. In both these steps the local time of day - is kept the same. Finally, if there is a nonzero microseconds field, it - is added or subtracted literally. - When doing arithmetic on a timestamp with time zone value in - a time zone that recognizes DST, this means that adding or subtracting - (say) interval '1 day' does not necessarily have the - same result as adding or subtracting interval '24 - hours'. - For example, with the session time zone set - to America/Denver: - -SELECT timestamp with time zone '2005-04-02 12:00:00-07' + interval '1 day'; -Result: 2005-04-03 12:00:00-06 -SELECT timestamp with time zone '2005-04-02 12:00:00-07' + interval '24 hours'; -Result: 2005-04-03 13:00:00-06 - - This happens because an hour was skipped due to a change in daylight saving - time at 2005-04-03 02:00:00 in time zone - America/Denver. - - - - Note there can be ambiguity in the months field returned by - age because different months have different numbers of - days. PostgreSQL's approach uses the month from the - earlier of the two dates when calculating partial months. For example, - age('2004-06-01', '2004-04-30') uses April to yield - 1 mon 1 day, while using May would yield 1 mon 2 - days because May has 31 days, while April has only 30. - - - - Subtraction of dates and timestamps can also be complex. 
One conceptually - simple way to perform subtraction is to convert each value to a number - of seconds using EXTRACT(EPOCH FROM ...), then subtract the - results; this produces the - number of seconds between the two values. This will adjust - for the number of days in each month, timezone changes, and daylight - saving time adjustments. Subtraction of date or timestamp - values with the - operator - returns the number of days (24-hours) and hours/minutes/seconds - between the values, making the same adjustments. The age - function returns years, months, days, and hours/minutes/seconds, - performing field-by-field subtraction and then adjusting for negative - field values. The following queries illustrate the differences in these - approaches. The sample results were produced with timezone - = 'US/Eastern'; there is a daylight saving time change between the - two dates used: - - - -SELECT EXTRACT(EPOCH FROM timestamptz '2013-07-01 12:00:00') - - EXTRACT(EPOCH FROM timestamptz '2013-03-01 12:00:00'); -Result: 10537200.000000 -SELECT (EXTRACT(EPOCH FROM timestamptz '2013-07-01 12:00:00') - - EXTRACT(EPOCH FROM timestamptz '2013-03-01 12:00:00')) - / 60 / 60 / 24; -Result: 121.9583333333333333 -SELECT timestamptz '2013-07-01 12:00:00' - timestamptz '2013-03-01 12:00:00'; -Result: 121 days 23:00:00 -SELECT age(timestamptz '2013-07-01 12:00:00', timestamptz '2013-03-01 12:00:00'); -Result: 4 mons - - - - <function>EXTRACT</function>, <function>date_part</function> - - - date_part - - - extract - - - -EXTRACT(field FROM source) - - - - The extract function retrieves subfields - such as year or hour from date/time values. - source must be a value expression of - type timestamp, date, time, - or interval. (Timestamps and times can be with or - without time zone.) - field is an identifier or - string that selects what field to extract from the source value. - Not all fields are valid for every input data type; for example, fields - smaller than a day cannot be extracted from a date, while - fields of a day or more cannot be extracted from a time. - The extract function returns values of type - numeric. - - - - The following are valid field names: - - - - - century - - - The century; for interval values, the year field - divided by 100 - - - -SELECT EXTRACT(CENTURY FROM TIMESTAMP '2000-12-16 12:21:13'); -Result: 20 -SELECT EXTRACT(CENTURY FROM TIMESTAMP '2001-02-16 20:38:40'); -Result: 21 -SELECT EXTRACT(CENTURY FROM DATE '0001-01-01 AD'); -Result: 1 -SELECT EXTRACT(CENTURY FROM DATE '0001-12-31 BC'); -Result: -1 -SELECT EXTRACT(CENTURY FROM INTERVAL '2001 years'); -Result: 20 - - - - - - day - - - The day of the month (1–31); for interval - values, the number of days - - - -SELECT EXTRACT(DAY FROM TIMESTAMP '2001-02-16 20:38:40'); -Result: 16 -SELECT EXTRACT(DAY FROM INTERVAL '40 days 1 minute'); -Result: 40 - - - - - - - decade - - - The year field divided by 10 - - - -SELECT EXTRACT(DECADE FROM TIMESTAMP '2001-02-16 20:38:40'); -Result: 200 - - - - - - dow - - - The day of the week as Sunday (0) to - Saturday (6) - - - -SELECT EXTRACT(DOW FROM TIMESTAMP '2001-02-16 20:38:40'); -Result: 5 - - - Note that extract's day of the week numbering - differs from that of the to_char(..., - 'D') function. 
- - - - - - - doy - - - The day of the year (1–365/366) - - - -SELECT EXTRACT(DOY FROM TIMESTAMP '2001-02-16 20:38:40'); -Result: 47 - - - - - - epoch - - - For timestamp with time zone values, the - number of seconds since 1970-01-01 00:00:00 UTC (negative for - timestamps before that); - for date and timestamp values, the - nominal number of seconds since 1970-01-01 00:00:00, - without regard to timezone or daylight-savings rules; - for interval values, the total number - of seconds in the interval - - - -SELECT EXTRACT(EPOCH FROM TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40.12-08'); -Result: 982384720.120000 -SELECT EXTRACT(EPOCH FROM TIMESTAMP '2001-02-16 20:38:40.12'); -Result: 982355920.120000 -SELECT EXTRACT(EPOCH FROM INTERVAL '5 days 3 hours'); -Result: 442800.000000 - - - - You can convert an epoch value back to a timestamp with time zone - with to_timestamp: - - -SELECT to_timestamp(982384720.12); -Result: 2001-02-17 04:38:40.12+00 - - - - Beware that applying to_timestamp to an epoch - extracted from a date or timestamp value - could produce a misleading result: the result will effectively - assume that the original value had been given in UTC, which might - not be the case. - - - - - - hour - - - The hour field (0–23 in timestamps, unrestricted in - intervals) - - - -SELECT EXTRACT(HOUR FROM TIMESTAMP '2001-02-16 20:38:40'); -Result: 20 - - - - - - isodow - - - The day of the week as Monday (1) to - Sunday (7) - - - -SELECT EXTRACT(ISODOW FROM TIMESTAMP '2001-02-18 20:38:40'); -Result: 7 - - - This is identical to dow except for Sunday. This - matches the ISO 8601 day of the week numbering. - - - - - - - isoyear - - - The ISO 8601 week-numbering year that the date - falls in - - - -SELECT EXTRACT(ISOYEAR FROM DATE '2006-01-01'); -Result: 2005 -SELECT EXTRACT(ISOYEAR FROM DATE '2006-01-02'); -Result: 2006 - - - - Each ISO 8601 week-numbering year begins with the - Monday of the week containing the 4th of January, so in early - January or late December the ISO year may be - different from the Gregorian year. See the week - field for more information. - - - - - - julian - - - The Julian Date corresponding to the - date or timestamp. Timestamps - that are not local midnight result in a fractional value. See - for more information. - - - -SELECT EXTRACT(JULIAN FROM DATE '2006-01-01'); -Result: 2453737 -SELECT EXTRACT(JULIAN FROM TIMESTAMP '2006-01-01 12:00'); -Result: 2453737.50000000000000000000 - - - - - - microseconds - - - The seconds field, including fractional parts, multiplied by 1 - 000 000; note that this includes full seconds - - - -SELECT EXTRACT(MICROSECONDS FROM TIME '17:12:28.5'); -Result: 28500000 - - - - - - millennium - - - The millennium; for interval values, the year field - divided by 1000 - - - -SELECT EXTRACT(MILLENNIUM FROM TIMESTAMP '2001-02-16 20:38:40'); -Result: 3 -SELECT EXTRACT(MILLENNIUM FROM INTERVAL '2001 years'); -Result: 2 - - - - Years in the 1900s are in the second millennium. - The third millennium started January 1, 2001. - - - - - - milliseconds - - - The seconds field, including fractional parts, multiplied by - 1000. Note that this includes full seconds. 
- - - -SELECT EXTRACT(MILLISECONDS FROM TIME '17:12:28.5'); -Result: 28500.000 - - - - - - minute - - - The minutes field (0–59) - - - -SELECT EXTRACT(MINUTE FROM TIMESTAMP '2001-02-16 20:38:40'); -Result: 38 - - - - - - month - - - The number of the month within the year (1–12); - for interval values, the number of months modulo 12 - (0–11) - - - -SELECT EXTRACT(MONTH FROM TIMESTAMP '2001-02-16 20:38:40'); -Result: 2 -SELECT EXTRACT(MONTH FROM INTERVAL '2 years 3 months'); -Result: 3 -SELECT EXTRACT(MONTH FROM INTERVAL '2 years 13 months'); -Result: 1 - - - - - - quarter - - - The quarter of the year (1–4) that the date is in; - for interval values, the month field divided by 3 - plus 1 - - - -SELECT EXTRACT(QUARTER FROM TIMESTAMP '2001-02-16 20:38:40'); -Result: 1 -SELECT EXTRACT(QUARTER FROM INTERVAL '1 year 6 months'); -Result: 3 - - - - - - second - - - The seconds field, including any fractional seconds - - - -SELECT EXTRACT(SECOND FROM TIMESTAMP '2001-02-16 20:38:40'); -Result: 40.000000 -SELECT EXTRACT(SECOND FROM TIME '17:12:28.5'); -Result: 28.500000 - - - - - timezone - - - The time zone offset from UTC, measured in seconds. Positive values - correspond to time zones east of UTC, negative values to - zones west of UTC. (Technically, - PostgreSQL does not use UTC because - leap seconds are not handled.) - - - - - - timezone_hour - - - The hour component of the time zone offset - - - - - - timezone_minute - - - The minute component of the time zone offset - - - - - - week - - - The number of the ISO 8601 week-numbering week of - the year. By definition, ISO weeks start on Mondays and the first - week of a year contains January 4 of that year. In other words, the - first Thursday of a year is in week 1 of that year. - - - In the ISO week-numbering system, it is possible for early-January - dates to be part of the 52nd or 53rd week of the previous year, and for - late-December dates to be part of the first week of the next year. - For example, 2005-01-01 is part of the 53rd week of year - 2004, and 2006-01-01 is part of the 52nd week of year - 2005, while 2012-12-31 is part of the first week of 2013. - It's recommended to use the isoyear field together with - week to get consistent results. - - - - For interval values, the week field is simply the number - of integral days divided by 7. - - - -SELECT EXTRACT(WEEK FROM TIMESTAMP '2001-02-16 20:38:40'); -Result: 7 -SELECT EXTRACT(WEEK FROM INTERVAL '13 days 24 hours'); -Result: 1 - - - - - - year - - - The year field. Keep in mind there is no 0 AD, so subtracting - BC years from AD years should be done with care. - - - -SELECT EXTRACT(YEAR FROM TIMESTAMP '2001-02-16 20:38:40'); -Result: 2001 - - - - - - - - - When processing an interval value, - the extract function produces field values that - match the interpretation used by the interval output function. This - can produce surprising results if one starts with a non-normalized - interval representation, for example: - -SELECT INTERVAL '80 minutes'; -Result: 01:20:00 -SELECT EXTRACT(MINUTES FROM INTERVAL '80 minutes'); -Result: 20 - - - - - - When the input value is +/-Infinity, extract returns - +/-Infinity for monotonically-increasing fields (epoch, - julian, year, isoyear, - decade, century, and millennium - for timestamp inputs; epoch, hour, - day, year, decade, - century, and millennium for - interval inputs). - For other fields, NULL is returned. PostgreSQL - versions before 9.6 returned zero for all cases of infinite input. 
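The infinity behavior described above can be checked directly; year is monotonically increasing, while month is not:

SELECT EXTRACT(YEAR FROM TIMESTAMP 'infinity');
Result: Infinity
SELECT EXTRACT(MONTH FROM TIMESTAMP 'infinity');
Result: NULL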
- - - - - The extract function is primarily intended - for computational processing. For formatting date/time values for - display, see . - - - - The date_part function is modeled on the traditional - Ingres equivalent to the - SQL-standard function extract: - -date_part('field', source) - - Note that here the field parameter needs to - be a string value, not a name. The valid field names for - date_part are the same as for - extract. - For historical reasons, the date_part function - returns values of type double precision. This can result in - a loss of precision in certain uses. Using extract - is recommended instead. - - - -SELECT date_part('day', TIMESTAMP '2001-02-16 20:38:40'); -Result: 16 -SELECT date_part('hour', INTERVAL '4 hours 3 minutes'); -Result: 4 - - - - - - <function>date_trunc</function> - - - date_trunc - - - - The function date_trunc is conceptually - similar to the trunc function for numbers. - - - - -date_trunc(field, source , time_zone ) - - source is a value expression of type - timestamp, timestamp with time zone, - or interval. - (Values of type date and - time are cast automatically to timestamp or - interval, respectively.) - field selects to which precision to - truncate the input value. The return value is likewise of type - timestamp, timestamp with time zone, - or interval, - and it has all fields that are less significant than the - selected one set to zero (or one, for day and month). - - - - Valid values for field are: - - microseconds - milliseconds - second - minute - hour - day - week - month - quarter - year - decade - century - millennium - - - - - When the input value is of type timestamp with time zone, - the truncation is performed with respect to a particular time zone; - for example, truncation to day produces a value that - is midnight in that zone. By default, truncation is done with respect - to the current setting, but the - optional time_zone argument can be provided - to specify a different time zone. The time zone name can be specified - in any of the ways described in . - - - - A time zone cannot be specified when processing timestamp without - time zone or interval inputs. These are always - taken at face value. - - - - Examples (assuming the local time zone is America/New_York): - -SELECT date_trunc('hour', TIMESTAMP '2001-02-16 20:38:40'); -Result: 2001-02-16 20:00:00 -SELECT date_trunc('year', TIMESTAMP '2001-02-16 20:38:40'); -Result: 2001-01-01 00:00:00 -SELECT date_trunc('day', TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40+00'); -Result: 2001-02-16 00:00:00-05 -SELECT date_trunc('day', TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40+00', 'Australia/Sydney'); -Result: 2001-02-16 08:00:00-05 -SELECT date_trunc('hour', INTERVAL '3 days 02:47:33'); -Result: 3 days 02:00:00 - - - - - - <function>date_bin</function> - - - date_bin - - - - The function date_bin bins the input - timestamp into the specified interval (the stride) - aligned with a specified origin. - - - - -date_bin(stride, source, origin) - - source is a value expression of type - timestamp or timestamp with time zone. (Values - of type date are cast automatically to - timestamp.) stride is a value - expression of type interval. The return value is likewise - of type timestamp or timestamp with time zone, - and it marks the beginning of the bin into which the - source is placed. 
- - - - Examples: - -SELECT date_bin('15 minutes', TIMESTAMP '2020-02-11 15:44:17', TIMESTAMP '2001-01-01'); -Result: 2020-02-11 15:30:00 -SELECT date_bin('15 minutes', TIMESTAMP '2020-02-11 15:44:17', TIMESTAMP '2001-01-01 00:02:30'); -Result: 2020-02-11 15:32:30 - - - - - In the case of full units (1 minute, 1 hour, etc.), it gives the same result as - the analogous date_trunc call, but the difference is - that date_bin can truncate to an arbitrary interval. - - - - The stride interval must be greater than zero and - cannot contain units of month or larger. - - - - - <literal>AT TIME ZONE</literal> and <literal>AT LOCAL</literal> - - - time zone - conversion - - - - AT TIME ZONE - - - - AT LOCAL - - - - The AT TIME ZONE operator converts time - stamp without time zone to/from - time stamp with time zone, and - time with time zone values to different time - zones. shows its - variants. - - - - <literal>AT TIME ZONE</literal> and <literal>AT LOCAL</literal> Variants - - - - - Operator - - - Description - - - Example(s) - - - - - - - - timestamp without time zone AT TIME ZONE zone - timestamp with time zone - - - Converts given time stamp without time zone to - time stamp with time zone, assuming the given - value is in the named time zone. - - - timestamp '2001-02-16 20:38:40' at time zone 'America/Denver' - 2001-02-17 03:38:40+00 - - - - - - timestamp without time zone AT LOCAL - timestamp with time zone - - - Converts given time stamp without time zone to - time stamp with the session's - TimeZone value as time zone. - - - timestamp '2001-02-16 20:38:40' at local - 2001-02-17 03:38:40+00 - - - - - - timestamp with time zone AT TIME ZONE zone - timestamp without time zone - - - Converts given time stamp with time zone to - time stamp without time zone, as the time would - appear in that zone. - - - timestamp with time zone '2001-02-16 20:38:40-05' at time zone 'America/Denver' - 2001-02-16 18:38:40 - - - - - - timestamp with time zone AT LOCAL - timestamp without time zone - - - Converts given time stamp with time zone to - time stamp without time zone, as the time would - appear with the session's TimeZone value as time zone. - - - timestamp with time zone '2001-02-16 20:38:40-05' at local - 2001-02-16 18:38:40 - - - - - - time with time zone AT TIME ZONE zone - time with time zone - - - Converts given time with time zone to a new time - zone. Since no date is supplied, this uses the currently active UTC - offset for the named destination zone. - - - time with time zone '05:34:17-05' at time zone 'UTC' - 10:34:17+00 - - - - - - time with time zone AT LOCAL - time with time zone - - - Converts given time with time zone to a new time - zone. Since no date is supplied, this uses the currently active UTC - offset for the session's TimeZone value. - - - Assuming the session's TimeZone is set to UTC: - - - time with time zone '05:34:17-05' at local - 10:34:17+00 - - - - -
- - - In these expressions, the desired time zone zone can be - specified either as a text value (e.g., 'America/Los_Angeles') - or as an interval (e.g., INTERVAL '-08:00'). - In the text case, a time zone name can be specified in any of the ways - described in . - The interval case is only useful for zones that have fixed offsets from - UTC, so it is not very common in practice. - - - - The syntax AT LOCAL may be used as shorthand for - AT TIME ZONE local, where - local is the session's - TimeZone value. - - - - Examples (assuming the current setting - is America/Los_Angeles): - -SELECT TIMESTAMP '2001-02-16 20:38:40' AT TIME ZONE 'America/Denver'; -Result: 2001-02-16 19:38:40-08 -SELECT TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40-05' AT TIME ZONE 'America/Denver'; -Result: 2001-02-16 18:38:40 -SELECT TIMESTAMP '2001-02-16 20:38:40' AT TIME ZONE 'Asia/Tokyo' AT TIME ZONE 'America/Chicago'; -Result: 2001-02-16 05:38:40 -SELECT TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40-05' AT LOCAL; -Result: 2001-02-16 17:38:40 -SELECT TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40-05' AT TIME ZONE '+05'; -Result: 2001-02-16 20:38:40 -SELECT TIME WITH TIME ZONE '20:38:40-05' AT LOCAL; -Result: 17:38:40 - - The first example adds a time zone to a value that lacks it, and - displays the value using the current TimeZone - setting. The second example shifts the time stamp with time zone value - to the specified time zone, and returns the value without a time zone. - This allows storage and display of values different from the current - TimeZone setting. The third example converts - Tokyo time to Chicago time. The fourth example shifts the time stamp - with time zone value to the time zone currently specified by the - TimeZone setting and returns the value without a - time zone. The fifth example demonstrates that the sign in a POSIX-style - time zone specification has the opposite meaning of the sign in an - ISO-8601 datetime literal, as described in - and . - - - - The sixth example is a cautionary tale. Due to the fact that there is no - date associated with the input value, the conversion is made using the - current date of the session. Therefore, this static example may show a wrong - result depending on the time of the year it is viewed because - 'America/Los_Angeles' observes Daylight Savings Time. - - - - The function timezone(zone, - timestamp) is equivalent to the SQL-conforming construct - timestamp AT TIME ZONE - zone. - - - - The function timezone(zone, - time) is equivalent to the SQL-conforming construct - time AT TIME ZONE - zone. - - - - The function timezone(timestamp) - is equivalent to the SQL-conforming construct timestamp - AT LOCAL. - - - - The function timezone(time) - is equivalent to the SQL-conforming construct time - AT LOCAL. - -
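As an illustration of these equivalences (assuming, as in the examples above, that the current TimeZone setting is America/Los_Angeles), the function form produces the same result as the corresponding operator form:

SELECT timezone('America/Denver', TIMESTAMP '2001-02-16 20:38:40');
Result: 2001-02-16 19:38:40-08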
- - - Current Date/Time - - - date - current - - - - time - current - - - - PostgreSQL provides a number of functions - that return values related to the current date and time. These - SQL-standard functions all return values based on the start time of - the current transaction: - -CURRENT_DATE -CURRENT_TIME -CURRENT_TIMESTAMP -CURRENT_TIME(precision) -CURRENT_TIMESTAMP(precision) -LOCALTIME -LOCALTIMESTAMP -LOCALTIME(precision) -LOCALTIMESTAMP(precision) - - - - - CURRENT_TIME and - CURRENT_TIMESTAMP deliver values with time zone; - LOCALTIME and - LOCALTIMESTAMP deliver values without time zone. - - - - CURRENT_TIME, - CURRENT_TIMESTAMP, - LOCALTIME, and - LOCALTIMESTAMP - can optionally take - a precision parameter, which causes the result to be rounded - to that many fractional digits in the seconds field. Without a precision parameter, - the result is given to the full available precision. - - - - Some examples: - -SELECT CURRENT_TIME; -Result: 14:39:53.662522-05 -SELECT CURRENT_DATE; -Result: 2019-12-23 -SELECT CURRENT_TIMESTAMP; -Result: 2019-12-23 14:39:53.662522-05 -SELECT CURRENT_TIMESTAMP(2); -Result: 2019-12-23 14:39:53.66-05 -SELECT LOCALTIMESTAMP; -Result: 2019-12-23 14:39:53.662522 - - - - - Since these functions return - the start time of the current transaction, their values do not - change during the transaction. This is considered a feature: - the intent is to allow a single transaction to have a consistent - notion of the current time, so that multiple - modifications within the same transaction bear the same - time stamp. - - - - - Other database systems might advance these values more - frequently. - - - - - PostgreSQL also provides functions that - return the start time of the current statement, as well as the actual - current time at the instant the function is called. The complete list - of non-SQL-standard time functions is: - -transaction_timestamp() -statement_timestamp() -clock_timestamp() -timeofday() -now() - - - - - transaction_timestamp() is equivalent to - CURRENT_TIMESTAMP, but is named to clearly reflect - what it returns. - statement_timestamp() returns the start time of the current - statement (more specifically, the time of receipt of the latest command - message from the client). - statement_timestamp() and transaction_timestamp() - return the same value during the first statement of a transaction, but might - differ during subsequent statements. - clock_timestamp() returns the actual current time, and - therefore its value changes even within a single SQL statement. - timeofday() is a historical - PostgreSQL function. Like - clock_timestamp(), it returns the actual current time, - but as a formatted text string rather than a timestamp - with time zone value. - now() is a traditional PostgreSQL - equivalent to transaction_timestamp(). - - - - All the date/time data types also accept the special literal value - now to specify the current date and time (again, - interpreted as the transaction start time). Thus, - the following three all return the same result: - -SELECT CURRENT_TIMESTAMP; -SELECT now(); -SELECT TIMESTAMP 'now'; -- but see tip below - - - - - - Do not use the third form when specifying a value to be evaluated later, - for example in a DEFAULT clause for a table column. - The system will convert now - to a timestamp as soon as the constant is parsed, so that when - the default value is needed, - the time of the table creation would be used! 
The first two - forms will not be evaluated until the default value is used, - because they are function calls. Thus they will give the desired - behavior of defaulting to the time of row insertion. - (See also .) - - - - - - Delaying Execution - - - pg_sleep - - - pg_sleep_for - - - pg_sleep_until - - - sleep - - - delay - - - - The following functions are available to delay execution of the server - process: - -pg_sleep ( double precision ) -pg_sleep_for ( interval ) -pg_sleep_until ( timestamp with time zone ) - - - pg_sleep makes the current session's process - sleep until the given number of seconds have - elapsed. Fractional-second delays can be specified. - pg_sleep_for is a convenience function to - allow the sleep time to be specified as an interval. - pg_sleep_until is a convenience function for when - a specific wake-up time is desired. - For example: - - -SELECT pg_sleep(1.5); -SELECT pg_sleep_for('5 minutes'); -SELECT pg_sleep_until('tomorrow 03:00'); - - - - - - The effective resolution of the sleep interval is platform-specific; - 0.01 seconds is a common value. The sleep delay will be at least as long - as specified. It might be longer depending on factors such as server load. - In particular, pg_sleep_until is not guaranteed to - wake up exactly at the specified time, but it will not wake up any earlier. - - - - - - Make sure that your session does not hold more locks than necessary - when calling pg_sleep or its variants. Otherwise - other sessions might have to wait for your sleeping process, slowing down - the entire system. - - - - -
- - - - Enum Support Functions - - - For enum types (described in ), - there are several functions that allow cleaner programming without - hard-coding particular values of an enum type. - These are listed in . The examples - assume an enum type created as: - - -CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple'); - - - - - - Enum Support Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - enum_first - - enum_first ( anyenum ) - anyenum - - - Returns the first value of the input enum type. - - - enum_first(null::rainbow) - red - - - - - - enum_last - - enum_last ( anyenum ) - anyenum - - - Returns the last value of the input enum type. - - - enum_last(null::rainbow) - purple - - - - - - enum_range - - enum_range ( anyenum ) - anyarray - - - Returns all values of the input enum type in an ordered array. - - - enum_range(null::rainbow) - {red,orange,yellow,&zwsp;green,blue,purple} - - - - - enum_range ( anyenum, anyenum ) - anyarray - - - Returns the range between the two given enum values, as an ordered - array. The values must be from the same enum type. If the first - parameter is null, the result will start with the first value of - the enum type. - If the second parameter is null, the result will end with the last - value of the enum type. - - - enum_range('orange'::rainbow, 'green'::rainbow) - {orange,yellow,green} - - - enum_range(NULL, 'green'::rainbow) - {red,orange,&zwsp;yellow,green} - - - enum_range('orange'::rainbow, NULL) - {orange,yellow,green,&zwsp;blue,purple} - - - - -
Notice that except for the two-argument form of enum_range, these functions disregard the specific value passed to them; they care only about its declared data type. Either null or a specific value of the type can be passed, with the same result. It is more common to apply these functions to a table column or function argument than to a hardwired type name as used in the examples.
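As a minimal sketch of that more common usage (the table name paints is purely illustrative), the function sees only the column's declared type, never the stored values:

CREATE TABLE paints (hue rainbow);
INSERT INTO paints VALUES ('red'), ('green');
SELECT DISTINCT enum_last(hue) FROM paints;
Result: purple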
- - - Geometric Functions and Operators - - - The geometric types point, box, - lseg, line, path, - polygon, and circle have a large set of - native support functions and operators, shown in , , and . - - - - Geometric Operators - - - - - Operator - - - Description - - - Example(s) - - - - - - - - geometric_type + point - geometric_type - - - Adds the coordinates of the second point to those of each - point of the first argument, thus performing translation. - Available for point, box, path, - circle. - - - box '(1,1),(0,0)' + point '(2,0)' - (3,1),(2,0) - - - - - - path + path - path - - - Concatenates two open paths (returns NULL if either path is closed). - - - path '[(0,0),(1,1)]' + path '[(2,2),(3,3),(4,4)]' - [(0,0),(1,1),(2,2),(3,3),(4,4)] - - - - - - geometric_type - point - geometric_type - - - Subtracts the coordinates of the second point from those - of each point of the first argument, thus performing translation. - Available for point, box, path, - circle. - - - box '(1,1),(0,0)' - point '(2,0)' - (-1,1),(-2,0) - - - - - - geometric_type * point - geometric_type - - - Multiplies each point of the first argument by the second - point (treating a point as being a complex number - represented by real and imaginary parts, and performing standard - complex multiplication). If one interprets - the second point as a vector, this is equivalent to - scaling the object's size and distance from the origin by the length - of the vector, and rotating it counterclockwise around the origin by - the vector's angle from the x axis. - Available for point, box,Rotating a - box with these operators only moves its corner points: the box is - still considered to have sides parallel to the axes. Hence the box's - size is not preserved, as a true rotation would do. - path, circle. - - - path '((0,0),(1,0),(1,1))' * point '(3.0,0)' - ((0,0),(3,0),(3,3)) - - - path '((0,0),(1,0),(1,1))' * point(cosd(45), sind(45)) - ((0,0),&zwsp;(0.7071067811865475,0.7071067811865475),&zwsp;(0,1.414213562373095)) - - - - - - geometric_type / point - geometric_type - - - Divides each point of the first argument by the second - point (treating a point as being a complex number - represented by real and imaginary parts, and performing standard - complex division). If one interprets - the second point as a vector, this is equivalent to - scaling the object's size and distance from the origin down by the - length of the vector, and rotating it clockwise around the origin by - the vector's angle from the x axis. - Available for point, box, path, - circle. - - - path '((0,0),(1,0),(1,1))' / point '(2.0,0)' - ((0,0),(0.5,0),(0.5,0.5)) - - - path '((0,0),(1,0),(1,1))' / point(cosd(45), sind(45)) - ((0,0),&zwsp;(0.7071067811865476,-0.7071067811865476),&zwsp;(1.4142135623730951,0)) - - - - - - @-@ geometric_type - double precision - - - Computes the total length. - Available for lseg, path. - - - @-@ path '[(0,0),(1,0),(1,1)]' - 2 - - - - - - @@ geometric_type - point - - - Computes the center point. - Available for box, lseg, - polygon, circle. - - - @@ box '(2,2),(0,0)' - (1,1) - - - - - - # geometric_type - integer - - - Returns the number of points. - Available for path, polygon. - - - # path '((1,0),(0,1),(-1,0))' - 3 - - - - - - geometric_type # geometric_type - point - - - Computes the point of intersection, or NULL if there is none. - Available for lseg, line. - - - lseg '[(0,0),(1,1)]' # lseg '[(1,0),(0,1)]' - (0.5,0.5) - - - - - - box # box - box - - - Computes the intersection of two boxes, or NULL if there is none. 
- - - box '(2,2),(-1,-1)' # box '(1,1),(-2,-2)' - (1,1),(-1,-1) - - - - - - geometric_type ## geometric_type - point - - - Computes the closest point to the first object on the second object. - Available for these pairs of types: - (point, box), - (point, lseg), - (point, line), - (lseg, box), - (lseg, lseg), - (line, lseg). - - - point '(0,0)' ## lseg '[(2,0),(0,2)]' - (1,1) - - - - - - geometric_type <-> geometric_type - double precision - - - Computes the distance between the objects. - Available for all seven geometric types, for all combinations - of point with another geometric type, and for - these additional pairs of types: - (box, lseg), - (lseg, line), - (polygon, circle) - (and the commutator cases). - - - circle '<(0,0),1>' <-> circle '<(5,0),1>' - 3 - - - - - - geometric_type @> geometric_type - boolean - - - Does first object contain second? - Available for these pairs of types: - (box, point), - (box, box), - (path, point), - (polygon, point), - (polygon, polygon), - (circle, point), - (circle, circle). - - - circle '<(0,0),2>' @> point '(1,1)' - t - - - - - - geometric_type <@ geometric_type - boolean - - - Is first object contained in or on second? - Available for these pairs of types: - (point, box), - (point, lseg), - (point, line), - (point, path), - (point, polygon), - (point, circle), - (box, box), - (lseg, box), - (lseg, line), - (polygon, polygon), - (circle, circle). - - - point '(1,1)' <@ circle '<(0,0),2>' - t - - - - - - geometric_type && geometric_type - boolean - - - Do these objects overlap? (One point in common makes this true.) - Available for box, polygon, - circle. - - - box '(1,1),(0,0)' && box '(2,2),(0,0)' - t - - - - - - geometric_type << geometric_type - boolean - - - Is first object strictly left of second? - Available for point, box, - polygon, circle. - - - circle '<(0,0),1>' << circle '<(5,0),1>' - t - - - - - - geometric_type >> geometric_type - boolean - - - Is first object strictly right of second? - Available for point, box, - polygon, circle. - - - circle '<(5,0),1>' >> circle '<(0,0),1>' - t - - - - - - geometric_type &< geometric_type - boolean - - - Does first object not extend to the right of second? - Available for box, polygon, - circle. - - - box '(1,1),(0,0)' &< box '(2,2),(0,0)' - t - - - - - - geometric_type &> geometric_type - boolean - - - Does first object not extend to the left of second? - Available for box, polygon, - circle. - - - box '(3,3),(0,0)' &> box '(2,2),(0,0)' - t - - - - - - geometric_type <<| geometric_type - boolean - - - Is first object strictly below second? - Available for point, box, polygon, - circle. - - - box '(3,3),(0,0)' <<| box '(5,5),(3,4)' - t - - - - - - geometric_type |>> geometric_type - boolean - - - Is first object strictly above second? - Available for point, box, polygon, - circle. - - - box '(5,5),(3,4)' |>> box '(3,3),(0,0)' - t - - - - - - geometric_type &<| geometric_type - boolean - - - Does first object not extend above second? - Available for box, polygon, - circle. - - - box '(1,1),(0,0)' &<| box '(2,2),(0,0)' - t - - - - - - geometric_type |&> geometric_type - boolean - - - Does first object not extend below second? - Available for box, polygon, - circle. - - - box '(3,3),(0,0)' |&> box '(2,2),(0,0)' - t - - - - - - box <^ box - boolean - - - Is first object below second (allows edges to touch)? - - - box '((1,1),(0,0))' <^ box '((2,2),(1,1))' - t - - - - - - box >^ box - boolean - - - Is first object above second (allows edges to touch)? 
- - - box '((2,2),(1,1))' >^ box '((1,1),(0,0))' - t - - - - - - geometric_type ?# geometric_type - boolean - - - Do these objects intersect? - Available for these pairs of types: - (box, box), - (lseg, box), - (lseg, lseg), - (lseg, line), - (line, box), - (line, line), - (path, path). - - - lseg '[(-1,0),(1,0)]' ?# box '(2,2),(-2,-2)' - t - - - - - - ?- line - boolean - - - ?- lseg - boolean - - - Is line horizontal? - - - ?- lseg '[(-1,0),(1,0)]' - t - - - - - - point ?- point - boolean - - - Are points horizontally aligned (that is, have same y coordinate)? - - - point '(1,0)' ?- point '(0,0)' - t - - - - - - ?| line - boolean - - - ?| lseg - boolean - - - Is line vertical? - - - ?| lseg '[(-1,0),(1,0)]' - f - - - - - - point ?| point - boolean - - - Are points vertically aligned (that is, have same x coordinate)? - - - point '(0,1)' ?| point '(0,0)' - t - - - - - - line ?-| line - boolean - - - lseg ?-| lseg - boolean - - - Are lines perpendicular? - - - lseg '[(0,0),(0,1)]' ?-| lseg '[(0,0),(1,0)]' - t - - - - - - line ?|| line - boolean - - - lseg ?|| lseg - boolean - - - Are lines parallel? - - - lseg '[(-1,0),(1,0)]' ?|| lseg '[(-1,2),(1,2)]' - t - - - - - - geometric_type ~= geometric_type - boolean - - - Are these objects the same? - Available for point, box, - polygon, circle. - - - polygon '((0,0),(1,1))' ~= polygon '((1,1),(0,0))' - t - - - - -
- - - - Note that the same as operator, ~=, - represents the usual notion of equality for the point, - box, polygon, and circle types. - Some of the geometric types also have an = operator, but - = compares for equal areas only. - The other scalar comparison operators (<= and so - on), where available for these types, likewise compare areas. - - - - - - Before PostgreSQL 14, the point - is strictly below/above comparison operators point - <<| point and point - |>> point were respectively - called <^ and >^. These - names are still available, but are deprecated and will eventually be - removed. - - - - - Geometric Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - area - - area ( geometric_type ) - double precision - - - Computes area. - Available for box, path, circle. - A path input must be closed, else NULL is returned. - Also, if the path is self-intersecting, the result may be - meaningless. - - - area(box '(2,2),(0,0)') - 4 - - - - - - - center - - center ( geometric_type ) - point - - - Computes center point. - Available for box, circle. - - - center(box '(1,2),(0,0)') - (0.5,1) - - - - - - - diagonal - - diagonal ( box ) - lseg - - - Extracts box's diagonal as a line segment - (same as lseg(box)). - - - diagonal(box '(1,2),(0,0)') - [(1,2),(0,0)] - - - - - - - diameter - - diameter ( circle ) - double precision - - - Computes diameter of circle. - - - diameter(circle '<(0,0),2>') - 4 - - - - - - - height - - height ( box ) - double precision - - - Computes vertical size of box. - - - height(box '(1,2),(0,0)') - 2 - - - - - - - isclosed - - isclosed ( path ) - boolean - - - Is path closed? - - - isclosed(path '((0,0),(1,1),(2,0))') - t - - - - - - - isopen - - isopen ( path ) - boolean - - - Is path open? - - - isopen(path '[(0,0),(1,1),(2,0)]') - t - - - - - - - length - - length ( geometric_type ) - double precision - - - Computes the total length. - Available for lseg, path. - - - length(path '((-1,0),(1,0))') - 4 - - - - - - - npoints - - npoints ( geometric_type ) - integer - - - Returns the number of points. - Available for path, polygon. - - - npoints(path '[(0,0),(1,1),(2,0)]') - 3 - - - - - - - pclose - - pclose ( path ) - path - - - Converts path to closed form. - - - pclose(path '[(0,0),(1,1),(2,0)]') - ((0,0),(1,1),(2,0)) - - - - - - - popen - - popen ( path ) - path - - - Converts path to open form. - - - popen(path '((0,0),(1,1),(2,0))') - [(0,0),(1,1),(2,0)] - - - - - - - radius - - radius ( circle ) - double precision - - - Computes radius of circle. - - - radius(circle '<(0,0),2>') - 2 - - - - - - - slope - - slope ( point, point ) - double precision - - - Computes slope of a line drawn through the two points. - - - slope(point '(0,0)', point '(2,1)') - 0.5 - - - - - - - width - - width ( box ) - double precision - - - Computes horizontal size of box. - - - width(box '(1,2),(0,0)') - 1 - - - - -
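To make the distinction noted above between ~= and = concrete: two boxes of equal area at different positions compare equal under =, but are not the same object under ~=:

SELECT box '(2,2),(0,0)' = box '(3,3),(1,1)';
Result: t
SELECT box '(2,2),(0,0)' ~= box '(3,3),(1,1)';
Result: f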
- - - Geometric Type Conversion Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - box - - box ( circle ) - box - - - Computes box inscribed within the circle. - - - box(circle '<(0,0),2>') - (1.414213562373095,1.414213562373095),&zwsp;(-1.414213562373095,-1.414213562373095) - - - - - - box ( point ) - box - - - Converts point to empty box. - - - box(point '(1,0)') - (1,0),(1,0) - - - - - - box ( point, point ) - box - - - Converts any two corner points to box. - - - box(point '(0,1)', point '(1,0)') - (1,1),(0,0) - - - - - - box ( polygon ) - box - - - Computes bounding box of polygon. - - - box(polygon '((0,0),(1,1),(2,0))') - (2,1),(0,0) - - - - - - - bound_box - - bound_box ( box, box ) - box - - - Computes bounding box of two boxes. - - - bound_box(box '(1,1),(0,0)', box '(4,4),(3,3)') - (4,4),(0,0) - - - - - - - circle - - circle ( box ) - circle - - - Computes smallest circle enclosing box. - - - circle(box '(1,1),(0,0)') - <(0.5,0.5),0.7071067811865476> - - - - - - circle ( point, double precision ) - circle - - - Constructs circle from center and radius. - - - circle(point '(0,0)', 2.0) - <(0,0),2> - - - - - - circle ( polygon ) - circle - - - Converts polygon to circle. The circle's center is the mean of the - positions of the polygon's points, and the radius is the average - distance of the polygon's points from that center. - - - circle(polygon '((0,0),(1,3),(2,0))') - <(1,1),1.6094757082487299> - - - - - - - line - - line ( point, point ) - line - - - Converts two points to the line through them. - - - line(point '(-1,0)', point '(1,0)') - {0,-1,0} - - - - - - - lseg - - lseg ( box ) - lseg - - - Extracts box's diagonal as a line segment. - - - lseg(box '(1,0),(-1,0)') - [(1,0),(-1,0)] - - - - - - lseg ( point, point ) - lseg - - - Constructs line segment from two endpoints. - - - lseg(point '(-1,0)', point '(1,0)') - [(-1,0),(1,0)] - - - - - - - path - - path ( polygon ) - path - - - Converts polygon to a closed path with the same list of points. - - - path(polygon '((0,0),(1,1),(2,0))') - ((0,0),(1,1),(2,0)) - - - - - - - point - - point ( double precision, double precision ) - point - - - Constructs point from its coordinates. - - - point(23.4, -44.5) - (23.4,-44.5) - - - - - - point ( box ) - point - - - Computes center of box. - - - point(box '(1,0),(-1,0)') - (0,0) - - - - - - point ( circle ) - point - - - Computes center of circle. - - - point(circle '<(0,0),2>') - (0,0) - - - - - - point ( lseg ) - point - - - Computes center of line segment. - - - point(lseg '[(-1,0),(1,0)]') - (0,0) - - - - - - point ( polygon ) - point - - - Computes center of polygon (the mean of the - positions of the polygon's points). - - - point(polygon '((0,0),(1,1),(2,0))') - (1,0.3333333333333333) - - - - - - - polygon - - polygon ( box ) - polygon - - - Converts box to a 4-point polygon. - - - polygon(box '(1,1),(0,0)') - ((0,0),(0,1),(1,1),(1,0)) - - - - - - polygon ( circle ) - polygon - - - Converts circle to a 12-point polygon. 
- - - polygon(circle '<(0,0),2>') - ((-2,0),&zwsp;(-1.7320508075688774,0.9999999999999999),&zwsp;(-1.0000000000000002,1.7320508075688772),&zwsp;(-1.2246063538223773e-16,2),&zwsp;(0.9999999999999996,1.7320508075688774),&zwsp;(1.732050807568877,1.0000000000000007),&zwsp;(2,2.4492127076447545e-16),&zwsp;(1.7320508075688776,-0.9999999999999994),&zwsp;(1.0000000000000009,-1.7320508075688767),&zwsp;(3.673819061467132e-16,-2),&zwsp;(-0.9999999999999987,-1.732050807568878),&zwsp;(-1.7320508075688767,-1.0000000000000009)) - - - - - - polygon ( integer, circle ) - polygon - - - Converts circle to an n-point polygon. - - - polygon(4, circle '<(3,0),1>') - ((2,0),&zwsp;(3,1),&zwsp;(4,1.2246063538223773e-16),&zwsp;(3,-1)) - - - - - - polygon ( path ) - polygon - - - Converts closed path to a polygon with the same list of points. - - - polygon(path '((0,0),(1,1),(2,0))') - ((0,0),(1,1),(2,0)) - - - - - -
It is possible to access the two component numbers of a point as though the point were an array with indexes 0 and 1. For example, if t.p is a point column then SELECT p[0] FROM t retrieves the X coordinate and UPDATE t SET p[1] = ... changes the Y coordinate. In the same way, a value of type box or lseg can be treated as an array of two point values.
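For example, using a throwaway table named t as in the text above:

CREATE TABLE t (p point);
INSERT INTO t VALUES (point '(1,2)');
SELECT p[0] FROM t;
Result: 1
UPDATE t SET p[1] = 5;
SELECT p FROM t;
Result: (1,5)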
- - - - Network Address Functions and Operators - - - The IP network address types, cidr and inet, - support the usual comparison operators shown in - - as well as the specialized operators and functions shown in - and - . - - - - Any cidr value can be cast to inet implicitly; - therefore, the operators and functions shown below as operating on - inet also work on cidr values. (Where there are - separate functions for inet and cidr, it is - because the behavior should be different for the two cases.) - Also, it is permitted to cast an inet value - to cidr. When this is done, any bits to the right of the - netmask are silently zeroed to create a valid cidr value. - - - - IP Address Operators - - - - - Operator - - - Description - - - Example(s) - - - - - - - - inet << inet - boolean - - - Is subnet strictly contained by subnet? - This operator, and the next four, test for subnet inclusion. They - consider only the network parts of the two addresses (ignoring any - bits to the right of the netmasks) and determine whether one network - is identical to or a subnet of the other. - - - inet '192.168.1.5' << inet '192.168.1/24' - t - - - inet '192.168.0.5' << inet '192.168.1/24' - f - - - inet '192.168.1/24' << inet '192.168.1/24' - f - - - - - - inet <<= inet - boolean - - - Is subnet contained by or equal to subnet? - - - inet '192.168.1/24' <<= inet '192.168.1/24' - t - - - - - - inet >> inet - boolean - - - Does subnet strictly contain subnet? - - - inet '192.168.1/24' >> inet '192.168.1.5' - t - - - - - - inet >>= inet - boolean - - - Does subnet contain or equal subnet? - - - inet '192.168.1/24' >>= inet '192.168.1/24' - t - - - - - - inet && inet - boolean - - - Does either subnet contain or equal the other? - - - inet '192.168.1/24' && inet '192.168.1.80/28' - t - - - inet '192.168.1/24' && inet '192.168.2.0/28' - f - - - - - - ~ inet - inet - - - Computes bitwise NOT. - - - ~ inet '192.168.1.6' - 63.87.254.249 - - - - - - inet & inet - inet - - - Computes bitwise AND. - - - inet '192.168.1.6' & inet '0.0.0.255' - 0.0.0.6 - - - - - - inet | inet - inet - - - Computes bitwise OR. - - - inet '192.168.1.6' | inet '0.0.0.255' - 192.168.1.255 - - - - - - inet + bigint - inet - - - Adds an offset to an address. - - - inet '192.168.1.6' + 25 - 192.168.1.31 - - - - - - bigint + inet - inet - - - Adds an offset to an address. - - - 200 + inet '::ffff:fff0:1' - ::ffff:255.240.0.201 - - - - - - inet - bigint - inet - - - Subtracts an offset from an address. - - - inet '192.168.1.43' - 36 - 192.168.1.7 - - - - - - inet - inet - bigint - - - Computes the difference of two addresses. - - - inet '192.168.1.43' - inet '192.168.1.19' - 24 - - - inet '::1' - inet '::ffff:1' - -4294901760 - - - - -
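Recalling the casting behavior described at the start of this section, converting an inet value to cidr silently zeroes the bits to the right of the netmask:

SELECT inet '192.168.1.5/24'::cidr;
Result: 192.168.1.0/24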
- - - IP Address Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - abbrev - - abbrev ( inet ) - text - - - Creates an abbreviated display format as text. - (The result is the same as the inet output function - produces; it is abbreviated only in comparison to the - result of an explicit cast to text, which for historical - reasons will never suppress the netmask part.) - - - abbrev(inet '10.1.0.0/32') - 10.1.0.0 - - - - - - abbrev ( cidr ) - text - - - Creates an abbreviated display format as text. - (The abbreviation consists of dropping all-zero octets to the right - of the netmask; more examples are in - .) - - - abbrev(cidr '10.1.0.0/16') - 10.1/16 - - - - - - - broadcast - - broadcast ( inet ) - inet - - - Computes the broadcast address for the address's network. - - - broadcast(inet '192.168.1.5/24') - 192.168.1.255/24 - - - - - - - family - - family ( inet ) - integer - - - Returns the address's family: 4 for IPv4, - 6 for IPv6. - - - family(inet '::1') - 6 - - - - - - - host - - host ( inet ) - text - - - Returns the IP address as text, ignoring the netmask. - - - host(inet '192.168.1.0/24') - 192.168.1.0 - - - - - - - hostmask - - hostmask ( inet ) - inet - - - Computes the host mask for the address's network. - - - hostmask(inet '192.168.23.20/30') - 0.0.0.3 - - - - - - - inet_merge - - inet_merge ( inet, inet ) - cidr - - - Computes the smallest network that includes both of the given networks. - - - inet_merge(inet '192.168.1.5/24', inet '192.168.2.5/24') - 192.168.0.0/22 - - - - - - - inet_same_family - - inet_same_family ( inet, inet ) - boolean - - - Tests whether the addresses belong to the same IP family. - - - inet_same_family(inet '192.168.1.5/24', inet '::1') - f - - - - - - - masklen - - masklen ( inet ) - integer - - - Returns the netmask length in bits. - - - masklen(inet '192.168.1.5/24') - 24 - - - - - - - netmask - - netmask ( inet ) - inet - - - Computes the network mask for the address's network. - - - netmask(inet '192.168.1.5/24') - 255.255.255.0 - - - - - - - network - - network ( inet ) - cidr - - - Returns the network part of the address, zeroing out - whatever is to the right of the netmask. - (This is equivalent to casting the value to cidr.) - - - network(inet '192.168.1.5/24') - 192.168.1.0/24 - - - - - - - set_masklen - - set_masklen ( inet, integer ) - inet - - - Sets the netmask length for an inet value. - The address part does not change. - - - set_masklen(inet '192.168.1.5/24', 16) - 192.168.1.5/16 - - - - - - set_masklen ( cidr, integer ) - cidr - - - Sets the netmask length for a cidr value. - Address bits to the right of the new netmask are set to zero. - - - set_masklen(cidr '192.168.1.0/24', 16) - 192.168.0.0/16 - - - - - - - text - - text ( inet ) - text - - - Returns the unabbreviated IP address and netmask length as text. - (This has the same result as an explicit cast to text.) - - - text(inet '192.168.1.5') - 192.168.1.5/32 - - - - -
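These functions compose in the obvious way; for example, widening an address's netmask and then extracting the resulting network:

SELECT network(set_masklen(inet '192.168.1.5/24', 16));
Result: 192.168.0.0/16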
- - - - The abbrev, host, - and text functions are primarily intended to offer - alternative display formats for IP addresses. - - - - - The MAC address types, macaddr and macaddr8, - support the usual comparison operators shown in - - as well as the specialized functions shown in - . - In addition, they support the bitwise logical operators - ~, & and | - (NOT, AND and OR), just as shown above for IP addresses. - - - - MAC Address Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - trunc - - trunc ( macaddr ) - macaddr - - - Sets the last 3 bytes of the address to zero. The remaining prefix - can be associated with a particular manufacturer (using data not - included in PostgreSQL). - - - trunc(macaddr '12:34:56:78:90:ab') - 12:34:56:00:00:00 - - - - - - trunc ( macaddr8 ) - macaddr8 - - - Sets the last 5 bytes of the address to zero. The remaining prefix - can be associated with a particular manufacturer (using data not - included in PostgreSQL). - - - trunc(macaddr8 '12:34:56:78:90:ab:cd:ef') - 12:34:56:00:00:00:00:00 - - - - - - - macaddr8_set7bit - - macaddr8_set7bit ( macaddr8 ) - macaddr8 - - - Sets the 7th bit of the address to one, creating what is known as - modified EUI-64, for inclusion in an IPv6 address. - - - macaddr8_set7bit(macaddr8 '00:34:56:ab:cd:ef') - 02:34:56:ff:fe:ab:cd:ef - - - - -
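As a short sketch (the address is an arbitrary example), trunc is equivalent to masking off the low three bytes with the bitwise AND operator mentioned above:

SELECT trunc(macaddr '08:00:2b:01:02:03') AS truncated,
       macaddr '08:00:2b:01:02:03' & macaddr 'ff:ff:ff:00:00:00' AS masked;

     truncated     |      masked
-------------------+-------------------
 08:00:2b:00:00:00 | 08:00:2b:00:00:00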
- - - - Text Search Functions and Operators - - - full text search - functions and operators - - - - text search - functions and operators - - - - , - and - - summarize the functions and operators that are provided - for full text searching. See for a detailed - explanation of PostgreSQL's text search - facility. - - - - Text Search Operators - - - - - Operator - - - Description - - - Example(s) - - - - - - - - tsvector @@ tsquery - boolean - - - tsquery @@ tsvector - boolean - - - Does tsvector match tsquery? - (The arguments can be given in either order.) - - - to_tsvector('fat cats ate rats') @@ to_tsquery('cat & rat') - t - - - - - - text @@ tsquery - boolean - - - Does text string, after implicit invocation - of to_tsvector(), match tsquery? - - - 'fat cats ate rats' @@ to_tsquery('cat & rat') - t - - - - - - tsvector || tsvector - tsvector - - - Concatenates two tsvectors. If both inputs contain - lexeme positions, the second input's positions are adjusted - accordingly. - - - 'a:1 b:2'::tsvector || 'c:1 d:2 b:3'::tsvector - 'a':1 'b':2,5 'c':3 'd':4 - - - - - - tsquery && tsquery - tsquery - - - ANDs two tsquerys together, producing a query that - matches documents that match both input queries. - - - 'fat | rat'::tsquery && 'cat'::tsquery - ( 'fat' | 'rat' ) & 'cat' - - - - - - tsquery || tsquery - tsquery - - - ORs two tsquerys together, producing a query that - matches documents that match either input query. - - - 'fat | rat'::tsquery || 'cat'::tsquery - 'fat' | 'rat' | 'cat' - - - - - - !! tsquery - tsquery - - - Negates a tsquery, producing a query that matches - documents that do not match the input query. - - - !! 'cat'::tsquery - !'cat' - - - - - - tsquery <-> tsquery - tsquery - - - Constructs a phrase query, which matches if the two input queries - match at successive lexemes. - - - to_tsquery('fat') <-> to_tsquery('rat') - 'fat' <-> 'rat' - - - - - - tsquery @> tsquery - boolean - - - Does first tsquery contain the second? (This considers - only whether all the lexemes appearing in one query appear in the - other, ignoring the combining operators.) - - - 'cat'::tsquery @> 'cat & rat'::tsquery - f - - - - - - tsquery <@ tsquery - boolean - - - Is first tsquery contained in the second? (This - considers only whether all the lexemes appearing in one query appear - in the other, ignoring the combining operators.) - - - 'cat'::tsquery <@ 'cat & rat'::tsquery - t - - - 'cat'::tsquery <@ '!cat & rat'::tsquery - t - - - - -
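A small sketch showing the match operator together with query composition (the sample text is arbitrary):

SELECT to_tsvector('english', 'The quick brown foxes jumped') @@
       (to_tsquery('english', 'fox') && to_tsquery('english', 'jump')) AS matches;

 matches
---------
 t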
- - - In addition to these specialized operators, the usual comparison - operators shown in are - available for types tsvector and tsquery. - These are not very - useful for text searching but allow, for example, unique indexes to be - built on columns of these types. - - - - Text Search Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - array_to_tsvector - - array_to_tsvector ( text[] ) - tsvector - - - Converts an array of text strings to a tsvector. - The given strings are used as lexemes as-is, without further - processing. Array elements must not be empty strings - or NULL. - - - array_to_tsvector('{fat,cat,rat}'::text[]) - 'cat' 'fat' 'rat' - - - - - - - get_current_ts_config - - get_current_ts_config ( ) - regconfig - - - Returns the OID of the current default text search configuration - (as set by ). - - - get_current_ts_config() - english - - - - - - - length - - length ( tsvector ) - integer - - - Returns the number of lexemes in the tsvector. - - - length('fat:2,4 cat:3 rat:5A'::tsvector) - 3 - - - - - - - numnode - - numnode ( tsquery ) - integer - - - Returns the number of lexemes plus operators in - the tsquery. - - - numnode('(fat & rat) | cat'::tsquery) - 5 - - - - - - - plainto_tsquery - - plainto_tsquery ( - config regconfig, - query text ) - tsquery - - - Converts text to a tsquery, normalizing words according to - the specified or default configuration. Any punctuation in the string - is ignored (it does not determine query operators). The resulting - query matches documents containing all non-stopwords in the text. - - - plainto_tsquery('english', 'The Fat Rats') - 'fat' & 'rat' - - - - - - - phraseto_tsquery - - phraseto_tsquery ( - config regconfig, - query text ) - tsquery - - - Converts text to a tsquery, normalizing words according to - the specified or default configuration. Any punctuation in the string - is ignored (it does not determine query operators). The resulting - query matches phrases containing all non-stopwords in the text. - - - phraseto_tsquery('english', 'The Fat Rats') - 'fat' <-> 'rat' - - - phraseto_tsquery('english', 'The Cat and Rats') - 'cat' <2> 'rat' - - - - - - - websearch_to_tsquery - - websearch_to_tsquery ( - config regconfig, - query text ) - tsquery - - - Converts text to a tsquery, normalizing words according - to the specified or default configuration. Quoted word sequences are - converted to phrase tests. The word or is understood - as producing an OR operator, and a dash produces a NOT operator; - other punctuation is ignored. - This approximates the behavior of some common web search tools. - - - websearch_to_tsquery('english', '"fat rat" or cat dog') - 'fat' <-> 'rat' | 'cat' & 'dog' - - - - - - - querytree - - querytree ( tsquery ) - text - - - Produces a representation of the indexable portion of - a tsquery. A result that is empty or - just T indicates a non-indexable query. - - - querytree('foo & ! bar'::tsquery) - 'foo' - - - - - - - setweight - - setweight ( vector tsvector, weight "char" ) - tsvector - - - Assigns the specified weight to each element - of the vector. - - - setweight('fat:2,4 cat:3 rat:5B'::tsvector, 'A') - 'cat':3A 'fat':2A,4A 'rat':5A - - - - - - - setweight - setweight for specific lexeme(s) - - setweight ( vector tsvector, weight "char", lexemes text[] ) - tsvector - - - Assigns the specified weight to elements - of the vector that are listed - in lexemes. - The strings in lexemes are taken as lexemes - as-is, without further processing. 
Strings that do not match any - lexeme in vector are ignored. - - - setweight('fat:2,4 cat:3 rat:5,6B'::tsvector, 'A', '{cat,rat}') - 'cat':3A 'fat':2,4 'rat':5A,6A - - - - - - - strip - - strip ( tsvector ) - tsvector - - - Removes positions and weights from the tsvector. - - - strip('fat:2,4 cat:3 rat:5A'::tsvector) - 'cat' 'fat' 'rat' - - - - - - - to_tsquery - - to_tsquery ( - config regconfig, - query text ) - tsquery - - - Converts text to a tsquery, normalizing words according to - the specified or default configuration. The words must be combined - by valid tsquery operators. - - - to_tsquery('english', 'The & Fat & Rats') - 'fat' & 'rat' - - - - - - - to_tsvector - - to_tsvector ( - config regconfig, - document text ) - tsvector - - - Converts text to a tsvector, normalizing words according - to the specified or default configuration. Position information is - included in the result. - - - to_tsvector('english', 'The Fat Rats') - 'fat':2 'rat':3 - - - - - - to_tsvector ( - config regconfig, - document json ) - tsvector - - - to_tsvector ( - config regconfig, - document jsonb ) - tsvector - - - Converts each string value in the JSON document to - a tsvector, normalizing words according to the specified - or default configuration. The results are then concatenated in - document order to produce the output. Position information is - generated as though one stopword exists between each pair of string - values. (Beware that document order of the fields of a - JSON object is implementation-dependent when the input - is jsonb; observe the difference in the examples.) - - - to_tsvector('english', '{"aa": "The Fat Rats", "b": "dog"}'::json) - 'dog':5 'fat':2 'rat':3 - - - to_tsvector('english', '{"aa": "The Fat Rats", "b": "dog"}'::jsonb) - 'dog':1 'fat':4 'rat':5 - - - - - - - json_to_tsvector - - json_to_tsvector ( - config regconfig, - document json, - filter jsonb ) - tsvector - - - - jsonb_to_tsvector - - jsonb_to_tsvector ( - config regconfig, - document jsonb, - filter jsonb ) - tsvector - - - Selects each item in the JSON document that is requested by - the filter and converts each one to - a tsvector, normalizing words according to the specified - or default configuration. The results are then concatenated in - document order to produce the output. Position information is - generated as though one stopword exists between each pair of selected - items. (Beware that document order of the fields of a - JSON object is implementation-dependent when the input - is jsonb.) - The filter must be a jsonb - array containing zero or more of these keywords: - "string" (to include all string values), - "numeric" (to include all numeric values), - "boolean" (to include all boolean values), - "key" (to include all keys), or - "all" (to include all the above). - As a special case, the filter can also be a - simple JSON value that is one of these keywords. - - - json_to_tsvector('english', '{"a": "The Fat Rats", "b": 123}'::json, '["string", "numeric"]') - '123':5 'fat':2 'rat':3 - - - json_to_tsvector('english', '{"cat": "The Fat Rats", "dog": 123}'::json, '"all"') - '123':9 'cat':1 'dog':7 'fat':4 'rat':5 - - - - - - - ts_delete - - ts_delete ( vector tsvector, lexeme text ) - tsvector - - - Removes any occurrence of the given lexeme - from the vector. - The lexeme string is treated as a lexeme as-is, - without further processing. 
- - - ts_delete('fat:2,4 cat:3 rat:5A'::tsvector, 'fat') - 'cat':3 'rat':5A - - - - - - ts_delete ( vector tsvector, lexemes text[] ) - tsvector - - - Removes any occurrences of the lexemes - in lexemes - from the vector. - The strings in lexemes are taken as lexemes - as-is, without further processing. Strings that do not match any - lexeme in vector are ignored. - - - ts_delete('fat:2,4 cat:3 rat:5A'::tsvector, ARRAY['fat','rat']) - 'cat':3 - - - - - - - ts_filter - - ts_filter ( vector tsvector, weights "char"[] ) - tsvector - - - Selects only elements with the given weights - from the vector. - - - ts_filter('fat:2,4 cat:3b,7c rat:5A'::tsvector, '{a,b}') - 'cat':3B 'rat':5A - - - - - - - ts_headline - - ts_headline ( - config regconfig, - document text, - query tsquery - , options text ) - text - - - Displays, in an abbreviated form, the match(es) for - the query in - the document, which must be raw text not - a tsvector. Words in the document are normalized - according to the specified or default configuration before matching to - the query. Use of this function is discussed in - , which also describes the - available options. - - - ts_headline('The fat cat ate the rat.', 'cat') - The fat <b>cat</b> ate the rat. - - - - - - ts_headline ( - config regconfig, - document json, - query tsquery - , options text ) - text - - - ts_headline ( - config regconfig, - document jsonb, - query tsquery - , options text ) - text - - - Displays, in an abbreviated form, match(es) for - the query that occur in string values - within the JSON document. - See for more details. - - - ts_headline('{"cat":"raining cats and dogs"}'::jsonb, 'cat') - {"cat": "raining <b>cats</b> and dogs"} - - - - - - - ts_rank - - ts_rank ( - weights real[], - vector tsvector, - query tsquery - , normalization integer ) - real - - - Computes a score showing how well - the vector matches - the query. See - for details. - - - ts_rank(to_tsvector('raining cats and dogs'), 'cat') - 0.06079271 - - - - - - - ts_rank_cd - - ts_rank_cd ( - weights real[], - vector tsvector, - query tsquery - , normalization integer ) - real - - - Computes a score showing how well - the vector matches - the query, using a cover density - algorithm. See for details. - - - ts_rank_cd(to_tsvector('raining cats and dogs'), 'cat') - 0.1 - - - - - - - ts_rewrite - - ts_rewrite ( query tsquery, - target tsquery, - substitute tsquery ) - tsquery - - - Replaces occurrences of target - with substitute - within the query. - See for details. - - - ts_rewrite('a & b'::tsquery, 'a'::tsquery, 'foo|bar'::tsquery) - 'b' & ( 'foo' | 'bar' ) - - - - - - ts_rewrite ( query tsquery, - select text ) - tsquery - - - Replaces portions of the query according to - target(s) and substitute(s) obtained by executing - a SELECT command. - See for details. - - - SELECT ts_rewrite('a & b'::tsquery, 'SELECT t,s FROM aliases') - 'b' & ( 'foo' | 'bar' ) - - - - - - - tsquery_phrase - - tsquery_phrase ( query1 tsquery, query2 tsquery ) - tsquery - - - Constructs a phrase query that searches - for matches of query1 - and query2 at successive lexemes (same - as <-> operator). - - - tsquery_phrase(to_tsquery('fat'), to_tsquery('cat')) - 'fat' <-> 'cat' - - - - - - tsquery_phrase ( query1 tsquery, query2 tsquery, distance integer ) - tsquery - - - Constructs a phrase query that searches - for matches of query1 and - query2 that occur exactly - distance lexemes apart. 
- - - tsquery_phrase(to_tsquery('fat'), to_tsquery('cat'), 10) - 'fat' <10> 'cat' - - - - - - - tsvector_to_array - - tsvector_to_array ( tsvector ) - text[] - - - Converts a tsvector to an array of lexemes. - - - tsvector_to_array('fat:2,4 cat:3 rat:5A'::tsvector) - {cat,fat,rat} - - - - - - - unnest - for tsvector - - unnest ( tsvector ) - setof record - ( lexeme text, - positions smallint[], - weights text ) - - - Expands a tsvector into a set of rows, one per lexeme. - - - select * from unnest('cat:3 fat:2,4 rat:5A'::tsvector) - - - lexeme | positions | weights ---------+-----------+--------- - cat | {3} | {D} - fat | {2,4} | {D,D} - rat | {5} | {A} - - - - - -
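As a worked sketch combining functions from this table (the input text reuses the examples above), weights can be attached with setweight and removed again with strip:

SELECT setweight(to_tsvector('english', 'The Fat Rats'), 'A') AS weighted,
       strip(to_tsvector('english', 'The Fat Rats')) AS stripped;

     weighted      |  stripped
-------------------+-------------
 'fat':2A 'rat':3A | 'fat' 'rat'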
- - - - All the text search functions that accept an optional regconfig - argument will use the configuration specified by - - when that argument is omitted. - - - - - The functions in - - are listed separately because they are not usually used in everyday text - searching operations. They are primarily helpful for development and - debugging of new text search configurations. - - - - Text Search Debugging Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - ts_debug - - ts_debug ( - config regconfig, - document text ) - setof record - ( alias text, - description text, - token text, - dictionaries regdictionary[], - dictionary regdictionary, - lexemes text[] ) - - - Extracts and normalizes tokens from - the document according to the specified or - default text search configuration, and returns information about how - each token was processed. - See for details. - - - ts_debug('english', 'The Brightest supernovaes') - (asciiword,"Word, all ASCII",The,{english_stem},english_stem,{}) ... - - - - - - - ts_lexize - - ts_lexize ( dict regdictionary, token text ) - text[] - - - Returns an array of replacement lexemes if the input token is known to - the dictionary, or an empty array if the token is known to the - dictionary but it is a stop word, or NULL if it is not a known word. - See for details. - - - ts_lexize('english_stem', 'stars') - {star} - - - - - - - ts_parse - - ts_parse ( parser_name text, - document text ) - setof record - ( tokid integer, - token text ) - - - Extracts tokens from the document using the - named parser. - See for details. - - - ts_parse('default', 'foo - bar') - (1,foo) ... - - - - - - ts_parse ( parser_oid oid, - document text ) - setof record - ( tokid integer, - token text ) - - - Extracts tokens from the document using a - parser specified by OID. - See for details. - - - ts_parse(3722, 'foo - bar') - (1,foo) ... - - - - - - - ts_token_type - - ts_token_type ( parser_name text ) - setof record - ( tokid integer, - alias text, - description text ) - - - Returns a table that describes each type of token the named parser can - recognize. - See for details. - - - ts_token_type('default') - (1,asciiword,"Word, all ASCII") ... - - - - - - ts_token_type ( parser_oid oid ) - setof record - ( tokid integer, - alias text, - description text ) - - - Returns a table that describes each type of token a parser specified - by OID can recognize. - See for details. - - - ts_token_type(3722) - (1,asciiword,"Word, all ASCII") ... - - - - - - - ts_stat - - ts_stat ( sqlquery text - , weights text ) - setof record - ( word text, - ndoc integer, - nentry integer ) - - - Executes the sqlquery, which must return a - single tsvector column, and returns statistics about each - distinct lexeme contained in the data. - See for details. - - - ts_stat('SELECT vector FROM apod') - (foo,10,15) ... - - - - -
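A brief sketch of using ts_lexize to distinguish a stemmed word from a stop word (results shown for the built-in english_stem dictionary):

SELECT ts_lexize('english_stem', 'stars') AS stemmed,
       ts_lexize('english_stem', 'the') AS stop_word;

 stemmed | stop_word
---------+-----------
 {star}  | {}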
- - - UUID Functions - - - UUID - generating - - - - gen_random_uuid - - - - uuidv4 - - - - uuidv7 - - - - uuid_extract_timestamp - - - - uuid_extract_version - - - - shows the PostgreSQL - functions that can be used to generate UUIDs. - - - - <acronym>UUID</acronym> Generation Functions - - - - - - Function - - - Description - - - Example(s) - - - - - - - - - - gen_random_uuid - uuid - - - uuidv4 - uuid - - - Generate a version 4 (random) UUID. - - - gen_random_uuid() - 5b30857f-0bfa-48b5-ac0b-5c64e28078d1 - - - uuidv4() - b42410ee-132f-42ee-9e4f-09a6485c95b8 - - - - - - - uuidv7 - ( shift interval ) - uuid - - - Generate a version 7 (time-ordered) UUID. The timestamp is computed using UNIX timestamp - with millisecond precision + sub-millisecond timestamp + random. The optional parameter - shift will shift the computed timestamp by the given interval. - - - uuidv7() - 019535d9-3df7-79fb-b466-fa907fa17f9e - - - - - -
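As a sketch tying the two generators together (uuid_extract_version is described below), the produced versions are distinguishable at run time:

SELECT uuid_extract_version(gen_random_uuid()) AS random_version,
       uuid_extract_version(uuidv7()) AS time_ordered_version;

 random_version | time_ordered_version
----------------+----------------------
              4 |                    7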
- - - - The module provides additional functions that - implement other standard algorithms for generating UUIDs. - - - - - shows the PostgreSQL - functions that can be used to extract information from UUIDs. - - - - <acronym>UUID</acronym> Extraction Functions - - - - - - Function - - - Description - - - Example(s) - - - - - - - - - - uuid_extract_timestamp - ( uuid ) - timestamp with time zone - - - Extracts a timestamp with time zone from UUID - version 1 and 7. For other versions, this function returns null. Note that - the extracted timestamp is not necessarily exactly equal to the time the - UUID was generated; this depends on the implementation that generated the - UUID. - - - uuid_extract_timestamp('019535d9-3df7-79fb-b466-&zwsp;fa907fa17f9e'::uuid) - 2025-02-23 21:46:24.503-05 - - - - - - - uuid_extract_version - ( uuid ) - smallint - - - Extracts the version from a UUID of the variant described by - RFC 9562. For - other variants, this function returns null. For example, for a UUID - generated by gen_random_uuid, this function will - return 4. - - - uuid_extract_version('41db1265-8bc1-4ab3-992f-&zwsp;885799a4af1d'::uuid) - 4 - - - uuid_extract_version('019535d9-3df7-79fb-b466-&zwsp;fa907fa17f9e'::uuid) - 7 - - - - - -
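A sketch of the round trip between generation and extraction; shifting the timestamp of uuidv7 backwards by a day is clearly visible to uuid_extract_timestamp:

SELECT uuid_extract_timestamp(uuidv7(INTERVAL '-1 day'))
         < uuid_extract_timestamp(uuidv7()) AS shifted_back;

 shifted_back
--------------
 t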
PostgreSQL also provides the usual comparison operators shown in for UUIDs.

See for details on the data type uuid in PostgreSQL.
- - - - XML Functions - - - XML Functions - - - - The functions and function-like expressions described in this - section operate on values of type xml. See for information about the xml - type. The function-like expressions xmlparse - and xmlserialize for converting to and from - type xml are documented there, not in this section. - - - - Use of most of these functions - requires PostgreSQL to have been built - with configure --with-libxml. - - - - Producing XML Content - - - A set of functions and function-like expressions is available for - producing XML content from SQL data. As such, they are - particularly suitable for formatting query results into XML - documents for processing in client applications. - - - - <literal>xmltext</literal> - - - xmltext - - - -xmltext ( text ) xml - - - - The function xmltext returns an XML value with a single - text node containing the input argument as its content. Predefined entities - like ampersand (), left and right angle brackets - (]]>), and quotation marks () - are escaped. - - - - Example: -'); - xmltext -------------------------- - < foo & bar > -]]> - - - - - <literal>xmlcomment</literal> - - - xmlcomment - - - -xmlcomment ( text ) xml - - - - The function xmlcomment creates an XML value - containing an XML comment with the specified text as content. - The text cannot contain -- or end with a - -, otherwise the resulting construct - would not be a valid XML comment. - If the argument is null, the result is null. - - - - Example: - -]]> - - - - - <literal>xmlconcat</literal> - - - xmlconcat - - - -xmlconcat ( xml , ... ) xml - - - - The function xmlconcat concatenates a list - of individual XML values to create a single value containing an - XML content fragment. Null values are omitted; the result is - only null if there are no nonnull arguments. - - - - Example: -', 'foo'); - - xmlconcat ----------------------- - foo -]]> - - - - XML declarations, if present, are combined as follows. If all - argument values have the same XML version declaration, that - version is used in the result, else no version is used. If all - argument values have the standalone declaration value - yes, then that value is used in the result. If - all argument values have a standalone declaration value and at - least one is no, then that is used in the result. - Else the result will have no standalone declaration. If the - result is determined to require a standalone declaration but no - version declaration, a version declaration with version 1.0 will - be used because XML requires an XML declaration to contain a - version declaration. Encoding declarations are ignored and - removed in all cases. - - - - Example: -', ''); - - xmlconcat ------------------------------------ - -]]> - - - - - <literal>xmlelement</literal> - - - xmlelement - - - -xmlelement ( NAME name , XMLATTRIBUTES ( attvalue AS attname , ... ) , content , ... ) xml - - - - The xmlelement expression produces an XML - element with the given name, attributes, and content. - The name - and attname items shown in the syntax are - simple identifiers, not values. The attvalue - and content items are expressions, which can - yield any PostgreSQL data type. The - argument(s) within XMLATTRIBUTES generate attributes - of the XML element; the content value(s) are - concatenated to form its content. 
- - - - Examples: - - -SELECT xmlelement(name foo, xmlattributes('xyz' as bar)); - - xmlelement ------------------- - - -SELECT xmlelement(name foo, xmlattributes(current_date as bar), 'cont', 'ent'); - - xmlelement -------------------------------------- - content -]]> - - - - Element and attribute names that are not valid XML names are - escaped by replacing the offending characters by the sequence - _xHHHH_, where - HHHH is the character's Unicode - codepoint in hexadecimal notation. For example: - -]]> - - - - An explicit attribute name need not be specified if the attribute - value is a column reference, in which case the column's name will - be used as the attribute name by default. In other cases, the - attribute must be given an explicit name. So this example is - valid: - -CREATE TABLE test (a xml, b xml); -SELECT xmlelement(name test, xmlattributes(a, b)) FROM test; - - But these are not: - -SELECT xmlelement(name test, xmlattributes('constant'), a, b) FROM test; -SELECT xmlelement(name test, xmlattributes(func(a, b))) FROM test; - - - - - Element content, if specified, will be formatted according to - its data type. If the content is itself of type xml, - complex XML documents can be constructed. For example: - -]]> - - Content of other types will be formatted into valid XML character - data. This means in particular that the characters <, >, - and & will be converted to entities. Binary data (data type - bytea) will be represented in base64 or hex - encoding, depending on the setting of the configuration parameter - . The particular behavior for - individual data types is expected to evolve in order to align the - PostgreSQL mappings with those specified in SQL:2006 and later, - as discussed in . - - - - - <literal>xmlforest</literal> - - - xmlforest - - - -xmlforest ( content AS name , ... ) xml - - - - The xmlforest expression produces an XML - forest (sequence) of elements using the given names and content. - As for xmlelement, - each name must be a simple identifier, while - the content expressions can have any data - type. - - - - Examples: - -SELECT xmlforest('abc' AS foo, 123 AS bar); - - xmlforest ------------------------------- - <foo>abc</foo><bar>123</bar> - - -SELECT xmlforest(table_name, column_name) -FROM information_schema.columns -WHERE table_schema = 'pg_catalog'; - - xmlforest -------------------------------------&zwsp;----------------------------------- - <table_name>pg_authid</table_name>&zwsp;<column_name>rolname</column_name> - <table_name>pg_authid</table_name>&zwsp;<column_name>rolsuper</column_name> - ... - - - As seen in the second example, the element name can be omitted if - the content value is a column reference, in which case the column - name is used by default. Otherwise, a name must be specified. - - - - Element names that are not valid XML names are escaped as shown - for xmlelement above. Similarly, content - data is escaped to make valid XML content, unless it is already - of type xml. - - - - Note that XML forests are not valid XML documents if they consist - of more than one element, so it might be useful to wrap - xmlforest expressions in - xmlelement. - - - - - <literal>xmlpi</literal> - - - xmlpi - - - -xmlpi ( NAME name , content ) xml - - - - The xmlpi expression creates an XML - processing instruction. - As for xmlelement, - the name must be a simple identifier, while - the content expression can have any data type. - The content, if present, must not contain the - character sequence ?>. 
- - - - Example: - -]]> - - - - - <literal>xmlroot</literal> - - - xmlroot - - - -xmlroot ( xml, VERSION {text|NO VALUE} , STANDALONE {YES|NO|NO VALUE} ) xml - - - - The xmlroot expression alters the properties - of the root node of an XML value. If a version is specified, - it replaces the value in the root node's version declaration; if a - standalone setting is specified, it replaces the value in the - root node's standalone declaration. - - - -abc'), - version '1.0', standalone yes); - - xmlroot ----------------------------------------- - - abc -]]> - - - - - <literal>xmlagg</literal> - - - xmlagg - - - -xmlagg ( xml ) xml - - - - The function xmlagg is, unlike the other - functions described here, an aggregate function. It concatenates the - input values to the aggregate function call, - much like xmlconcat does, except that concatenation - occurs across rows rather than across expressions in a single row. - See for additional information - about aggregate functions. - - - - Example: -abc'); -INSERT INTO test VALUES (2, ''); -SELECT xmlagg(x) FROM test; - xmlagg ----------------------- - abc -]]> - - - - To determine the order of the concatenation, an ORDER BY - clause may be added to the aggregate call as described in - . For example: - -abc -]]> - - - - The following non-standard approach used to be recommended - in previous versions, and may still be useful in specific - cases: - -abc -]]> - - - - - - XML Predicates - - - The expressions described in this section check properties - of xml values. - - - - <literal>IS DOCUMENT</literal> - - - IS DOCUMENT - - - -xml IS DOCUMENT boolean - - - - The expression IS DOCUMENT returns true if the - argument XML value is a proper XML document, false if it is not - (that is, it is a content fragment), or null if the argument is - null. See about the difference - between documents and content fragments. - - - - - <literal>IS NOT DOCUMENT</literal> - - - IS NOT DOCUMENT - - - -xml IS NOT DOCUMENT boolean - - - - The expression IS NOT DOCUMENT returns false if the - argument XML value is a proper XML document, true if it is not (that is, - it is a content fragment), or null if the argument is null. - - - - - <literal>XMLEXISTS</literal> - - - XMLEXISTS - - - -XMLEXISTS ( text PASSING BY {REF|VALUE} xml BY {REF|VALUE} ) boolean - - - - The function xmlexists evaluates an XPath 1.0 - expression (the first argument), with the passed XML value as its context - item. The function returns false if the result of that evaluation - yields an empty node-set, true if it yields any other value. The - function returns null if any argument is null. A nonnull value - passed as the context item must be an XML document, not a content - fragment or any non-XML value. - - - - Example: - TorontoOttawa'); - - xmlexists ------------- - t -(1 row) -]]> - - - - The BY REF and BY VALUE clauses - are accepted in PostgreSQL, but are ignored, - as discussed in . - - - - In the SQL standard, the xmlexists function - evaluates an expression in the XML Query language, - but PostgreSQL allows only an XPath 1.0 - expression, as discussed in - . - - - - - <literal>xml_is_well_formed</literal> - - - xml_is_well_formed - - - - xml_is_well_formed_document - - - - xml_is_well_formed_content - - - -xml_is_well_formed ( text ) boolean -xml_is_well_formed_document ( text ) boolean -xml_is_well_formed_content ( text ) boolean - - - - These functions check whether a text string represents - well-formed XML, returning a Boolean result. 
- xml_is_well_formed_document checks for a well-formed - document, while xml_is_well_formed_content checks - for well-formed content. xml_is_well_formed does - the former if the configuration - parameter is set to DOCUMENT, or the latter if it is set to - CONTENT. This means that - xml_is_well_formed is useful for seeing whether - a simple cast to type xml will succeed, whereas the other two - functions are useful for seeing whether the corresponding variants of - XMLPARSE will succeed. - - - - Examples: - -'); - xml_is_well_formed --------------------- - f -(1 row) - -SELECT xml_is_well_formed(''); - xml_is_well_formed --------------------- - t -(1 row) - -SET xmloption TO CONTENT; -SELECT xml_is_well_formed('abc'); - xml_is_well_formed --------------------- - t -(1 row) - -SELECT xml_is_well_formed_document('bar'); - xml_is_well_formed_document ------------------------------ - t -(1 row) - -SELECT xml_is_well_formed_document('bar'); - xml_is_well_formed_document ------------------------------ - f -(1 row) -]]> - - The last example shows that the checks include whether - namespaces are correctly matched. - - - - - - Processing XML - - - To process values of data type xml, PostgreSQL offers - the functions xpath and - xpath_exists, which evaluate XPath 1.0 - expressions, and the XMLTABLE - table function. - - - - <literal>xpath</literal> - - - XPath - - - -xpath ( xpath text, xml xml , nsarray text[] ) xml[] - - - - The function xpath evaluates the XPath 1.0 - expression xpath (given as text) - against the XML value - xml. It returns an array of XML values - corresponding to the node-set produced by the XPath expression. - If the XPath expression returns a scalar value rather than a node-set, - a single-element array is returned. - - - - The second argument must be a well formed XML document. In particular, - it must have a single root node element. - - - - The optional third argument of the function is an array of namespace - mappings. This array should be a two-dimensional text array with - the length of the second axis being equal to 2 (i.e., it should be an - array of arrays, each of which consists of exactly 2 elements). - The first element of each array entry is the namespace name (alias), the - second the namespace URI. It is not required that aliases provided in - this array be the same as those being used in the XML document itself (in - other words, both in the XML document and in the xpath - function context, aliases are local). - - - - Example: -test', - ARRAY[ARRAY['my', 'http://example.com']]); - - xpath --------- - {test} -(1 row) -]]> - - - - To deal with default (anonymous) namespaces, do something like this: -test', - ARRAY[ARRAY['mydefns', 'http://example.com']]); - - xpath --------- - {test} -(1 row) -]]> - - - - - <literal>xpath_exists</literal> - - - xpath_exists - - - -xpath_exists ( xpath text, xml xml , nsarray text[] ) boolean - - - - The function xpath_exists is a specialized form - of the xpath function. Instead of returning the - individual XML values that satisfy the XPath 1.0 expression, this function - returns a Boolean indicating whether the query was satisfied or not - (specifically, whether it produced any value other than an empty node-set). - This function is equivalent to the XMLEXISTS predicate, - except that it also offers support for a namespace mapping argument. 
- - - - Example: -test', - ARRAY[ARRAY['my', 'http://example.com']]); - - xpath_exists --------------- - t -(1 row) -]]> - - - - - <literal>xmltable</literal> - - - xmltable - - - - table function - XMLTABLE - - - -XMLTABLE ( - XMLNAMESPACES ( namespace_uri AS namespace_name , ... ), - row_expression PASSING BY {REF|VALUE} document_expression BY {REF|VALUE} - COLUMNS name { type PATH column_expression DEFAULT default_expression NOT NULL | NULL - | FOR ORDINALITY } - , ... -) setof record - - - - The xmltable expression produces a table based - on an XML value, an XPath filter to extract rows, and a - set of column definitions. - Although it syntactically resembles a function, it can only appear - as a table in a query's FROM clause. - - - - The optional XMLNAMESPACES clause gives a - comma-separated list of namespace definitions, where - each namespace_uri is a text - expression and each namespace_name is a simple - identifier. It specifies the XML namespaces used in the document and - their aliases. A default namespace specification is not currently - supported. - - - - The required row_expression argument is an - XPath 1.0 expression (given as text) that is evaluated, - passing the XML value document_expression as - its context item, to obtain a set of XML nodes. These nodes are what - xmltable transforms into output rows. No rows - will be produced if the document_expression - is null, nor if the row_expression produces - an empty node-set or any value other than a node-set. - - - - document_expression provides the context - item for the row_expression. It must be a - well-formed XML document; fragments/forests are not accepted. - The BY REF and BY VALUE clauses - are accepted but ignored, as discussed in - . - - - - In the SQL standard, the xmltable function - evaluates expressions in the XML Query language, - but PostgreSQL allows only XPath 1.0 - expressions, as discussed in - . - - - - The required COLUMNS clause specifies the - column(s) that will be produced in the output table. - See the syntax summary above for the format. - A name is required for each column, as is a data type - (unless FOR ORDINALITY is specified, in which case - type integer is implicit). The path, default and - nullability clauses are optional. - - - - A column marked FOR ORDINALITY will be populated - with row numbers, starting with 1, in the order of nodes retrieved from - the row_expression's result node-set. - At most one column may be marked FOR ORDINALITY. - - - - - XPath 1.0 does not specify an order for nodes in a node-set, so code - that relies on a particular order of the results will be - implementation-dependent. Details can be found in - . - - - - - The column_expression for a column is an - XPath 1.0 expression that is evaluated for each row, with the current - node from the row_expression result as its - context item, to find the value of the column. If - no column_expression is given, then the - column name is used as an implicit path. - - - - If a column's XPath expression returns a non-XML value (which is limited - to string, boolean, or double in XPath 1.0) and the column has a - PostgreSQL type other than xml, the column will be set - as if by assigning the value's string representation to the PostgreSQL - type. (If the value is a boolean, its string representation is taken - to be 1 or 0 if the output - column's type category is numeric, otherwise true or - false.) 
- - - - If a column's XPath expression returns a non-empty set of XML nodes - and the column's PostgreSQL type is xml, the column will - be assigned the expression result exactly, if it is of document or - content form. - - - A result containing more than one element node at the top level, or - non-whitespace text outside of an element, is an example of content form. - An XPath result can be of neither form, for example if it returns an - attribute node selected from the element that contains it. Such a result - will be put into content form with each such disallowed node replaced by - its string value, as defined for the XPath 1.0 - string function. - - - - - - A non-XML result assigned to an xml output column produces - content, a single text node with the string value of the result. - An XML result assigned to a column of any other type may not have more than - one node, or an error is raised. If there is exactly one node, the column - will be set as if by assigning the node's string - value (as defined for the XPath 1.0 string function) - to the PostgreSQL type. - - - - The string value of an XML element is the concatenation, in document order, - of all text nodes contained in that element and its descendants. The string - value of an element with no descendant text nodes is an - empty string (not NULL). - Any xsi:nil attributes are ignored. - Note that the whitespace-only text() node between two non-text - elements is preserved, and that leading whitespace on a text() - node is not flattened. - The XPath 1.0 string function may be consulted for the - rules defining the string value of other XML node types and non-XML values. - - - - The conversion rules presented here are not exactly those of the SQL - standard, as discussed in . - - - - If the path expression returns an empty node-set - (typically, when it does not match) - for a given row, the column will be set to NULL, unless - a default_expression is specified; then the - value resulting from evaluating that expression is used. - - - - A default_expression, rather than being - evaluated immediately when xmltable is called, - is evaluated each time a default is needed for the column. - If the expression qualifies as stable or immutable, the repeat - evaluation may be skipped. - This means that you can usefully use volatile functions like - nextval in - default_expression. - - - - Columns may be marked NOT NULL. If the - column_expression for a NOT - NULL column does not match anything and there is - no DEFAULT or - the default_expression also evaluates to null, - an error is reported. 
- - - - Examples: - - - AU - Australia - - - JP - Japan - Shinzo Abe - 145935 - - - SG - Singapore - 697 - - -$$ AS data; - -SELECT xmltable.* - FROM xmldata, - XMLTABLE('//ROWS/ROW' - PASSING data - COLUMNS id int PATH '@id', - ordinality FOR ORDINALITY, - "COUNTRY_NAME" text, - country_id text PATH 'COUNTRY_ID', - size_sq_km float PATH 'SIZE[@unit = "sq_km"]', - size_other text PATH - 'concat(SIZE[@unit!="sq_km"], " ", SIZE[@unit!="sq_km"]/@unit)', - premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); - - id | ordinality | COUNTRY_NAME | country_id | size_sq_km | size_other | premier_name -----+------------+--------------+------------+------------+--------------+--------------- - 1 | 1 | Australia | AU | | | not specified - 5 | 2 | Japan | JP | | 145935 sq_mi | Shinzo Abe - 6 | 3 | Singapore | SG | 697 | | not specified -]]> - - The following example shows concatenation of multiple text() nodes, - usage of the column name as XPath filter, and the treatment of whitespace, - XML comments and processing instructions: - - - Hello2a2 bbbxxxCC - -$$ AS data; - -SELECT xmltable.* - FROM xmlelements, XMLTABLE('/root' PASSING data COLUMNS element text); - element -------------------------- - Hello2a2 bbbxxxCC -]]> - - - - The following example illustrates how - the XMLNAMESPACES clause can be used to specify - a list of namespaces - used in the XML document as well as in the XPath expressions: - - - - - -'::xml) -) -SELECT xmltable.* - FROM XMLTABLE(XMLNAMESPACES('http://example.com/myns' AS x, - 'http://example.com/b' AS "B"), - '/x:example/x:item' - PASSING (SELECT data FROM xmldata) - COLUMNS foo int PATH '@foo', - bar int PATH '@B:bar'); - foo | bar ------+----- - 1 | 2 - 3 | 4 - 4 | 5 -(3 rows) -]]> - - - - - - Mapping Tables to XML - - - XML export - - - - The following functions map the contents of relational tables to - XML values. They can be thought of as XML export functionality: - -table_to_xml ( table regclass, nulls boolean, - tableforest boolean, targetns text ) xml -query_to_xml ( query text, nulls boolean, - tableforest boolean, targetns text ) xml -cursor_to_xml ( cursor refcursor, count integer, nulls boolean, - tableforest boolean, targetns text ) xml - - - - - table_to_xml maps the content of the named - table, passed as parameter table. The - regclass type accepts strings identifying tables using the - usual notation, including optional schema qualification and - double quotes (see for details). - query_to_xml executes the - query whose text is passed as parameter - query and maps the result set. - cursor_to_xml fetches the indicated number of - rows from the cursor specified by the parameter - cursor. This variant is recommended if - large tables have to be mapped, because the result value is built - up in memory by each function. - - - - If tableforest is false, then the resulting - XML document looks like this: - - - data - data - - - - ... - - - ... - -]]> - - If tableforest is true, the result is an - XML content fragment that looks like this: - - data - data - - - - ... - - -... -]]> - - If no table name is available, that is, when mapping a query or a - cursor, the string table is used in the first - format, row in the second format. - - - - The choice between these formats is up to the user. The first - format is a proper XML document, which will be important in many - applications. The second format tends to be more useful in the - cursor_to_xml function if the result values are to be - reassembled into one document later on. 
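As a brief sketch (the table emp and its single row are hypothetical), calling table_to_xml with tableforest set to false wraps the rows in a single document element, as described next:

CREATE TABLE emp (name text, salary integer);
INSERT INTO emp VALUES ('Alice', 1000);
SELECT table_to_xml('emp', false, false, '');

which produces, with whitespace slightly abbreviated:

<emp>
  <row>
    <name>Alice</name>
    <salary>1000</salary>
  </row>
</emp>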
The functions for - producing XML content discussed above, in particular - xmlelement, can be used to alter the results - to taste. - - - - The data values are mapped in the same way as described for the - function xmlelement above. - - - - The parameter nulls determines whether null - values should be included in the output. If true, null values in - columns are represented as: - -]]> - where xsi is the XML namespace prefix for XML - Schema Instance. An appropriate namespace declaration will be - added to the result value. If false, columns containing null - values are simply omitted from the output. - - - - The parameter targetns specifies the - desired XML namespace of the result. If no particular namespace - is wanted, an empty string should be passed. - - - - The following functions return XML Schema documents describing the - mappings performed by the corresponding functions above: - -table_to_xmlschema ( table regclass, nulls boolean, - tableforest boolean, targetns text ) xml -query_to_xmlschema ( query text, nulls boolean, - tableforest boolean, targetns text ) xml -cursor_to_xmlschema ( cursor refcursor, nulls boolean, - tableforest boolean, targetns text ) xml - - It is essential that the same parameters are passed in order to - obtain matching XML data mappings and XML Schema documents. - - - - The following functions produce XML data mappings and the - corresponding XML Schema in one document (or forest), linked - together. They can be useful where self-contained and - self-describing results are wanted: - -table_to_xml_and_xmlschema ( table regclass, nulls boolean, - tableforest boolean, targetns text ) xml -query_to_xml_and_xmlschema ( query text, nulls boolean, - tableforest boolean, targetns text ) xml - - - - - In addition, the following functions are available to produce - analogous mappings of entire schemas or the entire current - database: - -schema_to_xml ( schema name, nulls boolean, - tableforest boolean, targetns text ) xml -schema_to_xmlschema ( schema name, nulls boolean, - tableforest boolean, targetns text ) xml -schema_to_xml_and_xmlschema ( schema name, nulls boolean, - tableforest boolean, targetns text ) xml - -database_to_xml ( nulls boolean, - tableforest boolean, targetns text ) xml -database_to_xmlschema ( nulls boolean, - tableforest boolean, targetns text ) xml -database_to_xml_and_xmlschema ( nulls boolean, - tableforest boolean, targetns text ) xml - - - These functions ignore tables that are not readable by the current user. - The database-wide functions additionally ignore schemas that the current - user does not have USAGE (lookup) privilege for. - - - - Note that these potentially produce a lot of data, which needs to - be built up in memory. When requesting content mappings of large - schemas or databases, it might be worthwhile to consider mapping the - tables separately instead, possibly even through a cursor. - - - - The result of a schema content mapping looks like this: - - - -table1-mapping - -table2-mapping - -... - -]]> - - where the format of a table mapping depends on the - tableforest parameter as explained above. - - - - The result of a database content mapping looks like this: - - - - - ... - - - - ... - - -... - -]]> - - where the schema mapping is as above. - - - - As an example of using the output produced by these functions, - shows an XSLT stylesheet that - converts the output of - table_to_xml_and_xmlschema to an HTML - document containing a tabular rendition of the table data. 
In a similar manner, the results from these functions can be converted into other XML-based formats.

[Example: XSLT Stylesheet for Converting SQL/XML Output to HTML. The stylesheet listing itself did not survive extraction; only the fragment <xsl:value-of select="name(current())"/> remains.]
- - - JSON Functions and Operators - - - JSON - functions and operators - - - SQL/JSON - functions and expressions - - - - This section describes: - - - - - functions and operators for processing and creating JSON data - - - - - the SQL/JSON path language - - - - - the SQL/JSON query functions - - - - - - - To provide native support for JSON data types within the SQL environment, - PostgreSQL implements the - SQL/JSON data model. - This model comprises sequences of items. Each item can hold SQL scalar - values, with an additional SQL/JSON null value, and composite data structures - that use JSON arrays and objects. The model is a formalization of the implied - data model in the JSON specification - RFC 7159. - - - - SQL/JSON allows you to handle JSON data alongside regular SQL data, - with transaction support, including: - - - - - Uploading JSON data into the database and storing it in - regular SQL columns as character or binary strings. - - - - - Generating JSON objects and arrays from relational data. - - - - - Querying JSON data using SQL/JSON query functions and - SQL/JSON path language expressions. - - - - - - - To learn more about the SQL/JSON standard, see - . For details on JSON types - supported in PostgreSQL, - see . - - - - Processing and Creating JSON Data - - - shows the operators that - are available for use with JSON data types (see ). - In addition, the usual comparison operators shown in are available for - jsonb, though not for json. The comparison - operators follow the ordering rules for B-tree operations outlined in - . - See also for the aggregate - function json_agg which aggregates record - values as JSON, the aggregate function - json_object_agg which aggregates pairs of values - into a JSON object, and their jsonb equivalents, - jsonb_agg and jsonb_object_agg. - - - - <type>json</type> and <type>jsonb</type> Operators - - - - - Operator - - - Description - - - Example(s) - - - - - - - - json -> integer - json - - - jsonb -> integer - jsonb - - - Extracts n'th element of JSON array - (array elements are indexed from zero, but negative integers count - from the end). - - - '[{"a":"foo"},{"b":"bar"},{"c":"baz"}]'::json -> 2 - {"c":"baz"} - - - '[{"a":"foo"},{"b":"bar"},{"c":"baz"}]'::json -> -3 - {"a":"foo"} - - - - - - json -> text - json - - - jsonb -> text - jsonb - - - Extracts JSON object field with the given key. - - - '{"a": {"b":"foo"}}'::json -> 'a' - {"b":"foo"} - - - - - - json ->> integer - text - - - jsonb ->> integer - text - - - Extracts n'th element of JSON array, - as text. - - - '[1,2,3]'::json ->> 2 - 3 - - - - - - json ->> text - text - - - jsonb ->> text - text - - - Extracts JSON object field with the given key, as text. - - - '{"a":1,"b":2}'::json ->> 'b' - 2 - - - - - - json #> text[] - json - - - jsonb #> text[] - jsonb - - - Extracts JSON sub-object at the specified path, where path elements - can be either field keys or array indexes. - - - '{"a": {"b": ["foo","bar"]}}'::json #> '{a,b,1}' - "bar" - - - - - - json #>> text[] - text - - - jsonb #>> text[] - text - - - Extracts JSON sub-object at the specified path as text. - - - '{"a": {"b": ["foo","bar"]}}'::json #>> '{a,b,1}' - bar - - - - -
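A sketch chaining the extraction operators against one value (the document is an arbitrary example); the arrow operators and the path operators reach the same element:

SELECT '{"a": {"b": ["x", "y"]}}'::json -> 'a' -> 'b' ->> 1 AS via_arrows,
       '{"a": {"b": ["x", "y"]}}'::json #>> '{a,b,1}' AS via_path;

 via_arrows | via_path
------------+----------
 y          | y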
- - - - The field/element/path extraction operators return NULL, rather than - failing, if the JSON input does not have the right structure to match - the request; for example if no such key or array element exists. - - - - - Some further operators exist only for jsonb, as shown - in . - - describes how these operators can be used to effectively search indexed - jsonb data. - - - - Additional <type>jsonb</type> Operators - - - - - Operator - - - Description - - - Example(s) - - - - - - - - jsonb @> jsonb - boolean - - - Does the first JSON value contain the second? - (See for details about containment.) - - - '{"a":1, "b":2}'::jsonb @> '{"b":2}'::jsonb - t - - - - - - jsonb <@ jsonb - boolean - - - Is the first JSON value contained in the second? - - - '{"b":2}'::jsonb <@ '{"a":1, "b":2}'::jsonb - t - - - - - - jsonb ? text - boolean - - - Does the text string exist as a top-level key or array element within - the JSON value? - - - '{"a":1, "b":2}'::jsonb ? 'b' - t - - - '["a", "b", "c"]'::jsonb ? 'b' - t - - - - - - jsonb ?| text[] - boolean - - - Do any of the strings in the text array exist as top-level keys or - array elements? - - - '{"a":1, "b":2, "c":3}'::jsonb ?| array['b', 'd'] - t - - - - - - jsonb ?& text[] - boolean - - - Do all of the strings in the text array exist as top-level keys or - array elements? - - - '["a", "b", "c"]'::jsonb ?& array['a', 'b'] - t - - - - - - jsonb || jsonb - jsonb - - - Concatenates two jsonb values. - Concatenating two arrays generates an array containing all the - elements of each input. Concatenating two objects generates an - object containing the union of their - keys, taking the second object's value when there are duplicate keys. - All other cases are treated by converting a non-array input into a - single-element array, and then proceeding as for two arrays. - Does not operate recursively: only the top-level array or object - structure is merged. - - - '["a", "b"]'::jsonb || '["a", "d"]'::jsonb - ["a", "b", "a", "d"] - - - '{"a": "b"}'::jsonb || '{"c": "d"}'::jsonb - {"a": "b", "c": "d"} - - - '[1, 2]'::jsonb || '3'::jsonb - [1, 2, 3] - - - '{"a": "b"}'::jsonb || '42'::jsonb - [{"a": "b"}, 42] - - - To append an array to another array as a single entry, wrap it - in an additional layer of array, for example: - - - '[1, 2]'::jsonb || jsonb_build_array('[3, 4]'::jsonb) - [1, 2, [3, 4]] - - - - - - jsonb - text - jsonb - - - Deletes a key (and its value) from a JSON object, or matching string - value(s) from a JSON array. - - - '{"a": "b", "c": "d"}'::jsonb - 'a' - {"c": "d"} - - - '["a", "b", "c", "b"]'::jsonb - 'b' - ["a", "c"] - - - - - - jsonb - text[] - jsonb - - - Deletes all matching keys or array elements from the left operand. - - - '{"a": "b", "c": "d"}'::jsonb - '{a,c}'::text[] - {} - - - - - - jsonb - integer - jsonb - - - Deletes the array element with specified index (negative - integers count from the end). Throws an error if JSON value - is not an array. - - - '["a", "b"]'::jsonb - 1 - ["a"] - - - - - - jsonb #- text[] - jsonb - - - Deletes the field or array element at the specified path, where path - elements can be either field keys or array indexes. - - - '["a", {"b":1}]'::jsonb #- '{1,b}' - ["a", {}] - - - - - - jsonb @? jsonpath - boolean - - - Does JSON path return any item for the specified JSON value? - (This is useful only with SQL-standard JSON path expressions, not - predicate check - expressions, since those always return a value.) - - - '{"a":[1,2,3,4,5]}'::jsonb @? '$.a[*] ? 
(@ > 2)' - t - - - - - - jsonb @@ jsonpath - boolean - - - Returns the result of a JSON path predicate check for the - specified JSON value. - (This is useful only - with predicate - check expressions, not SQL-standard JSON path expressions, - since it will return NULL if the path result is - not a single boolean value.) - - - '{"a":[1,2,3,4,5]}'::jsonb @@ '$.a[*] > 2' - t - - - - -
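As a combined sketch of the jsonb-only operators (the value is an arbitrary example):

SELECT '{"a": 1, "b": {"c": 2}}'::jsonb @> '{"a": 1}' AS contains,
       '{"a": 1, "b": {"c": 2}}'::jsonb ? 'b' AS has_key,
       '{"a": 1, "b": {"c": 2}}'::jsonb #- '{b,c}' AS pruned;

 contains | has_key |      pruned
----------+---------+-------------------
 t        | t       | {"a": 1, "b": {}}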
- - - - The jsonpath operators @? - and @@ suppress the following errors: missing object - field or array element, unexpected JSON item type, datetime and numeric - errors. The jsonpath-related functions described below can - also be told to suppress these types of errors. This behavior might be - helpful when searching JSON document collections of varying structure. - - - - - shows the functions that are - available for constructing json and jsonb values. - Some functions in this table have a RETURNING clause, - which specifies the data type returned. It must be one of json, - jsonb, bytea, a character string type (text, - char, or varchar), or a type - that can be cast to json. - By default, the json type is returned. - - - - JSON Creation Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - to_json - - to_json ( anyelement ) - json - - - - to_jsonb - - to_jsonb ( anyelement ) - jsonb - - - Converts any SQL value to json or jsonb. - Arrays and composites are converted recursively to arrays and - objects (multidimensional arrays become arrays of arrays in JSON). - Otherwise, if there is a cast from the SQL data type - to json, the cast function will be used to perform the - conversion; - - For example, the extension has a cast - from hstore to json, so that - hstore values converted via the JSON creation functions - will be represented as JSON objects, not as primitive string values. - - - otherwise, a scalar JSON value is produced. For any scalar other than - a number, a Boolean, or a null value, the text representation will be - used, with escaping as necessary to make it a valid JSON string value. - - - to_json('Fred said "Hi."'::text) - "Fred said \"Hi.\"" - - - to_jsonb(row(42, 'Fred said "Hi."'::text)) - {"f1": 42, "f2": "Fred said \"Hi.\""} - - - - - - - array_to_json - - array_to_json ( anyarray , boolean ) - json - - - Converts an SQL array to a JSON array. The behavior is the same - as to_json except that line feeds will be added - between top-level array elements if the optional boolean parameter is - true. - - - array_to_json('{{1,5},{99,100}}'::int[]) - [[1,5],[99,100]] - - - - - - - json_array - json_array ( - { value_expression FORMAT JSON } , ... - { NULL | ABSENT } ON NULL - RETURNING data_type FORMAT JSON ENCODING UTF8 ) - - - json_array ( - query_expression - RETURNING data_type FORMAT JSON ENCODING UTF8 ) - - - Constructs a JSON array from either a series of - value_expression parameters or from the results - of query_expression, - which must be a SELECT query returning a single column. If - ABSENT ON NULL is specified, NULL values are ignored. - This is always the case if a - query_expression is used. - - - json_array(1,true,json '{"a":null}') - [1, true, {"a":null}] - - - json_array(SELECT * FROM (VALUES(1),(2)) t) - [1, 2] - - - - - - - row_to_json - - row_to_json ( record , boolean ) - json - - - Converts an SQL composite value to a JSON object. The behavior is the - same as to_json except that line feeds will be - added between top-level elements if the optional boolean parameter is - true. - - - row_to_json(row(1,'foo')) - {"f1":1,"f2":"foo"} - - - - - - - json_build_array - - json_build_array ( VARIADIC "any" ) - json - - - - jsonb_build_array - - jsonb_build_array ( VARIADIC "any" ) - jsonb - - - Builds a possibly-heterogeneously-typed JSON array out of a variadic - argument list. Each argument is converted as - per to_json or to_jsonb. 
- - - json_build_array(1, 2, 'foo', 4, 5) - [1, 2, "foo", 4, 5] - - - - - - - json_build_object - - json_build_object ( VARIADIC "any" ) - json - - - - jsonb_build_object - - jsonb_build_object ( VARIADIC "any" ) - jsonb - - - Builds a JSON object out of a variadic argument list. By convention, - the argument list consists of alternating keys and values. Key - arguments are coerced to text; value arguments are converted as - per to_json or to_jsonb. - - - json_build_object('foo', 1, 2, row(3,'bar')) - {"foo" : 1, "2" : {"f1":3,"f2":"bar"}} - - - - - - json_object - json_object ( - { key_expression { VALUE | ':' } - value_expression FORMAT JSON ENCODING UTF8 }, ... - { NULL | ABSENT } ON NULL - { WITH | WITHOUT } UNIQUE KEYS - RETURNING data_type FORMAT JSON ENCODING UTF8 ) - - - Constructs a JSON object of all the key/value pairs given, - or an empty object if none are given. - key_expression is a scalar expression - defining the JSON key, which is - converted to the text type. - It cannot be NULL nor can it - belong to a type that has a cast to the json type. - If WITH UNIQUE KEYS is specified, there must not - be any duplicate key_expression. - Any pair for which the value_expression - evaluates to NULL is omitted from the output - if ABSENT ON NULL is specified; - if NULL ON NULL is specified or the clause - omitted, the key is included with value NULL. - - - json_object('code' VALUE 'P123', 'title': 'Jaws') - {"code" : "P123", "title" : "Jaws"} - - - - - - - json_object - - json_object ( text[] ) - json - - - - jsonb_object - - jsonb_object ( text[] ) - jsonb - - - Builds a JSON object out of a text array. The array must have either - exactly one dimension with an even number of members, in which case - they are taken as alternating key/value pairs, or two dimensions - such that each inner array has exactly two elements, which - are taken as a key/value pair. All values are converted to JSON - strings. - - - json_object('{a, 1, b, "def", c, 3.5}') - {"a" : "1", "b" : "def", "c" : "3.5"} - - json_object('{{a, 1}, {b, "def"}, {c, 3.5}}') - {"a" : "1", "b" : "def", "c" : "3.5"} - - - - - - json_object ( keys text[], values text[] ) - json - - - jsonb_object ( keys text[], values text[] ) - jsonb - - - This form of json_object takes keys and values - pairwise from separate text arrays. Otherwise it is identical to - the one-argument form. - - - json_object('{a,b}', '{1,2}') - {"a": "1", "b": "2"} - - - - - - json constructor - json ( - expression - FORMAT JSON ENCODING UTF8 - { WITH | WITHOUT } UNIQUE KEYS ) - json - - - Converts a given expression specified as text or - bytea string (in UTF8 encoding) into a JSON - value. If expression is NULL, an - SQL null value is returned. - If WITH UNIQUE is specified, the - expression must not contain any duplicate - object keys. - - - json('{"a":123, "b":[true,"foo"], "a":"bar"}') - {"a":123, "b":[true,"foo"], "a":"bar"} - - - - - - - json_scalar - json_scalar ( expression ) - - - Converts a given SQL scalar value into a JSON scalar value. - If the input is NULL, an SQL null is returned. If - the input is number or a boolean value, a corresponding JSON number - or boolean value is returned. For any other value, a JSON string is - returned. - - - json_scalar(123.45) - 123.45 - - - json_scalar(CURRENT_TIMESTAMP) - "2022-05-10T10:51:04.62128-04:00" - - - - - - json_serialize ( - expression FORMAT JSON ENCODING UTF8 - RETURNING data_type FORMAT JSON ENCODING UTF8 ) - - - Converts an SQL/JSON expression into a character or binary string. 
The - expression can be of any JSON type, any - character string type, or bytea in UTF8 encoding. - The returned type used in RETURNING can be any - character string type or bytea. The default is - text. - - - json_serialize('{ "a" : 1 } ' RETURNING bytea) - \x7b20226122203a2031207d20 - - - - -
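The creation functions can be nested to assemble larger documents in a single expression; each argument is converted by the same rules as to_json or to_jsonb. An illustrative example (not part of the reference table above; expected output shown):

=> select jsonb_build_object('name', 'Ann',
                             'tags', jsonb_build_array('sql', 'json'));
            jsonb_build_object
-------------------------------------------
 {"name": "Ann", "tags": ["sql", "json"]}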
- - - details SQL/JSON - facilities for testing JSON. - - - - SQL/JSON Testing Functions - - - - - Function signature - - - Description - - - Example(s) - - - - - - - IS JSON - expression IS NOT JSON - { VALUE | SCALAR | ARRAY | OBJECT } - { WITH | WITHOUT } UNIQUE KEYS - - - This predicate tests whether expression can be - parsed as JSON, possibly of a specified type. - If SCALAR or ARRAY or - OBJECT is specified, the - test is whether or not the JSON is of that particular type. If - WITH UNIQUE KEYS is specified, then any object in the - expression is also tested to see if it - has duplicate keys. - - - -SELECT js, - js IS JSON "json?", - js IS JSON SCALAR "scalar?", - js IS JSON OBJECT "object?", - js IS JSON ARRAY "array?" -FROM (VALUES - ('123'), ('"abc"'), ('{"a": "b"}'), ('[1,2]'),('abc')) foo(js); - js | json? | scalar? | object? | array? -------------+-------+---------+---------+-------- - 123 | t | t | f | f - "abc" | t | t | f | f - {"a": "b"} | t | f | t | f - [1,2] | t | f | f | t - abc | f | f | f | f - - - - -SELECT js, - js IS JSON OBJECT "object?", - js IS JSON ARRAY "array?", - js IS JSON ARRAY WITH UNIQUE KEYS "array w. UK?", - js IS JSON ARRAY WITHOUT UNIQUE KEYS "array w/o UK?" -FROM (VALUES ('[{"a":"1"}, - {"b":"2","b":"3"}]')) foo(js); --[ RECORD 1 ]-+-------------------- -js | [{"a":"1"}, + - | {"b":"2","b":"3"}] -object? | f -array? | t -array w. UK? | f -array w/o UK? | t - - - - - -
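Because IS JSON is an ordinary SQL predicate, it can be used, for example, to filter out rows whose textual content is not well-formed JSON before attempting a cast. A minimal sketch (expected output shown):

=> select js::jsonb
   from (values ('{"a": 1}'), ('not json')) as t(js)
   where js is json object;
    js
----------
 {"a": 1}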
- - - shows the functions that - are available for processing json and jsonb values. - - - - JSON Processing Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - json_array_elements - - json_array_elements ( json ) - setof json - - - - jsonb_array_elements - - jsonb_array_elements ( jsonb ) - setof jsonb - - - Expands the top-level JSON array into a set of JSON values. - - - select * from json_array_elements('[1,true, [2,false]]') - - - value ------------ - 1 - true - [2,false] - - - - - - - - json_array_elements_text - - json_array_elements_text ( json ) - setof text - - - - jsonb_array_elements_text - - jsonb_array_elements_text ( jsonb ) - setof text - - - Expands the top-level JSON array into a set of text values. - - - select * from json_array_elements_text('["foo", "bar"]') - - - value ------------ - foo - bar - - - - - - - - json_array_length - - json_array_length ( json ) - integer - - - - jsonb_array_length - - jsonb_array_length ( jsonb ) - integer - - - Returns the number of elements in the top-level JSON array. - - - json_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]') - 5 - - - jsonb_array_length('[]') - 0 - - - - - - - json_each - - json_each ( json ) - setof record - ( key text, - value json ) - - - - jsonb_each - - jsonb_each ( jsonb ) - setof record - ( key text, - value jsonb ) - - - Expands the top-level JSON object into a set of key/value pairs. - - - select * from json_each('{"a":"foo", "b":"bar"}') - - - key | value ------+------- - a | "foo" - b | "bar" - - - - - - - - json_each_text - - json_each_text ( json ) - setof record - ( key text, - value text ) - - - - jsonb_each_text - - jsonb_each_text ( jsonb ) - setof record - ( key text, - value text ) - - - Expands the top-level JSON object into a set of key/value pairs. - The returned values will be of - type text. - - - select * from json_each_text('{"a":"foo", "b":"bar"}') - - - key | value ------+------- - a | foo - b | bar - - - - - - - - json_extract_path - - json_extract_path ( from_json json, VARIADIC path_elems text[] ) - json - - - - jsonb_extract_path - - jsonb_extract_path ( from_json jsonb, VARIADIC path_elems text[] ) - jsonb - - - Extracts JSON sub-object at the specified path. - (This is functionally equivalent to the #> - operator, but writing the path out as a variadic list can be more - convenient in some cases.) - - - json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"foo"}}', 'f4', 'f6') - "foo" - - - - - - - json_extract_path_text - - json_extract_path_text ( from_json json, VARIADIC path_elems text[] ) - text - - - - jsonb_extract_path_text - - jsonb_extract_path_text ( from_json jsonb, VARIADIC path_elems text[] ) - text - - - Extracts JSON sub-object at the specified path as text. - (This is functionally equivalent to the #>> - operator.) - - - json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"foo"}}', 'f4', 'f6') - foo - - - - - - - json_object_keys - - json_object_keys ( json ) - setof text - - - - jsonb_object_keys - - jsonb_object_keys ( jsonb ) - setof text - - - Returns the set of keys in the top-level JSON object. 
- - - select * from json_object_keys('{"f1":"abc","f2":{"f3":"a", "f4":"b"}}') - - - json_object_keys ------------------- - f1 - f2 - - - - - - - - json_populate_record - - json_populate_record ( base anyelement, from_json json ) - anyelement - - - - jsonb_populate_record - - jsonb_populate_record ( base anyelement, from_json jsonb ) - anyelement - - - Expands the top-level JSON object to a row having the composite type - of the base argument. The JSON object - is scanned for fields whose names match column names of the output row - type, and their values are inserted into those columns of the output. - (Fields that do not correspond to any output column name are ignored.) - In typical use, the value of base is just - NULL, which means that any output columns that do - not match any object field will be filled with nulls. However, - if base isn't NULL then - the values it contains will be used for unmatched columns. - - - To convert a JSON value to the SQL type of an output column, the - following rules are applied in sequence: - - - - A JSON null value is converted to an SQL null in all cases. - - - - - If the output column is of type json - or jsonb, the JSON value is just reproduced exactly. - - - - - If the output column is a composite (row) type, and the JSON value - is a JSON object, the fields of the object are converted to columns - of the output row type by recursive application of these rules. - - - - - Likewise, if the output column is an array type and the JSON value - is a JSON array, the elements of the JSON array are converted to - elements of the output array by recursive application of these - rules. - - - - - Otherwise, if the JSON value is a string, the contents of the - string are fed to the input conversion function for the column's - data type. - - - - - Otherwise, the ordinary text representation of the JSON value is - fed to the input conversion function for the column's data type. - - - - - - While the example below uses a constant JSON value, typical use would - be to reference a json or jsonb column - laterally from another table in the query's FROM - clause. Writing json_populate_record in - the FROM clause is good practice, since all of the - extracted columns are available for use without duplicate function - calls. - - - create type subrowtype as (d int, e text); - create type myrowtype as (a int, b text[], c subrowtype); - - - select * from json_populate_record(null::myrowtype, - '{"a": 1, "b": ["2", "a b"], "c": {"d": 4, "e": "a b c"}, "x": "foo"}') - - - a | b | c ----+-----------+------------- - 1 | {2,"a b"} | (4,"a b c") - - - - - - - - jsonb_populate_record_valid - - jsonb_populate_record_valid ( base anyelement, from_json json ) - boolean - - - Function for testing jsonb_populate_record. Returns - true if the input jsonb_populate_record - would finish without an error for the given input JSON object; that is, it's - valid input, false otherwise. 
- - - create type jsb_char2 as (a char(2)); - - - select jsonb_populate_record_valid(NULL::jsb_char2, '{"a": "aaa"}'); - - - jsonb_populate_record_valid ------------------------------ - f -(1 row) - - - select * from jsonb_populate_record(NULL::jsb_char2, '{"a": "aaa"}') q; - - -ERROR: value too long for type character(2) - - select jsonb_populate_record_valid(NULL::jsb_char2, '{"a": "aa"}'); - - - jsonb_populate_record_valid ------------------------------ - t -(1 row) - - - select * from jsonb_populate_record(NULL::jsb_char2, '{"a": "aa"}') q; - - - a ----- - aa -(1 row) - - - - - - - - json_populate_recordset - - json_populate_recordset ( base anyelement, from_json json ) - setof anyelement - - - - jsonb_populate_recordset - - jsonb_populate_recordset ( base anyelement, from_json jsonb ) - setof anyelement - - - Expands the top-level JSON array of objects to a set of rows having - the composite type of the base argument. - Each element of the JSON array is processed as described above - for json[b]_populate_record. - - - create type twoints as (a int, b int); - - - select * from json_populate_recordset(null::twoints, '[{"a":1,"b":2}, {"a":3,"b":4}]') - - - a | b ----+--- - 1 | 2 - 3 | 4 - - - - - - - - json_to_record - - json_to_record ( json ) - record - - - - jsonb_to_record - - jsonb_to_record ( jsonb ) - record - - - Expands the top-level JSON object to a row having the composite type - defined by an AS clause. (As with all functions - returning record, the calling query must explicitly - define the structure of the record with an AS - clause.) The output record is filled from fields of the JSON object, - in the same way as described above - for json[b]_populate_record. Since there is no - input record value, unmatched columns are always filled with nulls. - - - create type myrowtype as (a int, b text); - - - select * from json_to_record('{"a":1,"b":[1,2,3],"c":[1,2,3],"e":"bar","r": {"a": 123, "b": "a b c"}}') as x(a int, b text, c int[], d text, r myrowtype) - - - a | b | c | d | r ----+---------+---------+---+--------------- - 1 | [1,2,3] | {1,2,3} | | (123,"a b c") - - - - - - - - json_to_recordset - - json_to_recordset ( json ) - setof record - - - - jsonb_to_recordset - - jsonb_to_recordset ( jsonb ) - setof record - - - Expands the top-level JSON array of objects to a set of rows having - the composite type defined by an AS clause. (As - with all functions returning record, the calling query - must explicitly define the structure of the record with - an AS clause.) Each element of the JSON array is - processed as described above - for json[b]_populate_record. - - - select * from json_to_recordset('[{"a":1,"b":"foo"}, {"a":"2","c":"bar"}]') as x(a int, b text) - - - a | b ----+----- - 1 | foo - 2 | - - - - - - - - jsonb_set - - jsonb_set ( target jsonb, path text[], new_value jsonb , create_if_missing boolean ) - jsonb - - - Returns target - with the item designated by path - replaced by new_value, or with - new_value added if - create_if_missing is true (which is the - default) and the item designated by path - does not exist. - All earlier steps in the path must exist, or - the target is returned unchanged. - As with the path oriented operators, negative integers that - appear in the path count from the end - of JSON arrays. - If the last path step is an array index that is out of range, - and create_if_missing is true, the new - value is added at the beginning of the array if the index is negative, - or at the end of the array if it is positive. 
- - - jsonb_set('[{"f1":1,"f2":null},2,null,3]', '{0,f1}', '[2,3,4]', false) - [{"f1": [2, 3, 4], "f2": null}, 2, null, 3] - - - jsonb_set('[{"f1":1,"f2":null},2]', '{0,f3}', '[2,3,4]') - [{"f1": 1, "f2": null, "f3": [2, 3, 4]}, 2] - - - - - - - jsonb_set_lax - - jsonb_set_lax ( target jsonb, path text[], new_value jsonb , create_if_missing boolean , null_value_treatment text ) - jsonb - - - If new_value is not NULL, - behaves identically to jsonb_set. Otherwise behaves - according to the value - of null_value_treatment which must be one - of 'raise_exception', - 'use_json_null', 'delete_key', or - 'return_target'. The default is - 'use_json_null'. - - - jsonb_set_lax('[{"f1":1,"f2":null},2,null,3]', '{0,f1}', null) - [{"f1": null, "f2": null}, 2, null, 3] - - - jsonb_set_lax('[{"f1":99,"f2":null},2]', '{0,f3}', null, true, 'return_target') - [{"f1": 99, "f2": null}, 2] - - - - - - - jsonb_insert - - jsonb_insert ( target jsonb, path text[], new_value jsonb , insert_after boolean ) - jsonb - - - Returns target - with new_value inserted. If the item - designated by the path is an array - element, new_value will be inserted before - that item if insert_after is false (which - is the default), or after it - if insert_after is true. If the item - designated by the path is an object - field, new_value will be inserted only if - the object does not already contain that key. - All earlier steps in the path must exist, or - the target is returned unchanged. - As with the path oriented operators, negative integers that - appear in the path count from the end - of JSON arrays. - If the last path step is an array index that is out of range, the new - value is added at the beginning of the array if the index is negative, - or at the end of the array if it is positive. - - - jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '"new_value"') - {"a": [0, "new_value", 1, 2]} - - - jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '"new_value"', true) - {"a": [0, 1, "new_value", 2]} - - - - - - - json_strip_nulls - - json_strip_nulls ( target json ,strip_in_arrays boolean ) - json - - - - jsonb_strip_nulls - - jsonb_strip_nulls ( target jsonb ,strip_in_arrays boolean ) - jsonb - - - Deletes all object fields that have null values from the given JSON - value, recursively. - If strip_in_arrays is true (the default is false), - null array elements are also stripped. - Otherwise they are not stripped. Bare null values are never stripped. - - - json_strip_nulls('[{"f1":1, "f2":null}, 2, null, 3]') - [{"f1":1},2,null,3] - - - jsonb_strip_nulls('[1,2,null,3,4]', true); - [1,2,3,4] - - - - - - - - jsonb_path_exists - - jsonb_path_exists ( target jsonb, path jsonpath , vars jsonb , silent boolean ) - boolean - - - Checks whether the JSON path returns any item for the specified JSON - value. - (This is useful only with SQL-standard JSON path expressions, not - predicate check - expressions, since those always return a value.) - If the vars argument is specified, it must - be a JSON object, and its fields provide named values to be - substituted into the jsonpath expression. - If the silent argument is specified and - is true, the function suppresses the same errors - as the @? and @@ operators do. - - - jsonb_path_exists('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2, "max":4}') - t - - - - - - - jsonb_path_match - - jsonb_path_match ( target jsonb, path jsonpath , vars jsonb , silent boolean ) - boolean - - - Returns the SQL boolean result of a JSON path predicate check - for the specified JSON value. 
- (This is useful only - with predicate - check expressions, not SQL-standard JSON path expressions, - since it will either fail or return NULL if the - path result is not a single boolean value.) - The optional vars - and silent arguments act the same as - for jsonb_path_exists. - - - jsonb_path_match('{"a":[1,2,3,4,5]}', 'exists($.a[*] ? (@ >= $min && @ <= $max))', '{"min":2, "max":4}') - t - - - - - - - jsonb_path_query - - jsonb_path_query ( target jsonb, path jsonpath , vars jsonb , silent boolean ) - setof jsonb - - - Returns all JSON items returned by the JSON path for the specified - JSON value. - For SQL-standard JSON path expressions it returns the JSON - values selected from target. - For predicate - check expressions it returns the result of the predicate - check: true, false, - or null. - The optional vars - and silent arguments act the same as - for jsonb_path_exists. - - - select * from jsonb_path_query('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2, "max":4}') - - - jsonb_path_query ------------------- - 2 - 3 - 4 - - - - - - - - jsonb_path_query_array - - jsonb_path_query_array ( target jsonb, path jsonpath , vars jsonb , silent boolean ) - jsonb - - - Returns all JSON items returned by the JSON path for the specified - JSON value, as a JSON array. - The parameters are the same as - for jsonb_path_query. - - - jsonb_path_query_array('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2, "max":4}') - [2, 3, 4] - - - - - - - jsonb_path_query_first - - jsonb_path_query_first ( target jsonb, path jsonpath , vars jsonb , silent boolean ) - jsonb - - - Returns the first JSON item returned by the JSON path for the - specified JSON value, or NULL if there are no - results. - The parameters are the same as - for jsonb_path_query. - - - jsonb_path_query_first('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2, "max":4}') - 2 - - - - - - - jsonb_path_exists_tz - - jsonb_path_exists_tz ( target jsonb, path jsonpath , vars jsonb , silent boolean ) - boolean - - - - jsonb_path_match_tz - - jsonb_path_match_tz ( target jsonb, path jsonpath , vars jsonb , silent boolean ) - boolean - - - - jsonb_path_query_tz - - jsonb_path_query_tz ( target jsonb, path jsonpath , vars jsonb , silent boolean ) - setof jsonb - - - - jsonb_path_query_array_tz - - jsonb_path_query_array_tz ( target jsonb, path jsonpath , vars jsonb , silent boolean ) - jsonb - - - - jsonb_path_query_first_tz - - jsonb_path_query_first_tz ( target jsonb, path jsonpath , vars jsonb , silent boolean ) - jsonb - - - These functions act like their counterparts described above without - the _tz suffix, except that these functions support - comparisons of date/time values that require timezone-aware - conversions. The example below requires interpretation of the - date-only value 2015-08-02 as a timestamp with time - zone, so the result depends on the current - setting. Due to this dependency, these - functions are marked as stable, which means these functions cannot be - used in indexes. Their counterparts are immutable, and so can be used - in indexes; but they will throw errors if asked to make such - comparisons. - - - jsonb_path_exists_tz('["2015-08-01 12:00:00-05"]', '$[*] ? (@.datetime() < "2015-08-02".datetime())') - t - - - - - - - jsonb_pretty - - jsonb_pretty ( jsonb ) - text - - - Converts the given JSON value to pretty-printed, indented text. 
- - - jsonb_pretty('[{"f1":1,"f2":null}, 2]') - - -[ - { - "f1": 1, - "f2": null - }, - 2 -] - - - - - - - - json_typeof - - json_typeof ( json ) - text - - - - jsonb_typeof - - jsonb_typeof ( jsonb ) - text - - - Returns the type of the top-level JSON value as a text string. - Possible types are - object, array, - string, number, - boolean, and null. - (The null result should not be confused - with an SQL NULL; see the examples.) - - - json_typeof('-123.4') - number - - - json_typeof('null'::json) - null - - - json_typeof(NULL::json) IS NULL - t - - - - -
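The set-returning processing functions are typically applied laterally to a table column, so that each row's JSON document is expanded alongside the row that produced it. An illustrative sketch, using an inline VALUES list in place of a real table (expected output shown):

=> select t.id, e.key, e.value
   from (values (1, '{"a": "x", "b": "y"}'::jsonb)) as t(id, body),
        lateral jsonb_each(t.body) as e;
 id | key | value
----+-----+-------
  1 | a   | "x"
  1 | b   | "y"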
- - - The SQL/JSON Path Language - - - SQL/JSON path language - - - - SQL/JSON path expressions specify item(s) to be retrieved - from a JSON value, similarly to XPath expressions used - for access to XML content. In PostgreSQL, - path expressions are implemented as the jsonpath - data type and can use any elements described in - . - - - - JSON query functions and operators - pass the provided path expression to the path engine - for evaluation. If the expression matches the queried JSON data, - the corresponding JSON item, or set of items, is returned. - If there is no match, the result will be NULL, - false, or an error, depending on the function. - Path expressions are written in the SQL/JSON path language - and can include arithmetic expressions and functions. - - - - A path expression consists of a sequence of elements allowed - by the jsonpath data type. - The path expression is normally evaluated from left to right, but - you can use parentheses to change the order of operations. - If the evaluation is successful, a sequence of JSON items is produced, - and the evaluation result is returned to the JSON query function - that completes the specified computation. - - - - To refer to the JSON value being queried (the - context item), use the $ variable - in the path expression. The first element of a path must always - be $. It can be followed by one or more - accessor operators, - which go down the JSON structure level by level to retrieve sub-items - of the context item. Each accessor operator acts on the - result(s) of the previous evaluation step, producing zero, one, or more - output items from each input item. - - - - For example, suppose you have some JSON data from a GPS tracker that you - would like to parse, such as: - -SELECT '{ - "track": { - "segments": [ - { - "location": [ 47.763, 13.4034 ], - "start time": "2018-10-14 10:05:14", - "HR": 73 - }, - { - "location": [ 47.706, 13.2635 ], - "start time": "2018-10-14 10:39:21", - "HR": 135 - } - ] - } -}' AS json \gset - - (The above example can be copied-and-pasted - into psql to set things up for the following - examples. Then psql will - expand :'json' into a suitably-quoted string - constant containing the JSON value.) - - - - To retrieve the available track segments, you need to use the - .key accessor - operator to descend through surrounding JSON objects, for example: - -=> select jsonb_path_query(:'json', '$.track.segments'); - jsonb_path_query ------------------------------------------------------------&zwsp;-----------------------------------------------------------&zwsp;--------------------------------------------- - [{"HR": 73, "location": [47.763, 13.4034], "start time": "2018-10-14 10:05:14"}, {"HR": 135, "location": [47.706, 13.2635], "start time": "2018-10-14 10:39:21"}] - - - - - To retrieve the contents of an array, you typically use the - [*] operator. 
- The following example will return the location coordinates for all - the available track segments: - -=> select jsonb_path_query(:'json', '$.track.segments[*].location'); - jsonb_path_query -------------------- - [47.763, 13.4034] - [47.706, 13.2635] - - Here we started with the whole JSON input value ($), - then the .track accessor selected the JSON object - associated with the "track" object key, then - the .segments accessor selected the JSON array - associated with the "segments" key within that - object, then the [*] accessor selected each element - of that array (producing a series of items), then - the .location accessor selected the JSON array - associated with the "location" key within each of - those objects. In this example, each of those objects had - a "location" key; but if any of them did not, - the .location accessor would have simply produced no - output for that input item. - - - - To return the coordinates of the first segment only, you can - specify the corresponding subscript in the [] - accessor operator. Recall that JSON array indexes are 0-relative: - -=> select jsonb_path_query(:'json', '$.track.segments[0].location'); - jsonb_path_query -------------------- - [47.763, 13.4034] - - - - - The result of each path evaluation step can be processed - by one or more of the jsonpath operators and methods - listed in . - Each method name must be preceded by a dot. For example, - you can get the size of an array: - -=> select jsonb_path_query(:'json', '$.track.segments.size()'); - jsonb_path_query ------------------- - 2 - - More examples of using jsonpath operators - and methods within path expressions appear below in - . - - - - A path can also contain - filter expressions that work similarly to the - WHERE clause in SQL. A filter expression begins with - a question mark and provides a condition in parentheses: - - -? (condition) - - - - - Filter expressions must be written just after the path evaluation step - to which they should apply. The result of that step is filtered to include - only those items that satisfy the provided condition. SQL/JSON defines - three-valued logic, so the condition can - produce true, false, - or unknown. The unknown value - plays the same role as SQL NULL and can be tested - for with the is unknown predicate. Further path - evaluation steps use only those items for which the filter expression - returned true. - - - - The functions and operators that can be used in filter expressions are - listed in . Within a - filter expression, the @ variable denotes the value - being considered (i.e., one result of the preceding path step). You can - write accessor operators after @ to retrieve component - items. - - - - For example, suppose you would like to retrieve all heart rate values higher - than 130. You can achieve this as follows: - -=> select jsonb_path_query(:'json', '$.track.segments[*].HR ? (@ > 130)'); - jsonb_path_query ------------------- - 135 - - - - - To get the start times of segments with such values, you have to - filter out irrelevant segments before selecting the start times, so the - filter expression is applied to the previous step, and the path used - in the condition is different: - -=> select jsonb_path_query(:'json', '$.track.segments[*] ? (@.HR > 130)."start time"'); - jsonb_path_query ------------------------ - "2018-10-14 10:39:21" - - - - - You can use several filter expressions in sequence, if required. 
- The following example selects start times of all segments that - contain locations with relevant coordinates and high heart rate values: - -=> select jsonb_path_query(:'json', '$.track.segments[*] ? (@.location[1] < 13.4) ? (@.HR > 130)."start time"'); - jsonb_path_query ------------------------ - "2018-10-14 10:39:21" - - - - - Using filter expressions at different nesting levels is also allowed. - The following example first filters all segments by location, and then - returns high heart rate values for these segments, if available: - -=> select jsonb_path_query(:'json', '$.track.segments[*] ? (@.location[1] < 13.4).HR ? (@ > 130)'); - jsonb_path_query ------------------- - 135 - - - - - You can also nest filter expressions within each other. - This example returns the size of the track if it contains any - segments with high heart rate values, or an empty sequence otherwise: - -=> select jsonb_path_query(:'json', '$.track ? (exists(@.segments[*] ? (@.HR > 130))).segments.size()'); - jsonb_path_query ------------------- - 2 - - - - - Deviations from the SQL Standard - - PostgreSQL's implementation of the SQL/JSON path - language has the following deviations from the SQL/JSON standard. - - - - Boolean Predicate Check Expressions - - As an extension to the SQL standard, - a PostgreSQL path expression can be a - Boolean predicate, whereas the SQL standard allows predicates only within - filters. While SQL-standard path expressions return the relevant - element(s) of the queried JSON value, predicate check expressions - return the single three-valued jsonb result of the - predicate: true, - false, or null. - For example, we could write this SQL-standard filter expression: - -=> select jsonb_path_query(:'json', '$.track.segments ?(@[*].HR > 130)'); - jsonb_path_query ------------------------------------------------------------&zwsp;---------------------- - {"HR": 135, "location": [47.706, 13.2635], "start time": "2018-10-14 10:39:21"} - - The similar predicate check expression simply - returns true, indicating that a match exists: - -=> select jsonb_path_query(:'json', '$.track.segments[*].HR > 130'); - jsonb_path_query ------------------- - true - - - - - - Predicate check expressions are required in the - @@ operator (and the - jsonb_path_match function), and should not be used - with the @? operator (or the - jsonb_path_exists function). - - - - - - Regular Expression Interpretation - - There are minor differences in the interpretation of regular - expression patterns used in like_regex filters, as - described in . - - - - - - Strict and Lax Modes - - When you query JSON data, the path expression may not match the - actual JSON data structure. An attempt to access a non-existent - member of an object or element of an array is defined as a - structural error. SQL/JSON path expressions have two modes - of handling structural errors: - - - - - - lax (default) — the path engine implicitly adapts - the queried data to the specified path. - Any structural errors that cannot be fixed as described below - are suppressed, producing no match. - - - - - strict — if a structural error occurs, an error is raised. - - - - - - Lax mode facilitates matching of a JSON document and path - expression when the JSON data does not conform to the expected schema. - If an operand does not match the requirements of a particular operation, - it can be automatically wrapped as an SQL/JSON array, or unwrapped by - converting its elements into an SQL/JSON sequence before performing - the operation. 
Also, comparison operators automatically unwrap their - operands in lax mode, so you can compare SQL/JSON arrays - out-of-the-box. An array of size 1 is considered equal to its sole element. - Automatic unwrapping is not performed when: - - - - The path expression contains type() or - size() methods that return the type - and the number of elements in the array, respectively. - - - - - The queried JSON data contain nested arrays. In this case, only - the outermost array is unwrapped, while all the inner arrays - remain unchanged. Thus, implicit unwrapping can only go one - level down within each path evaluation step. - - - - - - - For example, when querying the GPS data listed above, you can - abstract from the fact that it stores an array of segments - when using lax mode: - -=> select jsonb_path_query(:'json', 'lax $.track.segments.location'); - jsonb_path_query -------------------- - [47.763, 13.4034] - [47.706, 13.2635] - - - - - In strict mode, the specified path must exactly match the structure of - the queried JSON document, so using this path - expression will cause an error: - -=> select jsonb_path_query(:'json', 'strict $.track.segments.location'); -ERROR: jsonpath member accessor can only be applied to an object - - To get the same result as in lax mode, you have to explicitly unwrap the - segments array: - -=> select jsonb_path_query(:'json', 'strict $.track.segments[*].location'); - jsonb_path_query -------------------- - [47.763, 13.4034] - [47.706, 13.2635] - - - - - The unwrapping behavior of lax mode can lead to surprising results. For - instance, the following query using the .** accessor - selects every HR value twice: - -=> select jsonb_path_query(:'json', 'lax $.**.HR'); - jsonb_path_query ------------------- - 73 - 135 - 73 - 135 - - This happens because the .** accessor selects both - the segments array and each of its elements, while - the .HR accessor automatically unwraps arrays when - using lax mode. To avoid surprising results, we recommend using - the .** accessor only in strict mode. The - following query selects each HR value just once: - -=> select jsonb_path_query(:'json', 'strict $.**.HR'); - jsonb_path_query ------------------- - 73 - 135 - - - - - The unwrapping of arrays can also lead to unexpected results. Consider this - example, which selects all the location arrays: - -=> select jsonb_path_query(:'json', 'lax $.track.segments[*].location'); - jsonb_path_query -------------------- - [47.763, 13.4034] - [47.706, 13.2635] -(2 rows) - - As expected it returns the full arrays. But applying a filter expression - causes the arrays to be unwrapped to evaluate each item, returning only the - items that match the expression: - -=> select jsonb_path_query(:'json', 'lax $.track.segments[*].location ?(@[*] > 15)'); - jsonb_path_query ------------------- - 47.763 - 47.706 -(2 rows) - - This despite the fact that the full arrays are selected by the path - expression. Use strict mode to restore selecting the arrays: - -=> select jsonb_path_query(:'json', 'strict $.track.segments[*].location ?(@[*] > 15)'); - jsonb_path_query -------------------- - [47.763, 13.4034] - [47.706, 13.2635] -(2 rows) - - - - - - SQL/JSON Path Operators and Methods - - - shows the operators and - methods available in jsonpath. Note that while the unary - operators and methods can be applied to multiple values resulting from a - preceding path step, the binary operators (addition etc.) can only be - applied to single values. 
In lax mode, methods applied to an array will be - executed for each value in the array. The exceptions are - .type() and .size(), which apply to - the array itself. - - - - <type>jsonpath</type> Operators and Methods - - - - - Operator/Method - - - Description - - - Example(s) - - - - - - - - number + number - number - - - Addition - - - jsonb_path_query('[2]', '$[0] + 3') - 5 - - - - - - + number - number - - - Unary plus (no operation); unlike addition, this can iterate over - multiple values - - - jsonb_path_query_array('{"x": [2,3,4]}', '+ $.x') - [2, 3, 4] - - - - - - number - number - number - - - Subtraction - - - jsonb_path_query('[2]', '7 - $[0]') - 5 - - - - - - - number - number - - - Negation; unlike subtraction, this can iterate over - multiple values - - - jsonb_path_query_array('{"x": [2,3,4]}', '- $.x') - [-2, -3, -4] - - - - - - number * number - number - - - Multiplication - - - jsonb_path_query('[4]', '2 * $[0]') - 8 - - - - - - number / number - number - - - Division - - - jsonb_path_query('[8.5]', '$[0] / 2') - 4.2500000000000000 - - - - - - number % number - number - - - Modulo (remainder) - - - jsonb_path_query('[32]', '$[0] % 10') - 2 - - - - - - value . type() - string - - - Type of the JSON item (see json_typeof) - - - jsonb_path_query_array('[1, "2", {}]', '$[*].type()') - ["number", "string", "object"] - - - - - - value . size() - number - - - Size of the JSON item (number of array elements, or 1 if not an - array) - - - jsonb_path_query('{"m": [11, 15]}', '$.m.size()') - 2 - - - - - - value . boolean() - boolean - - - Boolean value converted from a JSON boolean, number, or string - - - jsonb_path_query_array('[1, "yes", false]', '$[*].boolean()') - [true, true, false] - - - - - - value . string() - string - - - String value converted from a JSON boolean, number, string, or - datetime - - - jsonb_path_query_array('[1.23, "xyz", false]', '$[*].string()') - ["1.23", "xyz", "false"] - - - jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp().string()') - "2023-08-15T12:34:56" - - - - - - value . double() - number - - - Approximate floating-point number converted from a JSON number or - string - - - jsonb_path_query('{"len": "1.9"}', '$.len.double() * 2') - 3.8 - - - - - - number . ceiling() - number - - - Nearest integer greater than or equal to the given number - - - jsonb_path_query('{"h": 1.3}', '$.h.ceiling()') - 2 - - - - - - number . floor() - number - - - Nearest integer less than or equal to the given number - - - jsonb_path_query('{"h": 1.7}', '$.h.floor()') - 1 - - - - - - number . abs() - number - - - Absolute value of the given number - - - jsonb_path_query('{"z": -0.3}', '$.z.abs()') - 0.3 - - - - - - value . bigint() - bigint - - - Big integer value converted from a JSON number or string - - - jsonb_path_query('{"len": "9876543219"}', '$.len.bigint()') - 9876543219 - - - - - - value . decimal( [ precision [ , scale ] ] ) - decimal - - - Rounded decimal value converted from a JSON number or string - (precision and scale must be - integer values) - - - jsonb_path_query('1234.5678', '$.decimal(6, 2)') - 1234.57 - - - - - - value . integer() - integer - - - Integer value converted from a JSON number or string - - - jsonb_path_query('{"len": "12345"}', '$.len.integer()') - 12345 - - - - - - value . number() - numeric - - - Numeric value converted from a JSON number or string - - - jsonb_path_query('{"len": "123.45"}', '$.len.number()') - 123.45 - - - - - - string . 
datetime() - datetime_type - (see note) - - - Date/time value converted from a string - - - jsonb_path_query('["2015-8-1", "2015-08-12"]', '$[*] ? (@.datetime() < "2015-08-2".datetime())') - "2015-8-1" - - - - - - string . datetime(template) - datetime_type - (see note) - - - Date/time value converted from a string using the - specified to_timestamp template - - - jsonb_path_query_array('["12:30", "18:40"]', '$[*].datetime("HH24:MI")') - ["12:30:00", "18:40:00"] - - - - - - string . date() - date - - - Date value converted from a string - - - jsonb_path_query('"2023-08-15"', '$.date()') - "2023-08-15" - - - - - - string . time() - time without time zone - - - Time without time zone value converted from a string - - - jsonb_path_query('"12:34:56"', '$.time()') - "12:34:56" - - - - - - string . time(precision) - time without time zone - - - Time without time zone value converted from a string, with fractional - seconds adjusted to the given precision - - - jsonb_path_query('"12:34:56.789"', '$.time(2)') - "12:34:56.79" - - - - - - string . time_tz() - time with time zone - - - Time with time zone value converted from a string - - - jsonb_path_query('"12:34:56 +05:30"', '$.time_tz()') - "12:34:56+05:30" - - - - - - string . time_tz(precision) - time with time zone - - - Time with time zone value converted from a string, with fractional - seconds adjusted to the given precision - - - jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(2)') - "12:34:56.79+05:30" - - - - - - string . timestamp() - timestamp without time zone - - - Timestamp without time zone value converted from a string - - - jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp()') - "2023-08-15T12:34:56" - - - - - - string . timestamp(precision) - timestamp without time zone - - - Timestamp without time zone value converted from a string, with - fractional seconds adjusted to the given precision - - - jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(2)') - "2023-08-15T12:34:56.79" - - - - - - string . timestamp_tz() - timestamp with time zone - - - Timestamp with time zone value converted from a string - - - jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp_tz()') - "2023-08-15T12:34:56+05:30" - - - - - - string . timestamp_tz(precision) - timestamp with time zone - - - Timestamp with time zone value converted from a string, with fractional - seconds adjusted to the given precision - - - jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(2)') - "2023-08-15T12:34:56.79+05:30" - - - - - - object . keyvalue() - array - - - The object's key-value pairs, represented as an array of objects - containing three fields: "key", - "value", and "id"; - "id" is a unique identifier of the object the - key-value pair belongs to - - - jsonb_path_query_array('{"x": "20", "y": 32}', '$.keyvalue()') - [{"id": 0, "key": "x", "value": "20"}, {"id": 0, "key": "y", "value": 32}] - - - - -
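Methods and operators can be combined within one path expression: a method call can serve as the operand of an arithmetic operator, and methods such as type() are also usable inside filter expressions. Two illustrative queries consistent with the entries above (expected output shown):

=> select jsonb_path_query('{"h": 1.7}', '$.h.ceiling() * 10');
 jsonb_path_query
------------------
 20

=> select jsonb_path_query_array('[1, "a", true]', '$[*] ? (@.type() == "number")');
 jsonb_path_query_array
------------------------
 [1]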
- - - - The result type of the datetime() and - datetime(template) - methods can be date, timetz, time, - timestamptz, or timestamp. - Both methods determine their result type dynamically. - - - The datetime() method sequentially tries to - match its input string to the ISO formats - for date, timetz, time, - timestamptz, and timestamp. It stops on - the first matching format and emits the corresponding data type. - - - The datetime(template) - method determines the result type according to the fields used in the - provided template string. - - - The datetime() and - datetime(template) methods - use the same parsing rules as the to_timestamp SQL - function does (see ), with three - exceptions. First, these methods don't allow unmatched template - patterns. Second, only the following separators are allowed in the - template string: minus sign, period, solidus (slash), comma, apostrophe, - semicolon, colon and space. Third, separators in the template string - must exactly match the input string. - - - If different date/time types need to be compared, an implicit cast is - applied. A date value can be cast to timestamp - or timestamptz, timestamp can be cast to - timestamptz, and time to timetz. - However, all but the first of these conversions depend on the current - setting, and thus can only be performed - within timezone-aware jsonpath functions. Similarly, other - date/time-related methods that convert strings to date/time types - also do this casting, which may involve the current - setting. Therefore, these conversions can - also only be performed within timezone-aware jsonpath - functions. - - - - - shows the available - filter expression elements. - - - - <type>jsonpath</type> Filter Expression Elements - - - - - Predicate/Value - - - Description - - - Example(s) - - - - - - - - value == value - boolean - - - Equality comparison (this, and the other comparison operators, work on - all JSON scalar values) - - - jsonb_path_query_array('[1, "a", 1, 3]', '$[*] ? (@ == 1)') - [1, 1] - - - jsonb_path_query_array('[1, "a", 1, 3]', '$[*] ? (@ == "a")') - ["a"] - - - - - - value != value - boolean - - - value <> value - boolean - - - Non-equality comparison - - - jsonb_path_query_array('[1, 2, 1, 3]', '$[*] ? (@ != 1)') - [2, 3] - - - jsonb_path_query_array('["a", "b", "c"]', '$[*] ? (@ <> "b")') - ["a", "c"] - - - - - - value < value - boolean - - - Less-than comparison - - - jsonb_path_query_array('[1, 2, 3]', '$[*] ? (@ < 2)') - [1] - - - - - - value <= value - boolean - - - Less-than-or-equal-to comparison - - - jsonb_path_query_array('["a", "b", "c"]', '$[*] ? (@ <= "b")') - ["a", "b"] - - - - - - value > value - boolean - - - Greater-than comparison - - - jsonb_path_query_array('[1, 2, 3]', '$[*] ? (@ > 2)') - [3] - - - - - - value >= value - boolean - - - Greater-than-or-equal-to comparison - - - jsonb_path_query_array('[1, 2, 3]', '$[*] ? (@ >= 2)') - [2, 3] - - - - - - true - boolean - - - JSON constant true - - - jsonb_path_query('[{"name": "John", "parent": false}, {"name": "Chris", "parent": true}]', '$[*] ? (@.parent == true)') - {"name": "Chris", "parent": true} - - - - - - false - boolean - - - JSON constant false - - - jsonb_path_query('[{"name": "John", "parent": false}, {"name": "Chris", "parent": true}]', '$[*] ? 
(@.parent == false)') - {"name": "John", "parent": false} - - - - - - null - value - - - JSON constant null (note that, unlike in SQL, - comparison to null works normally) - - - jsonb_path_query('[{"name": "Mary", "job": null}, {"name": "Michael", "job": "driver"}]', '$[*] ? (@.job == null) .name') - "Mary" - - - - - - boolean && boolean - boolean - - - Boolean AND - - - jsonb_path_query('[1, 3, 7]', '$[*] ? (@ > 1 && @ < 5)') - 3 - - - - - - boolean || boolean - boolean - - - Boolean OR - - - jsonb_path_query('[1, 3, 7]', '$[*] ? (@ < 1 || @ > 5)') - 7 - - - - - - ! boolean - boolean - - - Boolean NOT - - - jsonb_path_query('[1, 3, 7]', '$[*] ? (!(@ < 5))') - 7 - - - - - - boolean is unknown - boolean - - - Tests whether a Boolean condition is unknown. - - - jsonb_path_query('[-1, 2, 7, "foo"]', '$[*] ? ((@ > 0) is unknown)') - "foo" - - - - - - string like_regex string flag string - boolean - - - Tests whether the first operand matches the regular expression - given by the second operand, optionally with modifications - described by a string of flag characters (see - ). - - - jsonb_path_query_array('["abc", "abd", "aBdC", "abdacb", "babc"]', '$[*] ? (@ like_regex "^ab.*c")') - ["abc", "abdacb"] - - - jsonb_path_query_array('["abc", "abd", "aBdC", "abdacb", "babc"]', '$[*] ? (@ like_regex "^ab.*c" flag "i")') - ["abc", "aBdC", "abdacb"] - - - - - - string starts with string - boolean - - - Tests whether the second operand is an initial substring of the first - operand. - - - jsonb_path_query('["John Smith", "Mary Stone", "Bob Johnson"]', '$[*] ? (@ starts with "John")') - "John Smith" - - - - - - exists ( path_expression ) - boolean - - - Tests whether a path expression matches at least one SQL/JSON item. - Returns unknown if the path expression would result - in an error; the second example uses this to avoid a no-such-key error - in strict mode. - - - jsonb_path_query('{"x": [1, 2], "y": [2, 4]}', 'strict $.* ? (exists (@ ? (@[*] > 2)))') - [2, 4] - - - jsonb_path_query_array('{"value": 41}', 'strict $ ? (exists (@.name)) .name') - [] - - - - -
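Several of these predicates can be combined with the Boolean operators inside a single filter. An illustrative query (expected output shown):

=> select jsonb_path_query_array(
     '["apple", "avocado", "banana"]',
     '$[*] ? (@ starts with "a" && @ like_regex "o$")');
 jsonb_path_query_array
------------------------
 ["avocado"]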
- - - SQL/JSON Regular Expressions - - - LIKE_REGEX - in SQL/JSON - - - - SQL/JSON path expressions allow matching text to a regular expression - with the like_regex filter. For example, the - following SQL/JSON path query would case-insensitively match all - strings in an array that start with an English vowel: - -$[*] ? (@ like_regex "^[aeiou]" flag "i") - - - - - The optional flag string may include one or more of - the characters - i for case-insensitive match, - m to allow ^ - and $ to match at newlines, - s to allow . to match a newline, - and q to quote the whole pattern (reducing the - behavior to a simple substring match). - - - - The SQL/JSON standard borrows its definition for regular expressions - from the LIKE_REGEX operator, which in turn uses the - XQuery standard. PostgreSQL does not currently support the - LIKE_REGEX operator. Therefore, - the like_regex filter is implemented using the - POSIX regular expression engine described in - . This leads to various minor - discrepancies from standard SQL/JSON behavior, which are cataloged in - . - Note, however, that the flag-letter incompatibilities described there - do not apply to SQL/JSON, as it translates the XQuery flag letters to - match what the POSIX engine expects. - - - - Keep in mind that the pattern argument of like_regex - is a JSON path string literal, written according to the rules given in - . This means in particular that any - backslashes you want to use in the regular expression must be doubled. - For example, to match string values of the root document that contain - only digits: - -$.* ? (@ like_regex "^\\d+$") - - - -
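For instance, the doubled-backslash rule can be observed directly (an illustrative query, assuming the default standard_conforming_strings = on; expected output shown):

=> select jsonb_path_query_array('["42", "4a2"]', '$[*] ? (@ like_regex "^\\d+$")');
 jsonb_path_query_array
------------------------
 ["42"]

The SQL string literal passes the two backslashes through to the jsonpath parser, which collapses them into one, yielding the regular expression ^\d+$.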
- - - SQL/JSON Query Functions - - SQL/JSON functions JSON_EXISTS(), - JSON_QUERY(), and JSON_VALUE() - described in can be used - to query JSON documents. Each of these functions apply a - path_expression (an SQL/JSON path query) to a - context_item (the document). See - for more details on what - the path_expression can contain. The - path_expression can also reference variables, - whose values are specified with their respective names in the - PASSING clause that is supported by each function. - context_item can be a jsonb value - or a character string that can be successfully cast to jsonb. - - - - SQL/JSON Query Functions - - - - - Function signature - - - Description - - - Example(s) - - - - - - - json_exists - -JSON_EXISTS ( -context_item, path_expression - PASSING { value AS varname } , ... -{ TRUE | FALSE | UNKNOWN | ERROR } ON ERROR ) boolean - - - - - - Returns true if the SQL/JSON path_expression - applied to the context_item yields any - items, false otherwise. - - - - - The ON ERROR clause specifies the behavior if - an error occurs during path_expression - evaluation. Specifying ERROR will cause an error to - be thrown with the appropriate message. Other options include - returning boolean values FALSE or - TRUE or the value UNKNOWN which - is actually an SQL NULL. The default when no ON ERROR - clause is specified is to return the boolean value - FALSE. - - - - - Examples: - - - JSON_EXISTS(jsonb '{"key1": [1,2,3]}', 'strict $.key1[*] ? (@ > $x)' PASSING 2 AS x) - t - - - JSON_EXISTS(jsonb '{"a": [1,2,3]}', 'lax $.a[5]' ERROR ON ERROR) - f - - - JSON_EXISTS(jsonb '{"a": [1,2,3]}', 'strict $.a[5]' ERROR ON ERROR) - - -ERROR: jsonpath array subscript is out of bounds - - - - - - json_query - -JSON_QUERY ( -context_item, path_expression - PASSING { value AS varname } , ... - RETURNING data_type FORMAT JSON ENCODING UTF8 - { WITHOUT | WITH { CONDITIONAL | UNCONDITIONAL } } ARRAY WRAPPER - { KEEP | OMIT } QUOTES ON SCALAR STRING - { ERROR | NULL | EMPTY { ARRAY | OBJECT } | DEFAULT expression } ON EMPTY - { ERROR | NULL | EMPTY { ARRAY | OBJECT } | DEFAULT expression } ON ERROR ) jsonb - - - - - - Returns the result of applying the SQL/JSON - path_expression to the - context_item. - - - - - By default, the result is returned as a value of type jsonb, - though the RETURNING clause can be used to return - as some other type to which it can be successfully coerced. - - - - - If the path expression may return multiple values, it might be necessary - to wrap those values using the WITH WRAPPER clause to - make it a valid JSON string, because the default behavior is to not wrap - them, as if WITHOUT WRAPPER were specified. The - WITH WRAPPER clause is by default taken to mean - WITH UNCONDITIONAL WRAPPER, which means that even a - single result value will be wrapped. To apply the wrapper only when - multiple values are present, specify WITH CONDITIONAL WRAPPER. - Getting multiple values in result will be treated as an error if - WITHOUT WRAPPER is specified. - - - - - If the result is a scalar string, by default, the returned value will - be surrounded by quotes, making it a valid JSON value. It can be made - explicit by specifying KEEP QUOTES. Conversely, - quotes can be omitted by specifying OMIT QUOTES. - To ensure that the result is a valid JSON value, OMIT QUOTES - cannot be specified when WITH WRAPPER is also - specified. - - - - - The ON EMPTY clause specifies the behavior if - evaluating path_expression yields an empty - set. 
The ON ERROR clause specifies the behavior - if an error occurs when evaluating path_expression, - when coercing the result value to the RETURNING type, - or when evaluating the ON EMPTY expression if the - path_expression evaluation returns an empty - set. - - - - - For both ON EMPTY and ON ERROR, - specifying ERROR will cause an error to be thrown with - the appropriate message. Other options include returning an SQL NULL, an - empty array (EMPTY ARRAY), - an empty object (EMPTY OBJECT), or a user-specified - expression (DEFAULT expression) - that can be coerced to jsonb or the type specified in RETURNING. - The default when ON EMPTY or ON ERROR - is not specified is to return an SQL NULL value. - - - - - Examples: - - - JSON_QUERY(jsonb '[1,[2,3],null]', 'lax $[*][$off]' PASSING 1 AS off WITH CONDITIONAL WRAPPER) - 3 - - - JSON_QUERY(jsonb '{"a": "[1, 2]"}', 'lax $.a' OMIT QUOTES) - [1, 2] - - - JSON_QUERY(jsonb '{"a": "[1, 2]"}', 'lax $.a' RETURNING int[] OMIT QUOTES ERROR ON ERROR) - - -ERROR: malformed array literal: "[1, 2]" -DETAIL: Missing "]" after array dimensions. - - - - - - - json_value - -JSON_VALUE ( -context_item, path_expression - PASSING { value AS varname } , ... - RETURNING data_type - { ERROR | NULL | DEFAULT expression } ON EMPTY - { ERROR | NULL | DEFAULT expression } ON ERROR ) text - - - - - - Returns the result of applying the SQL/JSON - path_expression to the - context_item. - - - - - Only use JSON_VALUE() if the extracted value is - expected to be a single SQL/JSON scalar item; - getting multiple values will be treated as an error. If you expect that - extracted value might be an object or an array, use the - JSON_QUERY function instead. - - - - - By default, the result, which must be a single scalar value, is - returned as a value of type text, though the - RETURNING clause can be used to return as some - other type to which it can be successfully coerced. - - - - - The ON ERROR and ON EMPTY - clauses have similar semantics as mentioned in the description of - JSON_QUERY, except the set of values returned in - lieu of throwing an error is different. - - - - - Note that scalar strings returned by JSON_VALUE - always have their quotes removed, equivalent to specifying - OMIT QUOTES in JSON_QUERY. - - - - - Examples: - - - JSON_VALUE(jsonb '"123.45"', '$' RETURNING float) - 123.45 - - - JSON_VALUE(jsonb '"03:04 2015-02-01"', '$.datetime("HH24:MI YYYY-MM-DD")' RETURNING date) - 2015-02-01 - - - JSON_VALUE(jsonb '[1,2]', 'strict $[$off]' PASSING 1 as off) - 2 - - - JSON_VALUE(jsonb '[1,2]', 'strict $[*]' DEFAULT 9 ON ERROR) - 9 - - - - - -
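To summarize the division of labor between JSON_QUERY and JSON_VALUE, compare their behavior on a non-scalar result. In the illustrative session below, the JSON_VALUE call falls back to the default NULL ON ERROR behavior, because the matched item is an array rather than a scalar (expected output shown):

=> select JSON_VALUE(jsonb '{"a": [1, 2]}', '$.a');
 json_value
------------

(1 row)

=> select JSON_QUERY(jsonb '{"a": [1, 2]}', '$.a');
 json_query
------------
 [1, 2]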
- - - The context_item expression is converted to - jsonb by an implicit cast if the expression is not already of - type jsonb. Note, however, that any parsing errors that occur - during that conversion are thrown unconditionally, that is, are not - handled according to the (specified or implicit) ON ERROR - clause. - - - - - JSON_VALUE() returns an SQL NULL if - path_expression returns a JSON - null, whereas JSON_QUERY() returns - the JSON null as is. - - -
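This difference can be checked directly (an illustrative query; expected output shown):

=> select JSON_VALUE(jsonb '{"a": null}', '$.a') is null as sql_null,
          JSON_QUERY(jsonb '{"a": null}', '$.a') as json_null;
 sql_null | json_null
----------+-----------
 t        | null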
- - - JSON_TABLE - - json_table - - - - JSON_TABLE is an SQL/JSON function which - queries JSON data - and presents the results as a relational view, which can be accessed as a - regular SQL table. You can use JSON_TABLE inside - the FROM clause of a SELECT, - UPDATE, or DELETE and as data source - in a MERGE statement. - - - - Taking JSON data as input, JSON_TABLE uses a JSON path - expression to extract a part of the provided data to use as a - row pattern for the constructed view. Each SQL/JSON - value given by the row pattern serves as source for a separate row in the - constructed view. - - - - To split the row pattern into columns, JSON_TABLE - provides the COLUMNS clause that defines the - schema of the created view. For each column, a separate JSON path expression - can be specified to be evaluated against the row pattern to get an SQL/JSON - value that will become the value for the specified column in a given output - row. - - - - JSON data stored at a nested level of the row pattern can be extracted using - the NESTED PATH clause. Each - NESTED PATH clause can be used to generate one or more - columns using the data from a nested level of the row pattern. Those - columns can be specified using a COLUMNS clause that - looks similar to the top-level COLUMNS clause. Rows constructed from - NESTED COLUMNS are called child rows and are joined - against the row constructed from the columns specified in the parent - COLUMNS clause to get the row in the final view. Child - columns themselves may contain a NESTED PATH - specification thus allowing to extract data located at arbitrary nesting - levels. Columns produced by multiple NESTED PATHs at the - same level are considered to be siblings of each - other and their rows after joining with the parent row are combined using - UNION. - - - - The rows produced by JSON_TABLE are laterally - joined to the row that generated them, so you do not have to explicitly join - the constructed view with the original table holding JSON - data. - - - - The syntax is: - - - -JSON_TABLE ( - context_item, path_expression AS json_path_name PASSING { value AS varname } , ... - COLUMNS ( json_table_column , ... ) - { ERROR | EMPTY ARRAY} ON ERROR -) - - -where json_table_column is: - - name FOR ORDINALITY - | name type - FORMAT JSON ENCODING UTF8 - PATH path_expression - { WITHOUT | WITH { CONDITIONAL | UNCONDITIONAL } } ARRAY WRAPPER - { KEEP | OMIT } QUOTES ON SCALAR STRING - { ERROR | NULL | EMPTY { ARRAY | OBJECT } | DEFAULT expression } ON EMPTY - { ERROR | NULL | EMPTY { ARRAY | OBJECT } | DEFAULT expression } ON ERROR - | name type EXISTS PATH path_expression - { ERROR | TRUE | FALSE | UNKNOWN } ON ERROR - | NESTED PATH path_expression AS json_path_name COLUMNS ( json_table_column , ... ) - - - - Each syntax element is described below in more detail. - - - - - - context_item, path_expression AS json_path_name PASSING { value AS varname } , ... - - - - The context_item specifies the input document - to query, the path_expression is an SQL/JSON - path expression defining the query, and json_path_name - is an optional name for the path_expression. - The optional PASSING clause provides data values for - the variables mentioned in the path_expression. - The result of the input data evaluation using the aforementioned elements - is called the row pattern, which is used as the - source for row values in the constructed view. - - - - - - - COLUMNS ( json_table_column , ... ) - - - - - The COLUMNS clause defining the schema of the - constructed view. 
In this clause, you can specify each column to be - filled with an SQL/JSON value obtained by applying a JSON path expression - against the row pattern. json_table_column has - the following variants: - - - - - - name FOR ORDINALITY - - - - Adds an ordinality column that provides sequential row numbering starting - from 1. Each NESTED PATH (see below) gets its own - counter for any nested ordinality columns. - - - - - - - name type - FORMAT JSON ENCODING UTF8 - PATH path_expression - - - - Inserts an SQL/JSON value obtained by applying - path_expression against the row pattern into - the view's output row after coercing it to specified - type. - - - Specifying FORMAT JSON makes it explicit that you - expect the value to be a valid json object. It only - makes sense to specify FORMAT JSON if - type is one of bpchar, - bytea, character varying, name, - json, jsonb, text, or a domain over - these types. - - - Optionally, you can specify WRAPPER and - QUOTES clauses to format the output. Note that - specifying OMIT QUOTES overrides - FORMAT JSON if also specified, because unquoted - literals do not constitute valid json values. - - - Optionally, you can use ON EMPTY and - ON ERROR clauses to specify whether to throw the error - or return the specified value when the result of JSON path evaluation is - empty and when an error occurs during JSON path evaluation or when - coercing the SQL/JSON value to the specified type, respectively. The - default for both is to return a NULL value. - - - - This clause is internally turned into and has the same semantics as - JSON_VALUE or JSON_QUERY. - The latter if the specified type is not a scalar type or if either of - FORMAT JSON, WRAPPER, or - QUOTES clause is present. - - - - - - - - name type - EXISTS PATH path_expression - - - - Inserts a boolean value obtained by applying - path_expression against the row pattern - into the view's output row after coercing it to specified - type. - - - The value corresponds to whether applying the PATH - expression to the row pattern yields any values. - - - The specified type should have a cast from the - boolean type. - - - Optionally, you can use ON ERROR to specify whether to - throw the error or return the specified value when an error occurs during - JSON path evaluation or when coercing SQL/JSON value to the specified - type. The default is to return a boolean value - FALSE. - - - - This clause is internally turned into and has the same semantics as - JSON_EXISTS. - - - - - - - - NESTED PATH path_expression AS json_path_name - COLUMNS ( json_table_column , ... ) - - - - - Extracts SQL/JSON values from nested levels of the row pattern, - generates one or more columns as defined by the COLUMNS - subclause, and inserts the extracted SQL/JSON values into those - columns. The json_table_column - expression in the COLUMNS subclause uses the same - syntax as in the parent COLUMNS clause. - - - - The NESTED PATH syntax is recursive, - so you can go down multiple nested levels by specifying several - NESTED PATH subclauses within each other. - It allows to unnest the hierarchy of JSON objects and arrays - in a single function invocation rather than chaining several - JSON_TABLE expressions in an SQL statement. - - - - - - - - In each variant of json_table_column described - above, if the PATH clause is omitted, path expression - $.name is used, where - name is the provided column name. - - - - - - - - - AS json_path_name - - - - - The optional json_path_name serves as an - identifier of the provided path_expression. 
- The name must be unique and distinct from the column names. - - - - - - - { ERROR | EMPTY } ON ERROR - - - - - The optional ON ERROR can be used to specify how to - handle errors when evaluating the top-level - path_expression. Use ERROR - if you want the errors to be thrown and EMPTY to - return an empty table, that is, a table containing 0 rows. Note that - this clause does not affect the errors that occur when evaluating - columns, for which the behavior depends on whether the - ON ERROR clause is specified against a given column. - - - - - - Examples - - - In the examples that follow, the following table containing JSON data - will be used: - - -CREATE TABLE my_films ( js jsonb ); - -INSERT INTO my_films VALUES ( -'{ "favorites" : [ - { "kind" : "comedy", "films" : [ - { "title" : "Bananas", - "director" : "Woody Allen"}, - { "title" : "The Dinner Game", - "director" : "Francis Veber" } ] }, - { "kind" : "horror", "films" : [ - { "title" : "Psycho", - "director" : "Alfred Hitchcock" } ] }, - { "kind" : "thriller", "films" : [ - { "title" : "Vertigo", - "director" : "Alfred Hitchcock" } ] }, - { "kind" : "drama", "films" : [ - { "title" : "Yojimbo", - "director" : "Akira Kurosawa" } ] } - ] }'); - - - - - The following query shows how to use JSON_TABLE to - turn the JSON objects in the my_films table - to a view containing columns for the keys kind, - title, and director contained in - the original JSON along with an ordinality column: - - -SELECT jt.* FROM - my_films, - JSON_TABLE (js, '$.favorites[*]' COLUMNS ( - id FOR ORDINALITY, - kind text PATH '$.kind', - title text PATH '$.films[*].title' WITH WRAPPER, - director text PATH '$.films[*].director' WITH WRAPPER)) AS jt; - - - - id | kind | title | director -----+----------+--------------------------------+---------------------------------- - 1 | comedy | ["Bananas", "The Dinner Game"] | ["Woody Allen", "Francis Veber"] - 2 | horror | ["Psycho"] | ["Alfred Hitchcock"] - 3 | thriller | ["Vertigo"] | ["Alfred Hitchcock"] - 4 | drama | ["Yojimbo"] | ["Akira Kurosawa"] -(4 rows) - - - - - The following is a modified version of the above query to show the - usage of PASSING arguments in the filter specified in - the top-level JSON path expression and the various options for the - individual columns: - - -SELECT jt.* FROM - my_films, - JSON_TABLE (js, '$.favorites[*] ? (@.films[*].director == $filter)' - PASSING 'Alfred Hitchcock' AS filter - COLUMNS ( - id FOR ORDINALITY, - kind text PATH '$.kind', - title text FORMAT JSON PATH '$.films[*].title' OMIT QUOTES, - director text PATH '$.films[*].director' KEEP QUOTES)) AS jt; - - - - id | kind | title | director -----+----------+---------+-------------------- - 1 | horror | Psycho | "Alfred Hitchcock" - 2 | thriller | Vertigo | "Alfred Hitchcock" -(2 rows) - - - - - The following is a modified version of the above query to show the usage - of NESTED PATH for populating title and director - columns, illustrating how they are joined to the parent columns id and - kind: - - -SELECT jt.* FROM - my_films, - JSON_TABLE ( js, '$.favorites[*] ? 
(@.films[*].director == $filter)' - PASSING 'Alfred Hitchcock' AS filter - COLUMNS ( - id FOR ORDINALITY, - kind text PATH '$.kind', - NESTED PATH '$.films[*]' COLUMNS ( - title text FORMAT JSON PATH '$.title' OMIT QUOTES, - director text PATH '$.director' KEEP QUOTES))) AS jt; - - - - id | kind | title | director -----+----------+---------+-------------------- - 1 | horror | Psycho | "Alfred Hitchcock" - 2 | thriller | Vertigo | "Alfred Hitchcock" -(2 rows) - - - - - - The following is the same query but without the filter in the root - path: - - -SELECT jt.* FROM - my_films, - JSON_TABLE ( js, '$.favorites[*]' - COLUMNS ( - id FOR ORDINALITY, - kind text PATH '$.kind', - NESTED PATH '$.films[*]' COLUMNS ( - title text FORMAT JSON PATH '$.title' OMIT QUOTES, - director text PATH '$.director' KEEP QUOTES))) AS jt; - - - - id | kind | title | director -----+----------+-----------------+-------------------- - 1 | comedy | Bananas | "Woody Allen" - 1 | comedy | The Dinner Game | "Francis Veber" - 2 | horror | Psycho | "Alfred Hitchcock" - 3 | thriller | Vertigo | "Alfred Hitchcock" - 4 | drama | Yojimbo | "Akira Kurosawa" -(5 rows) - - - - - - The following shows another query using a different JSON - object as input. It shows the UNION "sibling join" between - NESTED paths $.movies[*] and - $.books[*] and also the usage of - FOR ORDINALITY column at NESTED - levels (columns movie_id, book_id, - and author_id): - - -SELECT * FROM JSON_TABLE ( -'{"favorites": - [{"movies": - [{"name": "One", "director": "John Doe"}, - {"name": "Two", "director": "Don Joe"}], - "books": - [{"name": "Mystery", "authors": [{"name": "Brown Dan"}]}, - {"name": "Wonder", "authors": [{"name": "Jun Murakami"}, {"name":"Craig Doe"}]}] -}]}'::json, '$.favorites[*]' -COLUMNS ( - user_id FOR ORDINALITY, - NESTED '$.movies[*]' - COLUMNS ( - movie_id FOR ORDINALITY, - mname text PATH '$.name', - director text), - NESTED '$.books[*]' - COLUMNS ( - book_id FOR ORDINALITY, - bname text PATH '$.name', - NESTED '$.authors[*]' - COLUMNS ( - author_id FOR ORDINALITY, - author_name text PATH '$.name')))); - - - - user_id | movie_id | mname | director | book_id | bname | author_id | author_name ----------+----------+-------+----------+---------+---------+-----------+-------------- - 1 | 1 | One | John Doe | | | | - 1 | 2 | Two | Don Joe | | | | - 1 | | | | 1 | Mystery | 1 | Brown Dan - 1 | | | | 2 | Wonder | 1 | Jun Murakami - 1 | | | | 2 | Wonder | 2 | Craig Doe -(5 rows) - - - - -
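 The examples above do not exercise the top-level ON ERROR
 clause. A minimal sketch, using the strict JSON path mode (in which
 referencing a missing key is an error; the exact error text may vary):

SELECT * FROM JSON_TABLE(jsonb '{"a": 1}', 'strict $.b'
                         COLUMNS (b int PATH '$'));

 b
---
(0 rows)

SELECT * FROM JSON_TABLE(jsonb '{"a": 1}', 'strict $.b'
                         COLUMNS (b int PATH '$')
                         ERROR ON ERROR);
ERROR:  JSON object does not contain key "b"

 Without ERROR ON ERROR the error is suppressed and an empty table is
 returned; with it, the error is thrown.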
- - - Sequence Manipulation Functions - - - sequence - - - - This section describes functions for operating on sequence - objects, also called sequence generators or just sequences. - Sequence objects are special single-row tables created with . - Sequence objects are commonly used to generate unique identifiers - for rows of a table. The sequence functions, listed in , provide simple, multiuser-safe - methods for obtaining successive sequence values from sequence - objects. - - - - Sequence Functions - - - - - Function - - - Description - - - - - - - - - nextval - - nextval ( regclass ) - bigint - - - Advances the sequence object to its next value and returns that value. - This is done atomically: even if multiple sessions - execute nextval concurrently, each will safely - receive a distinct sequence value. - If the sequence object has been created with default parameters, - successive nextval calls will return successive - values beginning with 1. Other behaviors can be obtained by using - appropriate parameters in the - command. - - - This function requires USAGE - or UPDATE privilege on the sequence. - - - - - - - setval - - setval ( regclass, bigint , boolean ) - bigint - - - Sets the sequence object's current value, and optionally - its is_called flag. The two-parameter - form sets the sequence's last_value field to the - specified value and sets its is_called field to - true, meaning that the next - nextval will advance the sequence before - returning a value. The value that will be reported - by currval is also set to the specified value. - In the three-parameter form, is_called can be set - to either true - or false. true has the same - effect as the two-parameter form. If it is set - to false, the next nextval - will return exactly the specified value, and sequence advancement - commences with the following nextval. - Furthermore, the value reported by currval is not - changed in this case. For example, - -SELECT setval('myseq', 42); Next nextval will return 43 -SELECT setval('myseq', 42, true); Same as above -SELECT setval('myseq', 42, false); Next nextval will return 42 - - The result returned by setval is just the value of its - second argument. - - - This function requires UPDATE privilege on the - sequence. - - - - - - - currval - - currval ( regclass ) - bigint - - - Returns the value most recently obtained - by nextval for this sequence in the current - session. (An error is reported if nextval has - never been called for this sequence in this session.) Because this is - returning a session-local value, it gives a predictable answer whether - or not other sessions have executed nextval since - the current session did. - - - This function requires USAGE - or SELECT privilege on the sequence. - - - - - - - lastval - - lastval () - bigint - - - Returns the value most recently returned by - nextval in the current session. This function is - identical to currval, except that instead - of taking the sequence name as an argument it refers to whichever - sequence nextval was most recently applied to - in the current session. It is an error to call - lastval if nextval - has not yet been called in the current session. - - - This function requires USAGE - or SELECT privilege on the last used sequence. - - - - -
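 A short session tying these functions together, using a throwaway
 sequence:

CREATE SEQUENCE myseq;

SELECT nextval('myseq');            -- returns 1
SELECT nextval('myseq');            -- returns 2
SELECT currval('myseq');            -- returns 2, regardless of other sessions
SELECT setval('myseq', 42, false);  -- returns 42; is_called is now false
SELECT nextval('myseq');            -- returns exactly 42
SELECT lastval();                   -- returns 42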
- - - - To avoid blocking concurrent transactions that obtain numbers from - the same sequence, the value obtained by nextval - is not reclaimed for re-use if the calling transaction later aborts. - This means that transaction aborts or database crashes can result in - gaps in the sequence of assigned values. That can happen without a - transaction abort, too. For example an INSERT with - an ON CONFLICT clause will compute the to-be-inserted - tuple, including doing any required nextval - calls, before detecting any conflict that would cause it to follow - the ON CONFLICT rule instead. - Thus, PostgreSQL sequence - objects cannot be used to obtain gapless - sequences. - - - - Likewise, sequence state changes made by setval - are immediately visible to other transactions, and are not undone if - the calling transaction rolls back. - - - - If the database cluster crashes before committing a transaction - containing a nextval - or setval call, the sequence state change might - not have made its way to persistent storage, so that it is uncertain - whether the sequence will have its original or updated state after the - cluster restarts. This is harmless for usage of the sequence within - the database, since other effects of uncommitted transactions will not - be visible either. However, if you wish to use a sequence value for - persistent outside-the-database purposes, make sure that the - nextval call has been committed before doing so. - - - - - The sequence to be operated on by a sequence function is specified by - a regclass argument, which is simply the OID of the sequence in the - pg_class system catalog. You do not have to look up the - OID by hand, however, since the regclass data type's input - converter will do the work for you. See - for details. - -
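 For example, the first two calls below are equivalent; the unadorned
 string literal is implicitly coerced to regclass, resolving the
 (possibly schema-qualified) name according to the current search_path.
 The last line shows the underlying OID:

SELECT nextval('myseq');
SELECT nextval('myseq'::regclass);
SELECT 'myseq'::regclass::oid;   -- the sequence's pg_class OID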
- - - - Conditional Expressions - - - CASE - - - - conditional expression - - - - This section describes the SQL-compliant conditional expressions - available in PostgreSQL. - - - - - If your needs go beyond the capabilities of these conditional - expressions, you might want to consider writing a server-side function - in a more expressive programming language. - - - - - - Although COALESCE, GREATEST, and - LEAST are syntactically similar to functions, they are - not ordinary functions, and thus cannot be used with explicit - VARIADIC array arguments. - - - - - <literal>CASE</literal> - - - The SQL CASE expression is a - generic conditional expression, similar to if/else statements in - other programming languages: - - -CASE WHEN condition THEN result - WHEN ... - ELSE result -END - - - CASE clauses can be used wherever - an expression is valid. Each condition is an - expression that returns a boolean result. If the condition's - result is true, the value of the CASE expression is the - result that follows the condition, and the - remainder of the CASE expression is not processed. If the - condition's result is not true, any subsequent WHEN clauses - are examined in the same manner. If no WHEN - condition yields true, the value of the - CASE expression is the result of the - ELSE clause. If the ELSE clause is - omitted and no condition is true, the result is null. - - - - An example: - -SELECT * FROM test; - - a ---- - 1 - 2 - 3 - - -SELECT a, - CASE WHEN a=1 THEN 'one' - WHEN a=2 THEN 'two' - ELSE 'other' - END - FROM test; - - a | case ----+------- - 1 | one - 2 | two - 3 | other - - - - - The data types of all the result - expressions must be convertible to a single output type. - See for more details. - - - - There is a simple form of CASE expression - that is a variant of the general form above: - - -CASE expression - WHEN value THEN result - WHEN ... - ELSE result -END - - - The first - expression is computed, then compared to - each of the value expressions in the - WHEN clauses until one is found that is equal to it. If - no match is found, the result of the - ELSE clause (or a null value) is returned. This is similar - to the switch statement in C. - - - - The example above can be written using the simple - CASE syntax: - -SELECT a, - CASE a WHEN 1 THEN 'one' - WHEN 2 THEN 'two' - ELSE 'other' - END - FROM test; - - a | case ----+------- - 1 | one - 2 | two - 3 | other - - - - - A CASE expression does not evaluate any subexpressions - that are not needed to determine the result. For example, this is a - possible way of avoiding a division-by-zero failure: - -SELECT ... WHERE CASE WHEN x <> 0 THEN y/x > 1.5 ELSE false END; - - - - - - As described in , there are various - situations in which subexpressions of an expression are evaluated at - different times, so that the principle that CASE - evaluates only necessary subexpressions is not ironclad. For - example a constant 1/0 subexpression will usually result in - a division-by-zero failure at planning time, even if it's within - a CASE arm that would never be entered at run time. - - - - - - <literal>COALESCE</literal> - - - COALESCE - - - - NVL - - - - IFNULL - - - -COALESCE(value , ...) - - - - The COALESCE function returns the first of its - arguments that is not null. Null is returned only if all arguments - are null. It is often used to substitute a default value for - null values when data is retrieved for display, for example: - -SELECT COALESCE(description, short_description, '(none)') ... 
- - This returns description if it is not null, otherwise - short_description if it is not null, otherwise (none). - - - - The arguments must all be convertible to a common data type, which - will be the type of the result (see - for details). - - - - Like a CASE expression, COALESCE only - evaluates the arguments that are needed to determine the result; - that is, arguments to the right of the first non-null argument are - not evaluated. This SQL-standard function provides capabilities similar - to NVL and IFNULL, which are used in some other - database systems. - - - - - <literal>NULLIF</literal> - - - NULLIF - - - -NULLIF(value1, value2) - - - - The NULLIF function returns a null value if - value1 equals value2; - otherwise it returns value1. - This can be used to perform the inverse operation of the - COALESCE example given above: - -SELECT NULLIF(value, '(none)') ... - - In this example, if value is (none), - null is returned, otherwise the value of value - is returned. - - - - The two arguments must be of comparable types. - To be specific, they are compared exactly as if you had - written value1 - = value2, so there must be a - suitable = operator available. - - - - The result has the same type as the first argument — but there is - a subtlety. What is actually returned is the first argument of the - implied = operator, and in some cases that will have - been promoted to match the second argument's type. For - example, NULLIF(1, 2.2) yields numeric, - because there is no integer = - numeric operator, - only numeric = numeric. - - - - - - <literal>GREATEST</literal> and <literal>LEAST</literal> - - - GREATEST - - - LEAST - - - -GREATEST(value , ...) - - -LEAST(value , ...) - - - - The GREATEST and LEAST functions select the - largest or smallest value from a list of any number of expressions. - The expressions must all be convertible to a common data type, which - will be the type of the result - (see for details). - - - - NULL values in the argument list are ignored. The result will be NULL - only if all the expressions evaluate to NULL. (This is a deviation from - the SQL standard. According to the standard, the return value is NULL if - any argument is NULL. Some other databases behave this way.) - - - - - - Array Functions and Operators - - - shows the specialized operators - available for array types. - In addition to those, the usual comparison operators shown in are available for - arrays. The comparison operators compare the array contents - element-by-element, using the default B-tree comparison function for - the element data type, and sort based on the first difference. - In multidimensional arrays the elements are visited in row-major order - (last subscript varies most rapidly). - If the contents of two arrays are equal but the dimensionality is - different, the first difference in the dimensionality information - determines the sort order. - - - - Array Operators - - - - - Operator - - - Description - - - Example(s) - - - - - - - - anyarray @> anyarray - boolean - - - Does the first array contain the second, that is, does each element - appearing in the second array equal some element of the first array? - (Duplicates are not treated specially, - thus ARRAY[1] and ARRAY[1,1] are - each considered to contain the other.) - - - ARRAY[1,4,3] @> ARRAY[3,1,3] - t - - - - - - anyarray <@ anyarray - boolean - - - Is the first array contained by the second? 
- - - ARRAY[2,2,7] <@ ARRAY[1,7,4,2,6] - t - - - - - - anyarray && anyarray - boolean - - - Do the arrays overlap, that is, have any elements in common? - - - ARRAY[1,4,3] && ARRAY[2,1] - t - - - - - - anycompatiblearray || anycompatiblearray - anycompatiblearray - - - Concatenates the two arrays. Concatenating a null or empty array is a - no-op; otherwise the arrays must have the same number of dimensions - (as illustrated by the first example) or differ in number of - dimensions by one (as illustrated by the second). - If the arrays are not of identical element types, they will be coerced - to a common type (see ). - - - ARRAY[1,2,3] || ARRAY[4,5,6,7] - {1,2,3,4,5,6,7} - - - ARRAY[1,2,3] || ARRAY[[4,5,6],[7,8,9.9]] - {{1,2,3},{4,5,6},{7,8,9.9}} - - - - - - anycompatible || anycompatiblearray - anycompatiblearray - - - Concatenates an element onto the front of an array (which must be - empty or one-dimensional). - - - 3 || ARRAY[4,5,6] - {3,4,5,6} - - - - - - anycompatiblearray || anycompatible - anycompatiblearray - - - Concatenates an element onto the end of an array (which must be - empty or one-dimensional). - - - ARRAY[4,5,6] || 7 - {4,5,6,7} - - - - -
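 In practice these operators most often appear in WHERE clauses. A
 sketch, assuming a hypothetical table articles with a
 column tags of type text[]:

SELECT * FROM articles WHERE tags @> ARRAY['sql', 'json'];  -- has both tags
SELECT * FROM articles WHERE tags && ARRAY['sql', 'json'];  -- has at least one

 Such containment and overlap tests can be supported by a GIN index on
 the array column.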
- - - See for more details about array operator - behavior. See for more details about - which operators support indexed operations. - - - - shows the functions - available for use with array types. See - for more information and examples of the use of these functions. - - - - Array Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - array_append - - array_append ( anycompatiblearray, anycompatible ) - anycompatiblearray - - - Appends an element to the end of an array (same as - the anycompatiblearray || anycompatible - operator). - - - array_append(ARRAY[1,2], 3) - {1,2,3} - - - - - - - array_cat - - array_cat ( anycompatiblearray, anycompatiblearray ) - anycompatiblearray - - - Concatenates two arrays (same as - the anycompatiblearray || anycompatiblearray - operator). - - - array_cat(ARRAY[1,2,3], ARRAY[4,5]) - {1,2,3,4,5} - - - - - - - array_dims - - array_dims ( anyarray ) - text - - - Returns a text representation of the array's dimensions. - - - array_dims(ARRAY[[1,2,3], [4,5,6]]) - [1:2][1:3] - - - - - - - array_fill - - array_fill ( anyelement, integer[] - , integer[] ) - anyarray - - - Returns an array filled with copies of the given value, having - dimensions of the lengths specified by the second argument. - The optional third argument supplies lower-bound values for each - dimension (which default to all 1). - - - array_fill(11, ARRAY[2,3]) - {{11,11,11},{11,11,11}} - - - array_fill(7, ARRAY[3], ARRAY[2]) - [2:4]={7,7,7} - - - - - - - array_length - - array_length ( anyarray, integer ) - integer - - - Returns the length of the requested array dimension. - (Produces NULL instead of 0 for empty or missing array dimensions.) - - - array_length(array[1,2,3], 1) - 3 - - - array_length(array[]::int[], 1) - NULL - - - array_length(array['text'], 2) - NULL - - - - - - - array_lower - - array_lower ( anyarray, integer ) - integer - - - Returns the lower bound of the requested array dimension. - - - array_lower('[0:2]={1,2,3}'::integer[], 1) - 0 - - - - - - - array_ndims - - array_ndims ( anyarray ) - integer - - - Returns the number of dimensions of the array. - - - array_ndims(ARRAY[[1,2,3], [4,5,6]]) - 2 - - - - - - - array_position - - array_position ( anycompatiblearray, anycompatible , integer ) - integer - - - Returns the subscript of the first occurrence of the second argument - in the array, or NULL if it's not present. - If the third argument is given, the search begins at that subscript. - The array must be one-dimensional. - Comparisons are done using IS NOT DISTINCT FROM - semantics, so it is possible to search for NULL. - - - array_position(ARRAY['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat'], 'mon') - 2 - - - - - - - array_positions - - array_positions ( anycompatiblearray, anycompatible ) - integer[] - - - Returns an array of the subscripts of all occurrences of the second - argument in the array given as first argument. - The array must be one-dimensional. - Comparisons are done using IS NOT DISTINCT FROM - semantics, so it is possible to search for NULL. - NULL is returned only if the array - is NULL; if the value is not found in the array, an - empty array is returned. - - - array_positions(ARRAY['A','A','B','A'], 'A') - {1,2,4} - - - - - - - array_prepend - - array_prepend ( anycompatible, anycompatiblearray ) - anycompatiblearray - - - Prepends an element to the beginning of an array (same as - the anycompatible || anycompatiblearray - operator). 
- - - array_prepend(1, ARRAY[2,3]) - {1,2,3} - - - - - - - array_remove - - array_remove ( anycompatiblearray, anycompatible ) - anycompatiblearray - - - Removes all elements equal to the given value from the array. - The array must be one-dimensional. - Comparisons are done using IS NOT DISTINCT FROM - semantics, so it is possible to remove NULLs. - - - array_remove(ARRAY[1,2,3,2], 2) - {1,3} - - - - - - - array_replace - - array_replace ( anycompatiblearray, anycompatible, anycompatible ) - anycompatiblearray - - - Replaces each array element equal to the second argument with the - third argument. - - - array_replace(ARRAY[1,2,5,4], 5, 3) - {1,2,3,4} - - - - - - - array_reverse - - array_reverse ( anyarray ) - anyarray - - - Reverses the first dimension of the array. - - - array_reverse(ARRAY[[1,2],[3,4],[5,6]]) - {{5,6},{3,4},{1,2}} - - - - - - - array_sample - - array_sample ( array anyarray, n integer ) - anyarray - - - Returns an array of n items randomly selected - from array. n may not - exceed the length of array's first dimension. - If array is multi-dimensional, - an item is a slice having a given first subscript. - - - array_sample(ARRAY[1,2,3,4,5,6], 3) - {2,6,1} - - - array_sample(ARRAY[[1,2],[3,4],[5,6]], 2) - {{5,6},{1,2}} - - - - - - - array_shuffle - - array_shuffle ( anyarray ) - anyarray - - - Randomly shuffles the first dimension of the array. - - - array_shuffle(ARRAY[[1,2],[3,4],[5,6]]) - {{5,6},{1,2},{3,4}} - - - - - - - array_sort - - array_sort ( - array anyarray - , descending boolean - , nulls_first boolean - ) - anyarray - - - Sorts the first dimension of the array. - The sort order is determined by the default sort ordering of the - array's element type; however, if the element type is collatable, - the collation to use can be specified by adding - a COLLATE clause to - the array argument. - - - If descending is true then sort in - descending order, otherwise ascending order. If omitted, the - default is ascending order. - If nulls_first is true then nulls appear - before non-null values, otherwise nulls appear after non-null - values. - If omitted, nulls_first is taken to have - the same value as descending. - - - array_sort(ARRAY[[2,4],[2,1],[6,5]]) - {{2,1},{2,4},{6,5}} - - - - - - - array_to_string - - array_to_string ( array anyarray, delimiter text , null_string text ) - text - - - Converts each array element to its text representation, and - concatenates those separated by - the delimiter string. - If null_string is given and is - not NULL, then NULL array - entries are represented by that string; otherwise, they are omitted. - See also string_to_array. - - - array_to_string(ARRAY[1, 2, 3, NULL, 5], ',', '*') - 1,2,3,*,5 - - - - - - - array_upper - - array_upper ( anyarray, integer ) - integer - - - Returns the upper bound of the requested array dimension. - - - array_upper(ARRAY[1,8,3,7], 1) - 4 - - - - - - - cardinality - - cardinality ( anyarray ) - integer - - - Returns the total number of elements in the array, or 0 if the array - is empty. - - - cardinality(ARRAY[[1,2],[3,4]]) - 4 - - - - - - - trim_array - - trim_array ( array anyarray, n integer ) - anyarray - - - Trims an array by removing the last n elements. - If the array is multidimensional, only the first dimension is trimmed. - - - trim_array(ARRAY[1,2,3,4,5,6], 2) - {1,2,3,4} - - - - - - - unnest - - unnest ( anyarray ) - setof anyelement - - - Expands an array into a set of rows. - The array's elements are read out in storage order. 
- - - unnest(ARRAY[1,2]) - - - 1 - 2 - - - - unnest(ARRAY[['foo','bar'],['baz','quux']]) - - - foo - bar - baz - quux - - - - - - - unnest ( anyarray, anyarray , ... ) - setof anyelement, anyelement [, ... ] - - - Expands multiple arrays (possibly of different data types) into a set of - rows. If the arrays are not all the same length then the shorter ones - are padded with NULLs. This form is only allowed - in a query's FROM clause; see . - - - select * from unnest(ARRAY[1,2], ARRAY['foo','bar','baz']) as x(a,b) - - - a | b ----+----- - 1 | foo - 2 | bar - | baz - - - - - -
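 When expanding an array in the FROM clause, element positions can be
 recovered by attaching WITH ORDINALITY, which appends a
 bigint ordinality column:

SELECT * FROM unnest(ARRAY['foo','bar','baz']) WITH ORDINALITY AS t(elem, n);

 elem | n
------+---
 foo  | 1
 bar  | 2
 baz  | 3
(3 rows)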
 - 
 - 
 See also the aggregate
 function array_agg for use with arrays.
 -
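 For instance, array_agg is the natural inverse of
 unnest:

SELECT array_agg(x ORDER BY x) FROM unnest(ARRAY[3,1,2]) AS t(x);

 array_agg
-----------
 {1,2,3}
(1 row)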
- - - Range/Multirange Functions and Operators - - - See for an overview of range types. - - - - shows the specialized operators - available for range types. - shows the specialized operators - available for multirange types. - In addition to those, the usual comparison operators shown in - are available for range - and multirange types. The comparison operators order first by the range lower - bounds, and only if those are equal do they compare the upper bounds. The - multirange operators compare each range until one is unequal. This - does not usually result in a useful overall ordering, but the operators are - provided to allow unique indexes to be constructed on ranges. - - - - Range Operators - - - - - Operator - - - Description - - - Example(s) - - - - - - - - anyrange @> anyrange - boolean - - - Does the first range contain the second? - - - int4range(2,4) @> int4range(2,3) - t - - - - - - anyrange @> anyelement - boolean - - - Does the range contain the element? - - - '[2011-01-01,2011-03-01)'::tsrange @> '2011-01-10'::timestamp - t - - - - - - anyrange <@ anyrange - boolean - - - Is the first range contained by the second? - - - int4range(2,4) <@ int4range(1,7) - t - - - - - - anyelement <@ anyrange - boolean - - - Is the element contained in the range? - - - 42 <@ int4range(1,7) - f - - - - - - anyrange && anyrange - boolean - - - Do the ranges overlap, that is, have any elements in common? - - - int8range(3,7) && int8range(4,12) - t - - - - - - anyrange << anyrange - boolean - - - Is the first range strictly left of the second? - - - int8range(1,10) << int8range(100,110) - t - - - - - - anyrange >> anyrange - boolean - - - Is the first range strictly right of the second? - - - int8range(50,60) >> int8range(20,30) - t - - - - - - anyrange &< anyrange - boolean - - - Does the first range not extend to the right of the second? - - - int8range(1,20) &< int8range(18,20) - t - - - - - - anyrange &> anyrange - boolean - - - Does the first range not extend to the left of the second? - - - int8range(7,20) &> int8range(5,10) - t - - - - - - anyrange -|- anyrange - boolean - - - Are the ranges adjacent? - - - numrange(1.1,2.2) -|- numrange(2.2,3.3) - t - - - - - - anyrange + anyrange - anyrange - - - Computes the union of the ranges. The ranges must overlap or be - adjacent, so that the union is a single range (but - see range_merge()). - - - numrange(5,15) + numrange(10,20) - [5,20) - - - - - - anyrange * anyrange - anyrange - - - Computes the intersection of the ranges. - - - int8range(5,15) * int8range(10,20) - [10,15) - - - - - - anyrange - anyrange - anyrange - - - Computes the difference of the ranges. The second range must not be - contained in the first in such a way that the difference would not be - a single range. - - - int8range(5,15) - int8range(10,20) - [5,10) - - - - -
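 As with arrays, these operators are typically used for filtering. A
 sketch, assuming a hypothetical table bookings with a
 column during of type daterange:

SELECT * FROM bookings
WHERE during && daterange('2024-05-01', '2024-05-07');   -- overlaps the stay

 Overlap and containment tests on range columns can be supported by a
 GiST index.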
- - - Multirange Operators - - - - - Operator - - - Description - - - Example(s) - - - - - - - - anymultirange @> anymultirange - boolean - - - Does the first multirange contain the second? - - - '{[2,4)}'::int4multirange @> '{[2,3)}'::int4multirange - t - - - - - - anymultirange @> anyrange - boolean - - - Does the multirange contain the range? - - - '{[2,4)}'::int4multirange @> int4range(2,3) - t - - - - - - anymultirange @> anyelement - boolean - - - Does the multirange contain the element? - - - '{[2011-01-01,2011-03-01)}'::tsmultirange @> '2011-01-10'::timestamp - t - - - - - - anyrange @> anymultirange - boolean - - - Does the range contain the multirange? - - - '[2,4)'::int4range @> '{[2,3)}'::int4multirange - t - - - - - - anymultirange <@ anymultirange - boolean - - - Is the first multirange contained by the second? - - - '{[2,4)}'::int4multirange <@ '{[1,7)}'::int4multirange - t - - - - - - anymultirange <@ anyrange - boolean - - - Is the multirange contained by the range? - - - '{[2,4)}'::int4multirange <@ int4range(1,7) - t - - - - - - anyrange <@ anymultirange - boolean - - - Is the range contained by the multirange? - - - int4range(2,4) <@ '{[1,7)}'::int4multirange - t - - - - - - anyelement <@ anymultirange - boolean - - - Is the element contained by the multirange? - - - 4 <@ '{[1,7)}'::int4multirange - t - - - - - - anymultirange && anymultirange - boolean - - - Do the multiranges overlap, that is, have any elements in common? - - - '{[3,7)}'::int8multirange && '{[4,12)}'::int8multirange - t - - - - - - anymultirange && anyrange - boolean - - - Does the multirange overlap the range? - - - '{[3,7)}'::int8multirange && int8range(4,12) - t - - - - - - anyrange && anymultirange - boolean - - - Does the range overlap the multirange? - - - int8range(3,7) && '{[4,12)}'::int8multirange - t - - - - - - anymultirange << anymultirange - boolean - - - Is the first multirange strictly left of the second? - - - '{[1,10)}'::int8multirange << '{[100,110)}'::int8multirange - t - - - - - - anymultirange << anyrange - boolean - - - Is the multirange strictly left of the range? - - - '{[1,10)}'::int8multirange << int8range(100,110) - t - - - - - - anyrange << anymultirange - boolean - - - Is the range strictly left of the multirange? - - - int8range(1,10) << '{[100,110)}'::int8multirange - t - - - - - - anymultirange >> anymultirange - boolean - - - Is the first multirange strictly right of the second? - - - '{[50,60)}'::int8multirange >> '{[20,30)}'::int8multirange - t - - - - - - anymultirange >> anyrange - boolean - - - Is the multirange strictly right of the range? - - - '{[50,60)}'::int8multirange >> int8range(20,30) - t - - - - - - anyrange >> anymultirange - boolean - - - Is the range strictly right of the multirange? - - - int8range(50,60) >> '{[20,30)}'::int8multirange - t - - - - - - anymultirange &< anymultirange - boolean - - - Does the first multirange not extend to the right of the second? - - - '{[1,20)}'::int8multirange &< '{[18,20)}'::int8multirange - t - - - - - - anymultirange &< anyrange - boolean - - - Does the multirange not extend to the right of the range? - - - '{[1,20)}'::int8multirange &< int8range(18,20) - t - - - - - - anyrange &< anymultirange - boolean - - - Does the range not extend to the right of the multirange? - - - int8range(1,20) &< '{[18,20)}'::int8multirange - t - - - - - - anymultirange &> anymultirange - boolean - - - Does the first multirange not extend to the left of the second? 
- - - '{[7,20)}'::int8multirange &> '{[5,10)}'::int8multirange - t - - - - - - anymultirange &> anyrange - boolean - - - Does the multirange not extend to the left of the range? - - - '{[7,20)}'::int8multirange &> int8range(5,10) - t - - - - - - anyrange &> anymultirange - boolean - - - Does the range not extend to the left of the multirange? - - - int8range(7,20) &> '{[5,10)}'::int8multirange - t - - - - - - anymultirange -|- anymultirange - boolean - - - Are the multiranges adjacent? - - - '{[1.1,2.2)}'::nummultirange -|- '{[2.2,3.3)}'::nummultirange - t - - - - - - anymultirange -|- anyrange - boolean - - - Is the multirange adjacent to the range? - - - '{[1.1,2.2)}'::nummultirange -|- numrange(2.2,3.3) - t - - - - - - anyrange -|- anymultirange - boolean - - - Is the range adjacent to the multirange? - - - numrange(1.1,2.2) -|- '{[2.2,3.3)}'::nummultirange - t - - - - - - anymultirange + anymultirange - anymultirange - - - Computes the union of the multiranges. The multiranges need not overlap - or be adjacent. - - - '{[5,10)}'::nummultirange + '{[15,20)}'::nummultirange - {[5,10), [15,20)} - - - - - - anymultirange * anymultirange - anymultirange - - - Computes the intersection of the multiranges. - - - '{[5,15)}'::int8multirange * '{[10,20)}'::int8multirange - {[10,15)} - - - - - - anymultirange - anymultirange - anymultirange - - - Computes the difference of the multiranges. - - - '{[5,20)}'::int8multirange - '{[10,15)}'::int8multirange - {[5,10), [15,20)} - - - - -
- - - The left-of/right-of/adjacent operators always return false when an empty - range or multirange is involved; that is, an empty range is not considered to - be either before or after any other range. - - - - Elsewhere empty ranges and multiranges are treated as the additive identity: - anything unioned with an empty value is itself. Anything minus an empty - value is itself. An empty multirange has exactly the same points as an empty - range. Every range contains the empty range. Every multirange contains as many - empty ranges as you like. - - - - The range union and difference operators will fail if the resulting range would - need to contain two disjoint sub-ranges, as such a range cannot be - represented. There are separate operators for union and difference that take - multirange parameters and return a multirange, and they do not fail even if - their arguments are disjoint. So if you need a union or difference operation - for ranges that may be disjoint, you can avoid errors by first casting your - ranges to multiranges. - - - - shows the functions - available for use with range types. - shows the functions - available for use with multirange types. - - - - Range Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - lower - - lower ( anyrange ) - anyelement - - - Extracts the lower bound of the range (NULL if the - range is empty or has no lower bound). - - - lower(numrange(1.1,2.2)) - 1.1 - - - - - - - upper - - upper ( anyrange ) - anyelement - - - Extracts the upper bound of the range (NULL if the - range is empty or has no upper bound). - - - upper(numrange(1.1,2.2)) - 2.2 - - - - - - - isempty - - isempty ( anyrange ) - boolean - - - Is the range empty? - - - isempty(numrange(1.1,2.2)) - f - - - - - - - lower_inc - - lower_inc ( anyrange ) - boolean - - - Is the range's lower bound inclusive? - - - lower_inc(numrange(1.1,2.2)) - t - - - - - - - upper_inc - - upper_inc ( anyrange ) - boolean - - - Is the range's upper bound inclusive? - - - upper_inc(numrange(1.1,2.2)) - f - - - - - - - lower_inf - - lower_inf ( anyrange ) - boolean - - - Does the range have no lower bound? (A lower bound of - -Infinity returns false.) - - - lower_inf('(,)'::daterange) - t - - - - - - - upper_inf - - upper_inf ( anyrange ) - boolean - - - Does the range have no upper bound? (An upper bound of - Infinity returns false.) - - - upper_inf('(,)'::daterange) - t - - - - - - - range_merge - - range_merge ( anyrange, anyrange ) - anyrange - - - Computes the smallest range that includes both of the given ranges. - - - range_merge('[1,2)'::int4range, '[3,4)'::int4range) - [1,4) - - - - -
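 The union caveat described earlier in this section, and the
 range_merge alternative just shown, can be illustrated side
 by side (the exact error text may differ across versions):

SELECT int4range(1,3) + int4range(5,8);
ERROR:  result of range union would not be contiguous

SELECT range_merge(int4range(1,3), int4range(5,8));
-- [1,8), spanning the gap

SELECT int4range(1,3)::int4multirange + int4range(5,8)::int4multirange;
-- {[1,3),[5,8)}, keeping the gap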
- - - Multirange Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - lower - - lower ( anymultirange ) - anyelement - - - Extracts the lower bound of the multirange (NULL if the - multirange is empty or has no lower bound). - - - lower('{[1.1,2.2)}'::nummultirange) - 1.1 - - - - - - - upper - - upper ( anymultirange ) - anyelement - - - Extracts the upper bound of the multirange (NULL if the - multirange is empty or has no upper bound). - - - upper('{[1.1,2.2)}'::nummultirange) - 2.2 - - - - - - - isempty - - isempty ( anymultirange ) - boolean - - - Is the multirange empty? - - - isempty('{[1.1,2.2)}'::nummultirange) - f - - - - - - - lower_inc - - lower_inc ( anymultirange ) - boolean - - - Is the multirange's lower bound inclusive? - - - lower_inc('{[1.1,2.2)}'::nummultirange) - t - - - - - - - upper_inc - - upper_inc ( anymultirange ) - boolean - - - Is the multirange's upper bound inclusive? - - - upper_inc('{[1.1,2.2)}'::nummultirange) - f - - - - - - - lower_inf - - lower_inf ( anymultirange ) - boolean - - - Does the multirange have no lower bound? (A lower bound of - -Infinity returns false.) - - - lower_inf('{(,)}'::datemultirange) - t - - - - - - - upper_inf - - upper_inf ( anymultirange ) - boolean - - - Does the multirange have no upper bound? (An upper bound of - Infinity returns false.) - - - upper_inf('{(,)}'::datemultirange) - t - - - - - - - range_merge - - range_merge ( anymultirange ) - anyrange - - - Computes the smallest range that includes the entire multirange. - - - range_merge('{[1,2), [3,4)}'::int4multirange) - [1,4) - - - - - - - multirange (function) - - multirange ( anyrange ) - anymultirange - - - Returns a multirange containing just the given range. - - - multirange('[1,2)'::int4range) - {[1,2)} - - - - - - - unnest - for multirange - - unnest ( anymultirange ) - setof anyrange - - - Expands a multirange into a set of ranges in ascending order. - - - unnest('{[1,2), [3,4)}'::int4multirange) - - - [1,2) - [3,4) - - - - - -
- - - The lower_inc, upper_inc, - lower_inf, and upper_inf - functions all return false for an empty range or multirange. - -
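 These rules are easy to verify interactively:

SELECT int4range(5,5);                        -- empty (a zero-length range normalizes to empty)
SELECT isempty('empty'::int4range);           -- t
SELECT lower_inc('empty'::int4range);         -- f
SELECT int4range(1,4) @> 'empty'::int4range;  -- t, every range contains the empty range
SELECT 'empty'::int4range << int4range(1,4);  -- f, empty is neither before nor after anything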
- - - Aggregate Functions - - - aggregate function - built-in - - - - Aggregate functions compute a single result - from a set of input values. The built-in general-purpose aggregate - functions are listed in - while statistical aggregates are in . - The built-in within-group ordered-set aggregate functions - are listed in - while the built-in within-group hypothetical-set ones are in . Grouping operations, - which are closely related to aggregate functions, are listed in - . - The special syntax considerations for aggregate - functions are explained in . - Consult for additional introductory - information. - - - - Aggregate functions that support Partial Mode - are eligible to participate in various optimizations, such as parallel - aggregation. - - - - While all aggregates below accept an optional - ORDER BY clause (as outlined in ), the clause has only been added to - aggregates whose output is affected by ordering. - - - - General-Purpose Aggregate Functions - - - - - - - Function - - - Description - - Partial Mode - - - - - - - - any_value - - any_value ( anyelement ) - same as input type - - - Returns an arbitrary value from the non-null input values. - - Yes - - - - - - array_agg - - array_agg ( anynonarray ORDER BY input_sort_columns ) - anyarray - - - Collects all the input values, including nulls, into an array. - - Yes - - - - - array_agg ( anyarray ORDER BY input_sort_columns ) - anyarray - - - Concatenates all the input arrays into an array of one higher - dimension. (The inputs must all have the same dimensionality, and - cannot be empty or null.) - - Yes - - - - - - average - - - avg - - avg ( smallint ) - numeric - - - avg ( integer ) - numeric - - - avg ( bigint ) - numeric - - - avg ( numeric ) - numeric - - - avg ( real ) - double precision - - - avg ( double precision ) - double precision - - - avg ( interval ) - interval - - - Computes the average (arithmetic mean) of all the non-null input - values. - - Yes - - - - - - bit_and - - bit_and ( smallint ) - smallint - - - bit_and ( integer ) - integer - - - bit_and ( bigint ) - bigint - - - bit_and ( bit ) - bit - - - Computes the bitwise AND of all non-null input values. - - Yes - - - - - - bit_or - - bit_or ( smallint ) - smallint - - - bit_or ( integer ) - integer - - - bit_or ( bigint ) - bigint - - - bit_or ( bit ) - bit - - - Computes the bitwise OR of all non-null input values. - - Yes - - - - - - bit_xor - - bit_xor ( smallint ) - smallint - - - bit_xor ( integer ) - integer - - - bit_xor ( bigint ) - bigint - - - bit_xor ( bit ) - bit - - - Computes the bitwise exclusive OR of all non-null input values. - Can be useful as a checksum for an unordered set of values. - - Yes - - - - - - bool_and - - bool_and ( boolean ) - boolean - - - Returns true if all non-null input values are true, otherwise false. - - Yes - - - - - - bool_or - - bool_or ( boolean ) - boolean - - - Returns true if any non-null input value is true, otherwise false. - - Yes - - - - - - count - - count ( * ) - bigint - - - Computes the number of input rows. - - Yes - - - - - count ( "any" ) - bigint - - - Computes the number of input rows in which the input value is not - null. - - Yes - - - - - - every - - every ( boolean ) - boolean - - - This is the SQL standard's equivalent to bool_and. - - Yes - - - - - - json_agg - - json_agg ( anyelement ORDER BY input_sort_columns ) - json - - - - jsonb_agg - - jsonb_agg ( anyelement ORDER BY input_sort_columns ) - jsonb - - - Collects all the input values, including nulls, into a JSON array. 
- Values are converted to JSON as per to_json - or to_jsonb. - - No - - - - - - json_agg_strict - - json_agg_strict ( anyelement ) - json - - - - jsonb_agg_strict - - jsonb_agg_strict ( anyelement ) - jsonb - - - Collects all the input values, skipping nulls, into a JSON array. - Values are converted to JSON as per to_json - or to_jsonb. - - No - - - - - json_arrayagg - json_arrayagg ( - value_expression - ORDER BY sort_expression - { NULL | ABSENT } ON NULL - RETURNING data_type FORMAT JSON ENCODING UTF8 ) - - - Behaves in the same way as json_array - but as an aggregate function so it only takes one - value_expression parameter. - If ABSENT ON NULL is specified, any NULL - values are omitted. - If ORDER BY is specified, the elements will - appear in the array in that order rather than in the input order. - - - SELECT json_arrayagg(v) FROM (VALUES(2),(1)) t(v) - [2, 1] - - No - - - - - json_objectagg - json_objectagg ( - { key_expression { VALUE | ':' } value_expression } - { NULL | ABSENT } ON NULL - { WITH | WITHOUT } UNIQUE KEYS - RETURNING data_type FORMAT JSON ENCODING UTF8 ) - - - Behaves like json_object, but as an - aggregate function, so it only takes one - key_expression and one - value_expression parameter. - - - SELECT json_objectagg(k:v) FROM (VALUES ('a'::text,current_date),('b',current_date + 1)) AS t(k,v) - { "a" : "2022-05-10", "b" : "2022-05-11" } - - No - - - - - - json_object_agg - - json_object_agg ( key - "any", value - "any" - ORDER BY input_sort_columns ) - json - - - - jsonb_object_agg - - jsonb_object_agg ( key - "any", value - "any" - ORDER BY input_sort_columns ) - jsonb - - - Collects all the key/value pairs into a JSON object. Key arguments - are coerced to text; value arguments are converted as per - to_json or to_jsonb. - Values can be null, but keys cannot. - - No - - - - - - json_object_agg_strict - - json_object_agg_strict ( - key "any", - value "any" ) - json - - - - jsonb_object_agg_strict - - jsonb_object_agg_strict ( - key "any", - value "any" ) - jsonb - - - Collects all the key/value pairs into a JSON object. Key arguments - are coerced to text; value arguments are converted as per - to_json or to_jsonb. - The key can not be null. If the - value is null then the entry is skipped, - - No - - - - - - json_object_agg_unique - - json_object_agg_unique ( - key "any", - value "any" ) - json - - - - jsonb_object_agg_unique - - jsonb_object_agg_unique ( - key "any", - value "any" ) - jsonb - - - Collects all the key/value pairs into a JSON object. Key arguments - are coerced to text; value arguments are converted as per - to_json or to_jsonb. - Values can be null, but keys cannot. - If there is a duplicate key an error is thrown. - - No - - - - - - json_object_agg_unique_strict - - json_object_agg_unique_strict ( - key "any", - value "any" ) - json - - - - jsonb_object_agg_unique_strict - - jsonb_object_agg_unique_strict ( - key "any", - value "any" ) - jsonb - - - Collects all the key/value pairs into a JSON object. Key arguments - are coerced to text; value arguments are converted as per - to_json or to_jsonb. - The key can not be null. If the - value is null then the entry is skipped. - If there is a duplicate key an error is thrown. - - No - - - - - - max - - max ( see text ) - same as input type - - - Computes the maximum of the non-null input - values. Available for any numeric, string, date/time, or enum type, - as well as bytea, inet, interval, - money, oid, pg_lsn, - tid, xid8, - and also arrays and composite types containing sortable data types. 
- - Yes - - - - - - min - - min ( see text ) - same as input type - - - Computes the minimum of the non-null input - values. Available for any numeric, string, date/time, or enum type, - as well as bytea, inet, interval, - money, oid, pg_lsn, - tid, xid8, - and also arrays and composite types containing sortable data types. - - Yes - - - - - - range_agg - - range_agg ( value - anyrange ) - anymultirange - - - range_agg ( value - anymultirange ) - anymultirange - - - Computes the union of the non-null input values. - - No - - - - - - range_intersect_agg - - range_intersect_agg ( value - anyrange ) - anyrange - - - range_intersect_agg ( value - anymultirange ) - anymultirange - - - Computes the intersection of the non-null input values. - - No - - - - - - string_agg - - string_agg ( value - text, delimiter text ) - text - - - string_agg ( value - bytea, delimiter bytea - ORDER BY input_sort_columns ) - bytea - - - Concatenates the non-null input values into a string. Each value - after the first is preceded by the - corresponding delimiter (if it's not null). - - Yes - - - - - - sum - - sum ( smallint ) - bigint - - - sum ( integer ) - bigint - - - sum ( bigint ) - numeric - - - sum ( numeric ) - numeric - - - sum ( real ) - real - - - sum ( double precision ) - double precision - - - sum ( interval ) - interval - - - sum ( money ) - money - - - Computes the sum of the non-null input values. - - Yes - - - - - - xmlagg - - xmlagg ( xml ORDER BY input_sort_columns ) - xml - - - Concatenates the non-null XML input values (see - ). - - No - - - -
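 A compact example exercising several of the aggregates above,
 including an ORDER BY written inside an aggregate call:

SELECT count(*)                             AS n_rows,
       count(price)                         AS n_prices,   -- non-null inputs only
       bool_and(price IS NULL OR price > 0) AS all_positive,
       string_agg(name, ', ' ORDER BY name) AS names
FROM (VALUES ('banana', 2.10),
             ('apple',  NULL),
             ('cherry', 4.50)) AS t(name, price);

 n_rows | n_prices | all_positive | names
--------+----------+--------------+-----------------------
      3 |        2 | t            | apple, banana, cherry
(1 row)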
 - 
 - 
 It should be noted that except for count,
 these functions return a null value when no rows are selected. In
 particular, sum of no rows returns null, not
 zero as one might expect, and array_agg
 returns null rather than an empty array when there are no input
 rows. The coalesce function can be used to
 substitute zero or an empty array for null when necessary.
 - 
 - 
 - 
 The aggregate functions array_agg,
 json_agg, jsonb_agg,
 json_agg_strict, jsonb_agg_strict,
 json_object_agg, jsonb_object_agg,
 json_object_agg_strict, jsonb_object_agg_strict,
 json_object_agg_unique, jsonb_object_agg_unique,
 json_object_agg_unique_strict,
 jsonb_object_agg_unique_strict,
 string_agg,
 and xmlagg, as well as similar user-defined
 aggregate functions, produce meaningfully different result values
 depending on the order of the input values. This ordering is
 unspecified by default, but can be controlled by writing an
 ORDER BY clause within the aggregate call, as shown in
 .
 Alternatively, supplying the input values from a sorted subquery
 will usually work. For example:

SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab;

 - 
 Beware that this approach can fail if the outer query level contains
 additional processing, such as a join, because that might cause the
 subquery's output to be reordered before the aggregate is computed.
 - 
 - 
 - 
 - 
 ANY
 - 
 - 
 SOME
 - 
 - 
 The boolean aggregates bool_and and
 bool_or correspond to the standard SQL aggregates
 every and any or
 some.
 PostgreSQL
 supports every, but not any
 or some, because there is an ambiguity built into
 the standard syntax:

SELECT b1 = ANY((SELECT b2 FROM t2 ...)) FROM t1 ...;

 Here ANY can be considered either as introducing
 a subquery, or as being an aggregate function, if the subquery
 returns one row with a Boolean value.
 Thus the standard name cannot be given to these aggregates.
 - 
 - 
 - 
 - 
 - 
 Users accustomed to working with other SQL database management
 systems might be disappointed by the performance of the
 count aggregate when it is applied to the
 entire table. A query like:

SELECT count(*) FROM sometable;

 will require effort proportional to the size of the table:
 PostgreSQL will need to scan either the
 entire table or the entirety of an index that includes all rows in
 the table.
 - 
 - 
 - 
 - 
 shows
 aggregate functions typically used in statistical analysis.
 (These are separated out merely to avoid cluttering the listing
 of more-commonly-used aggregates.) Functions shown as
 accepting numeric_type are available for all
 the types smallint, integer,
 bigint, numeric, real,
 and double precision.
 Where the description mentions
 N, it means the
 number of input rows for which all the input expressions are non-null.
 In all cases, null is returned if the computation is meaningless,
 for example when N is zero.
 - 
 - 
 - 
 statistics
 - 
 - 
 linear regression
 - 
 - 
 - 
 Aggregate Functions for Statistics
 - 
 - 
 - 
 - 
 - 
 - 
 Function
 - 
 - 
 Description
 - 
 Partial Mode
 - 
 - 
 - 
 - 
 - 
 - 
 - 
 - 
 correlation
 - 
 - 
 corr
 - 
 corr ( Y double precision, X double precision )
 double precision
 - 
 - 
 Computes the correlation coefficient.
 - 
 Yes
 - 
 - 
 - 
 - 
 - 
 - 
 covariance
 population
 - 
 - 
 covar_pop
 - 
 covar_pop ( Y double precision, X double precision )
 double precision
 - 
 - 
 Computes the population covariance.
 - 
 Yes
 - 
 - 
 - 
 - 
 - 
 - 
 covariance
 sample
 - 
 - 
 covar_samp
 - 
 covar_samp ( Y double precision, X double precision )
 double precision
 - 
 - 
 Computes the sample covariance.
- - Yes - - - - - - regr_avgx - - regr_avgx ( Y double precision, X double precision ) - double precision - - - Computes the average of the independent variable, - sum(X)/N. - - Yes - - - - - - regr_avgy - - regr_avgy ( Y double precision, X double precision ) - double precision - - - Computes the average of the dependent variable, - sum(Y)/N. - - Yes - - - - - - regr_count - - regr_count ( Y double precision, X double precision ) - bigint - - - Computes the number of rows in which both inputs are non-null. - - Yes - - - - - - regression intercept - - - regr_intercept - - regr_intercept ( Y double precision, X double precision ) - double precision - - - Computes the y-intercept of the least-squares-fit linear equation - determined by the - (X, Y) pairs. - - Yes - - - - - - regr_r2 - - regr_r2 ( Y double precision, X double precision ) - double precision - - - Computes the square of the correlation coefficient. - - Yes - - - - - - regression slope - - - regr_slope - - regr_slope ( Y double precision, X double precision ) - double precision - - - Computes the slope of the least-squares-fit linear equation determined - by the (X, Y) - pairs. - - Yes - - - - - - regr_sxx - - regr_sxx ( Y double precision, X double precision ) - double precision - - - Computes the sum of squares of the independent - variable, - sum(X^2) - sum(X)^2/N. - - Yes - - - - - - regr_sxy - - regr_sxy ( Y double precision, X double precision ) - double precision - - - Computes the sum of products of independent times - dependent variables, - sum(X*Y) - sum(X) * sum(Y)/N. - - Yes - - - - - - regr_syy - - regr_syy ( Y double precision, X double precision ) - double precision - - - Computes the sum of squares of the dependent - variable, - sum(Y^2) - sum(Y)^2/N. - - Yes - - - - - - standard deviation - - - stddev - - stddev ( numeric_type ) - double precision - for real or double precision, - otherwise numeric - - - This is a historical alias for stddev_samp. - - Yes - - - - - - standard deviation - population - - - stddev_pop - - stddev_pop ( numeric_type ) - double precision - for real or double precision, - otherwise numeric - - - Computes the population standard deviation of the input values. - - Yes - - - - - - standard deviation - sample - - - stddev_samp - - stddev_samp ( numeric_type ) - double precision - for real or double precision, - otherwise numeric - - - Computes the sample standard deviation of the input values. - - Yes - - - - - - variance - - variance ( numeric_type ) - double precision - for real or double precision, - otherwise numeric - - - This is a historical alias for var_samp. - - Yes - - - - - - variance - population - - - var_pop - - var_pop ( numeric_type ) - double precision - for real or double precision, - otherwise numeric - - - Computes the population variance of the input values (square of the - population standard deviation). - - Yes - - - - - - variance - sample - - - var_samp - - var_samp ( numeric_type ) - double precision - for real or double precision, - otherwise numeric - - - Computes the sample variance of the input values (square of the sample - standard deviation). - - Yes - - - -
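 For example, fitting a least-squares line through three points (the
 values are purely illustrative):

SELECT regr_slope(y, x)     AS slope,
       regr_intercept(y, x) AS intercept,
       corr(y, x)           AS r
FROM (VALUES (1, 2.0), (2, 4.0), (3, 6.1)) AS t(x, y);

-- slope 2.05, intercept about -0.067, correlation about 0.9999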
- - - shows some - aggregate functions that use the ordered-set aggregate - syntax. These functions are sometimes referred to as inverse - distribution functions. Their aggregated input is introduced by - ORDER BY, and they may also take a direct - argument that is not aggregated, but is computed only once. - All these functions ignore null values in their aggregated input. - For those that take a fraction parameter, the - fraction value must be between 0 and 1; an error is thrown if not. - However, a null fraction value simply produces a - null result. - - - - ordered-set aggregate - built-in - - - inverse distribution - - - - Ordered-Set Aggregate Functions - - - - - - - Function - - - Description - - Partial Mode - - - - - - - - mode - statistical - - mode () WITHIN GROUP ( ORDER BY anyelement ) - anyelement - - - Computes the mode, the most frequent - value of the aggregated argument (arbitrarily choosing the first one - if there are multiple equally-frequent values). The aggregated - argument must be of a sortable type. - - No - - - - - - percentile - continuous - - percentile_cont ( fraction double precision ) WITHIN GROUP ( ORDER BY double precision ) - double precision - - - percentile_cont ( fraction double precision ) WITHIN GROUP ( ORDER BY interval ) - interval - - - Computes the continuous percentile, a value - corresponding to the specified fraction - within the ordered set of aggregated argument values. This will - interpolate between adjacent input items if needed. - - No - - - - - percentile_cont ( fractions double precision[] ) WITHIN GROUP ( ORDER BY double precision ) - double precision[] - - - percentile_cont ( fractions double precision[] ) WITHIN GROUP ( ORDER BY interval ) - interval[] - - - Computes multiple continuous percentiles. The result is an array of - the same dimensions as the fractions - parameter, with each non-null element replaced by the (possibly - interpolated) value corresponding to that percentile. - - No - - - - - - percentile - discrete - - percentile_disc ( fraction double precision ) WITHIN GROUP ( ORDER BY anyelement ) - anyelement - - - Computes the discrete percentile, the first - value within the ordered set of aggregated argument values whose - position in the ordering equals or exceeds the - specified fraction. The aggregated - argument must be of a sortable type. - - No - - - - - percentile_disc ( fractions double precision[] ) WITHIN GROUP ( ORDER BY anyelement ) - anyarray - - - Computes multiple discrete percentiles. The result is an array of the - same dimensions as the fractions parameter, - with each non-null element replaced by the input value corresponding - to that percentile. - The aggregated argument must be of a sortable type. - - No - - - -
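For instance, continuous and discrete medians, as well as several percentiles at once, can be requested side by side (the table households and column income are hypothetical):

-- Hypothetical table: households(income double precision).
-- percentile_cont may interpolate between inputs;
-- percentile_disc always returns an actual input value.
SELECT percentile_cont(0.5) WITHIN GROUP (ORDER BY income) AS median_cont,
       percentile_disc(0.5) WITHIN GROUP (ORDER BY income) AS median_disc,
       percentile_cont(ARRAY[0.25, 0.5, 0.75])
         WITHIN GROUP (ORDER BY income) AS quartiles
FROM households;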
- - - hypothetical-set aggregate - built-in - - - - Each of the hypothetical-set aggregates listed in - is associated with a - window function of the same name defined in - . In each case, the aggregate's result - is the value that the associated window function would have - returned for the hypothetical row constructed from - args, if such a row had been added to the sorted - group of rows represented by the sorted_args. - For each of these functions, the list of direct arguments - given in args must match the number and types of - the aggregated arguments given in sorted_args. - Unlike most built-in aggregates, these aggregates are not strict, that is - they do not drop input rows containing nulls. Null values sort according - to the rule specified in the ORDER BY clause. - - - - Hypothetical-Set Aggregate Functions - - - - - - - Function - - - Description - - Partial Mode - - - - - - - - rank - hypothetical - - rank ( args ) WITHIN GROUP ( ORDER BY sorted_args ) - bigint - - - Computes the rank of the hypothetical row, with gaps; that is, the row - number of the first row in its peer group. - - No - - - - - - dense_rank - hypothetical - - dense_rank ( args ) WITHIN GROUP ( ORDER BY sorted_args ) - bigint - - - Computes the rank of the hypothetical row, without gaps; this function - effectively counts peer groups. - - No - - - - - - percent_rank - hypothetical - - percent_rank ( args ) WITHIN GROUP ( ORDER BY sorted_args ) - double precision - - - Computes the relative rank of the hypothetical row, that is - (rank - 1) / (total rows - 1). - The value thus ranges from 0 to 1 inclusive. - - No - - - - - - cume_dist - hypothetical - - cume_dist ( args ) WITHIN GROUP ( ORDER BY sorted_args ) - double precision - - - Computes the cumulative distribution, that is (number of rows - preceding or peers with hypothetical row) / (total rows). The value - thus ranges from 1/N to 1. - - No - - - -
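As an illustration (the table exams and column score are hypothetical), one can ask what rank a score of 42 would receive among the existing rows:

-- Hypothetical table: exams(score integer).
-- The direct argument (42) is the hypothetical value; it must match the
-- number and types of the aggregated ORDER BY arguments.
SELECT rank(42) WITHIN GROUP (ORDER BY score DESC) AS would_be_rank,
       cume_dist(42) WITHIN GROUP (ORDER BY score DESC) AS would_be_cume_dist
FROM exams;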
- - - Grouping Operations - - - - - Function - - - Description - - - - - - - - - GROUPING - - GROUPING ( group_by_expression(s) ) - integer - - - Returns a bit mask indicating which GROUP BY - expressions are not included in the current grouping set. - Bits are assigned with the rightmost argument corresponding to the - least-significant bit; each bit is 0 if the corresponding expression - is included in the grouping criteria of the grouping set generating - the current result row, and 1 if it is not included. - - - - -
- - - The grouping operations shown in - are used in conjunction with - grouping sets (see ) to distinguish - result rows. The arguments to the GROUPING function - are not actually evaluated, but they must exactly match expressions given - in the GROUP BY clause of the associated query level. - For example: - -=> SELECT * FROM items_sold; - make | model | sales --------+-------+------- - Foo | GT | 10 - Foo | Tour | 20 - Bar | City | 15 - Bar | Sport | 5 -(4 rows) - -=> SELECT make, model, GROUPING(make,model), sum(sales) FROM items_sold GROUP BY ROLLUP(make,model); - make | model | grouping | sum --------+-------+----------+----- - Foo | GT | 0 | 10 - Foo | Tour | 0 | 20 - Bar | City | 0 | 15 - Bar | Sport | 0 | 5 - Foo | | 1 | 30 - Bar | | 1 | 20 - | | 3 | 50 -(7 rows) - - Here, the grouping value 0 in the - first four rows shows that those have been grouped normally, over both the - grouping columns. The value 1 indicates - that model was not grouped by in the next-to-last two - rows, and the value 3 indicates that - neither make nor model was grouped - by in the last row (which therefore is an aggregate over all the input - rows). - - -
- - - Window Functions - - - window function - built-in - - - - Window functions provide the ability to perform - calculations across sets of rows that are related to the current query - row. See for an introduction to this - feature, and for syntax - details. - - - - The built-in window functions are listed in - . Note that these functions - must be invoked using window function syntax, i.e., an - OVER clause is required. - - - - In addition to these functions, any built-in or user-defined - ordinary aggregate (i.e., not ordered-set or hypothetical-set aggregates) - can be used as a window function; see - for a list of the built-in aggregates. - Aggregate functions act as window functions only when an OVER - clause follows the call; otherwise they act as plain aggregates - and return a single row for the entire set. - - - - General-Purpose Window Functions - - - - - Function - - - Description - - - - - - - - - row_number - - row_number () - bigint - - - Returns the number of the current row within its partition, counting - from 1. - - - - - - - rank - - rank () - bigint - - - Returns the rank of the current row, with gaps; that is, - the row_number of the first row in its peer - group. - - - - - - - dense_rank - - dense_rank () - bigint - - - Returns the rank of the current row, without gaps; this function - effectively counts peer groups. - - - - - - - percent_rank - - percent_rank () - double precision - - - Returns the relative rank of the current row, that is - (rank - 1) / (total partition rows - 1). - The value thus ranges from 0 to 1 inclusive. - - - - - - - cume_dist - - cume_dist () - double precision - - - Returns the cumulative distribution, that is (number of partition rows - preceding or peers with current row) / (total partition rows). - The value thus ranges from 1/N to 1. - - - - - - - ntile - - ntile ( num_buckets integer ) - integer - - - Returns an integer ranging from 1 to the argument value, dividing the - partition as equally as possible. - - - - - - - lag - - lag ( value anycompatible - , offset integer - , default anycompatible ) - anycompatible - - - Returns value evaluated at - the row that is offset - rows before the current row within the partition; if there is no such - row, instead returns default - (which must be of a type compatible with - value). - Both offset and - default are evaluated - with respect to the current row. If omitted, - offset defaults to 1 and - default to NULL. - - - - - - - lead - - lead ( value anycompatible - , offset integer - , default anycompatible ) - anycompatible - - - Returns value evaluated at - the row that is offset - rows after the current row within the partition; if there is no such - row, instead returns default - (which must be of a type compatible with - value). - Both offset and - default are evaluated - with respect to the current row. If omitted, - offset defaults to 1 and - default to NULL. - - - - - - - first_value - - first_value ( value anyelement ) - anyelement - - - Returns value evaluated - at the row that is the first row of the window frame. - - - - - - - last_value - - last_value ( value anyelement ) - anyelement - - - Returns value evaluated - at the row that is the last row of the window frame. - - - - - - - nth_value - - nth_value ( value anyelement, n integer ) - anyelement - - - Returns value evaluated - at the row that is the n'th - row of the window frame (counting from 1); - returns NULL if there is no such row. - - - - -
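As a brief sketch (the table sales and its columns are hypothetical), several of these functions can share one window definition via a WINDOW clause:

-- Hypothetical table: sales(region text, amount numeric).
SELECT region, amount,
       row_number() OVER w AS row_num,
       rank()       OVER w AS rnk,
       lag(amount)  OVER w AS prev_amount
FROM sales
WINDOW w AS (PARTITION BY region ORDER BY amount DESC);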
- - - All of the functions listed in - depend on the sort ordering - specified by the ORDER BY clause of the associated window - definition. Rows that are not distinct when considering only the - ORDER BY columns are said to be peers. - The four ranking functions (including cume_dist) are - defined so that they give the same answer for all rows of a peer group. - - - - Note that first_value, last_value, and - nth_value consider only the rows within the window - frame, which by default contains the rows from the start of the - partition through the last peer of the current row. This is - likely to give unhelpful results for last_value and - sometimes also nth_value. You can redefine the frame by - adding a suitable frame specification (RANGE, - ROWS or GROUPS) to - the OVER clause. - See for more information - about frame specifications. - - - - When an aggregate function is used as a window function, it aggregates - over the rows within the current row's window frame. - An aggregate used with ORDER BY and the default window frame - definition produces a running sum type of behavior, which may or - may not be what's wanted. To obtain - aggregation over the whole partition, omit ORDER BY or use - ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING. - Other frame specifications can be used to obtain other effects. - - - - - The SQL standard defines a RESPECT NULLS or - IGNORE NULLS option for lead, lag, - first_value, last_value, and - nth_value. This is not implemented in - PostgreSQL: the behavior is always the - same as the standard's default, namely RESPECT NULLS. - Likewise, the standard's FROM FIRST or FROM LAST - option for nth_value is not implemented: only the - default FROM FIRST behavior is supported. (You can achieve - the result of FROM LAST by reversing the ORDER BY - ordering.) - - - -
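To illustrate the frame-dependent behavior of aggregates used as window functions described above (the table t and column x are hypothetical), compare a running total with a whole-partition total:

-- Hypothetical table: t(x integer).
SELECT x,
       sum(x) OVER (ORDER BY x) AS running_sum,   -- default frame: running total
       sum(x) OVER (ORDER BY x
                    ROWS BETWEEN UNBOUNDED PRECEDING
                             AND UNBOUNDED FOLLOWING) AS grand_total
FROM t;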
- - - Merge Support Functions - - - MERGE - RETURNING - - - - PostgreSQL includes one merge support function - that may be used in the RETURNING list of a - MERGE command to identify the action taken for each - row; see . - - - - Merge Support Functions - - - - - - Function - - - Description - - - - - - - - - merge_action - - merge_action ( ) - text - - - Returns the merge action command executed for the current row. This - will be 'INSERT', 'UPDATE', or - 'DELETE'. - - - - -
Example:

MERGE INTO products p
  USING stock s ON p.product_id = s.product_id
  WHEN MATCHED AND s.quantity > 0 THEN
    UPDATE SET in_stock = true, quantity = s.quantity
  WHEN MATCHED THEN
    UPDATE SET in_stock = false, quantity = 0
  WHEN NOT MATCHED THEN
    INSERT (product_id, in_stock, quantity)
      VALUES (s.product_id, true, s.quantity)
  RETURNING merge_action(), p.*;

 merge_action | product_id | in_stock | quantity
--------------+------------+----------+----------
 UPDATE       |       1001 | t        |       50
 UPDATE       |       1002 | f        |        0
 INSERT       |       1003 | t        |       10

Note that this function can only be used in the RETURNING list of a MERGE command. It is an error to use it in any other part of a query.
- - - Subquery Expressions - - - EXISTS - - - - IN - - - - NOT IN - - - - ANY - - - - ALL - - - - SOME - - - - subquery - - - - This section describes the SQL-compliant subquery - expressions available in PostgreSQL. - All of the expression forms documented in this section return - Boolean (true/false) results. - - - - <literal>EXISTS</literal> - - -EXISTS (subquery) - - - - The argument of EXISTS is an arbitrary SELECT statement, - or subquery. The - subquery is evaluated to determine whether it returns any rows. - If it returns at least one row, the result of EXISTS is - true; if the subquery returns no rows, the result of EXISTS - is false. - - - - The subquery can refer to variables from the surrounding query, - which will act as constants during any one evaluation of the subquery. - - - - The subquery will generally only be executed long enough to determine - whether at least one row is returned, not all the way to completion. - It is unwise to write a subquery that has side effects (such as - calling sequence functions); whether the side effects occur - might be unpredictable. - - - - Since the result depends only on whether any rows are returned, - and not on the contents of those rows, the output list of the - subquery is normally unimportant. A common coding convention is - to write all EXISTS tests in the form - EXISTS(SELECT 1 WHERE ...). There are exceptions to - this rule however, such as subqueries that use INTERSECT. - - - - This simple example is like an inner join on col2, but - it produces at most one output row for each tab1 row, - even if there are several matching tab2 rows: - -SELECT col1 -FROM tab1 -WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); - - - - - - <literal>IN</literal> - - -expression IN (subquery) - - - - The right-hand side is a parenthesized - subquery, which must return exactly one column. The left-hand expression - is evaluated and compared to each row of the subquery result. - The result of IN is true if any equal subquery row is found. - The result is false if no equal row is found (including the - case where the subquery returns no rows). - - - - Note that if the left-hand expression yields null, or if there are - no equal right-hand values and at least one right-hand row yields - null, the result of the IN construct will be null, not false. - This is in accordance with SQL's normal rules for Boolean combinations - of null values. - - - - As with EXISTS, it's unwise to assume that the subquery will - be evaluated completely. - - - -row_constructor IN (subquery) - - - - The left-hand side of this form of IN is a row constructor, - as described in . - The right-hand side is a parenthesized - subquery, which must return exactly as many columns as there are - expressions in the left-hand row. The left-hand expressions are - evaluated and compared row-wise to each row of the subquery result. - The result of IN is true if any equal subquery row is found. - The result is false if no equal row is found (including the - case where the subquery returns no rows). - - - - As usual, null values in the rows are combined per - the normal rules of SQL Boolean expressions. Two rows are considered - equal if all their corresponding members are non-null and equal; the rows - are unequal if any corresponding members are non-null and unequal; - otherwise the result of that row comparison is unknown (null). - If all the per-row results are either unequal or null, with at least one - null, then the result of IN is null. 
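A typical use of the scalar form looks like this (the tables orders and customers, and the boolean column active, are hypothetical):

SELECT *
FROM orders
WHERE customer_id IN (SELECT id FROM customers WHERE active);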
- - - - - <literal>NOT IN</literal> - - -expression NOT IN (subquery) - - - - The right-hand side is a parenthesized - subquery, which must return exactly one column. The left-hand expression - is evaluated and compared to each row of the subquery result. - The result of NOT IN is true if only unequal subquery rows - are found (including the case where the subquery returns no rows). - The result is false if any equal row is found. - - - - Note that if the left-hand expression yields null, or if there are - no equal right-hand values and at least one right-hand row yields - null, the result of the NOT IN construct will be null, not true. - This is in accordance with SQL's normal rules for Boolean combinations - of null values. - - - - As with EXISTS, it's unwise to assume that the subquery will - be evaluated completely. - - - -row_constructor NOT IN (subquery) - - - - The left-hand side of this form of NOT IN is a row constructor, - as described in . - The right-hand side is a parenthesized - subquery, which must return exactly as many columns as there are - expressions in the left-hand row. The left-hand expressions are - evaluated and compared row-wise to each row of the subquery result. - The result of NOT IN is true if only unequal subquery rows - are found (including the case where the subquery returns no rows). - The result is false if any equal row is found. - - - - As usual, null values in the rows are combined per - the normal rules of SQL Boolean expressions. Two rows are considered - equal if all their corresponding members are non-null and equal; the rows - are unequal if any corresponding members are non-null and unequal; - otherwise the result of that row comparison is unknown (null). - If all the per-row results are either unequal or null, with at least one - null, then the result of NOT IN is null. - - - - - <literal>ANY</literal>/<literal>SOME</literal> - - -expression operator ANY (subquery) -expression operator SOME (subquery) - - - - The right-hand side is a parenthesized - subquery, which must return exactly one column. The left-hand expression - is evaluated and compared to each row of the subquery result using the - given operator, which must yield a Boolean - result. - The result of ANY is true if any true result is obtained. - The result is false if no true result is found (including the - case where the subquery returns no rows). - - - - SOME is a synonym for ANY. - IN is equivalent to = ANY. - - - - Note that if there are no successes and at least one right-hand row yields - null for the operator's result, the result of the ANY construct - will be null, not false. - This is in accordance with SQL's normal rules for Boolean combinations - of null values. - - - - As with EXISTS, it's unwise to assume that the subquery will - be evaluated completely. - - - -row_constructor operator ANY (subquery) -row_constructor operator SOME (subquery) - - - - The left-hand side of this form of ANY is a row constructor, - as described in . - The right-hand side is a parenthesized - subquery, which must return exactly as many columns as there are - expressions in the left-hand row. The left-hand expressions are - evaluated and compared row-wise to each row of the subquery result, - using the given operator. - The result of ANY is true if the comparison - returns true for any subquery row. - The result is false if the comparison returns false for every - subquery row (including the case where the subquery returns no - rows). 
- The result is NULL if no comparison with a subquery row returns true, - and at least one comparison returns NULL. - - - - See for details about the meaning - of a row constructor comparison. - - - - - <literal>ALL</literal> - - -expression operator ALL (subquery) - - - - The right-hand side is a parenthesized - subquery, which must return exactly one column. The left-hand expression - is evaluated and compared to each row of the subquery result using the - given operator, which must yield a Boolean - result. - The result of ALL is true if all rows yield true - (including the case where the subquery returns no rows). - The result is false if any false result is found. - The result is NULL if no comparison with a subquery row returns false, - and at least one comparison returns NULL. - - - - NOT IN is equivalent to <> ALL. - - - - As with EXISTS, it's unwise to assume that the subquery will - be evaluated completely. - - - -row_constructor operator ALL (subquery) - - - - The left-hand side of this form of ALL is a row constructor, - as described in . - The right-hand side is a parenthesized - subquery, which must return exactly as many columns as there are - expressions in the left-hand row. The left-hand expressions are - evaluated and compared row-wise to each row of the subquery result, - using the given operator. - The result of ALL is true if the comparison - returns true for all subquery rows (including the - case where the subquery returns no rows). - The result is false if the comparison returns false for any - subquery row. - The result is NULL if no comparison with a subquery row returns false, - and at least one comparison returns NULL. - - - - See for details about the meaning - of a row constructor comparison. - - - - - Single-Row Comparison - - - comparison - subquery result row - - - -row_constructor operator (subquery) - - - - The left-hand side is a row constructor, - as described in . - The right-hand side is a parenthesized subquery, which must return exactly - as many columns as there are expressions in the left-hand row. Furthermore, - the subquery cannot return more than one row. (If it returns zero rows, - the result is taken to be null.) The left-hand side is evaluated and - compared row-wise to the single subquery result row. - - - - See for details about the meaning - of a row constructor comparison. - - - - - - - Row and Array Comparisons - - - IN - - - - NOT IN - - - - ANY - - - - ALL - - - - SOME - - - - composite type - comparison - - - - row-wise comparison - - - - comparison - composite type - - - - comparison - row constructor - - - - IS DISTINCT FROM - - - - IS NOT DISTINCT FROM - - - - This section describes several specialized constructs for making - multiple comparisons between groups of values. These forms are - syntactically related to the subquery forms of the previous section, - but do not involve subqueries. - The forms involving array subexpressions are - PostgreSQL extensions; the rest are - SQL-compliant. - All of the expression forms documented in this section return - Boolean (true/false) results. - - - - <literal>IN</literal> - - -expression IN (value , ...) - - - - The right-hand side is a parenthesized list - of expressions. The result is true if the left-hand expression's - result is equal to any of the right-hand expressions. This is a shorthand - notation for - - -expression = value1 -OR -expression = value2 -OR -... 
- - - - - Note that if the left-hand expression yields null, or if there are - no equal right-hand values and at least one right-hand expression yields - null, the result of the IN construct will be null, not false. - This is in accordance with SQL's normal rules for Boolean combinations - of null values. - - - - - <literal>NOT IN</literal> - - -expression NOT IN (value , ...) - - - - The right-hand side is a parenthesized list - of expressions. The result is true if the left-hand expression's - result is unequal to all of the right-hand expressions. This is a shorthand - notation for - - -expression <> value1 -AND -expression <> value2 -AND -... - - - - - Note that if the left-hand expression yields null, or if there are - no equal right-hand values and at least one right-hand expression yields - null, the result of the NOT IN construct will be null, not true - as one might naively expect. - This is in accordance with SQL's normal rules for Boolean combinations - of null values. - - - - - x NOT IN y is equivalent to NOT (x IN y) in all - cases. However, null values are much more likely to trip up the novice when - working with NOT IN than when working with IN. - It is best to express your condition positively if possible. - - - - - - <literal>ANY</literal>/<literal>SOME</literal> (array) - - -expression operator ANY (array expression) -expression operator SOME (array expression) - - - - The right-hand side is a parenthesized expression, which must yield an - array value. - The left-hand expression - is evaluated and compared to each element of the array using the - given operator, which must yield a Boolean - result. - The result of ANY is true if any true result is obtained. - The result is false if no true result is found (including the - case where the array has zero elements). - - - - If the array expression yields a null array, the result of - ANY will be null. If the left-hand expression yields null, - the result of ANY is ordinarily null (though a non-strict - comparison operator could possibly yield a different result). - Also, if the right-hand array contains any null elements and no true - comparison result is obtained, the result of ANY - will be null, not false (again, assuming a strict comparison operator). - This is in accordance with SQL's normal rules for Boolean combinations - of null values. - - - - SOME is a synonym for ANY. - - - - - <literal>ALL</literal> (array) - - -expression operator ALL (array expression) - - - - The right-hand side is a parenthesized expression, which must yield an - array value. - The left-hand expression - is evaluated and compared to each element of the array using the - given operator, which must yield a Boolean - result. - The result of ALL is true if all comparisons yield true - (including the case where the array has zero elements). - The result is false if any false result is found. - - - - If the array expression yields a null array, the result of - ALL will be null. If the left-hand expression yields null, - the result of ALL is ordinarily null (though a non-strict - comparison operator could possibly yield a different result). - Also, if the right-hand array contains any null elements and no false - comparison result is obtained, the result of ALL - will be null, not true (again, assuming a strict comparison operator). - This is in accordance with SQL's normal rules for Boolean combinations - of null values. 
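Some examples of the array forms:

SELECT 3 = ANY (ARRAY[1, 2, 3]);   -- true: 3 equals one element
SELECT 3 = ALL (ARRAY[3, 3, 3]);   -- true: every element equals 3
SELECT 4 > ALL (ARRAY[1, 2, 3]);   -- true: 4 exceeds every element
SELECT 1 = ANY (ARRAY[2, NULL]);   -- null: no match, but a NULL element remains

The last result follows the null-combination rules just described.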
- - - - - Row Constructor Comparison - - -row_constructor operator row_constructor - - - - Each side is a row constructor, - as described in . - The two row constructors must have the same number of fields. - The given operator is applied to each pair - of corresponding fields. (Since the fields could be of different - types, this means that a different specific operator could be selected - for each pair.) - All the selected operators must be members of some B-tree operator - class, or be the negator of an = member of a B-tree - operator class, meaning that row constructor comparison is only - possible when the operator is - =, - <>, - <, - <=, - >, or - >=, - or has semantics similar to one of these. - - - - The = and <> cases work slightly differently - from the others. Two rows are considered - equal if all their corresponding members are non-null and equal; the rows - are unequal if any corresponding members are non-null and unequal; - otherwise the result of the row comparison is unknown (null). - - - - For the <, <=, > and - >= cases, the row elements are compared left-to-right, - stopping as soon as an unequal or null pair of elements is found. - If either of this pair of elements is null, the result of the - row comparison is unknown (null); otherwise comparison of this pair - of elements determines the result. For example, - ROW(1,2,NULL) < ROW(1,3,0) - yields true, not null, because the third pair of elements are not - considered. - - - -row_constructor IS DISTINCT FROM row_constructor - - - - This construct is similar to a <> row comparison, - but it does not yield null for null inputs. Instead, any null value is - considered unequal to (distinct from) any non-null value, and any two - nulls are considered equal (not distinct). Thus the result will - either be true or false, never null. - - - -row_constructor IS NOT DISTINCT FROM row_constructor - - - - This construct is similar to a = row comparison, - but it does not yield null for null inputs. Instead, any null value is - considered unequal to (distinct from) any non-null value, and any two - nulls are considered equal (not distinct). Thus the result will always - be either true or false, never null. - - - - - - Composite Type Comparison - - -record operator record - - - - The SQL specification requires row-wise comparison to return NULL if the - result depends on comparing two NULL values or a NULL and a non-NULL. - PostgreSQL does this only when comparing the - results of two row constructors (as in - ) or comparing a row constructor - to the output of a subquery (as in ). - In other contexts where two composite-type values are compared, two - NULL field values are considered equal, and a NULL is considered larger - than a non-NULL. This is necessary in order to have consistent sorting - and indexing behavior for composite types. - - - - Each side is evaluated and they are compared row-wise. Composite type - comparisons are allowed when the operator is - =, - <>, - <, - <=, - > or - >=, - or has semantics similar to one of these. (To be specific, an operator - can be a row comparison operator if it is a member of a B-tree operator - class, or is the negator of the = member of a B-tree operator - class.) The default behavior of the above operators is the same as for - IS [ NOT ] DISTINCT FROM for row constructors (see - ). 
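The row-constructor behaviors described above can be checked directly:

SELECT ROW(1, NULL) = ROW(1, NULL);                     -- null: the NULL pair is unknown
SELECT ROW(1, NULL) IS NOT DISTINCT FROM ROW(1, NULL);  -- true: NULLs compare as equal
SELECT ROW(1, 2, NULL) < ROW(1, 3, 0);                  -- true: decided at the second field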
- - - - To support matching of rows which include elements without a default - B-tree operator class, the following operators are defined for composite - type comparison: - *=, - *<>, - *<, - *<=, - *>, and - *>=. - These operators compare the internal binary representation of the two - rows. Two rows might have a different binary representation even - though comparisons of the two rows with the equality operator is true. - The ordering of rows under these comparison operators is deterministic - but not otherwise meaningful. These operators are used internally - for materialized views and might be useful for other specialized - purposes such as replication and B-Tree deduplication (see ). They are not intended to be - generally useful for writing queries, though. - - - - - - Set Returning Functions - - - set returning functions - functions - - - - This section describes functions that possibly return more than one row. - The most widely used functions in this class are series generating - functions, as detailed in and - . Other, more specialized - set-returning functions are described elsewhere in this manual. - See for ways to combine multiple - set-returning functions. - - - - Series Generating Functions - - - - - Function - - - Description - - - - - - - - - generate_series - - generate_series ( start integer, stop integer , step integer ) - setof integer - - - generate_series ( start bigint, stop bigint , step bigint ) - setof bigint - - - generate_series ( start numeric, stop numeric , step numeric ) - setof numeric - - - Generates a series of values from start - to stop, with a step size - of step. step - defaults to 1. - - - - - - generate_series ( start timestamp, stop timestamp, step interval ) - setof timestamp - - - generate_series ( start timestamp with time zone, stop timestamp with time zone, step interval , timezone text ) - setof timestamp with time zone - - - Generates a series of values from start - to stop, with a step size - of step. - In the timezone-aware form, times of day and daylight-savings - adjustments are computed according to the time zone named by - the timezone argument, or the current - setting if that is omitted. - - - - -
- - - When step is positive, zero rows are returned if - start is greater than stop. - Conversely, when step is negative, zero rows are - returned if start is less than stop. - Zero rows are also returned if any input is NULL. - It is an error - for step to be zero. Some examples follow: - -SELECT * FROM generate_series(2,4); - generate_series ------------------ - 2 - 3 - 4 -(3 rows) - -SELECT * FROM generate_series(5,1,-2); - generate_series ------------------ - 5 - 3 - 1 -(3 rows) - -SELECT * FROM generate_series(4,3); - generate_series ------------------ -(0 rows) - -SELECT generate_series(1.1, 4, 1.3); - generate_series ------------------ - 1.1 - 2.4 - 3.7 -(3 rows) - --- this example relies on the date-plus-integer operator: -SELECT current_date + s.a AS dates FROM generate_series(0,14,7) AS s(a); - dates ------------- - 2004-02-05 - 2004-02-12 - 2004-02-19 -(3 rows) - -SELECT * FROM generate_series('2008-03-01 00:00'::timestamp, - '2008-03-04 12:00', '10 hours'); - generate_series ---------------------- - 2008-03-01 00:00:00 - 2008-03-01 10:00:00 - 2008-03-01 20:00:00 - 2008-03-02 06:00:00 - 2008-03-02 16:00:00 - 2008-03-03 02:00:00 - 2008-03-03 12:00:00 - 2008-03-03 22:00:00 - 2008-03-04 08:00:00 -(9 rows) - --- this example assumes that TimeZone is set to UTC; note the DST transition: -SELECT * FROM generate_series('2001-10-22 00:00 -04:00'::timestamptz, - '2001-11-01 00:00 -05:00'::timestamptz, - '1 day'::interval, 'America/New_York'); - generate_series ------------------------- - 2001-10-22 04:00:00+00 - 2001-10-23 04:00:00+00 - 2001-10-24 04:00:00+00 - 2001-10-25 04:00:00+00 - 2001-10-26 04:00:00+00 - 2001-10-27 04:00:00+00 - 2001-10-28 04:00:00+00 - 2001-10-29 05:00:00+00 - 2001-10-30 05:00:00+00 - 2001-10-31 05:00:00+00 - 2001-11-01 05:00:00+00 -(11 rows) - - - - - Subscript Generating Functions - - - - - Function - - - Description - - - - - - - - - generate_subscripts - - generate_subscripts ( array anyarray, dim integer ) - setof integer - - - Generates a series comprising the valid subscripts of - the dim'th dimension of the given array. - - - - - - generate_subscripts ( array anyarray, dim integer, reverse boolean ) - setof integer - - - Generates a series comprising the valid subscripts of - the dim'th dimension of the given array. - When reverse is true, returns the series in - reverse order. - - - - -
- - - generate_subscripts is a convenience function that generates - the set of valid subscripts for the specified dimension of the given - array. - Zero rows are returned for arrays that do not have the requested dimension, - or if any input is NULL. - Some examples follow: - --- basic usage: -SELECT generate_subscripts('{NULL,1,NULL,2}'::int[], 1) AS s; - s ---- - 1 - 2 - 3 - 4 -(4 rows) - --- presenting an array, the subscript and the subscripted --- value requires a subquery: -SELECT * FROM arrays; - a --------------------- - {-1,-2} - {100,200,300} -(2 rows) - -SELECT a AS array, s AS subscript, a[s] AS value -FROM (SELECT generate_subscripts(a, 1) AS s, a FROM arrays) foo; - array | subscript | value ----------------+-----------+------- - {-1,-2} | 1 | -1 - {-1,-2} | 2 | -2 - {100,200,300} | 1 | 100 - {100,200,300} | 2 | 200 - {100,200,300} | 3 | 300 -(5 rows) - --- unnest a 2D array: -CREATE OR REPLACE FUNCTION unnest2(anyarray) -RETURNS SETOF anyelement AS $$ -select $1[i][j] - from generate_subscripts($1,1) g1(i), - generate_subscripts($1,2) g2(j); -$$ LANGUAGE sql IMMUTABLE; -CREATE FUNCTION -SELECT * FROM unnest2(ARRAY[[1,2],[3,4]]); - unnest2 ---------- - 1 - 2 - 3 - 4 -(4 rows) - - - - - ordinality - - - - When a function in the FROM clause is suffixed - by WITH ORDINALITY, a bigint column is - appended to the function's output column(s), which starts from 1 and - increments by 1 for each row of the function's output. - This is most useful in the case of set returning - functions such as unnest(). - - --- set returning function WITH ORDINALITY: -SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n); - ls | n ------------------+---- - pg_serial | 1 - pg_twophase | 2 - postmaster.opts | 3 - pg_notify | 4 - postgresql.conf | 5 - pg_tblspc | 6 - logfile | 7 - base | 8 - postmaster.pid | 9 - pg_ident.conf | 10 - global | 11 - pg_xact | 12 - pg_snapshots | 13 - pg_multixact | 14 - PG_VERSION | 15 - pg_wal | 16 - pg_hba.conf | 17 - pg_stat_tmp | 18 - pg_subtrans | 19 -(19 rows) - - - -
- - - System Information Functions and Operators - - - The functions described in this section are used to obtain various - information about a PostgreSQL installation. - - - - Session Information Functions - - - shows several - functions that extract session and system information. - - - - In addition to the functions listed in this section, there are a number of - functions related to the statistics system that also provide system - information. See for more - information. - - - - Session Information Functions - - - - - Function - - - Description - - - - - - - - - current_catalog - - current_catalog - name - - - - current_database - - current_database () - name - - - Returns the name of the current database. (Databases are - called catalogs in the SQL standard, - so current_catalog is the standard's - spelling.) - - - - - - - current_query - - current_query () - text - - - Returns the text of the currently executing query, as submitted - by the client (which might contain more than one statement). - - - - - - - current_role - - current_role - name - - - This is equivalent to current_user. - - - - - - - current_schema - - - schema - current - - current_schema - name - - - current_schema () - name - - - Returns the name of the schema that is first in the search path (or a - null value if the search path is empty). This is the schema that will - be used for any tables or other named objects that are created without - specifying a target schema. - - - - - - - current_schemas - - - search path - current - - current_schemas ( include_implicit boolean ) - name[] - - - Returns an array of the names of all schemas presently in the - effective search path, in their priority order. (Items in the current - setting that do not correspond to - existing, searchable schemas are omitted.) If the Boolean argument - is true, then implicitly-searched system schemas - such as pg_catalog are included in the result. - - - - - - - current_user - - - user - current - - current_user - name - - - Returns the user name of the current execution context. - - - - - - - inet_client_addr - - inet_client_addr () - inet - - - Returns the IP address of the current client, - or NULL if the current connection is via a - Unix-domain socket. - - - - - - - inet_client_port - - inet_client_port () - integer - - - Returns the IP port number of the current client, - or NULL if the current connection is via a - Unix-domain socket. - - - - - - - inet_server_addr - - inet_server_addr () - inet - - - Returns the IP address on which the server accepted the current - connection, - or NULL if the current connection is via a - Unix-domain socket. - - - - - - - inet_server_port - - inet_server_port () - integer - - - Returns the IP port number on which the server accepted the current - connection, - or NULL if the current connection is via a - Unix-domain socket. - - - - - - - pg_backend_pid - - pg_backend_pid () - integer - - - Returns the process ID of the server process attached to the current - session. - - - - - - - pg_blocking_pids - - pg_blocking_pids ( integer ) - integer[] - - - Returns an array of the process ID(s) of the sessions that are - blocking the server process with the specified process ID from - acquiring a lock, or an empty array if there is no such server process - or it is not blocked. 
- - - One server process blocks another if it either holds a lock that - conflicts with the blocked process's lock request (hard block), or is - waiting for a lock that would conflict with the blocked process's lock - request and is ahead of it in the wait queue (soft block). When using - parallel queries the result always lists client-visible process IDs - (that is, pg_backend_pid results) even if the - actual lock is held or awaited by a child worker process. As a result - of that, there may be duplicated PIDs in the result. Also note that - when a prepared transaction holds a conflicting lock, it will be - represented by a zero process ID. - - - Frequent calls to this function could have some impact on database - performance, because it needs exclusive access to the lock manager's - shared state for a short time. - - - - - - - pg_conf_load_time - - pg_conf_load_time () - timestamp with time zone - - - Returns the time when the server configuration files were last loaded. - If the current session was alive at the time, this will be the time - when the session itself re-read the configuration files (so the - reading will vary a little in different sessions). Otherwise it is - the time when the postmaster process re-read the configuration files. - - - - - - - pg_current_logfile - - - Logging - pg_current_logfile function - - - current_logfiles - and the pg_current_logfile function - - - Logging - current_logfiles file and the pg_current_logfile - function - - pg_current_logfile ( text ) - text - - - Returns the path name of the log file currently in use by the logging - collector. The path includes the - directory and the individual log file name. The result - is NULL if the logging collector is disabled. - When multiple log files exist, each in a different - format, pg_current_logfile without an argument - returns the path of the file having the first format found in the - ordered list: stderr, - csvlog, jsonlog. - NULL is returned if no log file has any of these - formats. - To request information about a specific log file format, supply - either csvlog, jsonlog or - stderr as the - value of the optional parameter. The result is NULL - if the log format requested is not configured in - . - The result reflects the contents of - the current_logfiles file. - - - This function is restricted to superusers and roles with privileges of - the pg_monitor role by default, but other users can - be granted EXECUTE to run the function. - - - - - - - pg_get_loaded_modules - - pg_get_loaded_modules () - setof record - ( module_name text, - version text, - file_name text ) - - - Returns a list of the loadable modules that are loaded into the - current server session. The module_name - and version fields are NULL unless the - module author supplied values for them using - the PG_MODULE_MAGIC_EXT macro. - The file_name field gives the file - name of the module (shared library). - - - - - - - pg_my_temp_schema - - pg_my_temp_schema () - oid - - - Returns the OID of the current session's temporary schema, or zero if - it has none (because it has not created any temporary tables). - - - - - - - pg_is_other_temp_schema - - pg_is_other_temp_schema ( oid ) - boolean - - - Returns true if the given OID is the OID of another session's - temporary schema. (This can be useful, for example, to exclude other - sessions' temporary tables from a catalog display.) 
- - - - - - - pg_jit_available - - pg_jit_available () - boolean - - - Returns true if a JIT compiler extension is - available (see ) and the - configuration parameter is set to - on. - - - - - - - pg_numa_available - - pg_numa_available () - boolean - - - Returns true if the server has been compiled with NUMA support. - - - - - - - pg_listening_channels - - pg_listening_channels () - setof text - - - Returns the set of names of asynchronous notification channels that - the current session is listening to. - - - - - - - pg_notification_queue_usage - - pg_notification_queue_usage () - double precision - - - Returns the fraction (0–1) of the asynchronous notification - queue's maximum size that is currently occupied by notifications that - are waiting to be processed. - See and - for more information. - - - - - - - pg_postmaster_start_time - - pg_postmaster_start_time () - timestamp with time zone - - - Returns the time when the server started. - - - - - - - pg_safe_snapshot_blocking_pids - - pg_safe_snapshot_blocking_pids ( integer ) - integer[] - - - Returns an array of the process ID(s) of the sessions that are blocking - the server process with the specified process ID from acquiring a safe - snapshot, or an empty array if there is no such server process or it - is not blocked. - - - A session running a SERIALIZABLE transaction blocks - a SERIALIZABLE READ ONLY DEFERRABLE transaction - from acquiring a snapshot until the latter determines that it is safe - to avoid taking any predicate locks. See - for more information about - serializable and deferrable transactions. - - - Frequent calls to this function could have some impact on database - performance, because it needs access to the predicate lock manager's - shared state for a short time. - - - - - - - pg_trigger_depth - - pg_trigger_depth () - integer - - - Returns the current nesting level - of PostgreSQL triggers (0 if not called, - directly or indirectly, from inside a trigger). - - - - - - - session_user - - session_user - name - - - Returns the session user's name. - - - - - - - system_user - - system_user - text - - - Returns the authentication method and the identity (if any) that the - user presented during the authentication cycle before they were - assigned a database role. It is represented as - auth_method:identity or - NULL if the user has not been authenticated (for - example if Trust authentication has - been used). - - - - - - - user - - user - name - - - This is equivalent to current_user. - - - - -
- - - - current_catalog, - current_role, - current_schema, - current_user, - session_user, - and user have special syntactic status - in SQL: they must be called without trailing - parentheses. In PostgreSQL, parentheses can optionally be used with - current_schema, but not with the others. - - - - - The session_user is normally the user who initiated - the current database connection; but superusers can change this setting - with . - The current_user is the user identifier - that is applicable for permission checking. Normally it is equal - to the session user, but it can be changed with - . - It also changes during the execution of - functions with the attribute SECURITY DEFINER. - In Unix parlance, the session user is the real user and - the current user is the effective user. - current_role and user are - synonyms for current_user. (The SQL standard draws - a distinction between current_role - and current_user, but PostgreSQL - does not, since it unifies users and roles into a single kind of entity.) - - -
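For example, the two identifiers diverge after SET ROLE (the role name here is hypothetical and must already be granted to the session user):

SELECT session_user, current_user;   -- initially the same

SET ROLE some_granted_role;          -- hypothetical role
SELECT session_user, current_user;   -- session_user unchanged; current_user is the role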
- - - Access Privilege Inquiry Functions - - - privilege - querying - - - - lists functions that - allow querying object access privileges programmatically. - (See for more information about - privileges.) - In these functions, the user whose privileges are being inquired about - can be specified by name or by OID - (pg_authid.oid), or if - the name is given as public then the privileges of the - PUBLIC pseudo-role are checked. Also, the user - argument can be omitted entirely, in which case - the current_user is assumed. - The object that is being inquired about can be specified either by name or - by OID, too. When specifying by name, a schema name can be included if - relevant. - The access privilege of interest is specified by a text string, which must - evaluate to one of the appropriate privilege keywords for the object's type - (e.g., SELECT). Optionally, WITH GRANT - OPTION can be added to a privilege type to test whether the - privilege is held with grant option. Also, multiple privilege types can be - listed separated by commas, in which case the result will be true if any of - the listed privileges is held. (Case of the privilege string is not - significant, and extra whitespace is allowed between but not within - privilege names.) - Some examples: - -SELECT has_table_privilege('myschema.mytable', 'select'); -SELECT has_table_privilege('joe', 'mytable', 'INSERT, SELECT WITH GRANT OPTION'); - - - - - Access Privilege Inquiry Functions - - - - - Function - - - Description - - - - - - - - - has_any_column_privilege - - has_any_column_privilege ( - user name or oid, - table text or oid, - privilege text ) - boolean - - - Does user have privilege for any column of table? - This succeeds either if the privilege is held for the whole table, or - if there is a column-level grant of the privilege for at least one - column. - Allowable privilege types are - SELECT, INSERT, - UPDATE, and REFERENCES. - - - - - - - has_column_privilege - - has_column_privilege ( - user name or oid, - table text or oid, - column text or smallint, - privilege text ) - boolean - - - Does user have privilege for the specified table column? - This succeeds either if the privilege is held for the whole table, or - if there is a column-level grant of the privilege for the column. - The column can be specified by name or by attribute number - (pg_attribute.attnum). - Allowable privilege types are - SELECT, INSERT, - UPDATE, and REFERENCES. - - - - - - - has_database_privilege - - has_database_privilege ( - user name or oid, - database text or oid, - privilege text ) - boolean - - - Does user have privilege for database? - Allowable privilege types are - CREATE, - CONNECT, - TEMPORARY, and - TEMP (which is equivalent to - TEMPORARY). - - - - - - - has_foreign_data_wrapper_privilege - - has_foreign_data_wrapper_privilege ( - user name or oid, - fdw text or oid, - privilege text ) - boolean - - - Does user have privilege for foreign-data wrapper? - The only allowable privilege type is USAGE. - - - - - - - has_function_privilege - - has_function_privilege ( - user name or oid, - function text or oid, - privilege text ) - boolean - - - Does user have privilege for function? - The only allowable privilege type is EXECUTE. - - - When specifying a function by name rather than by OID, the allowed - input is the same as for the regprocedure data type (see - ). 
- An example is: - -SELECT has_function_privilege('joeuser', 'myfunc(int, text)', 'execute'); - - - - - - - - has_language_privilege - - has_language_privilege ( - user name or oid, - language text or oid, - privilege text ) - boolean - - - Does user have privilege for language? - The only allowable privilege type is USAGE. - - - - - - - has_largeobject_privilege - - has_largeobject_privilege ( - user name or oid, - largeobject oid, - privilege text ) - boolean - - - Does user have privilege for large object? - Allowable privilege types are - SELECT and UPDATE. - - - - - - - has_parameter_privilege - - has_parameter_privilege ( - user name or oid, - parameter text, - privilege text ) - boolean - - - Does user have privilege for configuration parameter? - The parameter name is case-insensitive. - Allowable privilege types are SET - and ALTER SYSTEM. - - - - - - - has_schema_privilege - - has_schema_privilege ( - user name or oid, - schema text or oid, - privilege text ) - boolean - - - Does user have privilege for schema? - Allowable privilege types are - CREATE and - USAGE. - - - - - - - has_sequence_privilege - - has_sequence_privilege ( - user name or oid, - sequence text or oid, - privilege text ) - boolean - - - Does user have privilege for sequence? - Allowable privilege types are - USAGE, - SELECT, and - UPDATE. - - - - - - - has_server_privilege - - has_server_privilege ( - user name or oid, - server text or oid, - privilege text ) - boolean - - - Does user have privilege for foreign server? - The only allowable privilege type is USAGE. - - - - - - - has_table_privilege - - has_table_privilege ( - user name or oid, - table text or oid, - privilege text ) - boolean - - - Does user have privilege for table? - Allowable privilege types - are SELECT, INSERT, - UPDATE, DELETE, - TRUNCATE, REFERENCES, - TRIGGER, and MAINTAIN. - - - - - - - has_tablespace_privilege - - has_tablespace_privilege ( - user name or oid, - tablespace text or oid, - privilege text ) - boolean - - - Does user have privilege for tablespace? - The only allowable privilege type is CREATE. - - - - - - - has_type_privilege - - has_type_privilege ( - user name or oid, - type text or oid, - privilege text ) - boolean - - - Does user have privilege for data type? - The only allowable privilege type is USAGE. - When specifying a type by name rather than by OID, the allowed input - is the same as for the regtype data type (see - ). - - - - - - - pg_has_role - - pg_has_role ( - user name or oid, - role text or oid, - privilege text ) - boolean - - - Does user have privilege for role? - Allowable privilege types are - MEMBER, USAGE, - and SET. - MEMBER denotes direct or indirect membership in - the role without regard to what specific privileges may be conferred. - USAGE denotes whether the privileges of the role - are immediately available without doing SET ROLE, - while SET denotes whether it is possible to change - to the role using the SET ROLE command. - WITH ADMIN OPTION or WITH GRANT - OPTION can be added to any of these privilege types to - test whether the ADMIN privilege is held (all - six spellings test the same thing). - This function does not allow the special case of - setting user to public, - because the PUBLIC pseudo-role can never be a member of real roles. - - - - - - - row_security_active - - row_security_active ( - table text or oid ) - boolean - - - Is row-level security active for the specified table in the context of - the current user and current environment? - - - - -
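A few further illustrations (the role and table names are hypothetical):

-- Is joe a member of role admins, directly or indirectly?
SELECT pg_has_role('joe', 'admins', 'MEMBER');

-- Could joe switch to admins with SET ROLE?
SELECT pg_has_role('joe', 'admins', 'SET');

-- Does the current user have UPDATE on at least one column of myschema.mytable?
SELECT has_any_column_privilege('myschema.mytable', 'UPDATE');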
- - - shows the operators - available for the aclitem type, which is the catalog - representation of access privileges. See - for information about how to read access privilege values. - - - - <type>aclitem</type> Operators - - - - - Operator - - - Description - - - Example(s) - - - - - - - - - aclitemeq - - aclitem = aclitem - boolean - - - Are aclitems equal? (Notice that - type aclitem lacks the usual set of comparison - operators; it has only equality. In turn, aclitem - arrays can only be compared for equality.) - - - 'calvin=r*w/hobbes'::aclitem = 'calvin=r*w*/hobbes'::aclitem - f - - - - - - - aclcontains - - aclitem[] @> aclitem - boolean - - - Does array contain the specified privileges? (This is true if there - is an array entry that matches the aclitem's grantee and - grantor, and has at least the specified set of privileges.) - - - '{calvin=r*w/hobbes,hobbes=r*w*/postgres}'::aclitem[] @> 'calvin=r*/hobbes'::aclitem - t - - - - - - aclitem[] ~ aclitem - boolean - - - This is a deprecated alias for @>. - - - '{calvin=r*w/hobbes,hobbes=r*w*/postgres}'::aclitem[] ~ 'calvin=r*/hobbes'::aclitem - t - - - - -
- - - shows some additional - functions to manage the aclitem type. - - - - <type>aclitem</type> Functions - - - - - Function - - - Description - - - - - - - - - acldefault - - acldefault ( - type "char", - ownerId oid ) - aclitem[] - - - Constructs an aclitem array holding the default access - privileges for an object of type type belonging - to the role with OID ownerId. This represents - the access privileges that will be assumed when an object's - ACL entry is null. (The default access privileges - are described in .) - The type parameter must be one of - 'c' for COLUMN, - 'r' for TABLE and table-like objects, - 's' for SEQUENCE, - 'd' for DATABASE, - 'f' for FUNCTION or PROCEDURE, - 'l' for LANGUAGE, - 'L' for LARGE OBJECT, - 'n' for SCHEMA, - 'p' for PARAMETER, - 't' for TABLESPACE, - 'F' for FOREIGN DATA WRAPPER, - 'S' for FOREIGN SERVER, - or - 'T' for TYPE or DOMAIN. - - - - - - - aclexplode - - aclexplode ( aclitem[] ) - setof record - ( grantor oid, - grantee oid, - privilege_type text, - is_grantable boolean ) - - - Returns the aclitem array as a set of rows. - If the grantee is the pseudo-role PUBLIC, it is represented by zero in - the grantee column. Each granted privilege is - represented as SELECT, INSERT, - etc (see for a full list). - Note that each privilege is broken out as a separate row, so - only one keyword appears in the privilege_type - column. - - - - - - - makeaclitem - - makeaclitem ( - grantee oid, - grantor oid, - privileges text, - is_grantable boolean ) - aclitem - - - Constructs an aclitem with the given properties. - privileges is a comma-separated list of - privilege names such as SELECT, - INSERT, etc, all of which are set in the - result. (Case of the privilege string is not significant, and - extra whitespace is allowed between but not within privilege - names.) - - - - -
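For example, a relation's ACL can be expanded into one row per granted privilege, and an aclitem can be constructed from scratch (the table and role names are hypothetical):

-- One row per privilege granted on mytable (a grantee of zero means PUBLIC).
SELECT a.grantee::regrole AS grantee, a.privilege_type, a.is_grantable
FROM pg_class AS c, aclexplode(c.relacl) AS a
WHERE c.relname = 'mytable';

-- An aclitem granting SELECT and UPDATE from postgres to joe, without grant option.
SELECT makeaclitem('joe'::regrole::oid, 'postgres'::regrole::oid,
                   'SELECT, UPDATE', false);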
- -
- - - Schema Visibility Inquiry Functions - - - shows functions that - determine whether a certain object is visible in the - current schema search path. - For example, a table is said to be visible if its - containing schema is in the search path and no table of the same - name appears earlier in the search path. This is equivalent to the - statement that the table can be referenced by name without explicit - schema qualification. Thus, to list the names of all visible tables: - -SELECT relname FROM pg_class WHERE pg_table_is_visible(oid); - - For functions and operators, an object in the search path is said to be - visible if there is no object of the same name and argument data - type(s) earlier in the path. For operator classes and families, - both the name and the associated index access method are considered. - - - - search path - object visibility - - - - Schema Visibility Inquiry Functions - - - - - Function - - - Description - - - - - - - - - pg_collation_is_visible - - pg_collation_is_visible ( collation oid ) - boolean - - - Is collation visible in search path? - - - - - - - pg_conversion_is_visible - - pg_conversion_is_visible ( conversion oid ) - boolean - - - Is conversion visible in search path? - - - - - - - pg_function_is_visible - - pg_function_is_visible ( function oid ) - boolean - - - Is function visible in search path? - (This also works for procedures and aggregates.) - - - - - - - pg_opclass_is_visible - - pg_opclass_is_visible ( opclass oid ) - boolean - - - Is operator class visible in search path? - - - - - - - pg_operator_is_visible - - pg_operator_is_visible ( operator oid ) - boolean - - - Is operator visible in search path? - - - - - - - pg_opfamily_is_visible - - pg_opfamily_is_visible ( opclass oid ) - boolean - - - Is operator family visible in search path? - - - - - - - pg_statistics_obj_is_visible - - pg_statistics_obj_is_visible ( stat oid ) - boolean - - - Is statistics object visible in search path? - - - - - - - pg_table_is_visible - - pg_table_is_visible ( table oid ) - boolean - - - Is table visible in search path? - (This works for all types of relations, including views, materialized - views, indexes, sequences and foreign tables.) - - - - - - - pg_ts_config_is_visible - - pg_ts_config_is_visible ( config oid ) - boolean - - - Is text search configuration visible in search path? - - - - - - - pg_ts_dict_is_visible - - pg_ts_dict_is_visible ( dict oid ) - boolean - - - Is text search dictionary visible in search path? - - - - - - - pg_ts_parser_is_visible - - pg_ts_parser_is_visible ( parser oid ) - boolean - - - Is text search parser visible in search path? - - - - - - - pg_ts_template_is_visible - - pg_ts_template_is_visible ( template oid ) - boolean - - - Is text search template visible in search path? - - - - - - - pg_type_is_visible - - pg_type_is_visible ( type oid ) - boolean - - - Is type (or domain) visible in search path? - - - - -
- - - All these functions require object OIDs to identify the object to be - checked. If you want to test an object by name, it is convenient to use - the OID alias types (regclass, regtype, - regprocedure, regoperator, regconfig, - or regdictionary), - for example: - -SELECT pg_type_is_visible('myschema.widget'::regtype); - - Note that it would not make much sense to test a non-schema-qualified - type name in this way — if the name can be recognized at all, it must be visible. - - -
- - - System Catalog Information Functions - - - lists functions that - extract information from the system catalogs. - - - - System Catalog Information Functions - - - - - Function - - - Description - - - - - - - - - format_type - - format_type ( type oid, typemod integer ) - text - - - Returns the SQL name for a data type that is identified by its type - OID and possibly a type modifier. Pass NULL for the type modifier if - no specific modifier is known. - - - - - - - pg_basetype - - pg_basetype ( regtype ) - regtype - - - Returns the OID of the base type of a domain identified by its - type OID. If the argument is the OID of a non-domain type, - returns the argument as-is. Returns NULL if the argument is - not a valid type OID. If there's a chain of domain dependencies, - it will recurse until finding the base type. - - - Assuming CREATE DOMAIN mytext AS text: - - - pg_basetype('mytext'::regtype) - text - - - - - - - pg_char_to_encoding - - pg_char_to_encoding ( encoding name ) - integer - - - Converts the supplied encoding name into an integer representing the - internal identifier used in some system catalog tables. - Returns -1 if an unknown encoding name is provided. - - - - - - - pg_encoding_to_char - - pg_encoding_to_char ( encoding integer ) - name - - - Converts the integer used as the internal identifier of an encoding in some - system catalog tables into a human-readable string. - Returns an empty string if an invalid encoding number is provided. - - - - - - - pg_get_catalog_foreign_keys - - pg_get_catalog_foreign_keys () - setof record - ( fktable regclass, - fkcols text[], - pktable regclass, - pkcols text[], - is_array boolean, - is_opt boolean ) - - - Returns a set of records describing the foreign key relationships - that exist within the PostgreSQL system - catalogs. - The fktable column contains the name of the - referencing catalog, and the fkcols column - contains the name(s) of the referencing column(s). Similarly, - the pktable column contains the name of the - referenced catalog, and the pkcols column - contains the name(s) of the referenced column(s). - If is_array is true, the last referencing - column is an array, each of whose elements should match some entry - in the referenced catalog. - If is_opt is true, the referencing column(s) - are allowed to contain zeroes instead of a valid reference. - - - - - - - pg_get_constraintdef - - pg_get_constraintdef ( constraint oid , pretty boolean ) - text - - - Reconstructs the creating command for a constraint. - (This is a decompiled reconstruction, not the original text - of the command.) - - - - - - - pg_get_expr - - pg_get_expr ( expr pg_node_tree, relation oid , pretty boolean ) - text - - - Decompiles the internal form of an expression stored in the system - catalogs, such as the default value for a column. If the expression - might contain Vars, specify the OID of the relation they refer to as - the second parameter; if no Vars are expected, passing zero is - sufficient. - - - - - - - pg_get_functiondef - - pg_get_functiondef ( func oid ) - text - - - Reconstructs the creating command for a function or procedure. - (This is a decompiled reconstruction, not the original text - of the command.) - The result is a complete CREATE OR REPLACE FUNCTION - or CREATE OR REPLACE PROCEDURE statement. 
- - - - - - - pg_get_function_arguments - - pg_get_function_arguments ( func oid ) - text - - - Reconstructs the argument list of a function or procedure, in the form - it would need to appear in within CREATE FUNCTION - (including default values). - - - - - - - pg_get_function_identity_arguments - - pg_get_function_identity_arguments ( func oid ) - text - - - Reconstructs the argument list necessary to identify a function or - procedure, in the form it would need to appear in within commands such - as ALTER FUNCTION. This form omits default values. - - - - - - - pg_get_function_result - - pg_get_function_result ( func oid ) - text - - - Reconstructs the RETURNS clause of a function, in - the form it would need to appear in within CREATE - FUNCTION. Returns NULL for a procedure. - - - - - - - pg_get_indexdef - - pg_get_indexdef ( index oid , column integer, pretty boolean ) - text - - - Reconstructs the creating command for an index. - (This is a decompiled reconstruction, not the original text - of the command.) If column is supplied and is - not zero, only the definition of that column is reconstructed. - - - - - - - pg_get_keywords - - pg_get_keywords () - setof record - ( word text, - catcode "char", - barelabel boolean, - catdesc text, - baredesc text ) - - - Returns a set of records describing the SQL keywords recognized by the - server. The word column contains the - keyword. The catcode column contains a - category code: U for an unreserved - keyword, C for a keyword that can be a column - name, T for a keyword that can be a type or - function name, or R for a fully reserved keyword. - The barelabel column - contains true if the keyword can be used as - a bare column label in SELECT lists, - or false if it can only be used - after AS. - The catdesc column contains a - possibly-localized string describing the keyword's category. - The baredesc column contains a - possibly-localized string describing the keyword's column label status. - - - - - - - pg_get_partkeydef - - pg_get_partkeydef ( table oid ) - text - - - Reconstructs the definition of a partitioned table's partition - key, in the form it would have in the PARTITION - BY clause of CREATE TABLE. - (This is a decompiled reconstruction, not the original text - of the command.) - - - - - - - pg_get_ruledef - - pg_get_ruledef ( rule oid , pretty boolean ) - text - - - Reconstructs the creating command for a rule. - (This is a decompiled reconstruction, not the original text - of the command.) - - - - - - - pg_get_serial_sequence - - pg_get_serial_sequence ( table text, column text ) - text - - - Returns the name of the sequence associated with a column, - or NULL if no sequence is associated with the column. - If the column is an identity column, the associated sequence is the - sequence internally created for that column. - For columns created using one of the serial types - (serial, smallserial, bigserial), - it is the sequence created for that serial column definition. - In the latter case, the association can be modified or removed - with ALTER SEQUENCE OWNED BY. - (This function probably should have been - called pg_get_owned_sequence; its current name - reflects the fact that it has historically been used with serial-type - columns.) The first parameter is a table name with optional - schema, and the second parameter is a column name. Because the first - parameter potentially contains both schema and table names, it is - parsed per usual SQL rules, meaning it is lower-cased by default. 
- The second parameter, being just a column name, is treated literally - and so has its case preserved. The result is suitably formatted - for passing to the sequence functions (see - ). - - - A typical use is in reading the current value of the sequence for an - identity or serial column, for example: - -SELECT currval(pg_get_serial_sequence('sometable', 'id')); - - - - - - - - pg_get_statisticsobjdef - - pg_get_statisticsobjdef ( statobj oid ) - text - - - Reconstructs the creating command for an extended statistics object. - (This is a decompiled reconstruction, not the original text - of the command.) - - - - - - - pg_get_triggerdef - -pg_get_triggerdef ( trigger oid , pretty boolean ) - text - - - Reconstructs the creating command for a trigger. - (This is a decompiled reconstruction, not the original text - of the command.) - - - - - - - pg_get_userbyid - - pg_get_userbyid ( role oid ) - name - - - Returns a role's name given its OID. - - - - - - - pg_get_viewdef - - pg_get_viewdef ( view oid , pretty boolean ) - text - - - Reconstructs the underlying SELECT command for a - view or materialized view. (This is a decompiled reconstruction, not - the original text of the command.) - - - - - - pg_get_viewdef ( view oid, wrap_column integer ) - text - - - Reconstructs the underlying SELECT command for a - view or materialized view. (This is a decompiled reconstruction, not - the original text of the command.) In this form of the function, - pretty-printing is always enabled, and long lines are wrapped to try - to keep them shorter than the specified number of columns. - - - - - - pg_get_viewdef ( view text , pretty boolean ) - text - - - Reconstructs the underlying SELECT command for a - view or materialized view, working from a textual name for the view - rather than its OID. (This is deprecated; use the OID variant - instead.) - - - - - - - pg_index_column_has_property - - pg_index_column_has_property ( index regclass, column integer, property text ) - boolean - - - Tests whether an index column has the named property. - Common index column properties are listed in - . - (Note that extension access methods can define additional property - names for their indexes.) - NULL is returned if the property name is not known - or does not apply to the particular object, or if the OID or column - number does not identify a valid object. - - - - - - - pg_index_has_property - - pg_index_has_property ( index regclass, property text ) - boolean - - - Tests whether an index has the named property. - Common index properties are listed in - . - (Note that extension access methods can define additional property - names for their indexes.) - NULL is returned if the property name is not known - or does not apply to the particular object, or if the OID does not - identify a valid object. - - - - - - - pg_indexam_has_property - - pg_indexam_has_property ( am oid, property text ) - boolean - - - Tests whether an index access method has the named property. - Access method properties are listed in - . - NULL is returned if the property name is not known - or does not apply to the particular object, or if the OID does not - identify a valid object. - - - - - - - pg_options_to_table - - pg_options_to_table ( options_array text[] ) - setof record - ( option_name text, - option_value text ) - - - Returns the set of storage options represented by a value from - pg_class.reloptions or - pg_attribute.attoptions. 
- - - - - - - pg_settings_get_flags - - pg_settings_get_flags ( guc text ) - text[] - - - Returns an array of the flags associated with the given GUC, or - NULL if it does not exist. The result is - an empty array if the GUC exists but there are no flags to show. - Only the most useful flags listed in - are exposed. - - - - - - - pg_tablespace_databases - - pg_tablespace_databases ( tablespace oid ) - setof oid - - - Returns the set of OIDs of databases that have objects stored in the - specified tablespace. If this function returns any rows, the - tablespace is not empty and cannot be dropped. To identify the specific - objects populating the tablespace, you will need to connect to the - database(s) identified by pg_tablespace_databases - and query their pg_class catalogs. - - - - - - - pg_tablespace_location - - pg_tablespace_location ( tablespace oid ) - text - - - Returns the file system path that this tablespace is located in. - - - - - - - pg_typeof - - pg_typeof ( "any" ) - regtype - - - Returns the OID of the data type of the value that is passed to it. - This can be helpful for troubleshooting or dynamically constructing - SQL queries. The function is declared as - returning regtype, which is an OID alias type (see - ); this means that it is the same as an - OID for comparison purposes but displays as a type name. - - - pg_typeof(33) - integer - - - - - - - COLLATION FOR - - COLLATION FOR ( "any" ) - text - - - Returns the name of the collation of the value that is passed to it. - The value is quoted and schema-qualified if necessary. If no - collation was derived for the argument expression, - then NULL is returned. If the argument is not of a - collatable data type, then an error is raised. - - - collation for ('foo'::text) - "default" - - - collation for ('foo' COLLATE "de_DE") - "de_DE" - - - - - - - to_regclass - - to_regclass ( text ) - regclass - - - Translates a textual relation name to its OID. A similar result is - obtained by casting the string to type regclass (see - ); however, this function will return - NULL rather than throwing an error if the name is - not found. - - - - - - - to_regdatabase - - to_regdatabase ( text ) - regdatabase - - - Translates a textual database name to its OID. A similar result is - obtained by casting the string to type regdatabase (see - ); however, this function will return - NULL rather than throwing an error if the name is - not found. - - - - - - - to_regcollation - - to_regcollation ( text ) - regcollation - - - Translates a textual collation name to its OID. A similar result is - obtained by casting the string to type regcollation (see - ); however, this function will return - NULL rather than throwing an error if the name is - not found. - - - - - - - to_regnamespace - - to_regnamespace ( text ) - regnamespace - - - Translates a textual schema name to its OID. A similar result is - obtained by casting the string to type regnamespace (see - ); however, this function will return - NULL rather than throwing an error if the name is - not found. - - - - - - - to_regoper - - to_regoper ( text ) - regoper - - - Translates a textual operator name to its OID. A similar result is - obtained by casting the string to type regoper (see - ); however, this function will return - NULL rather than throwing an error if the name is - not found or is ambiguous. - - - - - - - to_regoperator - - to_regoperator ( text ) - regoperator - - - Translates a textual operator name (with parameter types) to its OID. 
A similar result is - obtained by casting the string to type regoperator (see - ); however, this function will return - NULL rather than throwing an error if the name is - not found. - - - - - - - to_regproc - - to_regproc ( text ) - regproc - - - Translates a textual function or procedure name to its OID. A similar result is - obtained by casting the string to type regproc (see - ); however, this function will return - NULL rather than throwing an error if the name is - not found or is ambiguous. - - - - - - - to_regprocedure - - to_regprocedure ( text ) - regprocedure - - - Translates a textual function or procedure name (with argument types) to its OID. A similar result is - obtained by casting the string to type regprocedure (see - ); however, this function will return - NULL rather than throwing an error if the name is - not found. - - - - - - - to_regrole - - to_regrole ( text ) - regrole - - - Translates a textual role name to its OID. A similar result is - obtained by casting the string to type regrole (see - ); however, this function will return - NULL rather than throwing an error if the name is - not found. - - - - - - - to_regtype - - to_regtype ( text ) - regtype - - - Parses a string of text, extracts a potential type name from it, - and translates that name into a type OID. A syntax error in the - string will result in an error; but if the string is a - syntactically valid type name that happens not to be found in the - catalogs, the result is NULL. A similar result - is obtained by casting the string to type regtype - (see ), except that that form will throw - an error if the name is not found. - - - - - - - to_regtypemod - - to_regtypemod ( text ) - integer - - - Parses a string of text, extracts a potential type name from it, - and translates its type modifier, if any. A syntax error in the - string will result in an error; but if the string is a - syntactically valid type name that happens not to be found in the - catalogs, the result is NULL. The result is - -1 if no type modifier is present. - - - to_regtypemod can be combined with - to produce appropriate inputs for - , allowing a string representing a - type name to be canonicalized. - - - format_type(to_regtype('varchar(32)'), to_regtypemod('varchar(32)')) - character varying(32) - - - - -
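As a sketch of how the decompilation helpers are typically used, the following query lists the column default expressions of a hypothetical table mytable by feeding pg_attrdef.adbin to pg_get_expr:

SELECT a.attname, pg_get_expr(d.adbin, d.adrelid) AS default_expr
FROM pg_attrdef d
     JOIN pg_attribute a ON a.attrelid = d.adrelid AND a.attnum = d.adnum
WHERE d.adrelid = 'mytable'::regclass;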
- - - Most of the functions that reconstruct (decompile) database objects - have an optional pretty flag, which - if true causes the result to - be pretty-printed. Pretty-printing suppresses unnecessary - parentheses and adds whitespace for legibility. - The pretty-printed format is more readable, but the default format - is more likely to be interpreted the same way by future versions of - PostgreSQL; so avoid using pretty-printed output - for dump purposes. Passing false for - the pretty parameter yields the same result as - omitting the parameter. - - - - Index Column Properties - - - NameDescription - - - - asc - Does the column sort in ascending order on a forward scan? - - - - desc - Does the column sort in descending order on a forward scan? - - - - nulls_first - Does the column sort with nulls first on a forward scan? - - - - nulls_last - Does the column sort with nulls last on a forward scan? - - - - orderable - Does the column possess any defined sort ordering? - - - - distance_orderable - Can the column be scanned in order by a distance - operator, for example ORDER BY col <-> constant ? - - - - returnable - Can the column value be returned by an index-only scan? - - - - search_array - Does the column natively support col = ANY(array) - searches? - - - - search_nulls - Does the column support IS NULL and - IS NOT NULL searches? - - - - -
- - - Index Properties - - - NameDescription - - - - clusterable - Can the index be used in a CLUSTER command? - - - - index_scan - Does the index support plain (non-bitmap) scans? - - - - bitmap_scan - Does the index support bitmap scans? - - - - backward_scan - Can the scan direction be changed in mid-scan (to - support FETCH BACKWARD on a cursor without - needing materialization)? - - - - -
- - - Index Access Method Properties - - - NameDescription - - - - can_order - Does the access method support ASC, - DESC and related keywords in - CREATE INDEX? - - - - can_unique - Does the access method support unique indexes? - - - - can_multi_col - Does the access method support indexes with multiple columns? - - - - can_exclude - Does the access method support exclusion constraints? - - - - can_include - Does the access method support the INCLUDE - clause of CREATE INDEX? - - - - -
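For example, to probe the built-in btree access method for each of the properties listed above (the property list is spelled out here only for illustration):

SELECT p.name, pg_indexam_has_property(a.oid, p.name) AS has_property
FROM pg_am a,
     unnest(ARRAY['can_order', 'can_unique', 'can_multi_col',
                  'can_exclude', 'can_include']) AS p(name)
WHERE a.amname = 'btree';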
- - - GUC Flags - - - FlagDescription - - - - EXPLAIN - Parameters with this flag are included in - EXPLAIN (SETTINGS) commands. - - - - NO_SHOW_ALL - Parameters with this flag are excluded from - SHOW ALL commands. - - - - NO_RESET - Parameters with this flag do not support - RESET commands. - - - - NO_RESET_ALL - Parameters with this flag are excluded from - RESET ALL commands. - - - - NOT_IN_SAMPLE - Parameters with this flag are not included in - postgresql.conf by default. - - - - RUNTIME_COMPUTED - Parameters with this flag are runtime-computed ones. - - - - -
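For example, to inspect the flags of a particular parameter (the exact flags returned depend on the server version and build):

SELECT pg_settings_get_flags('wal_segment_size');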
- -
- - - Object Information and Addressing Functions - - - lists functions related to - database object identification and addressing. - - - - Object Information and Addressing Functions - - - - - Function - - - Description - - - - - - - - - pg_get_acl - - pg_get_acl ( classid oid, objid oid, objsubid integer ) - aclitem[] - - - Returns the ACL for a database object, specified - by catalog OID, object OID and sub-object ID. This function returns - NULL values for undefined objects. - - - - - - - pg_describe_object - - pg_describe_object ( classid oid, objid oid, objsubid integer ) - text - - - Returns a textual description of a database object identified by - catalog OID, object OID, and sub-object ID (such as a column number - within a table; the sub-object ID is zero when referring to a whole - object). This description is intended to be human-readable, and might - be translated, depending on server configuration. This is especially - useful to determine the identity of an object referenced in the - pg_depend catalog. This function returns - NULL values for undefined objects. - - - - - - - pg_identify_object - - pg_identify_object ( classid oid, objid oid, objsubid integer ) - record - ( type text, - schema text, - name text, - identity text ) - - - Returns a row containing enough information to uniquely identify the - database object specified by catalog OID, object OID and sub-object - ID. - This information is intended to be machine-readable, and is never - translated. - type identifies the type of database object; - schema is the schema name that the object - belongs in, or NULL for object types that do not - belong to schemas; - name is the name of the object, quoted if - necessary, if the name (along with schema name, if pertinent) is - sufficient to uniquely identify the object, - otherwise NULL; - identity is the complete object identity, with - the precise format depending on object type, and each name within the - format being schema-qualified and quoted as necessary. Undefined - objects are identified with NULL values. - - - - - - - pg_identify_object_as_address - - pg_identify_object_as_address ( classid oid, objid oid, objsubid integer ) - record - ( type text, - object_names text[], - object_args text[] ) - - - Returns a row containing enough information to uniquely identify the - database object specified by catalog OID, object OID and sub-object - ID. - The returned information is independent of the current server, that - is, it could be used to identify an identically named object in - another server. - type identifies the type of database object; - object_names and - object_args - are text arrays that together form a reference to the object. - These three values can be passed - to pg_get_object_address to obtain the internal - address of the object. - - - - - - - pg_get_object_address - - pg_get_object_address ( type text, object_names text[], object_args text[] ) - record - ( classid oid, - objid oid, - objsubid integer ) - - - Returns a row containing enough information to uniquely identify the - database object specified by a type code and object name and argument - arrays. - The returned values are the ones that would be used in system catalogs - such as pg_depend; they can be passed to - other system functions such as pg_describe_object - or pg_identify_object. - classid is the OID of the system catalog - containing the object; - objid is the OID of the object itself, and - objsubid is the sub-object ID, or zero if none. 
- This function is the inverse - of pg_identify_object_as_address. - Undefined objects are identified with NULL values. - - - - -
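For example, the following round trip identifies the pg_type catalog as a portable address and converts that address back into catalog OIDs (the commented output is indicative):

SELECT * FROM pg_identify_object_as_address('pg_class'::regclass, 'pg_type'::regclass, 0);
-- type = table, object_names = {pg_catalog,pg_type}, object_args = {}
SELECT * FROM pg_get_object_address('table', '{pg_catalog,pg_type}', '{}');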
- - - pg_get_acl is useful for retrieving and inspecting - the privileges associated with database objects without looking at - specific catalogs. For example, to retrieve all the granted privileges - on objects in the current database: - -postgres=# SELECT - (pg_identify_object(s.classid,s.objid,s.objsubid)).*, - pg_catalog.pg_get_acl(s.classid,s.objid,s.objsubid) AS acl -FROM pg_catalog.pg_shdepend AS s -JOIN pg_catalog.pg_database AS d - ON d.datname = current_database() AND - d.oid = s.dbid -JOIN pg_catalog.pg_authid AS a - ON a.oid = s.refobjid AND - s.refclassid = 'pg_authid'::regclass -WHERE s.deptype = 'a'; --[ RECORD 1 ]----------------------------------------- -type | table -schema | public -name | testtab -identity | public.testtab -acl | {postgres=arwdDxtm/postgres,foo=r/postgres} - - - -
- - - Comment Information Functions - - - comment - about database objects - - - - The functions shown in - extract comments previously stored with the - command. A null value is returned if no - comment could be found for the specified parameters. - - - - Comment Information Functions - - - - - Function - - - Description - - - - - - - - - col_description - - col_description ( table oid, column integer ) - text - - - Returns the comment for a table column, which is specified by the OID - of its table and its column number. - (obj_description cannot be used for table - columns, since columns do not have OIDs of their own.) - - - - - - - obj_description - - obj_description ( object oid, catalog name ) - text - - - Returns the comment for a database object specified by its OID and the - name of the containing system catalog. For - example, obj_description(123456, 'pg_class') would - retrieve the comment for the table with OID 123456. - - - - - - obj_description ( object oid ) - text - - - Returns the comment for a database object specified by its OID alone. - This is deprecated since there is no guarantee - that OIDs are unique across different system catalogs; therefore, the - wrong comment might be returned. - - - - - - - shobj_description - - shobj_description ( object oid, catalog name ) - text - - - Returns the comment for a shared database object specified by its OID - and the name of the containing system catalog. This is just - like obj_description except that it is used for - retrieving comments on shared objects (that is, databases, roles, and - tablespaces). Some system catalogs are global to all databases within - each cluster, and the descriptions for objects in them are stored - globally as well. - - - - -
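For example, assuming a hypothetical table mytable whose first column has been commented:

COMMENT ON COLUMN mytable.id IS 'surrogate key';
SELECT col_description('mytable'::regclass, 1);            -- comment on column number 1
SELECT obj_description('mytable'::regclass, 'pg_class');   -- comment on the table itself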
- -
- - - Data Validity Checking Functions - - - The functions shown in - can be helpful for checking validity of proposed input data. - - - - Data Validity Checking Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - pg_input_is_valid - - pg_input_is_valid ( - string text, - type text - ) - boolean - - - Tests whether the given string is valid - input for the specified data type, returning true or false. - - - This function will only work as desired if the data type's input - function has been updated to report invalid input as - a soft error. Otherwise, invalid input will abort - the transaction, just as if the string had been cast to the type - directly. - - - pg_input_is_valid('42', 'integer') - t - - - pg_input_is_valid('42000000000', 'integer') - f - - - pg_input_is_valid('1234.567', 'numeric(7,4)') - f - - - - - - pg_input_error_info - - pg_input_error_info ( - string text, - type text - ) - record - ( message text, - detail text, - hint text, - sql_error_code text ) - - - Tests whether the given string is valid - input for the specified data type; if not, returns the details of - the error that would have been thrown. If the input is valid, the - results are NULL. The inputs are the same as - for pg_input_is_valid. - - - This function will only work as desired if the data type's input - function has been updated to report invalid input as - a soft error. Otherwise, invalid input will abort - the transaction, just as if the string had been cast to the type - directly. - - - SELECT * FROM pg_input_error_info('42000000000', 'integer') - - - message | detail | hint | sql_error_code -------------------------------------------------------+--------+------+---------------- - value "42000000000" is out of range for type integer | | | 22003 - - - - - -
- -
- - - Transaction ID and Snapshot Information Functions - - - The functions shown in - provide server transaction information in an exportable form. The main - use of these functions is to determine which transactions were committed - between two snapshots. - - - - Transaction ID and Snapshot Information Functions - - - - - Function - - - Description - - - - - - - - - age - - age ( xid ) - integer - - - Returns the number of transactions between the supplied - transaction ID and the current transaction counter. - - - - - - - mxid_age - - mxid_age ( xid ) - integer - - - Returns the number of multixact IDs between the supplied - multixact ID and the current multixact counter. - - - - - - - pg_current_xact_id - - pg_current_xact_id () - xid8 - - - Returns the current transaction's ID. It will assign a new one if the - current transaction does not have one already (because it has not - performed any database updates); see for details. If executed in a - subtransaction, this will return the top-level transaction ID; - see for details. - - - - - - - pg_current_xact_id_if_assigned - - pg_current_xact_id_if_assigned () - xid8 - - - Returns the current transaction's ID, or NULL if no - ID is assigned yet. (It's best to use this variant if the transaction - might otherwise be read-only, to avoid unnecessary consumption of an - XID.) - If executed in a subtransaction, this will return the top-level - transaction ID. - - - - - - - pg_xact_status - - pg_xact_status ( xid8 ) - text - - - Reports the commit status of a recent transaction. - The result is one of in progress, - committed, or aborted, - provided that the transaction is recent enough that the system retains - the commit status of that transaction. - If it is old enough that no references to the transaction survive in - the system and the commit status information has been discarded, the - result is NULL. - Applications might use this function, for example, to determine - whether their transaction committed or aborted after the application - and database server become disconnected while - a COMMIT is in progress. - Note that prepared transactions are reported as in - progress; applications must check pg_prepared_xacts - if they need to determine whether a transaction ID belongs to a - prepared transaction. - - - - - - - pg_current_snapshot - - pg_current_snapshot () - pg_snapshot - - - Returns a current snapshot, a data structure - showing which transaction IDs are now in-progress. - Only top-level transaction IDs are included in the snapshot; - subtransaction IDs are not shown; see - for details. - - - - - - - pg_snapshot_xip - - pg_snapshot_xip ( pg_snapshot ) - setof xid8 - - - Returns the set of in-progress transaction IDs contained in a snapshot. - - - - - - - pg_snapshot_xmax - - pg_snapshot_xmax ( pg_snapshot ) - xid8 - - - Returns the xmax of a snapshot. - - - - - - - pg_snapshot_xmin - - pg_snapshot_xmin ( pg_snapshot ) - xid8 - - - Returns the xmin of a snapshot. - - - - - - - pg_visible_in_snapshot - - pg_visible_in_snapshot ( xid8, pg_snapshot ) - boolean - - - Is the given transaction ID visible according - to this snapshot (that is, was it completed before the snapshot was - taken)? Note that this function will not give the correct answer for - a subtransaction ID (subxid); see for - details. - - - - - - - pg_get_multixact_members - - pg_get_multixact_members ( multixid xid ) - setof record - ( xid xid, - mode text ) - - - Returns the transaction ID and lock mode for each member of the - specified multixact ID.
The lock modes forupd, - fornokeyupd, sh, and - keysh correspond to the row-level locks - FOR UPDATE, FOR NO KEY UPDATE, - FOR SHARE, and FOR KEY SHARE, - respectively, as described in . Two - additional modes are specific to multixacts: - nokeyupd, used by updates that do not modify key - columns, and upd, used by updates or deletes that - modify key columns. - - - - -
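A minimal illustration of how these functions fit together; because the current transaction is still in progress, it is not visible to its own snapshot:

SELECT pg_current_xact_id() AS xid, pg_current_snapshot() AS snapshot;
SELECT pg_visible_in_snapshot(pg_current_xact_id(), pg_current_snapshot());   -- returns false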
- - - The internal transaction ID type xid is 32 bits wide and - wraps around every 4 billion transactions. However, - the functions shown in , except - age, mxid_age, and - pg_get_multixact_members, use a - 64-bit type xid8 that does not wrap around during the life - of an installation and can be converted to xid by casting if - required; see for details. - The data type pg_snapshot stores information about - transaction ID visibility at a particular moment in time. Its components - are described in . - pg_snapshot's textual representation is - xmin:xmax:xip_list. - For example 10:20:10,14,15 means - xmin=10, xmax=20, xip_list=10, 14, 15. - - - - Snapshot Components - - - - Name - Description - - - - - - xmin - - Lowest transaction ID that was still active. All transaction IDs - less than xmin are either committed and visible, - or rolled back and dead. - - - - - xmax - - One past the highest completed transaction ID. All transaction IDs - greater than or equal to xmax had not yet - completed as of the time of the snapshot, and thus are invisible. - - - - - xip_list - - Transactions in progress at the time of the snapshot. A transaction - ID that is xmin <= X < - xmax and not in this list was already completed at the time - of the snapshot, and thus is either visible or dead according to its - commit status. This list does not include the transaction IDs of - subtransactions (subxids). - - - - -
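For example, the textual snapshot shown above can be decomposed with the accessor functions:

SELECT pg_snapshot_xmin(s) AS xmin,
       pg_snapshot_xmax(s) AS xmax,
       ARRAY(SELECT pg_snapshot_xip(s)) AS xip_list
FROM (VALUES ('10:20:10,14,15'::pg_snapshot)) AS t(s);
-- xmin = 10, xmax = 20, xip_list = {10,14,15}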
- - - In releases of PostgreSQL before 13 there was - no xid8 type, so variants of these functions were provided - that used bigint to represent a 64-bit XID, with a - correspondingly distinct snapshot data type txid_snapshot. - These older functions have txid in their names. They - are still supported for backward compatibility, but may be removed from a - future release. See . - - - - Deprecated Transaction ID and Snapshot Information Functions - - - - - Function - - - Description - - - - - - - - - - txid_current - - txid_current () - bigint - - - See pg_current_xact_id(). - - - - - - - txid_current_if_assigned - - txid_current_if_assigned () - bigint - - - See pg_current_xact_id_if_assigned(). - - - - - - - txid_current_snapshot - - txid_current_snapshot () - txid_snapshot - - - See pg_current_snapshot(). - - - - - - - txid_snapshot_xip - - txid_snapshot_xip ( txid_snapshot ) - setof bigint - - - See pg_snapshot_xip(). - - - - - - - txid_snapshot_xmax - - txid_snapshot_xmax ( txid_snapshot ) - bigint - - - See pg_snapshot_xmax(). - - - - - - - txid_snapshot_xmin - - txid_snapshot_xmin ( txid_snapshot ) - bigint - - - See pg_snapshot_xmin(). - - - - - - - txid_visible_in_snapshot - - txid_visible_in_snapshot ( bigint, txid_snapshot ) - boolean - - - See pg_visible_in_snapshot(). - - - - - - - txid_status - - txid_status ( bigint ) - text - - - See pg_xact_status(). - - - - -
- -
- - - Committed Transaction Information Functions - - - The functions shown in - provide information about when past transactions were committed. - They only provide useful data when the - configuration option is - enabled, and only for transactions that were committed after it was - enabled. Commit timestamp information is routinely removed during - vacuum. - - - - Committed Transaction Information Functions - - - - - Function - - - Description - - - - - - - - - pg_xact_commit_timestamp - - pg_xact_commit_timestamp ( xid ) - timestamp with time zone - - - Returns the commit timestamp of a transaction. - - - - - - - pg_xact_commit_timestamp_origin - - pg_xact_commit_timestamp_origin ( xid ) - record - ( timestamp timestamp with time zone, - roident oid) - - - Returns the commit timestamp and replication origin of a transaction. - - - - - - - pg_last_committed_xact - - pg_last_committed_xact () - record - ( xid xid, - timestamp timestamp with time zone, - roident oid ) - - - Returns the transaction ID, commit timestamp and replication origin - of the latest committed transaction. - - - - -
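For example, assuming track_commit_timestamp was enabled before the rows of a hypothetical table mytable were written:

SELECT xmin, pg_xact_commit_timestamp(xmin) FROM mytable LIMIT 3;
SELECT * FROM pg_last_committed_xact();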
- -
- - - Control Data Functions - - - The functions shown in - print information initialized during initdb, such - as the catalog version. They also show information about write-ahead - logging and checkpoint processing. This information is cluster-wide, - not specific to any one database. These functions provide most of the same - information, from the same source, as the - application. - - - - Control Data Functions - - - - - Function - - - Description - - - - - - - - - pg_control_checkpoint - - pg_control_checkpoint () - record - - - Returns information about current checkpoint state, as shown in - . - - - - - - - pg_control_system - - pg_control_system () - record - - - Returns information about current control file state, as shown in - . - - - - - - - pg_control_init - - pg_control_init () - record - - - Returns information about cluster initialization state, as shown in - . - - - - - - - pg_control_recovery - - pg_control_recovery () - record - - - Returns information about recovery state, as shown in - . - - - - -
- - - <function>pg_control_checkpoint</function> Output Columns - - - - Column Name - Data Type - - - - - - - checkpoint_lsn - pg_lsn - - - - redo_lsn - pg_lsn - - - - redo_wal_file - text - - - - timeline_id - integer - - - - prev_timeline_id - integer - - - - full_page_writes - boolean - - - - next_xid - text - - - - next_oid - oid - - - - next_multixact_id - xid - - - - next_multi_offset - xid - - - - oldest_xid - xid - - - - oldest_xid_dbid - oid - - - - oldest_active_xid - xid - - - - oldest_multi_xid - xid - - - - oldest_multi_dbid - oid - - - - oldest_commit_ts_xid - xid - - - - newest_commit_ts_xid - xid - - - - checkpoint_time - timestamp with time zone - - - - -
- - - <function>pg_control_system</function> Output Columns - - - - Column Name - Data Type - - - - - - - pg_control_version - integer - - - - catalog_version_no - integer - - - - system_identifier - bigint - - - - pg_control_last_modified - timestamp with time zone - - - - -
- - - <function>pg_control_init</function> Output Columns - - - - Column Name - Data Type - - - - - - - max_data_alignment - integer - - - - database_block_size - integer - - - - blocks_per_segment - integer - - - - wal_block_size - integer - - - - bytes_per_wal_segment - integer - - - - max_identifier_length - integer - - - - max_index_columns - integer - - - - max_toast_chunk_size - integer - - - - large_object_chunk_size - integer - - - - float8_pass_by_value - boolean - - - - data_page_checksum_version - integer - - - - default_char_signedness - boolean - - - - -
- - - <function>pg_control_recovery</function> Output Columns - - - - Column Name - Data Type - - - - - - - min_recovery_end_lsn - pg_lsn - - - - min_recovery_end_timeline - integer - - - - backup_start_lsn - pg_lsn - - - - backup_end_lsn - pg_lsn - - - - end_of_backup_record_required - boolean - - - - -
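For example, to read a few fields of the checkpoint and control-file state:

SELECT timeline_id, redo_lsn, checkpoint_time FROM pg_control_checkpoint();
SELECT pg_control_version, system_identifier FROM pg_control_system();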
- -
- - - Version Information Functions - - - The functions shown in - print version information. - - - - Version Information Functions - - - - - Function - - - Description - - - - - - - - - version - - version () - text - - - Returns a string describing the PostgreSQL - server's version. You can also get this information from - , or for a machine-readable - version use . Software - developers should use server_version_num (available - since 8.2) or instead of - parsing the text version. - - - - - - - unicode_version - - unicode_version () - text - - - Returns a string representing the version of Unicode used by - PostgreSQL. - - - - - - icu_unicode_version - - icu_unicode_version () - text - - - Returns a string representing the version of Unicode used by ICU, if - the server was built with ICU support; otherwise returns - NULL. - - - -
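For example (the values returned will of course vary by installation):

SELECT version();
SELECT current_setting('server_version_num');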
- -
- - - WAL Summarization Information Functions - - - The functions shown in - print information about the status of WAL summarization. - See . - - - - WAL Summarization Information Functions - - - - - Function - - - Description - - - - - - - - - pg_available_wal_summaries - - pg_available_wal_summaries () - setof record - ( tli bigint, - start_lsn pg_lsn, - end_lsn pg_lsn ) - - - Returns information about the WAL summary files present in the - data directory, under pg_wal/summaries. - One row will be returned per WAL summary file. Each file summarizes - WAL on the indicated TLI within the indicated LSN range. This function - might be useful to determine whether enough WAL summaries are present - on the server to take an incremental backup based on some prior - backup whose start LSN is known. - - - - - - - pg_wal_summary_contents - - pg_wal_summary_contents ( tli bigint, start_lsn pg_lsn, end_lsn pg_lsn ) - setof record - ( relfilenode oid, - reltablespace oid, - reldatabase oid, - relforknumber smallint, - relblocknumber bigint, - is_limit_block boolean ) - - - Returns information about the contents of a single WAL summary file - identified by TLI and starting and ending LSNs. Each row with - is_limit_block false indicates that the block - identified by the remaining output columns was modified by at least - one WAL record within the range of records summarized by this file. - Each row with is_limit_block true indicates either - that (a) the relation fork was truncated to the length given by - relblocknumber within the relevant range of WAL - records or (b) that the relation fork was created or dropped within - the relevant range of WAL records; in such cases, - relblocknumber will be zero. - - - - - - - pg_get_wal_summarizer_state - - pg_get_wal_summarizer_state () - record - ( summarized_tli bigint, - summarized_lsn pg_lsn, - pending_lsn pg_lsn, - summarizer_pid int ) - - - Returns information about the progress of the WAL summarizer. If the - WAL summarizer has never run since the instance was started, then - summarized_tli and summarized_lsn - will be 0 and 0/00000000 respectively; - otherwise, they will be the TLI and ending LSN of the last WAL summary - file written to disk. If the WAL summarizer is currently running, - pending_lsn will be the ending LSN of the last - record that it has consumed, which must always be greater than or - equal to summarized_lsn; if the WAL summarizer is - not running, it will be equal to summarized_lsn. - summarizer_pid is the PID of the WAL summarizer - process, if it is running, and otherwise NULL. - - - As a special exception, the WAL summarizer will refuse to generate - WAL summary files if run on WAL generated under - wal_level=minimal, since such summaries would be - unsafe to use as the basis for an incremental backup. In this case, - the fields above will continue to advance as if summaries were being - generated, but nothing will be written to disk. Once the summarizer - reaches WAL generated while wal_level was set - to replica or higher, it will resume writing - summaries to disk. - - - - -
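For example, assuming summarize_wal is enabled (otherwise no summary rows exist yet), the newest available summary and the summarizer's progress can be read with:

SELECT tli, start_lsn, end_lsn
FROM pg_available_wal_summaries()
ORDER BY end_lsn DESC LIMIT 1;
SELECT * FROM pg_get_wal_summarizer_state();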
- -
- -
- - - System Administration Functions - - - The functions described in this section are used to control and - monitor a PostgreSQL installation. - - - - Configuration Settings Functions - - - SET - - - - SHOW - - - - configuration - of the server - functions - - - - shows the functions - available to query and alter run-time configuration parameters. - - - - Configuration Settings Functions - - - - - Function - - - Description - - - Example(s) - - - - - - - - - current_setting - - current_setting ( setting_name text , missing_ok boolean ) - text - - - Returns the current value of the - setting setting_name. If there is no such - setting, current_setting throws an error - unless missing_ok is supplied and - is true (in which case NULL is returned). - This function corresponds to - the SQL command . - - - current_setting('datestyle') - ISO, MDY - - - - - - - set_config - - set_config ( - setting_name text, - new_value text, - is_local boolean ) - text - - - Sets the parameter setting_name - to new_value, and returns that value. - If is_local is true, the new - value will only apply during the current transaction. If you want the - new value to apply for the rest of the current session, - use false instead. This function corresponds to - the SQL command . - - - set_config accepts the NULL value for - new_value, but as settings cannot be null, it - is interpreted as a request to reset the setting to its default value. - - - set_config('log_statement_stats', 'off', false) - off - - - - -
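For example, a sketch of using is_local to scope a setting to a single transaction (the schema name myschema is hypothetical):

BEGIN;
SELECT set_config('search_path', 'myschema, public', true);   -- is_local = true
-- statements here see the temporary search_path
COMMIT;   -- the previous value is restored at transaction end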
- -
- - - Server Signaling Functions - - - signal - backend processes - - - - The functions shown in send control signals to - other server processes. Use of these functions is restricted to - superusers by default but access may be granted to others using - GRANT, with noted exceptions. - - - - Each of these functions returns true if - the signal was successfully sent and false - if sending the signal failed. - - - - Server Signaling Functions - - - - - Function - - - Description - - - - - - - - - pg_cancel_backend - - pg_cancel_backend ( pid integer ) - boolean - - - Cancels the current query of the session whose backend process has the - specified process ID. This is also allowed if the - calling role is a member of the role whose backend is being canceled or - the calling role has privileges of pg_signal_backend, - however only superusers can cancel superuser backends. - As an exception, roles with privileges of - pg_signal_autovacuum_worker are permitted to - cancel autovacuum worker processes, which are otherwise considered - superuser backends. - - - - - - - pg_log_backend_memory_contexts - - pg_log_backend_memory_contexts ( pid integer ) - boolean - - - Requests to log the memory contexts of the backend with the - specified process ID. This function can send the request to - backends and auxiliary processes except logger. These memory contexts - will be logged at - LOG message level. They will appear in - the server log based on the log configuration set - (see for more information), - but will not be sent to the client regardless of - . - - - - - - - pg_reload_conf - - pg_reload_conf () - boolean - - - Causes all processes of the PostgreSQL - server to reload their configuration files. (This is initiated by - sending a SIGHUP signal to the postmaster - process, which in turn sends SIGHUP to each - of its children.) You can use the - pg_file_settings, - pg_hba_file_rules and - pg_ident_file_mappings views - to check the configuration files for possible errors, before reloading. - - - - - - - pg_rotate_logfile - - pg_rotate_logfile () - boolean - - - Signals the log-file manager to switch to a new output file - immediately. This works only when the built-in log collector is - running, since otherwise there is no log-file manager subprocess. - - - - - - - pg_terminate_backend - - pg_terminate_backend ( pid integer, timeout bigint DEFAULT 0 ) - boolean - - - Terminates the session whose backend process has the - specified process ID. This is also allowed if the calling role - is a member of the role whose backend is being terminated or the - calling role has privileges of pg_signal_backend, - however only superusers can terminate superuser backends. - As an exception, roles with privileges of - pg_signal_autovacuum_worker are permitted to - terminate autovacuum worker processes, which are otherwise considered - superuser backends. - - - If timeout is not specified or zero, this - function returns true whether the process actually - terminates or not, indicating only that the sending of the signal was - successful. If the timeout is specified (in - milliseconds) and greater than zero, the function waits until the - process is actually terminated or until the given time has passed. If - the process is terminated, the function - returns true. On timeout, a warning is emitted and - false is returned. - - - - -
- - - pg_cancel_backend and pg_terminate_backend - send signals (SIGINT or SIGTERM - respectively) to backend processes identified by process ID. - The process ID of an active backend can be found from - the pid column of the - pg_stat_activity view, or by listing the - postgres processes on the server (using - ps on Unix or the Task - Manager on Windows). - The role of an active backend can be found from the - usename column of the - pg_stat_activity view. - - - - pg_log_backend_memory_contexts can be used - to log the memory contexts of a backend process. For example: - -postgres=# SELECT pg_log_backend_memory_contexts(pg_backend_pid()); - pg_log_backend_memory_contexts --------------------------------- - t -(1 row) - -One message for each memory context will be logged. For example: - -LOG: logging memory contexts of PID 10377 -STATEMENT: SELECT pg_log_backend_memory_contexts(pg_backend_pid()); -LOG: level: 1; TopMemoryContext: 80800 total in 6 blocks; 14432 free (5 chunks); 66368 used -LOG: level: 2; pgstat TabStatusArray lookup hash table: 8192 total in 1 blocks; 1408 free (0 chunks); 6784 used -LOG: level: 2; TopTransactionContext: 8192 total in 1 blocks; 7720 free (1 chunks); 472 used -LOG: level: 2; RowDescriptionContext: 8192 total in 1 blocks; 6880 free (0 chunks); 1312 used -LOG: level: 2; MessageContext: 16384 total in 2 blocks; 5152 free (0 chunks); 11232 used -LOG: level: 2; Operator class cache: 8192 total in 1 blocks; 512 free (0 chunks); 7680 used -LOG: level: 2; smgr relation table: 16384 total in 2 blocks; 4544 free (3 chunks); 11840 used -LOG: level: 2; TransactionAbortContext: 32768 total in 1 blocks; 32504 free (0 chunks); 264 used -... -LOG: level: 2; ErrorContext: 8192 total in 1 blocks; 7928 free (3 chunks); 264 used -LOG: Grand total: 1651920 bytes in 201 blocks; 622360 free (88 chunks); 1029560 used - - If there are more than 100 child contexts under the same parent, the first - 100 child contexts are logged, along with a summary of the remaining contexts. - Note that frequent calls to this function could incur significant overhead, - because it may generate a large number of log messages. - - -
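For example, a sketch of canceling the queries of other active backends whose query text matches a hypothetical filter; the last condition guards against signaling one's own session:

SELECT pid, pg_cancel_backend(pid)
FROM pg_stat_activity
WHERE state = 'active'
  AND query ILIKE '%expensive_report%'
  AND pid <> pg_backend_pid();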
- - - Backup Control Functions - - - backup - - - - The functions shown in assist in making on-line backups. - These functions cannot be executed during recovery (except - pg_backup_start, - pg_backup_stop, - and pg_wal_lsn_diff). - - - - For details about proper usage of these functions, see - . - - - - Backup Control Functions - - - - - Function - - - Description - - - - - - - - - pg_create_restore_point - - pg_create_restore_point ( name text ) - pg_lsn - - - Creates a named marker record in the write-ahead log that can later be - used as a recovery target, and returns the corresponding write-ahead - log location. The given name can then be used with - to specify the point up to - which recovery will proceed. Avoid creating multiple restore points - with the same name, since recovery will stop at the first one whose - name matches the recovery target. - - - This function is restricted to superusers by default, but other users - can be granted EXECUTE to run the function. - - - - - - - pg_current_wal_flush_lsn - - pg_current_wal_flush_lsn () - pg_lsn - - - Returns the current write-ahead log flush location (see notes below). - - - - - - - pg_current_wal_insert_lsn - - pg_current_wal_insert_lsn () - pg_lsn - - - Returns the current write-ahead log insert location (see notes below). - - - - - - - pg_current_wal_lsn - - pg_current_wal_lsn () - pg_lsn - - - Returns the current write-ahead log write location (see notes below). - - - - - - - pg_backup_start - - pg_backup_start ( - label text - , fast boolean - ) - pg_lsn - - - Prepares the server to begin an on-line backup. The only required - parameter is an arbitrary user-defined label for the backup. - (Typically this would be the name under which the backup dump file - will be stored.) - If the optional second parameter is given as true, - it specifies executing pg_backup_start as quickly - as possible. This forces a fast checkpoint which will cause a - spike in I/O operations, slowing any concurrently executing queries. - - - This function is restricted to superusers by default, but other users - can be granted EXECUTE to run the function. - - - - - - - pg_backup_stop - - pg_backup_stop ( - wait_for_archive boolean - ) - record - ( lsn pg_lsn, - labelfile text, - spcmapfile text ) - - - Finishes performing an on-line backup. The desired contents of the - backup label file and the tablespace map file are returned as part of - the result of the function and must be written to files in the - backup area. These files must not be written to the live data directory - (doing so will cause PostgreSQL to fail to restart in the event of a - crash). - - - There is an optional parameter of type boolean. - If false, the function will return immediately after the backup is - completed, without waiting for WAL to be archived. This behavior is - only useful with backup software that independently monitors WAL - archiving. Otherwise, WAL required to make the backup consistent might - be missing and make the backup useless. By default or when this - parameter is true, pg_backup_stop will wait for - WAL to be archived when archiving is enabled. (On a standby, this - means that it will wait only when archive_mode = - always. If write activity on the primary is low, - it may be useful to run pg_switch_wal on the - primary in order to trigger an immediate segment switch.) - - - When executed on a primary, this function also creates a backup - history file in the write-ahead log archive area. 
The history file - includes the label given to pg_backup_start, the - starting and ending write-ahead log locations for the backup, and the - starting and ending times of the backup. After recording the ending - location, the current write-ahead log insertion point is automatically - advanced to the next write-ahead log file, so that the ending - write-ahead log file can be archived immediately to complete the - backup. - - - The result of the function is a single record. - The lsn column holds the backup's ending - write-ahead log location (which again can be ignored). The second - column returns the contents of the backup label file, and the third - column returns the contents of the tablespace map file. These must be - stored as part of the backup and are required as part of the restore - process. - - - This function is restricted to superusers by default, but other users - can be granted EXECUTE to run the function. - - - - - - - pg_switch_wal - - pg_switch_wal () - pg_lsn - - - Forces the server to switch to a new write-ahead log file, which - allows the current file to be archived (assuming you are using - continuous archiving). The result is the ending write-ahead log - location plus 1 within the just-completed write-ahead log file. If - there has been no write-ahead log activity since the last write-ahead - log switch, pg_switch_wal does nothing and - returns the start location of the write-ahead log file currently in - use. - - - This function is restricted to superusers by default, but other users - can be granted EXECUTE to run the function. - - - - - - - pg_walfile_name - - pg_walfile_name ( lsn pg_lsn ) - text - - - Converts a write-ahead log location to the name of the WAL file - holding that location. - - - - - - - pg_walfile_name_offset - - pg_walfile_name_offset ( lsn pg_lsn ) - record - ( file_name text, - file_offset integer ) - - - Converts a write-ahead log location to a WAL file name and byte offset - within that file. - - - - - - - pg_split_walfile_name - - pg_split_walfile_name ( file_name text ) - record - ( segment_number numeric, - timeline_id bigint ) - - - Extracts the sequence number and timeline ID from a WAL file - name. - - - - - - - pg_wal_lsn_diff - - pg_wal_lsn_diff ( lsn1 pg_lsn, lsn2 pg_lsn ) - numeric - - - Calculates the difference in bytes (lsn1 - lsn2) between two write-ahead log - locations. This can be used - with pg_stat_replication or some of the - functions shown in to - get the replication lag. - - - - -
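For example, creating a restore point before a risky operation, and using pg_wal_lsn_diff with pg_stat_replication to measure standby replay lag in bytes:

SELECT pg_create_restore_point('before_bulk_load');
SELECT application_name,
       pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn) AS replay_lag_bytes
FROM pg_stat_replication;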
- - - pg_current_wal_lsn displays the current write-ahead - log write location in the same format used by the above functions. - Similarly, pg_current_wal_insert_lsn displays the - current write-ahead log insertion location - and pg_current_wal_flush_lsn displays the current - write-ahead log flush location. The insertion location is - the logical end of the write-ahead log at any instant, - while the write location is the end of what has actually been written out - from the server's internal buffers, and the flush location is the last - location known to be written to durable storage. The write location is the - end of what can be examined from outside the server, and is usually what - you want if you are interested in archiving partially-complete write-ahead - log files. The insertion and flush locations are made available primarily - for server debugging purposes. These are all read-only operations and do - not require superuser permissions. - - - - You can use pg_walfile_name_offset to extract the - corresponding write-ahead log file name and byte offset from - a pg_lsn value. For example: - -postgres=# SELECT * FROM pg_walfile_name_offset((pg_backup_stop()).lsn); - file_name | file_offset ---------------------------+------------- - 00000001000000000000000D | 4039624 -(1 row) - - Similarly, pg_walfile_name extracts just the write-ahead log file name. - - - - pg_split_walfile_name is useful to compute an - LSN from a file offset and WAL file name, for example: - -postgres=# \set file_name '000000010000000100C000AB' -postgres=# \set offset 256 -postgres=# SELECT '0/0'::pg_lsn + pd.segment_number * ps.setting::int + :offset AS lsn - FROM pg_split_walfile_name(:'file_name') pd, - pg_show_all_settings() ps - WHERE ps.name = 'wal_segment_size'; - lsn ---------------- - C001/AB000100 -(1 row) - - -
- - - Recovery Control Functions - - - The functions shown in provide information - about the current status of a standby server. - These functions may be executed both during recovery and in normal running. - - - - Recovery Information Functions - - - - - Function - - - Description - - - - - - - - - pg_is_in_recovery - - pg_is_in_recovery () - boolean - - - Returns true if recovery is still in progress. - - - - - - - pg_last_wal_receive_lsn - - pg_last_wal_receive_lsn () - pg_lsn - - - Returns the last write-ahead log location that has been received and - synced to disk by streaming replication. While streaming replication - is in progress this will increase monotonically. If recovery has - completed then this will remain static at the location of the last WAL - record received and synced to disk during recovery. If streaming - replication is disabled, or if it has not yet started, the function - returns NULL. - - - - - - - pg_last_wal_replay_lsn - - pg_last_wal_replay_lsn () - pg_lsn - - - Returns the last write-ahead log location that has been replayed - during recovery. If recovery is still in progress this will increase - monotonically. If recovery has completed then this will remain - static at the location of the last WAL record applied during recovery. - When the server has been started normally without recovery, the - function returns NULL. - - - - - - - pg_last_xact_replay_timestamp - - pg_last_xact_replay_timestamp () - timestamp with time zone - - - Returns the time stamp of the last transaction replayed during - recovery. This is the time at which the commit or abort WAL record - for that transaction was generated on the primary. If no transactions - have been replayed during recovery, the function - returns NULL. Otherwise, if recovery is still in - progress this will increase monotonically. If recovery has completed - then this will remain static at the time of the last transaction - applied during recovery. When the server has been started normally - without recovery, the function returns NULL. - - - - - - - pg_get_wal_resource_managers - - pg_get_wal_resource_managers () - setof record - ( rm_id integer, - rm_name text, - rm_builtin boolean ) - - - Returns the currently-loaded WAL resource managers in the system. The - column rm_builtin indicates whether it's a - built-in resource manager, or a custom resource manager loaded by an - extension. - - - - -
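For example, the progress of recovery can be observed with a single
query combining these functions; the following is a sketch of a
typical monitoring probe run on a standby:

-- pg_is_in_recovery() is false on a normally-started primary,
-- and the other three functions return NULL there
SELECT pg_is_in_recovery()             AS in_recovery,
       pg_last_wal_receive_lsn()       AS received_lsn,
       pg_last_wal_replay_lsn()        AS replayed_lsn,
       pg_last_xact_replay_timestamp() AS last_replay_time;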
- - - The functions shown in control the progress of recovery. - These functions may be executed only during recovery. - - - - Recovery Control Functions - - - - - Function - - - Description - - - - - - - - - pg_is_wal_replay_paused - - pg_is_wal_replay_paused () - boolean - - - Returns true if recovery pause is requested. - - - - - - - pg_get_wal_replay_pause_state - - pg_get_wal_replay_pause_state () - text - - - Returns recovery pause state. The return values are - not paused if pause is not requested, - pause requested if pause is requested but recovery is - not yet paused, and paused if the recovery is - actually paused. - - - - - - - pg_promote - - pg_promote ( wait boolean DEFAULT true, wait_seconds integer DEFAULT 60 ) - boolean - - - Promotes a standby server to primary status. - With wait set to true (the - default), the function waits until promotion is completed - or wait_seconds seconds have passed, and - returns true if promotion is successful - and false otherwise. - If wait is set to false, the - function returns true immediately after sending a - SIGUSR1 signal to the postmaster to trigger - promotion. - - - This function is restricted to superusers by default, but other users - can be granted EXECUTE to run the function. - - - - - - - pg_wal_replay_pause - - pg_wal_replay_pause () - void - - - Request to pause recovery. A request doesn't mean that recovery stops - right away. If you want a guarantee that recovery is actually paused, - you need to check for the recovery pause state returned by - pg_get_wal_replay_pause_state(). Note that - pg_is_wal_replay_paused() returns whether a request - is made. While recovery is paused, no further database changes are applied. - If hot standby is active, all new queries will see the same consistent - snapshot of the database, and no further query conflicts will be generated - until recovery is resumed. - - - This function is restricted to superusers by default, but other users - can be granted EXECUTE to run the function. - - - - - - - pg_wal_replay_resume - - pg_wal_replay_resume () - void - - - Restarts recovery if it was paused. - - - This function is restricted to superusers by default, but other users - can be granted EXECUTE to run the function. - - - - -
- - - pg_wal_replay_pause and - pg_wal_replay_resume cannot be executed while - a promotion is ongoing. If a promotion is triggered while recovery - is paused, the paused state ends and promotion continues. - - - - If streaming replication is disabled, the paused state may continue - indefinitely without a problem. If streaming replication is in - progress then WAL records will continue to be received and will - eventually fill available disk space; how quickly this happens depends - on the duration of the pause and the rate of WAL generation. - -
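Putting these functions together, a pause-and-verify sequence on a
standby can be sketched as follows:

-- request a pause, then poll until replay has actually stopped
SELECT pg_wal_replay_pause();
SELECT pg_get_wal_replay_pause_state();  -- 'pause requested', then 'paused'

-- ... inspect the standby while no further changes are applied ...

SELECT pg_wal_replay_resume();           -- let replay continue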
- - - Snapshot Synchronization Functions - - - PostgreSQL allows database sessions to synchronize their - snapshots. A snapshot determines which data is visible to the - transaction that is using the snapshot. Synchronized snapshots are - necessary when two or more sessions need to see identical content in the - database. If two sessions just start their transactions independently, - there is always a possibility that some third transaction commits - between the executions of the two START TRANSACTION commands, - so that one session sees the effects of that transaction and the other - does not. - - - - To solve this problem, PostgreSQL allows a transaction to - export the snapshot it is using. As long as the exporting - transaction remains open, other transactions can import its - snapshot, and thereby be guaranteed that they see exactly the same view - of the database that the first transaction sees. But note that any - database changes made by any one of these transactions remain invisible - to the other transactions, as is usual for changes made by uncommitted - transactions. So the transactions are synchronized with respect to - pre-existing data, but act normally for changes they make themselves. - - - - Snapshots are exported with the pg_export_snapshot function, - shown in , and - imported with the command. - - - - Snapshot Synchronization Functions - - - - - Function - - - Description - - - - - - - - - pg_export_snapshot - - pg_export_snapshot () - text - - - Saves the transaction's current snapshot and returns - a text string identifying the snapshot. This string must - be passed (outside the database) to clients that want to import the - snapshot. The snapshot is available for import only until the end of - the transaction that exported it. - - - A transaction can export more than one snapshot, if needed. Note that - doing so is only useful in READ COMMITTED - transactions, since in REPEATABLE READ and higher - isolation levels, transactions use the same snapshot throughout their - lifetime. Once a transaction has exported any snapshots, it cannot be - prepared with . - - - - - - pg_log_standby_snapshot - - pg_log_standby_snapshot () - pg_lsn - - - Take a snapshot of running transactions and write it to WAL, without - having to wait for bgwriter or checkpointer to log one. This is useful - for logical decoding on standby, as logical slot creation has to wait - until such a record is replayed on the standby. - - - - -
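For example, two sessions can be synchronized as follows. The snapshot
identifier shown is only illustrative; the actual string returned by
pg_export_snapshot in session 1 must be passed to session 2:

-- session 1: export the snapshot and keep this transaction open
BEGIN ISOLATION LEVEL REPEATABLE READ;
SELECT pg_export_snapshot();   -- returns e.g. '00000003-0000001B-1'

-- session 2: import it; both sessions now see identical data
BEGIN ISOLATION LEVEL REPEATABLE READ;
SET TRANSACTION SNAPSHOT '00000003-0000001B-1';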
- -
- - - Replication Management Functions - - - The functions shown - in are for - controlling and interacting with replication features. - See , - , and - - for information about the underlying features. - Use of functions for replication origin is only allowed to the - superuser by default, but may be allowed to other users by using the - GRANT command. - Use of functions for replication slots is restricted to superusers - and users having REPLICATION privilege. - - - - Many of these functions have equivalent commands in the replication - protocol; see . - - - - The functions described in - , - , and - - are also relevant for replication. - - - - Replication Management Functions - - - - - Function - - - Description - - - - - - - - - pg_create_physical_replication_slot - - pg_create_physical_replication_slot ( slot_name name , immediately_reserve boolean, temporary boolean ) - record - ( slot_name name, - lsn pg_lsn ) - - - Creates a new physical replication slot named - slot_name. The name cannot be - pg_conflict_detection as it is reserved for the - conflict detection slot. The optional second parameter, - when true, specifies that the LSN for this - replication slot be reserved immediately; otherwise - the LSN is reserved on first connection from a streaming - replication client. Streaming changes from a physical slot is only - possible with the streaming-replication protocol — - see . The optional third - parameter, temporary, when set to true, specifies that - the slot should not be permanently stored to disk and is only meant - for use by the current session. Temporary slots are also - released upon any error. This function corresponds - to the replication protocol command CREATE_REPLICATION_SLOT - ... PHYSICAL. - - - - - - - pg_drop_replication_slot - - pg_drop_replication_slot ( slot_name name ) - void - - - Drops the physical or logical replication slot - named slot_name. Same as replication protocol - command DROP_REPLICATION_SLOT. - - - - - - - pg_create_logical_replication_slot - - pg_create_logical_replication_slot ( slot_name name, plugin name , temporary boolean, twophase boolean, failover boolean ) - record - ( slot_name name, - lsn pg_lsn ) - - - Creates a new logical (decoding) replication slot named - slot_name using the output plugin - plugin. The name cannot be - pg_conflict_detection as it is reserved for - the conflict detection slot. The optional third - parameter, temporary, when set to true, specifies that - the slot should not be permanently stored to disk and is only meant - for use by the current session. Temporary slots are also - released upon any error. The optional fourth parameter, - twophase, when set to true, specifies - that the decoding of prepared transactions is enabled for this - slot. The optional fifth parameter, - failover, when set to true, - specifies that this slot is enabled to be synced to the - standbys so that logical replication can be resumed after - failover. A call to this function has the same effect as - the replication protocol command - CREATE_REPLICATION_SLOT ... LOGICAL. - - - - - - - pg_copy_physical_replication_slot - - pg_copy_physical_replication_slot ( src_slot_name name, dst_slot_name name , temporary boolean ) - record - ( slot_name name, - lsn pg_lsn ) - - - Copies an existing physical replication slot named src_slot_name - to a physical replication slot named dst_slot_name. - The new slot name cannot be pg_conflict_detection, - as it is reserved for the conflict detection. 
- The copied physical slot starts to reserve WAL from the same LSN as the - source slot. - temporary is optional. If temporary - is omitted, the same value as the source slot is used. Copy of an - invalidated slot is not allowed. - - - - - - - pg_copy_logical_replication_slot - - pg_copy_logical_replication_slot ( src_slot_name name, dst_slot_name name , temporary boolean , plugin name ) - record - ( slot_name name, - lsn pg_lsn ) - - - Copies an existing logical replication slot - named src_slot_name to a logical replication - slot named dst_slot_name, optionally changing - the output plugin and persistence. The new slot name cannot be - pg_conflict_detection as it is reserved for - the conflict detection. The copied logical slot starts from the same - LSN as the source logical slot. Both - temporary and plugin are - optional; if they are omitted, the values of the source slot are used. - The failover option of the source logical slot - is not copied and is set to false by default. This - is to avoid the risk of being unable to continue logical replication - after failover to standby where the slot is being synchronized. Copy of - an invalidated slot is not allowed. - - - - - - - pg_logical_slot_get_changes - - pg_logical_slot_get_changes ( slot_name name, upto_lsn pg_lsn, upto_nchanges integer, VARIADIC options text[] ) - setof record - ( lsn pg_lsn, - xid xid, - data text ) - - - Returns changes in the slot slot_name, starting - from the point from which changes have been consumed last. If - upto_lsn - and upto_nchanges are NULL, - logical decoding will continue until end of WAL. If - upto_lsn is non-NULL, decoding will include only - those transactions which commit prior to the specified LSN. If - upto_nchanges is non-NULL, decoding will - stop when the number of rows produced by decoding exceeds - the specified value. Note, however, that the actual number of - rows returned may be larger, since this limit is only checked after - adding the rows produced when decoding each new transaction commit. - If the specified slot is a logical failover slot then the function will - not return until all physical slots specified in - synchronized_standby_slots - have confirmed WAL receipt. - - - - - - - pg_logical_slot_peek_changes - - pg_logical_slot_peek_changes ( slot_name name, upto_lsn pg_lsn, upto_nchanges integer, VARIADIC options text[] ) - setof record - ( lsn pg_lsn, - xid xid, - data text ) - - - Behaves just like - the pg_logical_slot_get_changes() function, - except that changes are not consumed; that is, they will be returned - again on future calls. - - - - - - - pg_logical_slot_get_binary_changes - - pg_logical_slot_get_binary_changes ( slot_name name, upto_lsn pg_lsn, upto_nchanges integer, VARIADIC options text[] ) - setof record - ( lsn pg_lsn, - xid xid, - data bytea ) - - - Behaves just like - the pg_logical_slot_get_changes() function, - except that changes are returned as bytea. - - - - - - - pg_logical_slot_peek_binary_changes - - pg_logical_slot_peek_binary_changes ( slot_name name, upto_lsn pg_lsn, upto_nchanges integer, VARIADIC options text[] ) - setof record - ( lsn pg_lsn, - xid xid, - data bytea ) - - - Behaves just like - the pg_logical_slot_peek_changes() function, - except that changes are returned as bytea. - - - - - - - pg_replication_slot_advance - - pg_replication_slot_advance ( slot_name name, upto_lsn pg_lsn ) - record - ( slot_name name, - end_lsn pg_lsn ) - - - Advances the current confirmed position of a replication slot named - slot_name. 
The slot will not be moved backwards, - and it will not be moved beyond the current insert location. Returns - the name of the slot and the actual position that it was advanced to. - The updated slot position information is written out at the next - checkpoint if any advancing is done. So in the event of a crash, the - slot may return to an earlier position. If the specified slot is a - logical failover slot then the function will not return until all - physical slots specified in - synchronized_standby_slots - have confirmed WAL receipt. - - - - - - - pg_replication_origin_create - - pg_replication_origin_create ( node_name text ) - oid - - - Creates a replication origin with the given external - name, and returns the internal ID assigned to it. - The name must be no longer than 512 bytes. - - - - - - - pg_replication_origin_drop - - pg_replication_origin_drop ( node_name text ) - void - - - Deletes a previously-created replication origin, including any - associated replay progress. - - - - - - - pg_replication_origin_oid - - pg_replication_origin_oid ( node_name text ) - oid - - - Looks up a replication origin by name and returns the internal ID. If - no such replication origin is found, NULL is - returned. - - - - - - - pg_replication_origin_session_setup - - pg_replication_origin_session_setup ( node_name text ) - void - - - Marks the current session as replaying from the given - origin, allowing replay progress to be tracked. - Can only be used if no origin is currently selected. - Use pg_replication_origin_session_reset to undo. - - - - - - - pg_replication_origin_session_reset - - pg_replication_origin_session_reset () - void - - - Cancels the effects - of pg_replication_origin_session_setup(). - - - - - - - pg_replication_origin_session_is_setup - - pg_replication_origin_session_is_setup () - boolean - - - Returns true if a replication origin has been selected in the - current session. - - - - - - - pg_replication_origin_session_progress - - pg_replication_origin_session_progress ( flush boolean ) - pg_lsn - - - Returns the replay location for the replication origin selected in - the current session. The parameter flush - determines whether the corresponding local transaction will be - guaranteed to have been flushed to disk or not. - - - - - - - pg_replication_origin_xact_setup - - pg_replication_origin_xact_setup ( origin_lsn pg_lsn, origin_timestamp timestamp with time zone ) - void - - - Marks the current transaction as replaying a transaction that has - committed at the given LSN and timestamp. Can - only be called when a replication origin has been selected - using pg_replication_origin_session_setup. - - - - - - - pg_replication_origin_xact_reset - - pg_replication_origin_xact_reset () - void - - - Cancels the effects of - pg_replication_origin_xact_setup(). - - - - - - - pg_replication_origin_advance - - pg_replication_origin_advance ( node_name text, lsn pg_lsn ) - void - - - Sets replication progress for the given node to the given - location. This is primarily useful for setting up the initial - location, or setting a new location after configuration changes and - similar. Be aware that careless use of this function can lead to - inconsistently replicated data. - - - - - - - pg_replication_origin_progress - - pg_replication_origin_progress ( node_name text, flush boolean ) - pg_lsn - - - Returns the replay location for the given replication origin. 
The - parameter flush determines whether the - corresponding local transaction will be guaranteed to have been - flushed to disk or not. - - - - - - - pg_logical_emit_message - - pg_logical_emit_message ( transactional boolean, prefix text, content text , flush boolean DEFAULT false ) - pg_lsn - - - pg_logical_emit_message ( transactional boolean, prefix text, content bytea , flush boolean DEFAULT false ) - pg_lsn - - - Emits a logical decoding message. This can be used to pass generic - messages to logical decoding plugins through - WAL. The transactional parameter specifies if - the message should be part of the current transaction, or if it should - be written immediately and decoded as soon as the logical decoder - reads the record. The prefix parameter is a - textual prefix that can be used by logical decoding plugins to easily - recognize messages that are interesting for them. - The content parameter is the content of the - message, given either in text or binary form. - The flush parameter (default set to - false) controls if the message is immediately - flushed to WAL or not. flush has no effect - with transactional, as the message's WAL - record is flushed along with its transaction. - - - - - - - pg_sync_replication_slots - - pg_sync_replication_slots () - void - - - Synchronize the logical failover replication slots from the primary - server to the standby server. This function can only be executed on the - standby server. Temporary synced slots, if any, cannot be used for - logical decoding and must be dropped after promotion. See - for details. - Note that this function is primarily intended for testing and - debugging purposes and should be used with caution. Additionally, - this function cannot be executed if - - sync_replication_slots is enabled and the slotsync - worker is already running to perform the synchronization of slots. - - - - - If, after executing the function, - - hot_standby_feedback is disabled on - the standby or the physical slot configured in - - primary_slot_name is - removed, then it is possible that the necessary rows of the - synchronized slot will be removed by the VACUUM process on the primary - server, resulting in the synchronized slot becoming invalidated. - - - - - - - -
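As a sketch of how the slot functions fit together, the following
creates a logical slot using the test_decoding output plugin (an
example plugin shipped in contrib; the slot name demo_slot is
hypothetical), inspects pending changes without consuming them, and
then drops the slot:

SELECT slot_name, lsn
  FROM pg_create_logical_replication_slot('demo_slot', 'test_decoding');

-- peek at decoded changes; they will be returned again later because
-- peeking, unlike pg_logical_slot_get_changes, does not consume them
SELECT lsn, xid, data
  FROM pg_logical_slot_peek_changes('demo_slot', NULL, NULL);

SELECT pg_drop_replication_slot('demo_slot');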
- -
- - - Database Object Management Functions - - - The functions shown in calculate - the disk space usage of database objects, or assist in presentation - or understanding of usage results. bigint results - are measured in bytes. If an OID that does - not represent an existing object is passed to one of these - functions, NULL is returned. - - - - Database Object Size Functions - - - - - Function - - - Description - - - - - - - - - pg_column_size - - pg_column_size ( "any" ) - integer - - - Shows the number of bytes used to store any individual data value. If - applied directly to a table column value, this reflects any - compression that was done. - - - - - - - pg_column_compression - - pg_column_compression ( "any" ) - text - - - Shows the compression algorithm that was used to compress - an individual variable-length value. Returns NULL - if the value is not compressed. - - - - - - - pg_column_toast_chunk_id - - pg_column_toast_chunk_id ( "any" ) - oid - - - Shows the chunk_id of an on-disk - TOASTed value. Returns NULL - if the value is un-TOASTed or not on-disk. See - for more information about - TOAST. - - - - - - - pg_database_size - - pg_database_size ( name ) - bigint - - - pg_database_size ( oid ) - bigint - - - Computes the total disk space used by the database with the specified - name or OID. To use this function, you must - have CONNECT privilege on the specified database - (which is granted by default) or have privileges of - the pg_read_all_stats role. - - - - - - - pg_indexes_size - - pg_indexes_size ( regclass ) - bigint - - - Computes the total disk space used by indexes attached to the - specified table. - - - - - - - pg_relation_size - - pg_relation_size ( relation regclass , fork text ) - bigint - - - Computes the disk space used by one fork of the - specified relation. (Note that for most purposes it is more - convenient to use the higher-level - functions pg_total_relation_size - or pg_table_size, which sum the sizes of all - forks.) With one argument, this returns the size of the main data - fork of the relation. The second argument can be provided to specify - which fork to examine: - - - - main returns the size of the main - data fork of the relation. - - - - - fsm returns the size of the Free Space Map - (see ) associated with the relation. - - - - - vm returns the size of the Visibility Map - (see ) associated with the relation. - - - - - init returns the size of the initialization - fork, if any, associated with the relation. - - - - - - - - - - pg_size_bytes - - pg_size_bytes ( text ) - bigint - - - Converts a size in human-readable format (as returned - by pg_size_pretty) into bytes. Valid units are - bytes, B, kB, - MB, GB, TB, - and PB. - - - - - - - pg_size_pretty - - pg_size_pretty ( bigint ) - text - - - pg_size_pretty ( numeric ) - text - - - Converts a size in bytes into a more easily human-readable format with - size units (bytes, kB, MB, GB, TB, or PB as appropriate). Note that the - units are powers of 2 rather than powers of 10, so 1kB is 1024 bytes, - 1MB is 1024² = 1048576 bytes, and so on. - - - - - - - pg_table_size - - pg_table_size ( regclass ) - bigint - - - Computes the disk space used by the specified table, excluding indexes - (but including its TOAST table if any, free space map, and visibility - map). - - - - - - - pg_tablespace_size - - pg_tablespace_size ( name ) - bigint - - - pg_tablespace_size ( oid ) - bigint - - - Computes the total disk space used in the tablespace with the - specified name or OID.
To use this function, you must - have CREATE privilege on the specified tablespace - or have privileges of the pg_read_all_stats role, - unless it is the default tablespace for the current database. - - - - - - - pg_total_relation_size - - pg_total_relation_size ( regclass ) - bigint - - - Computes the total disk space used by the specified table, including - all indexes and TOAST data. The result is - equivalent to pg_table_size - + pg_indexes_size. - - - - -
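For example, the various size functions can be compared side by side
for a single table (mytable is a hypothetical name):

SELECT pg_size_pretty(pg_relation_size('mytable'))       AS main_fork,
       pg_size_pretty(pg_table_size('mytable'))          AS table_total,
       pg_size_pretty(pg_indexes_size('mytable'))        AS indexes,
       pg_size_pretty(pg_total_relation_size('mytable')) AS total;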
- - - The functions above that operate on tables or indexes accept a - regclass argument, which is simply the OID of the table or index - in the pg_class system catalog. You do not have to look up - the OID by hand, however, since the regclass data type's input - converter will do the work for you. See - for details. - - - - The functions shown in assist - in identifying the specific disk files associated with database objects. - - - - Database Object Location Functions - - - - - Function - - - Description - - - - - - - - - pg_relation_filenode - - pg_relation_filenode ( relation regclass ) - oid - - - Returns the filenode number currently assigned to the - specified relation. The filenode is the base component of the file - name(s) used for the relation (see - for more information). - For most relations the result is the same as - pg_class.relfilenode, - but for certain system catalogs relfilenode - is zero and this function must be used to get the correct value. The - function returns NULL if passed a relation that does not have storage, - such as a view. - - - - - - - pg_relation_filepath - - pg_relation_filepath ( relation regclass ) - text - - - Returns the entire file path name (relative to the database cluster's - data directory, PGDATA) of the relation. - - - - - - - pg_filenode_relation - - pg_filenode_relation ( tablespace oid, filenode oid ) - regclass - - - Returns a relation's OID given the tablespace OID and filenode it is - stored under. This is essentially the inverse mapping of - pg_relation_filepath. For a relation in the - database's default tablespace, the tablespace can be specified as zero. - Returns NULL if no relation in the current database - is associated with the given values. - - - - -
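For example, a table's on-disk path and the reverse mapping from a
filenode back to its relation can be sketched as follows (mytable is a
hypothetical name; the zero passed to pg_filenode_relation denotes the
database's default tablespace):

SELECT pg_relation_filepath('mytable');
SELECT pg_filenode_relation(0, pg_relation_filenode('mytable'));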
- - - lists functions used to manage - collations. - - - - Collation Management Functions - - - - - Function - - - Description - - - - - - - - - pg_collation_actual_version - - pg_collation_actual_version ( oid ) - text - - - Returns the actual version of the collation object as it is currently - installed in the operating system. If this is different from the - value in - pg_collation.collversion, - then objects depending on the collation might need to be rebuilt. See - also . - - - - - - - pg_database_collation_actual_version - - pg_database_collation_actual_version ( oid ) - text - - - Returns the actual version of the database's collation as it is currently - installed in the operating system. If this is different from the - value in - pg_database.datcollversion, - then objects depending on the collation might need to be rebuilt. See - also . - - - - - - - pg_import_system_collations - - pg_import_system_collations ( schema regnamespace ) - integer - - - Adds collations to the system - catalog pg_collation based on all the locales - it finds in the operating system. This is - what initdb uses; see - for more details. If additional - locales are installed into the operating system later on, this - function can be run again to add collations for the new locales. - Locales that match existing entries - in pg_collation will be skipped. (But - collation objects based on locales that are no longer present in the - operating system are not removed by this function.) - The schema parameter would typically - be pg_catalog, but that is not a requirement; the - collations could be installed into some other schema as well. The - function returns the number of new collation objects it created. - Use of this function is restricted to superusers. - - - - -
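For example, collations whose recorded version no longer matches the
version installed in the operating system can be listed with a query
along these lines (a sketch; collname and collversion are columns of
the pg_collation catalog):

SELECT collname, collversion,
       pg_collation_actual_version(oid) AS actual_version
  FROM pg_collation
 WHERE collversion IS NOT NULL
   AND collversion IS DISTINCT FROM pg_collation_actual_version(oid);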
- - - lists functions used to - manipulate statistics. - These functions cannot be executed during recovery. - - - Changes made by these statistics manipulation functions are likely to be - overwritten by autovacuum (or manual - VACUUM or ANALYZE) and should be - considered temporary. - - - - - - Database Object Statistics Manipulation Functions - - - - - Function - - - Description - - - - - - - - - pg_restore_relation_stats - - pg_restore_relation_stats ( - VARIADIC kwargs "any" ) - boolean - - - Updates table-level statistics. Ordinarily, these statistics are - collected automatically or updated as a part of or , so it's not - necessary to call this function. However, it is useful after a - restore to enable the optimizer to choose better plans if - ANALYZE has not been run yet. - - - The tracked statistics may change from version to version, so - arguments are passed as pairs of argname - and argvalue in the form: - -SELECT pg_restore_relation_stats( - 'arg1name', 'arg1value'::arg1type, - 'arg2name', 'arg2value'::arg2type, - 'arg3name', 'arg3value'::arg3type); - - - - For example, to set the relpages and - reltuples values for the table - mytable: - -SELECT pg_restore_relation_stats( - 'schemaname', 'myschema', - 'relname', 'mytable', - 'relpages', 173::integer, - 'reltuples', 10000::real); - - - - The arguments schemaname and - relname are required, and specify the table. Other - arguments are the names and values of statistics corresponding to - certain columns in pg_class. - The currently-supported relation statistics are - relpages with a value of type - integer, reltuples with a value of - type real, relallvisible with a value - of type integer, and relallfrozen - with a value of type integer. - - - Additionally, this function accepts argument name - version of type integer, which - specifies the server version from which the statistics originated. - This is anticipated to be helpful in porting statistics from older - versions of PostgreSQL. - - - Minor errors are reported as a WARNING and - ignored, and remaining statistics will still be restored. If all - specified statistics are successfully restored, returns - true, otherwise false. - - - The caller must have the MAINTAIN privilege on the - table or be the owner of the database. - - - - - - - - - pg_clear_relation_stats - - pg_clear_relation_stats ( schemaname text, relname text ) - void - - - Clears table-level statistics for the given relation, as though the - table was newly created. - - - The caller must have the MAINTAIN privilege on the - table or be the owner of the database. - - - - - - - - pg_restore_attribute_stats - - pg_restore_attribute_stats ( - VARIADIC kwargs "any" ) - boolean - - - Creates or updates column-level statistics. Ordinarily, these - statistics are collected automatically or updated as a part of or , so it's not - necessary to call this function. However, it is useful after a - restore to enable the optimizer to choose better plans if - ANALYZE has not been run yet. 
- - - The tracked statistics may change from version to version, so - arguments are passed as pairs of argname - and argvalue in the form: - -SELECT pg_restore_attribute_stats( - 'arg1name', 'arg1value'::arg1type, - 'arg2name', 'arg2value'::arg2type, - 'arg3name', 'arg3value'::arg3type); - - - - For example, to set the avg_width and - null_frac values for the attribute - col1 of the table - mytable: - -SELECT pg_restore_attribute_stats( - 'schemaname', 'myschema', - 'relname', 'mytable', - 'attname', 'col1', - 'inherited', false, - 'avg_width', 125::integer, - 'null_frac', 0.5::real); - - - - The required arguments are schemaname and - relname with a value of type text - which specify the table; either attname with a - value of type text or attnum with a - value of type smallint, which specifies the column; and - inherited, which specifies whether the statistics - include values from child tables. Other arguments are the names and - values of statistics corresponding to columns in pg_stats. - - - Additionally, this function accepts argument name - version of type integer, which - specifies the server version from which the statistics originated. - This is anticipated to be helpful in porting statistics from older - versions of PostgreSQL. - - - Minor errors are reported as a WARNING and - ignored, and remaining statistics will still be restored. If all - specified statistics are successfully restored, returns - true, otherwise false. - - - The caller must have the MAINTAIN privilege on the - table or be the owner of the database. - - - - - - - - - pg_clear_attribute_stats - - pg_clear_attribute_stats ( - schemaname text, - relname text, - attname text, - inherited boolean ) - void - - - Clears column-level statistics for the given relation and - attribute, as though the table was newly created. - - - The caller must have the MAINTAIN privilege on - the table or be the owner of the database. - - - - - -
- - - lists functions that provide - information about the structure of partitioned tables. - - - - Partitioning Information Functions - - - - - Function - - - Description - - - - - - - - - pg_partition_tree - - pg_partition_tree ( regclass ) - setof record - ( relid regclass, - parentrelid regclass, - isleaf boolean, - level integer ) - - - Lists the tables or indexes in the partition tree of the - given partitioned table or partitioned index, with one row for each - partition. Information provided includes the OID of the partition, - the OID of its immediate parent, a boolean value telling if the - partition is a leaf, and an integer telling its level in the hierarchy. - The level value is 0 for the input table or index, 1 for its - immediate child partitions, 2 for their partitions, and so on. - Returns no rows if the relation does not exist or is not a partition - or partitioned table. - - - - - - - pg_partition_ancestors - - pg_partition_ancestors ( regclass ) - setof regclass - - - Lists the ancestor relations of the given partition, - including the relation itself. Returns no rows if the relation - does not exist or is not a partition or partitioned table. - - - - - - - pg_partition_root - - pg_partition_root ( regclass ) - regclass - - - Returns the top-most parent of the partition tree to which the given - relation belongs. Returns NULL if the relation - does not exist or is not a partition or partitioned table. - - - - -
- - - For example, to check the total size of the data contained in a - partitioned table measurement, one could use the - following query: - -SELECT pg_size_pretty(sum(pg_relation_size(relid))) AS total_size - FROM pg_partition_tree('measurement'); - - - -
- - - Index Maintenance Functions - - - shows the functions - available for index maintenance tasks. (Note that these maintenance - tasks are normally done automatically by autovacuum; use of these - functions is only required in special cases.) - These functions cannot be executed during recovery. - Use of these functions is restricted to superusers and the owner - of the given index. - - - - Index Maintenance Functions - - - - - Function - - - Description - - - - - - - - - brin_summarize_new_values - - brin_summarize_new_values ( index regclass ) - integer - - - Scans the specified BRIN index to find page ranges in the base table - that are not currently summarized by the index; for any such range it - creates a new summary index tuple by scanning those table pages. - Returns the number of new page range summaries that were inserted - into the index. - - - - - - - brin_summarize_range - - brin_summarize_range ( index regclass, blockNumber bigint ) - integer - - - Summarizes the page range covering the given block, if not already - summarized. This is - like brin_summarize_new_values except that it - only processes the page range that covers the given table block number. - - - - - - - brin_desummarize_range - - brin_desummarize_range ( index regclass, blockNumber bigint ) - void - - - Removes the BRIN index tuple that summarizes the page range covering - the given table block, if there is one. - - - - - - - gin_clean_pending_list - - gin_clean_pending_list ( index regclass ) - bigint - - - Cleans up the pending list of the specified GIN index - by moving entries in it, in bulk, to the main GIN data structure. - Returns the number of pages removed from the pending list. - If the argument is a GIN index built with - the fastupdate option disabled, no cleanup happens - and the result is zero, because the index doesn't have a pending list. - See and - for details about the pending list and fastupdate - option. - - - - -
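For example, the BRIN and GIN maintenance functions are invoked by
passing the index name; the index names below are hypothetical:

SELECT brin_summarize_new_values('measurement_brin_idx'); -- new summaries inserted
SELECT brin_desummarize_range('measurement_brin_idx', 0); -- drop block 0's summary
SELECT gin_clean_pending_list('documents_gin_idx');       -- pending-list pages freed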
- -
- - - Generic File Access Functions - - - The functions shown in provide native access to - files on the machine hosting the server. Only files within the - database cluster directory and the log_directory can be - accessed, unless the user is a superuser or is granted the role - pg_read_server_files. Use a relative path for files in - the cluster directory, and a path matching the log_directory - configuration setting for log files. - - - - Note that granting users the EXECUTE privilege on - pg_read_file(), or related functions, allows them the - ability to read any file on the server that the database server process can - read; these functions bypass all in-database privilege checks. This means - that, for example, a user with such access is able to read the contents of - the pg_authid table where authentication - information is stored, as well as read any table data in the database. - Therefore, granting access to these functions should be carefully - considered. - - - - When granting privilege on these functions, note that the table entries - showing optional parameters are mostly implemented as several physical - functions with different parameter lists. Privilege must be granted - separately on each such function, if it is to be - used. psql's \df command - can be useful to check what the actual function signatures are. - - - - Some of these functions take an optional missing_ok - parameter, which specifies the behavior when the file or directory does - not exist. If true, the function - returns NULL or an empty result set, as appropriate. - If false, an error is raised. (Failure conditions - other than file not found are reported as errors in any - case.) The default is false. - - - - Generic File Access Functions - - - - - Function - - - Description - - - - - - - - - pg_ls_dir - - pg_ls_dir ( dirname text , missing_ok boolean, include_dot_dirs boolean ) - setof text - - - Returns the names of all files (and directories and other special - files) in the specified - directory. The include_dot_dirs parameter - indicates whether . and .. are to be - included in the result set; the default is to exclude them. Including - them can be useful when missing_ok - is true, to distinguish an empty directory from a - non-existent directory. - - - This function is restricted to superusers by default, but other users - can be granted EXECUTE to run the function. - - - - - - - pg_ls_logdir - - pg_ls_logdir () - setof record - ( name text, - size bigint, - modification timestamp with time zone ) - - - Returns the name, size, and last modification time (mtime) of each - ordinary file in the server's log directory. Filenames beginning with - a dot, directories, and other special files are excluded. - - - This function is restricted to superusers and roles with privileges of - the pg_monitor role by default, but other users can - be granted EXECUTE to run the function. - - - - - - - pg_ls_waldir - - pg_ls_waldir () - setof record - ( name text, - size bigint, - modification timestamp with time zone ) - - - Returns the name, size, and last modification time (mtime) of each - ordinary file in the server's write-ahead log (WAL) directory. - Filenames beginning with a dot, directories, and other special files - are excluded. - - - This function is restricted to superusers and roles with privileges of - the pg_monitor role by default, but other users can - be granted EXECUTE to run the function. 
- - - - - - - pg_ls_logicalmapdir - - pg_ls_logicalmapdir () - setof record - ( name text, - size bigint, - modification timestamp with time zone ) - - - Returns the name, size, and last modification time (mtime) of each - ordinary file in the server's pg_logical/mappings - directory. Filenames beginning with a dot, directories, and other - special files are excluded. - - - This function is restricted to superusers and members of - the pg_monitor role by default, but other users can - be granted EXECUTE to run the function. - - - - - - - pg_ls_logicalsnapdir - - pg_ls_logicalsnapdir () - setof record - ( name text, - size bigint, - modification timestamp with time zone ) - - - Returns the name, size, and last modification time (mtime) of each - ordinary file in the server's pg_logical/snapshots - directory. Filenames beginning with a dot, directories, and other - special files are excluded. - - - This function is restricted to superusers and members of - the pg_monitor role by default, but other users can - be granted EXECUTE to run the function. - - - - - - - pg_ls_replslotdir - - pg_ls_replslotdir ( slot_name text ) - setof record - ( name text, - size bigint, - modification timestamp with time zone ) - - - Returns the name, size, and last modification time (mtime) of each - ordinary file in the server's pg_replslot/slot_name - directory, where slot_name is the name of the - replication slot provided as input of the function. Filenames beginning - with a dot, directories, and other special files are excluded. - - - This function is restricted to superusers and members of - the pg_monitor role by default, but other users can - be granted EXECUTE to run the function. - - - - - - - pg_ls_summariesdir - - pg_ls_summariesdir () - setof record - ( name text, - size bigint, - modification timestamp with time zone ) - - - Returns the name, size, and last modification time (mtime) of each - ordinary file in the server's WAL summaries directory - (pg_wal/summaries). Filenames beginning - with a dot, directories, and other special files are excluded. - - - This function is restricted to superusers and members of - the pg_monitor role by default, but other users can - be granted EXECUTE to run the function. - - - - - - - pg_ls_archive_statusdir - - pg_ls_archive_statusdir () - setof record - ( name text, - size bigint, - modification timestamp with time zone ) - - - Returns the name, size, and last modification time (mtime) of each - ordinary file in the server's WAL archive status directory - (pg_wal/archive_status). Filenames beginning - with a dot, directories, and other special files are excluded. - - - This function is restricted to superusers and members of - the pg_monitor role by default, but other users can - be granted EXECUTE to run the function. - - - - - - - - pg_ls_tmpdir - - pg_ls_tmpdir ( tablespace oid ) - setof record - ( name text, - size bigint, - modification timestamp with time zone ) - - - Returns the name, size, and last modification time (mtime) of each - ordinary file in the temporary file directory for the - specified tablespace. - If tablespace is not provided, - the pg_default tablespace is examined. Filenames - beginning with a dot, directories, and other special files are - excluded. - - - This function is restricted to superusers and members of - the pg_monitor role by default, but other users can - be granted EXECUTE to run the function. 
- - - - - - - pg_read_file - - pg_read_file ( filename text , offset bigint, length bigint , missing_ok boolean ) - text - - - Returns all or part of a text file, starting at the - given byte offset, returning at - most length bytes (less if the end of file is - reached first). If offset is negative, it is - relative to the end of the file. If offset - and length are omitted, the entire file is - returned. The bytes read from the file are interpreted as a string in - the database's encoding; an error is thrown if they are not valid in - that encoding. - - - This function is restricted to superusers by default, but other users - can be granted EXECUTE to run the function. - - - - - - - pg_read_binary_file - - pg_read_binary_file ( filename text , offset bigint, length bigint , missing_ok boolean ) - bytea - - - Returns all or part of a file. This function is identical to - pg_read_file except that it can read arbitrary - binary data, returning the result as bytea - not text; accordingly, no encoding checks are performed. - - - This function is restricted to superusers by default, but other users - can be granted EXECUTE to run the function. - - - In combination with the convert_from function, - this function can be used to read a text file in a specified encoding - and convert to the database's encoding: - -SELECT convert_from(pg_read_binary_file('file_in_utf8.txt'), 'UTF8'); - - - - - - - - pg_stat_file - - pg_stat_file ( filename text , missing_ok boolean ) - record - ( size bigint, - access timestamp with time zone, - modification timestamp with time zone, - change timestamp with time zone, - creation timestamp with time zone, - isdir boolean ) - - - Returns a record containing the file's size, last access time stamp, - last modification time stamp, last file status change time stamp (Unix - platforms only), file creation time stamp (Windows only), and a flag - indicating if it is a directory. - - - This function is restricted to superusers by default, but other users - can be granted EXECUTE to run the function. - - - - - -
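For example, the most recently modified WAL files and the status of a
file in the data directory can be examined like this (a sketch; it
requires the privileges described above, and assumes the server's
postmaster.pid file exists in the cluster directory):

SELECT name, size, modification
  FROM pg_ls_waldir()
 ORDER BY modification DESC
 LIMIT 3;

SELECT size, modification, isdir
  FROM pg_stat_file('postmaster.pid');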
- -
- - - Advisory Lock Functions - - - The functions shown in - manage advisory locks. For details about proper use of these functions, - see . - - - - All these functions are intended to be used to lock application-defined - resources, which can be identified either by a single 64-bit key value or - two 32-bit key values (note that these two key spaces do not overlap). - If another session already holds a conflicting lock on the same resource - identifier, the functions will either wait until the resource becomes - available, or return a false result, as appropriate for - the function. - Locks can be either shared or exclusive: a shared lock does not conflict - with other shared locks on the same resource, only with exclusive locks. - Locks can be taken at session level (so that they are held until released - or the session ends) or at transaction level (so that they are held until - the current transaction ends; there is no provision for manual release). - Multiple session-level lock requests stack, so that if the same resource - identifier is locked three times there must then be three unlock requests - to release the resource in advance of session end. - - - - Advisory Lock Functions - - - - - Function - - - Description - - - - - - - - - pg_advisory_lock - - pg_advisory_lock ( key bigint ) - void - - - pg_advisory_lock ( key1 integer, key2 integer ) - void - - - Obtains an exclusive session-level advisory lock, waiting if necessary. - - - - - - - pg_advisory_lock_shared - - pg_advisory_lock_shared ( key bigint ) - void - - - pg_advisory_lock_shared ( key1 integer, key2 integer ) - void - - - Obtains a shared session-level advisory lock, waiting if necessary. - - - - - - - pg_advisory_unlock - - pg_advisory_unlock ( key bigint ) - boolean - - - pg_advisory_unlock ( key1 integer, key2 integer ) - boolean - - - Releases a previously-acquired exclusive session-level advisory lock. - Returns true if the lock is successfully released. - If the lock was not held, false is returned, and in - addition, an SQL warning will be reported by the server. - - - - - - - pg_advisory_unlock_all - - pg_advisory_unlock_all () - void - - - Releases all session-level advisory locks held by the current session. - (This function is implicitly invoked at session end, even if the - client disconnects ungracefully.) - - - - - - - pg_advisory_unlock_shared - - pg_advisory_unlock_shared ( key bigint ) - boolean - - - pg_advisory_unlock_shared ( key1 integer, key2 integer ) - boolean - - - Releases a previously-acquired shared session-level advisory lock. - Returns true if the lock is successfully released. - If the lock was not held, false is returned, and in - addition, an SQL warning will be reported by the server. - - - - - - - pg_advisory_xact_lock - - pg_advisory_xact_lock ( key bigint ) - void - - - pg_advisory_xact_lock ( key1 integer, key2 integer ) - void - - - Obtains an exclusive transaction-level advisory lock, waiting if - necessary. - - - - - - - pg_advisory_xact_lock_shared - - pg_advisory_xact_lock_shared ( key bigint ) - void - - - pg_advisory_xact_lock_shared ( key1 integer, key2 integer ) - void - - - Obtains a shared transaction-level advisory lock, waiting if - necessary. - - - - - - - pg_try_advisory_lock - - pg_try_advisory_lock ( key bigint ) - boolean - - - pg_try_advisory_lock ( key1 integer, key2 integer ) - boolean - - - Obtains an exclusive session-level advisory lock if available. 
- This will either obtain the lock immediately and - return true, or return false - without waiting if the lock cannot be acquired immediately. - - - - - - - pg_try_advisory_lock_shared - - pg_try_advisory_lock_shared ( key bigint ) - boolean - - - pg_try_advisory_lock_shared ( key1 integer, key2 integer ) - boolean - - - Obtains a shared session-level advisory lock if available. - This will either obtain the lock immediately and - return true, or return false - without waiting if the lock cannot be acquired immediately. - - - - - - - pg_try_advisory_xact_lock - - pg_try_advisory_xact_lock ( key bigint ) - boolean - - - pg_try_advisory_xact_lock ( key1 integer, key2 integer ) - boolean - - - Obtains an exclusive transaction-level advisory lock if available. - This will either obtain the lock immediately and - return true, or return false - without waiting if the lock cannot be acquired immediately. - - - - - - - pg_try_advisory_xact_lock_shared - - pg_try_advisory_xact_lock_shared ( key bigint ) - boolean - - - pg_try_advisory_xact_lock_shared ( key1 integer, key2 integer ) - boolean - - - Obtains a shared transaction-level advisory lock if available. - This will either obtain the lock immediately and - return true, or return false - without waiting if the lock cannot be acquired immediately. - - - - -
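For example, a non-blocking acquire-and-release cycle on an
application-defined resource looks like this (the key value 12345 is
arbitrary):

SELECT pg_try_advisory_lock(12345);  -- true if the lock was obtained
-- ... perform the work the lock protects ...
SELECT pg_advisory_unlock(12345);    -- true; false plus a warning if not held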
- -
- -
- - - Trigger Functions - - - While many uses of triggers involve user-written trigger functions, - PostgreSQL provides a few built-in trigger - functions that can be used directly in user-defined triggers. These - are summarized in . - (Additional built-in trigger functions exist, which implement foreign - key constraints and deferred index constraints. Those are not documented - here since users need not use them directly.) - - - - For more information about creating triggers, see - . - - - - Built-In Trigger Functions - - - - - Function - - - Description - - - Example Usage - - - - - - - - - suppress_redundant_updates_trigger - - suppress_redundant_updates_trigger ( ) - trigger - - - Suppresses do-nothing update operations. See below for details. - - - CREATE TRIGGER ... suppress_redundant_updates_trigger() - - - - - - - tsvector_update_trigger - - tsvector_update_trigger ( ) - trigger - - - Automatically updates a tsvector column from associated - plain-text document column(s). The text search configuration to use - is specified by name as a trigger argument. See - for details. - - - CREATE TRIGGER ... tsvector_update_trigger(tsvcol, 'pg_catalog.swedish', title, body) - - - - - - - tsvector_update_trigger_column - - tsvector_update_trigger_column ( ) - trigger - - - Automatically updates a tsvector column from associated - plain-text document column(s). The text search configuration to use - is taken from a regconfig column of the table. See - for details. - - - CREATE TRIGGER ... tsvector_update_trigger_column(tsvcol, tsconfigcol, title, body) - - - - -
- - - The suppress_redundant_updates_trigger function, - when applied as a row-level BEFORE UPDATE trigger, - will prevent any update that does not actually change the data in the - row from taking place. This overrides the normal behavior which always - performs a physical row update - regardless of whether or not the data has changed. (This normal behavior - makes updates run faster, since no checking is required, and is also - useful in certain cases.) - - - - Ideally, you should avoid running updates that don't actually - change the data in the record. Redundant updates can cost considerable - unnecessary time, especially if there are lots of indexes to alter, - and space in dead rows that will eventually have to be vacuumed. - However, detecting such situations in client code is not - always easy, or even possible, and writing expressions to detect - them can be error-prone. An alternative is to use - suppress_redundant_updates_trigger, which will skip - updates that don't change the data. You should use this with care, - however. The trigger takes a small but non-trivial time for each record, - so if most of the records affected by updates do actually change, - use of this trigger will make updates run slower on average. - - - - The suppress_redundant_updates_trigger function can be - added to a table like this: - -CREATE TRIGGER z_min_update -BEFORE UPDATE ON tablename -FOR EACH ROW EXECUTE FUNCTION suppress_redundant_updates_trigger(); - - In most cases, you need to fire this trigger last for each row, so that - it does not override other triggers that might wish to alter the row. - Bearing in mind that triggers fire in name order, you would therefore - choose a trigger name that comes after the name of any other trigger - you might have on the table. (Hence the z prefix in the - example.) - -
- - - Event Trigger Functions - - - PostgreSQL provides these helper functions - to retrieve information from event triggers. - - - - For more information about event triggers, - see . - - - - Capturing Changes at Command End - - - pg_event_trigger_ddl_commands - - - -pg_event_trigger_ddl_commands () setof record - - - - pg_event_trigger_ddl_commands returns a list of - DDL commands executed by each user action, - when invoked in a function attached to a - ddl_command_end event trigger. If called in any other - context, an error is raised. - pg_event_trigger_ddl_commands returns one row for each - base command executed; some commands that are a single SQL sentence - may return more than one row. This function returns the following - columns: - - - - - - Name - Type - Description - - - - - - classid - oid - OID of catalog the object belongs in - - - objid - oid - OID of the object itself - - - objsubid - integer - Sub-object ID (e.g., attribute number for a column) - - - command_tag - text - Command tag - - - object_type - text - Type of the object - - - schema_name - text - - Name of the schema the object belongs in, if any; otherwise NULL. - No quoting is applied. - - - - object_identity - text - - Text rendering of the object identity, schema-qualified. Each - identifier included in the identity is quoted if necessary. - - - - in_extension - boolean - True if the command is part of an extension script - - - command - pg_ddl_command - - A complete representation of the command, in internal format. - This cannot be output directly, but it can be passed to other - functions to obtain different pieces of information about the - command. - - - - - - - - - - Processing Objects Dropped by a DDL Command - - - pg_event_trigger_dropped_objects - - - -pg_event_trigger_dropped_objects () setof record - - - - pg_event_trigger_dropped_objects returns a list of all objects - dropped by the command in whose sql_drop event it is called. - If called in any other context, an error is raised. - This function returns the following columns: - - - - - - Name - Type - Description - - - - - - classid - oid - OID of catalog the object belonged in - - - objid - oid - OID of the object itself - - - objsubid - integer - Sub-object ID (e.g., attribute number for a column) - - - original - boolean - True if this was one of the root object(s) of the deletion - - - normal - boolean - - True if there was a normal dependency relationship - in the dependency graph leading to this object - - - - is_temporary - boolean - - True if this was a temporary object - - - - object_type - text - Type of the object - - - schema_name - text - - Name of the schema the object belonged in, if any; otherwise NULL. - No quoting is applied. - - - - object_name - text - - Name of the object, if the combination of schema and name can be - used as a unique identifier for the object; otherwise NULL. - No quoting is applied, and name is never schema-qualified. - - - - object_identity - text - - Text rendering of the object identity, schema-qualified. Each - identifier included in the identity is quoted if necessary. - - - - address_names - text[] - - An array that, together with object_type and - address_args, can be used by - the pg_get_object_address function to - recreate the object address in a remote server containing an - identically named object of the same kind. 
- - - - address_args - text[] - - Complement for address_names - - - - - - - - - The pg_event_trigger_dropped_objects function can be used - in an event trigger like this: - -CREATE FUNCTION test_event_trigger_for_drops() - RETURNS event_trigger LANGUAGE plpgsql AS $$ -DECLARE - obj record; -BEGIN - FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() - LOOP - RAISE NOTICE '% dropped object: % %.% %', - tg_tag, - obj.object_type, - obj.schema_name, - obj.object_name, - obj.object_identity; - END LOOP; -END; -$$; -CREATE EVENT TRIGGER test_event_trigger_for_drops - ON sql_drop - EXECUTE FUNCTION test_event_trigger_for_drops(); - - - - - - Handling a Table Rewrite Event - - - The functions shown in - - provide information about a table for which a - table_rewrite event has just been called. - If called in any other context, an error is raised. - - - - Table Rewrite Information Functions - - - - - Function - - - Description - - - - - - - - - pg_event_trigger_table_rewrite_oid - - pg_event_trigger_table_rewrite_oid () - oid - - - Returns the OID of the table about to be rewritten. - - - - - - - pg_event_trigger_table_rewrite_reason - - pg_event_trigger_table_rewrite_reason () - integer - - - Returns a code explaining the reason(s) for rewriting. The value is - a bitmap built from the following values: 1 - (the table has changed its persistence), 2 - (default value of a column has changed), 4 - (a column has a new data type) and 8 - (the table access method has changed). - - - - -
- - - These functions can be used in an event trigger like this: - -CREATE FUNCTION test_event_trigger_table_rewrite_oid() - RETURNS event_trigger - LANGUAGE plpgsql AS -$$ -BEGIN - RAISE NOTICE 'rewriting table % for reason %', - pg_event_trigger_table_rewrite_oid()::regclass, - pg_event_trigger_table_rewrite_reason(); -END; -$$; - -CREATE EVENT TRIGGER test_table_rewrite_oid - ON table_rewrite - EXECUTE FUNCTION test_event_trigger_table_rewrite_oid(); - - -
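Because the reason code is a bitmap, a trigger body can test for individual causes with the
bitwise AND operator. A small sketch of such a check, intended for use inside an event
trigger function like the one above, using the code values listed in the table:

IF pg_event_trigger_table_rewrite_reason() & 4 <> 0 THEN
    RAISE NOTICE 'table % is being rewritten because a column changed type',
        pg_event_trigger_table_rewrite_oid()::regclass;
END IF;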
-
- - - Statistics Information Functions - - - function - statistics - - - - PostgreSQL provides a function to inspect complex - statistics defined using the CREATE STATISTICS command. - - - - Inspecting MCV Lists - - - pg_mcv_list_items - - - -pg_mcv_list_items ( pg_mcv_list ) setof record - - - - pg_mcv_list_items returns a set of records describing - all items stored in a multi-column MCV list. It - returns the following columns: - - - - - - Name - Type - Description - - - - - - index - integer - index of the item in the MCV list - - - values - text[] - values stored in the MCV item - - - nulls - boolean[] - flags identifying NULL values - - - frequency - double precision - frequency of this MCV item - - - base_frequency - double precision - base frequency of this MCV item - - - - - - - - The pg_mcv_list_items function can be used like this: - - -SELECT m.* FROM pg_statistic_ext join pg_statistic_ext_data on (oid = stxoid), - pg_mcv_list_items(stxdmcv) m WHERE stxname = 'stts'; - - - Values of the pg_mcv_list type can be obtained only from the - pg_statistic_ext_data.stxdmcv - column. - - - - - - diff --git a/doc/src/sgml/func/allfiles.sgml b/doc/src/sgml/func/allfiles.sgml new file mode 100644 index 0000000000000..ce11ef1d5d8ed --- /dev/null +++ b/doc/src/sgml/func/allfiles.sgml @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/src/sgml/func/func-admin.sgml b/doc/src/sgml/func/func-admin.sgml new file mode 100644 index 0000000000000..57ff333159f00 --- /dev/null +++ b/doc/src/sgml/func/func-admin.sgml @@ -0,0 +1,2963 @@ + + System Administration Functions + + + The functions described in this section are used to control and + monitor a PostgreSQL installation. + + + + Configuration Settings Functions + + + SET + + + + SHOW + + + + configuration + of the server + functions + + + + shows the functions + available to query and alter run-time configuration parameters. + + + + Configuration Settings Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + current_setting + + current_setting ( setting_name text , missing_ok boolean ) + text + + + Returns the current value of the + setting setting_name. If there is no such + setting, current_setting throws an error + unless missing_ok is supplied and + is true (in which case NULL is returned). + This function corresponds to + the SQL command . + + + current_setting('datestyle') + ISO, MDY + + + + + + + set_config + + set_config ( + setting_name text, + new_value text, + is_local boolean ) + text + + + Sets the parameter setting_name + to new_value, and returns that value. + If is_local is true, the new + value will only apply during the current transaction. If you want the + new value to apply for the rest of the current session, + use false instead. This function corresponds to + the SQL command . + + + set_config accepts the NULL value for + new_value, but as settings cannot be null, it + is interpreted as a request to reset the setting to its default value. + + + set_config('log_statement_stats', 'off', false) + off + + + + +
+ +
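As an illustration of how these two functions interact, a setting changed with
set_config and is_local = true reverts when the surrounding transaction ends; the
parameter chosen here is arbitrary:

BEGIN;
SELECT set_config('statement_timeout', '5s', true);
SELECT current_setting('statement_timeout');   -- shows 5s
COMMIT;
SELECT current_setting('statement_timeout');   -- back to the previous value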
+ + + Server Signaling Functions + + + signal + backend processes + + + + The functions shown in send control signals to + other server processes. Use of these functions is restricted to + superusers by default but access may be granted to others using + GRANT, with noted exceptions. + + + + Each of these functions returns true if + the signal was successfully sent and false + if sending the signal failed. + + + + Server Signaling Functions + + + + + Function + + + Description + + + + + + + + + pg_cancel_backend + + pg_cancel_backend ( pid integer ) + boolean + + + Cancels the current query of the session whose backend process has the + specified process ID. This is also allowed if the + calling role is a member of the role whose backend is being canceled or + the calling role has privileges of pg_signal_backend, + however only superusers can cancel superuser backends. + As an exception, roles with privileges of + pg_signal_autovacuum_worker are permitted to + cancel autovacuum worker processes, which are otherwise considered + superuser backends. + + + + + + + pg_log_backend_memory_contexts + + pg_log_backend_memory_contexts ( pid integer ) + boolean + + + Requests to log the memory contexts of the backend with the + specified process ID. This function can send the request to + backends and auxiliary processes except logger. These memory contexts + will be logged at + LOG message level. They will appear in + the server log based on the log configuration set + (see for more information), + but will not be sent to the client regardless of + . + + + + + + + pg_reload_conf + + pg_reload_conf () + boolean + + + Causes all processes of the PostgreSQL + server to reload their configuration files. (This is initiated by + sending a SIGHUP signal to the postmaster + process, which in turn sends SIGHUP to each + of its children.) You can use the + pg_file_settings, + pg_hba_file_rules and + pg_ident_file_mappings views + to check the configuration files for possible errors, before reloading. + + + + + + + pg_rotate_logfile + + pg_rotate_logfile () + boolean + + + Signals the log-file manager to switch to a new output file + immediately. This works only when the built-in log collector is + running, since otherwise there is no log-file manager subprocess. + + + + + + + pg_terminate_backend + + pg_terminate_backend ( pid integer, timeout bigint DEFAULT 0 ) + boolean + + + Terminates the session whose backend process has the + specified process ID. This is also allowed if the calling role + is a member of the role whose backend is being terminated or the + calling role has privileges of pg_signal_backend, + however only superusers can terminate superuser backends. + As an exception, roles with privileges of + pg_signal_autovacuum_worker are permitted to + terminate autovacuum worker processes, which are otherwise considered + superuser backends. + + + If timeout is not specified or zero, this + function returns true whether the process actually + terminates or not, indicating only that the sending of the signal was + successful. If the timeout is specified (in + milliseconds) and greater than zero, the function waits until the + process is actually terminated or until the given time has passed. If + the process is terminated, the function + returns true. On timeout, a warning is emitted and + false is returned. + + + + +
+ + + pg_cancel_backend and pg_terminate_backend + send signals (SIGINT or SIGTERM + respectively) to backend processes identified by process ID. + The process ID of an active backend can be found from + the pid column of the + pg_stat_activity view, or by listing the + postgres processes on the server (using + ps on Unix or the Task + Manager on Windows). + The role of an active backend can be found from the + usename column of the + pg_stat_activity view. + + + + pg_log_backend_memory_contexts can be used + to log the memory contexts of a backend process. For example: + +postgres=# SELECT pg_log_backend_memory_contexts(pg_backend_pid()); + pg_log_backend_memory_contexts +-------------------------------- + t +(1 row) + +One message for each memory context will be logged. For example: + +LOG: logging memory contexts of PID 10377 +STATEMENT: SELECT pg_log_backend_memory_contexts(pg_backend_pid()); +LOG: level: 1; TopMemoryContext: 80800 total in 6 blocks; 14432 free (5 chunks); 66368 used +LOG: level: 2; pgstat TabStatusArray lookup hash table: 8192 total in 1 blocks; 1408 free (0 chunks); 6784 used +LOG: level: 2; TopTransactionContext: 8192 total in 1 blocks; 7720 free (1 chunks); 472 used +LOG: level: 2; RowDescriptionContext: 8192 total in 1 blocks; 6880 free (0 chunks); 1312 used +LOG: level: 2; MessageContext: 16384 total in 2 blocks; 5152 free (0 chunks); 11232 used +LOG: level: 2; Operator class cache: 8192 total in 1 blocks; 512 free (0 chunks); 7680 used +LOG: level: 2; smgr relation table: 16384 total in 2 blocks; 4544 free (3 chunks); 11840 used +LOG: level: 2; TransactionAbortContext: 32768 total in 1 blocks; 32504 free (0 chunks); 264 used +... +LOG: level: 2; ErrorContext: 8192 total in 1 blocks; 7928 free (3 chunks); 264 used +LOG: Grand total: 1651920 bytes in 201 blocks; 622360 free (88 chunks); 1029560 used + + If there are more than 100 child contexts under the same parent, the first + 100 child contexts are logged, along with a summary of the remaining contexts. + Note that frequent calls to this function could incur significant overhead, + because it may generate a large number of log messages. + + +
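The signaling functions are often combined with pg_stat_activity so that sessions can be
targeted by their attributes rather than by a hand-copied PID. For example, to cancel the
active queries of one role that have been running for more than five minutes (a sketch;
the role name and threshold are arbitrary):

SELECT pg_cancel_backend(pid)
  FROM pg_stat_activity
 WHERE usename = 'batch_user'
   AND state = 'active'
   AND now() - query_start > interval '5 minutes';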
+ + + Backup Control Functions + + + backup + + + + The functions shown in assist in making on-line backups. + These functions cannot be executed during recovery (except + pg_backup_start, + pg_backup_stop, + and pg_wal_lsn_diff). + + + + For details about proper usage of these functions, see + . + + + + Backup Control Functions + + + + + Function + + + Description + + + + + + + + + pg_create_restore_point + + pg_create_restore_point ( name text ) + pg_lsn + + + Creates a named marker record in the write-ahead log that can later be + used as a recovery target, and returns the corresponding write-ahead + log location. The given name can then be used with + to specify the point up to + which recovery will proceed. Avoid creating multiple restore points + with the same name, since recovery will stop at the first one whose + name matches the recovery target. + + + This function is restricted to superusers by default, but other users + can be granted EXECUTE to run the function. + + + + + + + pg_current_wal_flush_lsn + + pg_current_wal_flush_lsn () + pg_lsn + + + Returns the current write-ahead log flush location (see notes below). + + + + + + + pg_current_wal_insert_lsn + + pg_current_wal_insert_lsn () + pg_lsn + + + Returns the current write-ahead log insert location (see notes below). + + + + + + + pg_current_wal_lsn + + pg_current_wal_lsn () + pg_lsn + + + Returns the current write-ahead log write location (see notes below). + + + + + + + pg_backup_start + + pg_backup_start ( + label text + , fast boolean + ) + pg_lsn + + + Prepares the server to begin an on-line backup. The only required + parameter is an arbitrary user-defined label for the backup. + (Typically this would be the name under which the backup dump file + will be stored.) + If the optional second parameter is given as true, + it specifies executing pg_backup_start as quickly + as possible. This forces a fast checkpoint which will cause a + spike in I/O operations, slowing any concurrently executing queries. + + + This function is restricted to superusers by default, but other users + can be granted EXECUTE to run the function. + + + + + + + pg_backup_stop + + pg_backup_stop ( + wait_for_archive boolean + ) + record + ( lsn pg_lsn, + labelfile text, + spcmapfile text ) + + + Finishes performing an on-line backup. The desired contents of the + backup label file and the tablespace map file are returned as part of + the result of the function and must be written to files in the + backup area. These files must not be written to the live data directory + (doing so will cause PostgreSQL to fail to restart in the event of a + crash). + + + There is an optional parameter of type boolean. + If false, the function will return immediately after the backup is + completed, without waiting for WAL to be archived. This behavior is + only useful with backup software that independently monitors WAL + archiving. Otherwise, WAL required to make the backup consistent might + be missing and make the backup useless. By default or when this + parameter is true, pg_backup_stop will wait for + WAL to be archived when archiving is enabled. (On a standby, this + means that it will wait only when archive_mode = + always. If write activity on the primary is low, + it may be useful to run pg_switch_wal on the + primary in order to trigger an immediate segment switch.) + + + When executed on a primary, this function also creates a backup + history file in the write-ahead log archive area. 
The history file + includes the label given to pg_backup_start, the + starting and ending write-ahead log locations for the backup, and the + starting and ending times of the backup. After recording the ending + location, the current write-ahead log insertion point is automatically + advanced to the next write-ahead log file, so that the ending + write-ahead log file can be archived immediately to complete the + backup. + + + The result of the function is a single record. + The lsn column holds the backup's ending + write-ahead log location (which again can be ignored). The second + column returns the contents of the backup label file, and the third + column returns the contents of the tablespace map file. These must be + stored as part of the backup and are required as part of the restore + process. + + + This function is restricted to superusers by default, but other users + can be granted EXECUTE to run the function. + + + + + + + pg_switch_wal + + pg_switch_wal () + pg_lsn + + + Forces the server to switch to a new write-ahead log file, which + allows the current file to be archived (assuming you are using + continuous archiving). The result is the ending write-ahead log + location plus 1 within the just-completed write-ahead log file. If + there has been no write-ahead log activity since the last write-ahead + log switch, pg_switch_wal does nothing and + returns the start location of the write-ahead log file currently in + use. + + + This function is restricted to superusers by default, but other users + can be granted EXECUTE to run the function. + + + + + + + pg_walfile_name + + pg_walfile_name ( lsn pg_lsn ) + text + + + Converts a write-ahead log location to the name of the WAL file + holding that location. + + + + + + + pg_walfile_name_offset + + pg_walfile_name_offset ( lsn pg_lsn ) + record + ( file_name text, + file_offset integer ) + + + Converts a write-ahead log location to a WAL file name and byte offset + within that file. + + + + + + + pg_split_walfile_name + + pg_split_walfile_name ( file_name text ) + record + ( segment_number numeric, + timeline_id bigint ) + + + Extracts the sequence number and timeline ID from a WAL file + name. + + + + + + + pg_wal_lsn_diff + + pg_wal_lsn_diff ( lsn1 pg_lsn, lsn2 pg_lsn ) + numeric + + + Calculates the difference in bytes (lsn1 - lsn2) between two write-ahead log + locations. This can be used + with pg_stat_replication or some of the + functions shown in to + get the replication lag. + + + + +
+ + + pg_current_wal_lsn displays the current write-ahead + log write location in the same format used by the above functions. + Similarly, pg_current_wal_insert_lsn displays the + current write-ahead log insertion location + and pg_current_wal_flush_lsn displays the current + write-ahead log flush location. The insertion location is + the logical end of the write-ahead log at any instant, + while the write location is the end of what has actually been written out + from the server's internal buffers, and the flush location is the last + location known to be written to durable storage. The write location is the + end of what can be examined from outside the server, and is usually what + you want if you are interested in archiving partially-complete write-ahead + log files. The insertion and flush locations are made available primarily + for server debugging purposes. These are all read-only operations and do + not require superuser permissions. + + + + You can use pg_walfile_name_offset to extract the + corresponding write-ahead log file name and byte offset from + a pg_lsn value. For example: + +postgres=# SELECT * FROM pg_walfile_name_offset((pg_backup_stop()).lsn); + file_name | file_offset +--------------------------+------------- + 00000001000000000000000D | 4039624 +(1 row) + + Similarly, pg_walfile_name extracts just the write-ahead log file name. + + + + pg_split_walfile_name is useful to compute an + LSN from a file offset and WAL file name, for example: + +postgres=# \set file_name '000000010000000100C000AB' +postgres=# \set offset 256 +postgres=# SELECT '0/0'::pg_lsn + pd.segment_number * ps.setting::int + :offset AS lsn + FROM pg_split_walfile_name(:'file_name') pd, + pg_show_all_settings() ps + WHERE ps.name = 'wal_segment_size'; + lsn +--------------- + C001/AB000100 +(1 row) + + 
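pg_wal_lsn_diff is commonly combined with pg_stat_replication to express replication lag
in bytes. A sketch, to be run on the primary:

SELECT application_name,
       pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn) AS replay_lag_bytes
  FROM pg_stat_replication;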
+ + + Recovery Control Functions + + + The functions shown in provide information + about the current status of a standby server. + These functions may be executed both during recovery and in normal running. + + + + Recovery Information Functions + + + + + Function + + + Description + + + + + + + + + pg_is_in_recovery + + pg_is_in_recovery () + boolean + + + Returns true if recovery is still in progress. + + + + + + + pg_last_wal_receive_lsn + + pg_last_wal_receive_lsn () + pg_lsn + + + Returns the last write-ahead log location that has been received and + synced to disk by streaming replication. While streaming replication + is in progress this will increase monotonically. If recovery has + completed then this will remain static at the location of the last WAL + record received and synced to disk during recovery. If streaming + replication is disabled, or if it has not yet started, the function + returns NULL. + + + + + + + pg_last_wal_replay_lsn + + pg_last_wal_replay_lsn () + pg_lsn + + + Returns the last write-ahead log location that has been replayed + during recovery. If recovery is still in progress this will increase + monotonically. If recovery has completed then this will remain + static at the location of the last WAL record applied during recovery. + When the server has been started normally without recovery, the + function returns NULL. + + + + + + + pg_last_xact_replay_timestamp + + pg_last_xact_replay_timestamp () + timestamp with time zone + + + Returns the time stamp of the last transaction replayed during + recovery. This is the time at which the commit or abort WAL record + for that transaction was generated on the primary. If no transactions + have been replayed during recovery, the function + returns NULL. Otherwise, if recovery is still in + progress this will increase monotonically. If recovery has completed + then this will remain static at the time of the last transaction + applied during recovery. When the server has been started normally + without recovery, the function returns NULL. + + + + + + + pg_get_wal_resource_managers + + pg_get_wal_resource_managers () + setof record + ( rm_id integer, + rm_name text, + rm_builtin boolean ) + + + Returns the currently-loaded WAL resource managers in the system. The + column rm_builtin indicates whether it's a + built-in resource manager, or a custom resource manager loaded by an + extension. + + + + +
+ + + The functions shown in control the progress of recovery. + These functions may be executed only during recovery. + + + + Recovery Control Functions + + + + + Function + + + Description + + + + + + + + + pg_is_wal_replay_paused + + pg_is_wal_replay_paused () + boolean + + + Returns true if recovery pause is requested. + + + + + + + pg_get_wal_replay_pause_state + + pg_get_wal_replay_pause_state () + text + + + Returns recovery pause state. The return values are + not paused if pause is not requested, + pause requested if pause is requested but recovery is + not yet paused, and paused if the recovery is + actually paused. + + + + + + + pg_promote + + pg_promote ( wait boolean DEFAULT true, wait_seconds integer DEFAULT 60 ) + boolean + + + Promotes a standby server to primary status. + With wait set to true (the + default), the function waits until promotion is completed + or wait_seconds seconds have passed, and + returns true if promotion is successful + and false otherwise. + If wait is set to false, the + function returns true immediately after sending a + SIGUSR1 signal to the postmaster to trigger + promotion. + + + This function is restricted to superusers by default, but other users + can be granted EXECUTE to run the function. + + + + + + + pg_wal_replay_pause + + pg_wal_replay_pause () + void + + + Request to pause recovery. A request doesn't mean that recovery stops + right away. If you want a guarantee that recovery is actually paused, + you need to check for the recovery pause state returned by + pg_get_wal_replay_pause_state(). Note that + pg_is_wal_replay_paused() returns whether a request + is made. While recovery is paused, no further database changes are applied. + If hot standby is active, all new queries will see the same consistent + snapshot of the database, and no further query conflicts will be generated + until recovery is resumed. + + + This function is restricted to superusers by default, but other users + can be granted EXECUTE to run the function. + + + + + + + pg_wal_replay_resume + + pg_wal_replay_resume () + void + + + Restarts recovery if it was paused. + + + This function is restricted to superusers by default, but other users + can be granted EXECUTE to run the function. + + + + +
+ + + pg_wal_replay_pause and + pg_wal_replay_resume cannot be executed while + a promotion is ongoing. If a promotion is triggered while recovery + is paused, the paused state ends and promotion continues. + + + + If streaming replication is disabled, the paused state may continue + indefinitely without a problem. If streaming replication is in + progress then WAL records will continue to be received, which will + eventually fill available disk space, depending upon the duration of + the pause, the rate of WAL generation and available disk space. + + +
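A typical pause-and-resume sequence on a standby therefore checks the reported pause state
rather than assuming a pause request took effect immediately, for example:

SELECT pg_wal_replay_pause();
SELECT pg_get_wal_replay_pause_state();   -- 'pause requested' until replay stops, then 'paused'
SELECT pg_wal_replay_resume();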
+ + + Snapshot Synchronization Functions + + + PostgreSQL allows database sessions to synchronize their + snapshots. A snapshot determines which data is visible to the + transaction that is using the snapshot. Synchronized snapshots are + necessary when two or more sessions need to see identical content in the + database. If two sessions just start their transactions independently, + there is always a possibility that some third transaction commits + between the executions of the two START TRANSACTION commands, + so that one session sees the effects of that transaction and the other + does not. + + + + To solve this problem, PostgreSQL allows a transaction to + export the snapshot it is using. As long as the exporting + transaction remains open, other transactions can import its + snapshot, and thereby be guaranteed that they see exactly the same view + of the database that the first transaction sees. But note that any + database changes made by any one of these transactions remain invisible + to the other transactions, as is usual for changes made by uncommitted + transactions. So the transactions are synchronized with respect to + pre-existing data, but act normally for changes they make themselves. + + + + Snapshots are exported with the pg_export_snapshot function, + shown in , and + imported with the command. + + + + Snapshot Synchronization Functions + + + + + Function + + + Description + + + + + + + + + pg_export_snapshot + + pg_export_snapshot () + text + + + Saves the transaction's current snapshot and returns + a text string identifying the snapshot. This string must + be passed (outside the database) to clients that want to import the + snapshot. The snapshot is available for import only until the end of + the transaction that exported it. + + + A transaction can export more than one snapshot, if needed. Note that + doing so is only useful in READ COMMITTED + transactions, since in REPEATABLE READ and higher + isolation levels, transactions use the same snapshot throughout their + lifetime. Once a transaction has exported any snapshots, it cannot be + prepared with . + + + + + + pg_log_standby_snapshot + + pg_log_standby_snapshot () + pg_lsn + + + Take a snapshot of running transactions and write it to WAL, without + having to wait for bgwriter or checkpointer to log one. This is useful + for logical decoding on standby, as logical slot creation has to wait + until such a record is replayed on the standby. + + + + +
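For example, one session exports a snapshot and a second session imports it before running
its queries; the snapshot identifier shown here is illustrative only:

-- session 1
BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
SELECT pg_export_snapshot();   -- returns, e.g., '00000003-0000001B-1'

-- session 2
BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
SET TRANSACTION SNAPSHOT '00000003-0000001B-1';
-- queries here see exactly the data visible to session 1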
+ +
+ + + Replication Management Functions + + + The functions shown + in are for + controlling and interacting with replication features. + See , + , and + + for information about the underlying features. + Use of functions for replication origin is only allowed to the + superuser by default, but may be allowed to other users by using the + GRANT command. + Use of functions for replication slots is restricted to superusers + and users having REPLICATION privilege. + + + + Many of these functions have equivalent commands in the replication + protocol; see . + + + + The functions described in + , + , and + + are also relevant for replication. + + + + Replication Management Functions + + + + + Function + + + Description + + + + + + + + + pg_create_physical_replication_slot + + pg_create_physical_replication_slot ( slot_name name , immediately_reserve boolean, temporary boolean ) + record + ( slot_name name, + lsn pg_lsn ) + + + Creates a new physical replication slot named + slot_name. The name cannot be + pg_conflict_detection as it is reserved for the + conflict detection slot. The optional second parameter, + when true, specifies that the LSN for this + replication slot be reserved immediately; otherwise + the LSN is reserved on first connection from a streaming + replication client. Streaming changes from a physical slot is only + possible with the streaming-replication protocol — + see . The optional third + parameter, temporary, when set to true, specifies that + the slot should not be permanently stored to disk and is only meant + for use by the current session. Temporary slots are also + released upon any error. This function corresponds + to the replication protocol command CREATE_REPLICATION_SLOT + ... PHYSICAL. + + + + + + + pg_drop_replication_slot + + pg_drop_replication_slot ( slot_name name ) + void + + + Drops the physical or logical replication slot + named slot_name. Same as replication protocol + command DROP_REPLICATION_SLOT. + + + + + + + pg_create_logical_replication_slot + + pg_create_logical_replication_slot ( slot_name name, plugin name , temporary boolean, twophase boolean, failover boolean ) + record + ( slot_name name, + lsn pg_lsn ) + + + Creates a new logical (decoding) replication slot named + slot_name using the output plugin + plugin. The name cannot be + pg_conflict_detection as it is reserved for + the conflict detection slot. The optional third + parameter, temporary, when set to true, specifies that + the slot should not be permanently stored to disk and is only meant + for use by the current session. Temporary slots are also + released upon any error. The optional fourth parameter, + twophase, when set to true, specifies + that the decoding of prepared transactions is enabled for this + slot. The optional fifth parameter, + failover, when set to true, + specifies that this slot is enabled to be synced to the + standbys so that logical replication can be resumed after + failover. A call to this function has the same effect as + the replication protocol command + CREATE_REPLICATION_SLOT ... LOGICAL. + + + + + + + pg_copy_physical_replication_slot + + pg_copy_physical_replication_slot ( src_slot_name name, dst_slot_name name , temporary boolean ) + record + ( slot_name name, + lsn pg_lsn ) + + + Copies an existing physical replication slot named src_slot_name + to a physical replication slot named dst_slot_name. + The new slot name cannot be pg_conflict_detection, + as it is reserved for the conflict detection. 
+ The copied physical slot starts to reserve WAL from the same LSN as the + source slot. + temporary is optional. If temporary + is omitted, the same value as the source slot is used. Copy of an + invalidated slot is not allowed. + + + + + + + pg_copy_logical_replication_slot + + pg_copy_logical_replication_slot ( src_slot_name name, dst_slot_name name , temporary boolean , plugin name ) + record + ( slot_name name, + lsn pg_lsn ) + + + Copies an existing logical replication slot + named src_slot_name to a logical replication + slot named dst_slot_name, optionally changing + the output plugin and persistence. The new slot name cannot be + pg_conflict_detection as it is reserved for + the conflict detection. The copied logical slot starts from the same + LSN as the source logical slot. Both + temporary and plugin are + optional; if they are omitted, the values of the source slot are used. + The failover option of the source logical slot + is not copied and is set to false by default. This + is to avoid the risk of being unable to continue logical replication + after failover to standby where the slot is being synchronized. Copy of + an invalidated slot is not allowed. + + + + + + + pg_logical_slot_get_changes + + pg_logical_slot_get_changes ( slot_name name, upto_lsn pg_lsn, upto_nchanges integer, VARIADIC options text[] ) + setof record + ( lsn pg_lsn, + xid xid, + data text ) + + + Returns changes in the slot slot_name, starting + from the point from which changes have been consumed last. If + upto_lsn + and upto_nchanges are NULL, + logical decoding will continue until end of WAL. If + upto_lsn is non-NULL, decoding will include only + those transactions which commit prior to the specified LSN. If + upto_nchanges is non-NULL, decoding will + stop when the number of rows produced by decoding exceeds + the specified value. Note, however, that the actual number of + rows returned may be larger, since this limit is only checked after + adding the rows produced when decoding each new transaction commit. + If the specified slot is a logical failover slot then the function will + not return until all physical slots specified in + synchronized_standby_slots + have confirmed WAL receipt. + + + + + + + pg_logical_slot_peek_changes + + pg_logical_slot_peek_changes ( slot_name name, upto_lsn pg_lsn, upto_nchanges integer, VARIADIC options text[] ) + setof record + ( lsn pg_lsn, + xid xid, + data text ) + + + Behaves just like + the pg_logical_slot_get_changes() function, + except that changes are not consumed; that is, they will be returned + again on future calls. + + + + + + + pg_logical_slot_get_binary_changes + + pg_logical_slot_get_binary_changes ( slot_name name, upto_lsn pg_lsn, upto_nchanges integer, VARIADIC options text[] ) + setof record + ( lsn pg_lsn, + xid xid, + data bytea ) + + + Behaves just like + the pg_logical_slot_get_changes() function, + except that changes are returned as bytea. + + + + + + + pg_logical_slot_peek_binary_changes + + pg_logical_slot_peek_binary_changes ( slot_name name, upto_lsn pg_lsn, upto_nchanges integer, VARIADIC options text[] ) + setof record + ( lsn pg_lsn, + xid xid, + data bytea ) + + + Behaves just like + the pg_logical_slot_peek_changes() function, + except that changes are returned as bytea. + + + + + + + pg_replication_slot_advance + + pg_replication_slot_advance ( slot_name name, upto_lsn pg_lsn ) + record + ( slot_name name, + end_lsn pg_lsn ) + + + Advances the current confirmed position of a replication slot named + slot_name. 
The slot will not be moved backwards, + and it will not be moved beyond the current insert location. Returns + the name of the slot and the actual position that it was advanced to. + The updated slot position information is written out at the next + checkpoint if any advancing is done. So in the event of a crash, the + slot may return to an earlier position. If the specified slot is a + logical failover slot then the function will not return until all + physical slots specified in + synchronized_standby_slots + have confirmed WAL receipt. + + + + + + + pg_replication_origin_create + + pg_replication_origin_create ( node_name text ) + oid + + + Creates a replication origin with the given external + name, and returns the internal ID assigned to it. + The name must be no longer than 512 bytes. + + + + + + + pg_replication_origin_drop + + pg_replication_origin_drop ( node_name text ) + void + + + Deletes a previously-created replication origin, including any + associated replay progress. + + + + + + + pg_replication_origin_oid + + pg_replication_origin_oid ( node_name text ) + oid + + + Looks up a replication origin by name and returns the internal ID. If + no such replication origin is found, NULL is + returned. + + + + + + + pg_replication_origin_session_setup + + pg_replication_origin_session_setup ( node_name text ) + void + + + Marks the current session as replaying from the given + origin, allowing replay progress to be tracked. + Can only be used if no origin is currently selected. + Use pg_replication_origin_session_reset to undo. + + + + + + + pg_replication_origin_session_reset + + pg_replication_origin_session_reset () + void + + + Cancels the effects + of pg_replication_origin_session_setup(). + + + + + + + pg_replication_origin_session_is_setup + + pg_replication_origin_session_is_setup () + boolean + + + Returns true if a replication origin has been selected in the + current session. + + + + + + + pg_replication_origin_session_progress + + pg_replication_origin_session_progress ( flush boolean ) + pg_lsn + + + Returns the replay location for the replication origin selected in + the current session. The parameter flush + determines whether the corresponding local transaction will be + guaranteed to have been flushed to disk or not. + + + + + + + pg_replication_origin_xact_setup + + pg_replication_origin_xact_setup ( origin_lsn pg_lsn, origin_timestamp timestamp with time zone ) + void + + + Marks the current transaction as replaying a transaction that has + committed at the given LSN and timestamp. Can + only be called when a replication origin has been selected + using pg_replication_origin_session_setup. + + + + + + + pg_replication_origin_xact_reset + + pg_replication_origin_xact_reset () + void + + + Cancels the effects of + pg_replication_origin_xact_setup(). + + + + + + + pg_replication_origin_advance + + pg_replication_origin_advance ( node_name text, lsn pg_lsn ) + void + + + Sets replication progress for the given node to the given + location. This is primarily useful for setting up the initial + location, or setting a new location after configuration changes and + similar. Be aware that careless use of this function can lead to + inconsistently replicated data. + + + + + + + pg_replication_origin_progress + + pg_replication_origin_progress ( node_name text, flush boolean ) + pg_lsn + + + Returns the replay location for the given replication origin. 
The + parameter flush determines whether the + corresponding local transaction will be guaranteed to have been + flushed to disk or not. + + + + + + + pg_logical_emit_message + + pg_logical_emit_message ( transactional boolean, prefix text, content text , flush boolean DEFAULT false ) + pg_lsn + + + pg_logical_emit_message ( transactional boolean, prefix text, content bytea , flush boolean DEFAULT false ) + pg_lsn + + + Emits a logical decoding message. This can be used to pass generic + messages to logical decoding plugins through + WAL. The transactional parameter specifies if + the message should be part of the current transaction, or if it should + be written immediately and decoded as soon as the logical decoder + reads the record. The prefix parameter is a + textual prefix that can be used by logical decoding plugins to easily + recognize messages that are interesting for them. + The content parameter is the content of the + message, given either in text or binary form. + The flush parameter (default set to + false) controls if the message is immediately + flushed to WAL or not. flush has no effect + with transactional, as the message's WAL + record is flushed along with its transaction. + + + + + + + pg_sync_replication_slots + + pg_sync_replication_slots () + void + + + Synchronize the logical failover replication slots from the primary + server to the standby server. This function can only be executed on the + standby server. Temporary synced slots, if any, cannot be used for + logical decoding and must be dropped after promotion. See + for details. + Note that this function is primarily intended for testing and + debugging purposes and should be used with caution. Additionally, + this function cannot be executed if + + sync_replication_slots is enabled and the slotsync + worker is already running to perform the synchronization of slots. + + + + + If, after executing the function, + + hot_standby_feedback is disabled on + the standby or the physical slot configured in + + primary_slot_name is + removed, then it is possible that the necessary rows of the + synchronized slot will be removed by the VACUUM process on the primary + server, resulting in the synchronized slot becoming invalidated. + + + + + + + +
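For example, the logical decoding functions can be exercised with the test_decoding output
plugin shipped in contrib (a sketch; the slot name is arbitrary). The peek call returns any
decoded changes without consuming them, so the subsequent get call sees them again:

SELECT slot_name FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
SELECT lsn, xid, data FROM pg_logical_slot_peek_changes('regression_slot', NULL, NULL);
SELECT lsn, xid, data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL);
SELECT pg_drop_replication_slot('regression_slot');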
+ +
+ + + Database Object Management Functions + + The functions shown in calculate + the disk space usage of database objects, or assist in presentation + or understanding of usage results. bigint results + are measured in bytes. If an OID that does + not represent an existing object is passed to one of these + functions, NULL is returned. + + + + Database Object Size Functions + + + + + Function + + + Description + + + + + + + + + pg_column_size + + pg_column_size ( "any" ) + integer + + + Shows the number of bytes used to store any individual data value. If + applied directly to a table column value, this reflects any + compression that was done. + + + + + + + pg_column_compression + + pg_column_compression ( "any" ) + text + + + Shows the compression algorithm that was used to compress + an individual variable-length value. Returns NULL + if the value is not compressed. + + + + + + + pg_column_toast_chunk_id + + pg_column_toast_chunk_id ( "any" ) + oid + + + Shows the chunk_id of an on-disk + TOASTed value. Returns NULL + if the value is un-TOASTed or not on-disk. See + for more information about + TOAST. + + + + + + + pg_database_size + + pg_database_size ( name ) + bigint + + + pg_database_size ( oid ) + bigint + + + Computes the total disk space used by the database with the specified + name or OID. To use this function, you must + have CONNECT privilege on the specified database + (which is granted by default) or have privileges of + the pg_read_all_stats role. + + + + + + + pg_indexes_size + + pg_indexes_size ( regclass ) + bigint + + + Computes the total disk space used by indexes attached to the + specified table. + + + + + + + pg_relation_size + + pg_relation_size ( relation regclass , fork text ) + bigint + + + Computes the disk space used by one fork of the + specified relation. (Note that for most purposes it is more + convenient to use the higher-level + functions pg_total_relation_size + or pg_table_size, which sum the sizes of all + forks.) With one argument, this returns the size of the main data + fork of the relation. The second argument can be provided to specify + which fork to examine: + + + + main returns the size of the main + data fork of the relation. + + + + + fsm returns the size of the Free Space Map + (see ) associated with the relation. + + + + + vm returns the size of the Visibility Map + (see ) associated with the relation. + + + + + init returns the size of the initialization + fork, if any, associated with the relation. + + + + + + + + + + pg_size_bytes + + pg_size_bytes ( text ) + bigint + + + Converts a size in human-readable format (as returned + by pg_size_pretty) into bytes. Valid units are + bytes, B, kB, + MB, GB, TB, + and PB. + + + + + + + pg_size_pretty + + pg_size_pretty ( bigint ) + text + + + pg_size_pretty ( numeric ) + text + + + Converts a size in bytes into a more easily human-readable format with + size units (bytes, kB, MB, GB, TB, or PB as appropriate). Note that the + units are powers of 2 rather than powers of 10, so 1kB is 1024 bytes, + 1MB is 1024² = 1048576 bytes, and so on. + + + + + + + pg_table_size + + pg_table_size ( regclass ) + bigint + + + Computes the disk space used by the specified table, excluding indexes + (but including its TOAST table if any, free space map, and visibility + map). + + + + + + + pg_tablespace_size + + pg_tablespace_size ( name ) + bigint + + + pg_tablespace_size ( oid ) + bigint + + + Computes the total disk space used in the tablespace with the + specified name or OID. 
To use this function, you must + have CREATE privilege on the specified tablespace + or have privileges of the pg_read_all_stats role, + unless it is the default tablespace for the current database. + + + + + + + pg_total_relation_size + + pg_total_relation_size ( regclass ) + bigint + + + Computes the total disk space used by the specified table, including + all indexes and TOAST data. The result is + equivalent to pg_table_size + + pg_indexes_size. + + + + +
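These functions are often combined to get a quick overview of disk usage. For example, to
list the five largest ordinary tables in the current database by total size (a sketch):

SELECT relname,
       pg_size_pretty(pg_total_relation_size(oid)) AS total_size
  FROM pg_class
 WHERE relkind = 'r'
 ORDER BY pg_total_relation_size(oid) DESC
 LIMIT 5;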
+ + + The functions above that operate on tables or indexes accept a + regclass argument, which is simply the OID of the table or index + in the pg_class system catalog. You do not have to look up + the OID by hand, however, since the regclass data type's input + converter will do the work for you. See + for details. + + + + The functions shown in assist + in identifying the specific disk files associated with database objects. + + + + Database Object Location Functions + + + + + Function + + + Description + + + + + + + + + pg_relation_filenode + + pg_relation_filenode ( relation regclass ) + oid + + + Returns the filenode number currently assigned to the + specified relation. The filenode is the base component of the file + name(s) used for the relation (see + for more information). + For most relations the result is the same as + pg_class.relfilenode, + but for certain system catalogs relfilenode + is zero and this function must be used to get the correct value. The + function returns NULL if passed a relation that does not have storage, + such as a view. + + + + + + + pg_relation_filepath + + pg_relation_filepath ( relation regclass ) + text + + + Returns the entire file path name (relative to the database cluster's + data directory, PGDATA) of the relation. + + + + + + + pg_filenode_relation + + pg_filenode_relation ( tablespace oid, filenode oid ) + regclass + + + Returns a relation's OID given the tablespace OID and filenode it is + stored under. This is essentially the inverse mapping of + pg_relation_filepath. For a relation in the + database's default tablespace, the tablespace can be specified as zero. + Returns NULL if no relation in the current database + is associated with the given values, or if dealing with a temporary + relation. + + + + +
+ + + lists functions used to manage + collations. + + + + Collation Management Functions + + + + + Function + + + Description + + + + + + + + + pg_collation_actual_version + + pg_collation_actual_version ( oid ) + text + + + Returns the actual version of the collation object as it is currently + installed in the operating system. If this is different from the + value in + pg_collation.collversion, + then objects depending on the collation might need to be rebuilt. See + also . + + + + + + + pg_database_collation_actual_version + + pg_database_collation_actual_version ( oid ) + text + + + Returns the actual version of the database's collation as it is currently + installed in the operating system. If this is different from the + value in + pg_database.datcollversion, + then objects depending on the collation might need to be rebuilt. See + also . + + + + + + + pg_import_system_collations + + pg_import_system_collations ( schema regnamespace ) + integer + + + Adds collations to the system + catalog pg_collation based on all the locales + it finds in the operating system. This is + what initdb uses; see + for more details. If additional + locales are installed into the operating system later on, this + function can be run again to add collations for the new locales. + Locales that match existing entries + in pg_collation will be skipped. (But + collation objects based on locales that are no longer present in the + operating system are not removed by this function.) + The schema parameter would typically + be pg_catalog, but that is not a requirement; the + collations could be installed into some other schema as well. The + function returns the number of new collation objects it created. + Use of this function is restricted to superusers. + + + + +
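For example, collations whose recorded version no longer matches what is currently
installed in the operating system, and whose dependent objects may therefore need to be
rebuilt, can be listed like this (a sketch):

SELECT collname, collversion,
       pg_collation_actual_version(oid) AS actual_version
  FROM pg_collation
 WHERE collversion <> pg_collation_actual_version(oid);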
+ + + lists functions used to + manipulate statistics. + These functions cannot be executed during recovery. + + + Changes made by these statistics manipulation functions are likely to be + overwritten by autovacuum (or manual + VACUUM or ANALYZE) and should be + considered temporary. + + + + + + Database Object Statistics Manipulation Functions + + + + + Function + + + Description + + + + + + + + + pg_restore_relation_stats + + pg_restore_relation_stats ( + VARIADIC kwargs "any" ) + boolean + + + Updates table-level statistics. Ordinarily, these statistics are + collected automatically or updated as a part of or , so it's not + necessary to call this function. However, it is useful after a + restore to enable the optimizer to choose better plans if + ANALYZE has not been run yet. + + + The tracked statistics may change from version to version, so + arguments are passed as pairs of argname + and argvalue in the form: + +SELECT pg_restore_relation_stats( + 'arg1name', 'arg1value'::arg1type, + 'arg2name', 'arg2value'::arg2type, + 'arg3name', 'arg3value'::arg3type); + + + + For example, to set the relpages and + reltuples values for the table + mytable: + +SELECT pg_restore_relation_stats( + 'schemaname', 'myschema', + 'relname', 'mytable', + 'relpages', 173::integer, + 'reltuples', 10000::real); + + + + The arguments schemaname and + relname are required, and specify the table. Other + arguments are the names and values of statistics corresponding to + certain columns in pg_class. + The currently-supported relation statistics are + relpages with a value of type + integer, reltuples with a value of + type real, relallvisible with a value + of type integer, and relallfrozen + with a value of type integer. + + + Additionally, this function accepts argument name + version of type integer, which + specifies the server version from which the statistics originated. + This is anticipated to be helpful in porting statistics from older + versions of PostgreSQL. + + + Minor errors are reported as a WARNING and + ignored, and remaining statistics will still be restored. If all + specified statistics are successfully restored, returns + true, otherwise false. + + + The caller must have the MAINTAIN privilege on the + table or be the owner of the database. + + + + + + + + + pg_clear_relation_stats + + pg_clear_relation_stats ( schemaname text, relname text ) + void + + + Clears table-level statistics for the given relation, as though the + table was newly created. + + + The caller must have the MAINTAIN privilege on the + table or be the owner of the database. + + + + + + + + pg_restore_attribute_stats + + pg_restore_attribute_stats ( + VARIADIC kwargs "any" ) + boolean + + + Creates or updates column-level statistics. Ordinarily, these + statistics are collected automatically or updated as a part of or , so it's not + necessary to call this function. However, it is useful after a + restore to enable the optimizer to choose better plans if + ANALYZE has not been run yet. 
+ + + The tracked statistics may change from version to version, so + arguments are passed as pairs of argname + and argvalue in the form: + +SELECT pg_restore_attribute_stats( + 'arg1name', 'arg1value'::arg1type, + 'arg2name', 'arg2value'::arg2type, + 'arg3name', 'arg3value'::arg3type); + + + + For example, to set the avg_width and + null_frac values for the attribute + col1 of the table + mytable: + +SELECT pg_restore_attribute_stats( + 'schemaname', 'myschema', + 'relname', 'mytable', + 'attname', 'col1', + 'inherited', false, + 'avg_width', 125::integer, + 'null_frac', 0.5::real); + + + + The required arguments are schemaname and + relname with a value of type text + which specify the table; either attname with a + value of type text or attnum with a + value of type smallint, which specifies the column; and + inherited, which specifies whether the statistics + include values from child tables. Other arguments are the names and + values of statistics corresponding to columns in pg_stats. + + + Additionally, this function accepts argument name + version of type integer, which + specifies the server version from which the statistics originated. + This is anticipated to be helpful in porting statistics from older + versions of PostgreSQL. + + + Minor errors are reported as a WARNING and + ignored, and remaining statistics will still be restored. If all + specified statistics are successfully restored, returns + true, otherwise false. + + + The caller must have the MAINTAIN privilege on the + table or be the owner of the database. + + + + + + + + + pg_clear_attribute_stats + + pg_clear_attribute_stats ( + schemaname text, + relname text, + attname text, + inherited boolean ) + void + + + Clears column-level statistics for the given relation and + attribute, as though the table was newly created. + + + The caller must have the MAINTAIN privilege on + the table or be the owner of the database. + + + + + +
+ + + lists functions that provide + information about the structure of partitioned tables. + + + + Partitioning Information Functions + + + + + Function + + + Description + + + + + + + + + pg_partition_tree + + pg_partition_tree ( regclass ) + setof record + ( relid regclass, + parentrelid regclass, + isleaf boolean, + level integer ) + + + Lists the tables or indexes in the partition tree of the + given partitioned table or partitioned index, with one row for each + partition. Information provided includes the OID of the partition, + the OID of its immediate parent, a boolean value telling if the + partition is a leaf, and an integer telling its level in the hierarchy. + The level value is 0 for the input table or index, 1 for its + immediate child partitions, 2 for their partitions, and so on. + Returns no rows if the relation does not exist or is not a partition + or partitioned table. + + + + + + + pg_partition_ancestors + + pg_partition_ancestors ( regclass ) + setof regclass + + + Lists the ancestor relations of the given partition, + including the relation itself. Returns no rows if the relation + does not exist or is not a partition or partitioned table. + + + + + + + pg_partition_root + + pg_partition_root ( regclass ) + regclass + + + Returns the top-most parent of the partition tree to which the given + relation belongs. Returns NULL if the relation + does not exist or is not a partition or partitioned table. + + + + +
+ + + For example, to check the total size of the data contained in a + partitioned table measurement, one could use the + following query: + +SELECT pg_size_pretty(sum(pg_relation_size(relid))) AS total_size + FROM pg_partition_tree('measurement'); + + + +
+ + + Index Maintenance Functions + + + shows the functions + available for index maintenance tasks. (Note that these maintenance + tasks are normally done automatically by autovacuum; use of these + functions is only required in special cases.) + These functions cannot be executed during recovery. + Use of these functions is restricted to superusers and the owner + of the given index. + + + + Index Maintenance Functions + + + + + Function + + + Description + + + + + + + + + brin_summarize_new_values + + brin_summarize_new_values ( index regclass ) + integer + + + Scans the specified BRIN index to find page ranges in the base table + that are not currently summarized by the index; for any such range it + creates a new summary index tuple by scanning those table pages. + Returns the number of new page range summaries that were inserted + into the index. + + + + + + + brin_summarize_range + + brin_summarize_range ( index regclass, blockNumber bigint ) + integer + + + Summarizes the page range covering the given block, if not already + summarized. This is + like brin_summarize_new_values except that it + only processes the page range that covers the given table block number. + + + + + + + brin_desummarize_range + + brin_desummarize_range ( index regclass, blockNumber bigint ) + void + + + Removes the BRIN index tuple that summarizes the page range covering + the given table block, if there is one. + + + + + + + gin_clean_pending_list + + gin_clean_pending_list ( index regclass ) + bigint + + + Cleans up the pending list of the specified GIN index + by moving entries in it, in bulk, to the main GIN data structure. + Returns the number of pages removed from the pending list. + If the argument is a GIN index built with + the fastupdate option disabled, no cleanup happens + and the result is zero, because the index doesn't have a pending list. + See and + for details about the pending list and fastupdate + option. + + + + +
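For example, to summarize any new page ranges of a BRIN index and to flush the pending
list of a GIN index by hand (a sketch; both index names are hypothetical):

SELECT brin_summarize_new_values('measurement_brin_idx');
SELECT gin_clean_pending_list('documents_gin_idx');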
+ +
+ + + Generic File Access Functions + + + The functions shown in provide native access to + files on the machine hosting the server. Only files within the + database cluster directory and the log_directory can be + accessed, unless the user is a superuser or is granted the role + pg_read_server_files. Use a relative path for files in + the cluster directory, and a path matching the log_directory + configuration setting for log files. + + + + Note that granting users the EXECUTE privilege on + pg_read_file(), or related functions, allows them the + ability to read any file on the server that the database server process can + read; these functions bypass all in-database privilege checks. This means + that, for example, a user with such access is able to read the contents of + the pg_authid table where authentication + information is stored, as well as read any table data in the database. + Therefore, granting access to these functions should be carefully + considered. + + + + When granting privilege on these functions, note that the table entries + showing optional parameters are mostly implemented as several physical + functions with different parameter lists. Privilege must be granted + separately on each such function, if it is to be + used. psql's \df command + can be useful to check what the actual function signatures are. + + + + Some of these functions take an optional missing_ok + parameter, which specifies the behavior when the file or directory does + not exist. If true, the function + returns NULL or an empty result set, as appropriate. + If false, an error is raised. (Failure conditions + other than file not found are reported as errors in any + case.) The default is false. + + + + Generic File Access Functions + + + + + Function + + + Description + + + + + + + + + pg_ls_dir + + pg_ls_dir ( dirname text , missing_ok boolean, include_dot_dirs boolean ) + setof text + + + Returns the names of all files (and directories and other special + files) in the specified + directory. The include_dot_dirs parameter + indicates whether . and .. are to be + included in the result set; the default is to exclude them. Including + them can be useful when missing_ok + is true, to distinguish an empty directory from a + non-existent directory. + + + This function is restricted to superusers by default, but other users + can be granted EXECUTE to run the function. + + + + + + + pg_ls_logdir + + pg_ls_logdir () + setof record + ( name text, + size bigint, + modification timestamp with time zone ) + + + Returns the name, size, and last modification time (mtime) of each + ordinary file in the server's log directory. Filenames beginning with + a dot, directories, and other special files are excluded. + + + This function is restricted to superusers and roles with privileges of + the pg_monitor role by default, but other users can + be granted EXECUTE to run the function. + + + + + + + pg_ls_waldir + + pg_ls_waldir () + setof record + ( name text, + size bigint, + modification timestamp with time zone ) + + + Returns the name, size, and last modification time (mtime) of each + ordinary file in the server's write-ahead log (WAL) directory. + Filenames beginning with a dot, directories, and other special files + are excluded. + + + This function is restricted to superusers and roles with privileges of + the pg_monitor role by default, but other users can + be granted EXECUTE to run the function. 
+ + + + + + + pg_ls_logicalmapdir + + pg_ls_logicalmapdir () + setof record + ( name text, + size bigint, + modification timestamp with time zone ) + + + Returns the name, size, and last modification time (mtime) of each + ordinary file in the server's pg_logical/mappings + directory. Filenames beginning with a dot, directories, and other + special files are excluded. + + + This function is restricted to superusers and members of + the pg_monitor role by default, but other users can + be granted EXECUTE to run the function. + + + + + + + pg_ls_logicalsnapdir + + pg_ls_logicalsnapdir () + setof record + ( name text, + size bigint, + modification timestamp with time zone ) + + + Returns the name, size, and last modification time (mtime) of each + ordinary file in the server's pg_logical/snapshots + directory. Filenames beginning with a dot, directories, and other + special files are excluded. + + + This function is restricted to superusers and members of + the pg_monitor role by default, but other users can + be granted EXECUTE to run the function. + + + + + + + pg_ls_replslotdir + + pg_ls_replslotdir ( slot_name text ) + setof record + ( name text, + size bigint, + modification timestamp with time zone ) + + + Returns the name, size, and last modification time (mtime) of each + ordinary file in the server's pg_replslot/slot_name + directory, where slot_name is the name of the + replication slot provided as input of the function. Filenames beginning + with a dot, directories, and other special files are excluded. + + + This function is restricted to superusers and members of + the pg_monitor role by default, but other users can + be granted EXECUTE to run the function. + + + + + + + pg_ls_summariesdir + + pg_ls_summariesdir () + setof record + ( name text, + size bigint, + modification timestamp with time zone ) + + + Returns the name, size, and last modification time (mtime) of each + ordinary file in the server's WAL summaries directory + (pg_wal/summaries). Filenames beginning + with a dot, directories, and other special files are excluded. + + + This function is restricted to superusers and members of + the pg_monitor role by default, but other users can + be granted EXECUTE to run the function. + + + + + + + pg_ls_archive_statusdir + + pg_ls_archive_statusdir () + setof record + ( name text, + size bigint, + modification timestamp with time zone ) + + + Returns the name, size, and last modification time (mtime) of each + ordinary file in the server's WAL archive status directory + (pg_wal/archive_status). Filenames beginning + with a dot, directories, and other special files are excluded. + + + This function is restricted to superusers and members of + the pg_monitor role by default, but other users can + be granted EXECUTE to run the function. + + + + + + + + pg_ls_tmpdir + + pg_ls_tmpdir ( tablespace oid ) + setof record + ( name text, + size bigint, + modification timestamp with time zone ) + + + Returns the name, size, and last modification time (mtime) of each + ordinary file in the temporary file directory for the + specified tablespace. + If tablespace is not provided, + the pg_default tablespace is examined. Filenames + beginning with a dot, directories, and other special files are + excluded. + + + This function is restricted to superusers and members of + the pg_monitor role by default, but other users can + be granted EXECUTE to run the function. 
+ + + + + + + pg_read_file + + pg_read_file ( filename text , offset bigint, length bigint , missing_ok boolean ) + text + + + Returns all or part of a text file, starting at the + given byte offset, returning at + most length bytes (less if the end of file is + reached first). If offset is negative, it is + relative to the end of the file. If offset + and length are omitted, the entire file is + returned. The bytes read from the file are interpreted as a string in + the database's encoding; an error is thrown if they are not valid in + that encoding. + + + This function is restricted to superusers by default, but other users + can be granted EXECUTE to run the function. + + + + + + + pg_read_binary_file + + pg_read_binary_file ( filename text , offset bigint, length bigint , missing_ok boolean ) + bytea + + + Returns all or part of a file. This function is identical to + pg_read_file except that it can read arbitrary + binary data, returning the result as bytea + not text; accordingly, no encoding checks are performed. + + + This function is restricted to superusers by default, but other users + can be granted EXECUTE to run the function. + + + In combination with the convert_from function, + this function can be used to read a text file in a specified encoding + and convert to the database's encoding: + +SELECT convert_from(pg_read_binary_file('file_in_utf8.txt'), 'UTF8'); + + + + + + + + pg_stat_file + + pg_stat_file ( filename text , missing_ok boolean ) + record + ( size bigint, + access timestamp with time zone, + modification timestamp with time zone, + change timestamp with time zone, + creation timestamp with time zone, + isdir boolean ) + + + Returns a record containing the file's size, last access time stamp, + last modification time stamp, last file status change time stamp (Unix + platforms only), file creation time stamp (Windows only), and a flag + indicating if it is a directory. + + + This function is restricted to superusers by default, but other users + can be granted EXECUTE to run the function. + + + + + +
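+
+   As a sketch (the file name shown is illustrative; actual log file
+   names depend on the log_filename setting), a superuser could locate
+   and inspect the newest server log file like this:
+
+-- newest entry in the log directory
+SELECT name FROM pg_ls_logdir() ORDER BY modification DESC LIMIT 1;
+-- read the first 1000 bytes, returning NULL instead of an error
+-- if the file has disappeared in the meantime
+SELECT pg_read_file('log/postgresql.log', 0, 1000, true);
+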
+ +
+ + + Advisory Lock Functions + + + The functions shown in + manage advisory locks. For details about proper use of these functions, + see . + + + + All these functions are intended to be used to lock application-defined + resources, which can be identified either by a single 64-bit key value or + two 32-bit key values (note that these two key spaces do not overlap). + If another session already holds a conflicting lock on the same resource + identifier, the functions will either wait until the resource becomes + available, or return a false result, as appropriate for + the function. + Locks can be either shared or exclusive: a shared lock does not conflict + with other shared locks on the same resource, only with exclusive locks. + Locks can be taken at session level (so that they are held until released + or the session ends) or at transaction level (so that they are held until + the current transaction ends; there is no provision for manual release). + Multiple session-level lock requests stack, so that if the same resource + identifier is locked three times there must then be three unlock requests + to release the resource in advance of session end. + + + + Advisory Lock Functions + + + + + Function + + + Description + + + + + + + + + pg_advisory_lock + + pg_advisory_lock ( key bigint ) + void + + + pg_advisory_lock ( key1 integer, key2 integer ) + void + + + Obtains an exclusive session-level advisory lock, waiting if necessary. + + + + + + + pg_advisory_lock_shared + + pg_advisory_lock_shared ( key bigint ) + void + + + pg_advisory_lock_shared ( key1 integer, key2 integer ) + void + + + Obtains a shared session-level advisory lock, waiting if necessary. + + + + + + + pg_advisory_unlock + + pg_advisory_unlock ( key bigint ) + boolean + + + pg_advisory_unlock ( key1 integer, key2 integer ) + boolean + + + Releases a previously-acquired exclusive session-level advisory lock. + Returns true if the lock is successfully released. + If the lock was not held, false is returned, and in + addition, an SQL warning will be reported by the server. + + + + + + + pg_advisory_unlock_all + + pg_advisory_unlock_all () + void + + + Releases all session-level advisory locks held by the current session. + (This function is implicitly invoked at session end, even if the + client disconnects ungracefully.) + + + + + + + pg_advisory_unlock_shared + + pg_advisory_unlock_shared ( key bigint ) + boolean + + + pg_advisory_unlock_shared ( key1 integer, key2 integer ) + boolean + + + Releases a previously-acquired shared session-level advisory lock. + Returns true if the lock is successfully released. + If the lock was not held, false is returned, and in + addition, an SQL warning will be reported by the server. + + + + + + + pg_advisory_xact_lock + + pg_advisory_xact_lock ( key bigint ) + void + + + pg_advisory_xact_lock ( key1 integer, key2 integer ) + void + + + Obtains an exclusive transaction-level advisory lock, waiting if + necessary. + + + + + + + pg_advisory_xact_lock_shared + + pg_advisory_xact_lock_shared ( key bigint ) + void + + + pg_advisory_xact_lock_shared ( key1 integer, key2 integer ) + void + + + Obtains a shared transaction-level advisory lock, waiting if + necessary. + + + + + + + pg_try_advisory_lock + + pg_try_advisory_lock ( key bigint ) + boolean + + + pg_try_advisory_lock ( key1 integer, key2 integer ) + boolean + + + Obtains an exclusive session-level advisory lock if available. 
+ This will either obtain the lock immediately and + return true, or return false + without waiting if the lock cannot be acquired immediately. + + + + + + + pg_try_advisory_lock_shared + + pg_try_advisory_lock_shared ( key bigint ) + boolean + + + pg_try_advisory_lock_shared ( key1 integer, key2 integer ) + boolean + + + Obtains a shared session-level advisory lock if available. + This will either obtain the lock immediately and + return true, or return false + without waiting if the lock cannot be acquired immediately. + + + + + + + pg_try_advisory_xact_lock + + pg_try_advisory_xact_lock ( key bigint ) + boolean + + + pg_try_advisory_xact_lock ( key1 integer, key2 integer ) + boolean + + + Obtains an exclusive transaction-level advisory lock if available. + This will either obtain the lock immediately and + return true, or return false + without waiting if the lock cannot be acquired immediately. + + + + + + + pg_try_advisory_xact_lock_shared + + pg_try_advisory_xact_lock_shared ( key bigint ) + boolean + + + pg_try_advisory_xact_lock_shared ( key1 integer, key2 integer ) + boolean + + + Obtains a shared transaction-level advisory lock if available. + This will either obtain the lock immediately and + return true, or return false + without waiting if the lock cannot be acquired immediately. + + + + +
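+
+   A common pattern with the try variants, sketched here with an
+   arbitrary application-chosen key of 42, is to attempt the lock
+   without blocking and skip the protected work if another session
+   already holds it:
+
+SELECT pg_try_advisory_lock(42) AS got_lock;  -- true only if acquired
+-- ... perform the protected work only when got_lock is true ...
+SELECT pg_advisory_unlock(42);                -- then release it
+
+   With the transaction-level forms, such as
+   pg_advisory_xact_lock(1, 42), the lock is released automatically at
+   transaction end, so no corresponding unlock function exists.
+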
+ +
+ +
diff --git a/doc/src/sgml/func/func-aggregate.sgml b/doc/src/sgml/func/func-aggregate.sgml new file mode 100644 index 0000000000000..f50b692516b62 --- /dev/null +++ b/doc/src/sgml/func/func-aggregate.sgml @@ -0,0 +1,1418 @@ + + Aggregate Functions + + + aggregate function + built-in + + + + Aggregate functions compute a single result + from a set of input values. The built-in general-purpose aggregate + functions are listed in + while statistical aggregates are in . + The built-in within-group ordered-set aggregate functions + are listed in + while the built-in within-group hypothetical-set ones are in . Grouping operations, + which are closely related to aggregate functions, are listed in + . + The special syntax considerations for aggregate + functions are explained in . + Consult for additional introductory + information. + + + + Aggregate functions that support Partial Mode + are eligible to participate in various optimizations, such as parallel + aggregation. + + + + While all aggregates below accept an optional + ORDER BY clause (as outlined in ), the clause has only been added to + aggregates whose output is affected by ordering. + + + + General-Purpose Aggregate Functions + + + + + + + Function + + + Description + + Partial Mode + + + + + + + + any_value + + any_value ( anyelement ) + same as input type + + + Returns an arbitrary value from the non-null input values. + + Yes + + + + + + array_agg + + array_agg ( anynonarray ORDER BY input_sort_columns ) + anyarray + + + Collects all the input values, including nulls, into an array. + + Yes + + + + + array_agg ( anyarray ORDER BY input_sort_columns ) + anyarray + + + Concatenates all the input arrays into an array of one higher + dimension. (The inputs must all have the same dimensionality, and + cannot be empty or null.) + + Yes + + + + + + average + + + avg + + avg ( smallint ) + numeric + + + avg ( integer ) + numeric + + + avg ( bigint ) + numeric + + + avg ( numeric ) + numeric + + + avg ( real ) + double precision + + + avg ( double precision ) + double precision + + + avg ( interval ) + interval + + + Computes the average (arithmetic mean) of all the non-null input + values. + + Yes + + + + + + bit_and + + bit_and ( smallint ) + smallint + + + bit_and ( integer ) + integer + + + bit_and ( bigint ) + bigint + + + bit_and ( bit ) + bit + + + Computes the bitwise AND of all non-null input values. + + Yes + + + + + + bit_or + + bit_or ( smallint ) + smallint + + + bit_or ( integer ) + integer + + + bit_or ( bigint ) + bigint + + + bit_or ( bit ) + bit + + + Computes the bitwise OR of all non-null input values. + + Yes + + + + + + bit_xor + + bit_xor ( smallint ) + smallint + + + bit_xor ( integer ) + integer + + + bit_xor ( bigint ) + bigint + + + bit_xor ( bit ) + bit + + + Computes the bitwise exclusive OR of all non-null input values. + Can be useful as a checksum for an unordered set of values. + + Yes + + + + + + bool_and + + bool_and ( boolean ) + boolean + + + Returns true if all non-null input values are true, otherwise false. + + Yes + + + + + + bool_or + + bool_or ( boolean ) + boolean + + + Returns true if any non-null input value is true, otherwise false. + + Yes + + + + + + count + + count ( * ) + bigint + + + Computes the number of input rows. + + Yes + + + + + count ( "any" ) + bigint + + + Computes the number of input rows in which the input value is not + null. + + Yes + + + + + + every + + every ( boolean ) + boolean + + + This is the SQL standard's equivalent to bool_and. 
+ + Yes + + + + + + json_agg + + json_agg ( anyelement ORDER BY input_sort_columns ) + json + + + + jsonb_agg + + jsonb_agg ( anyelement ORDER BY input_sort_columns ) + jsonb + + + Collects all the input values, including nulls, into a JSON array. + Values are converted to JSON as per to_json + or to_jsonb. + + No + + + + + + json_agg_strict + + json_agg_strict ( anyelement ) + json + + + + jsonb_agg_strict + + jsonb_agg_strict ( anyelement ) + jsonb + + + Collects all the input values, skipping nulls, into a JSON array. + Values are converted to JSON as per to_json + or to_jsonb. + + No + + + + + json_arrayagg + json_arrayagg ( + value_expression + ORDER BY sort_expression + { NULL | ABSENT } ON NULL + RETURNING data_type FORMAT JSON ENCODING UTF8 ) + + + Behaves in the same way as json_array + but as an aggregate function so it only takes one + value_expression parameter. + If ABSENT ON NULL is specified, any NULL + values are omitted. + If ORDER BY is specified, the elements will + appear in the array in that order rather than in the input order. + + + SELECT json_arrayagg(v) FROM (VALUES(2),(1)) t(v) + [2, 1] + + No + + + + + json_objectagg + json_objectagg ( + { key_expression { VALUE | ':' } value_expression } + { NULL | ABSENT } ON NULL + { WITH | WITHOUT } UNIQUE KEYS + RETURNING data_type FORMAT JSON ENCODING UTF8 ) + + + Behaves like json_object, but as an + aggregate function, so it only takes one + key_expression and one + value_expression parameter. + + + SELECT json_objectagg(k:v) FROM (VALUES ('a'::text,current_date),('b',current_date + 1)) AS t(k,v) + { "a" : "2022-05-10", "b" : "2022-05-11" } + + No + + + + + + json_object_agg + + json_object_agg ( key + "any", value + "any" + ORDER BY input_sort_columns ) + json + + + + jsonb_object_agg + + jsonb_object_agg ( key + "any", value + "any" + ORDER BY input_sort_columns ) + jsonb + + + Collects all the key/value pairs into a JSON object. Key arguments + are coerced to text; value arguments are converted as per + to_json or to_jsonb. + Values can be null, but keys cannot. + + No + + + + + + json_object_agg_strict + + json_object_agg_strict ( + key "any", + value "any" ) + json + + + + jsonb_object_agg_strict + + jsonb_object_agg_strict ( + key "any", + value "any" ) + jsonb + + + Collects all the key/value pairs into a JSON object. Key arguments + are coerced to text; value arguments are converted as per + to_json or to_jsonb. + The key can not be null. If the + value is null then the entry is skipped, + + No + + + + + + json_object_agg_unique + + json_object_agg_unique ( + key "any", + value "any" ) + json + + + + jsonb_object_agg_unique + + jsonb_object_agg_unique ( + key "any", + value "any" ) + jsonb + + + Collects all the key/value pairs into a JSON object. Key arguments + are coerced to text; value arguments are converted as per + to_json or to_jsonb. + Values can be null, but keys cannot. + If there is a duplicate key an error is thrown. + + No + + + + + + json_object_agg_unique_strict + + json_object_agg_unique_strict ( + key "any", + value "any" ) + json + + + + jsonb_object_agg_unique_strict + + jsonb_object_agg_unique_strict ( + key "any", + value "any" ) + jsonb + + + Collects all the key/value pairs into a JSON object. Key arguments + are coerced to text; value arguments are converted as per + to_json or to_jsonb. + The key can not be null. If the + value is null then the entry is skipped. + If there is a duplicate key an error is thrown. 
+ + No + + + + + + max + + max ( see text ) + same as input type + + + Computes the maximum of the non-null input + values. Available for any numeric, string, date/time, or enum type, + as well as bytea, inet, interval, + money, oid, pg_lsn, + tid, xid8, + and also arrays and composite types containing sortable data types. + + Yes + + + + + + min + + min ( see text ) + same as input type + + + Computes the minimum of the non-null input + values. Available for any numeric, string, date/time, or enum type, + as well as bytea, inet, interval, + money, oid, pg_lsn, + tid, xid8, + and also arrays and composite types containing sortable data types. + + Yes + + + + + + range_agg + + range_agg ( value + anyrange ) + anymultirange + + + range_agg ( value + anymultirange ) + anymultirange + + + Computes the union of the non-null input values. + + No + + + + + + range_intersect_agg + + range_intersect_agg ( value + anyrange ) + anyrange + + + range_intersect_agg ( value + anymultirange ) + anymultirange + + + Computes the intersection of the non-null input values. + + No + + + + + + string_agg + + string_agg ( value + text, delimiter text ) + text + + + string_agg ( value + bytea, delimiter bytea + ORDER BY input_sort_columns ) + bytea + + + Concatenates the non-null input values into a string. Each value + after the first is preceded by the + corresponding delimiter (if it's not null). + + Yes + + + + + + sum + + sum ( smallint ) + bigint + + + sum ( integer ) + bigint + + + sum ( bigint ) + numeric + + + sum ( numeric ) + numeric + + + sum ( real ) + real + + + sum ( double precision ) + double precision + + + sum ( interval ) + interval + + + sum ( money ) + money + + + Computes the sum of the non-null input values. + + Yes + + + + + + xmlagg + + xmlagg ( xml ORDER BY input_sort_columns ) + xml + + + Concatenates the non-null XML input values (see + ). + + No + + + +
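+
+   For instance, given a hypothetical table employees(name, dept),
+   ordered and unordered aggregates can be combined in a single query:
+
+-- list each department's members in alphabetical order
+SELECT dept,
+       count(*) AS headcount,
+       string_agg(name, ', ' ORDER BY name) AS members
+FROM employees
+GROUP BY dept;
+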
+
+ 
+   It should be noted that except for count,
+   these functions return a null value when no rows are selected. In
+   particular, sum of no rows returns null, not
+   zero as one might expect, and array_agg
+   returns null rather than an empty array when there are no input
+   rows. The coalesce function can be used to
+   substitute zero or an empty array for null when necessary.
+ 
+
+ 
+   The aggregate functions array_agg,
+   json_agg, jsonb_agg,
+   json_agg_strict, jsonb_agg_strict,
+   json_object_agg, jsonb_object_agg,
+   json_object_agg_strict, jsonb_object_agg_strict,
+   json_object_agg_unique, jsonb_object_agg_unique,
+   json_object_agg_unique_strict,
+   jsonb_object_agg_unique_strict,
+   string_agg,
+   and xmlagg, as well as similar user-defined
+   aggregate functions, produce meaningfully different result values
+   depending on the order of the input values. This ordering is
+   unspecified by default, but can be controlled by writing an
+   ORDER BY clause within the aggregate call, as shown in
+   .
+   Alternatively, supplying the input values from a sorted subquery
+   will usually work. For example:
+
+SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab;
+
+ 
+ 
+   Beware that this approach can fail if the outer query level contains
+   additional processing, such as a join, because that might cause the
+   subquery's output to be reordered before the aggregate is computed.
+ 
+
+ 
+   ANY
+ 
+ 
+   SOME
+ 
+ 
+   The boolean aggregates bool_and and
+   bool_or correspond to the standard SQL aggregates
+   every and any or
+   some.
+   PostgreSQL
+   supports every, but not any
+   or some, because there is an ambiguity built into
+   the standard syntax:
+
+SELECT b1 = ANY((SELECT b2 FROM t2 ...)) FROM t1 ...;
+
+   Here ANY can be considered either as introducing
+   a subquery, or as being an aggregate function, if the subquery
+   returns one row with a Boolean value.
+   Thus the standard name cannot be given to these aggregates.
+ 
+
+ 
+ 
+   Users accustomed to working with other SQL database management
+   systems might be disappointed by the performance of the
+   count aggregate when it is applied to the
+   entire table. A query like:
+
+SELECT count(*) FROM sometable;
+
+   will require effort proportional to the size of the table:
+   PostgreSQL will need to scan either the
+   entire table or the entirety of an index that includes all rows in
+   the table.
+ 
+
+ 
+   shows
+   aggregate functions typically used in statistical analysis.
+   (These are separated out merely to avoid cluttering the listing
+   of more-commonly-used aggregates.) Functions shown as
+   accepting numeric_type are available for all
+   the types smallint, integer,
+   bigint, numeric, real,
+   and double precision.
+   Where the description mentions
+   N, it means the
+   number of input rows for which all the input expressions are non-null.
+   In all cases, null is returned if the computation is meaningless,
+   for example when N is zero.
+ 
+
+ 
+   statistics
+ 
+ 
+   linear regression
+ 
+
+ 
+   Aggregate Functions for Statistics
+ 
+ 
+ 
+ 
+   Function
+ 
+ 
+   Description
+ 
+   Partial Mode
+ 
+ 
+ 
+ 
+ 
+   correlation
+ 
+ 
+   corr
+ 
+   corr ( Y double precision, X double precision )
+   double precision
+ 
+ 
+   Computes the correlation coefficient.
+ 
+   Yes
+ 
+ 
+ 
+ 
+   covariance
+   population
+ 
+ 
+   covar_pop
+ 
+   covar_pop ( Y double precision, X double precision )
+   double precision
+ 
+ 
+   Computes the population covariance.
+ 
+   Yes
+ 
+ 
+ 
+ 
+   covariance
+   sample
+ 
+ 
+   covar_samp
+ 
+   covar_samp ( Y double precision, X double precision )
+   double precision
+ 
+ 
+   Computes the sample covariance.
+ + Yes + + + + + + regr_avgx + + regr_avgx ( Y double precision, X double precision ) + double precision + + + Computes the average of the independent variable, + sum(X)/N. + + Yes + + + + + + regr_avgy + + regr_avgy ( Y double precision, X double precision ) + double precision + + + Computes the average of the dependent variable, + sum(Y)/N. + + Yes + + + + + + regr_count + + regr_count ( Y double precision, X double precision ) + bigint + + + Computes the number of rows in which both inputs are non-null. + + Yes + + + + + + regression intercept + + + regr_intercept + + regr_intercept ( Y double precision, X double precision ) + double precision + + + Computes the y-intercept of the least-squares-fit linear equation + determined by the + (X, Y) pairs. + + Yes + + + + + + regr_r2 + + regr_r2 ( Y double precision, X double precision ) + double precision + + + Computes the square of the correlation coefficient. + + Yes + + + + + + regression slope + + + regr_slope + + regr_slope ( Y double precision, X double precision ) + double precision + + + Computes the slope of the least-squares-fit linear equation determined + by the (X, Y) + pairs. + + Yes + + + + + + regr_sxx + + regr_sxx ( Y double precision, X double precision ) + double precision + + + Computes the sum of squares of the independent + variable, + sum(X^2) - sum(X)^2/N. + + Yes + + + + + + regr_sxy + + regr_sxy ( Y double precision, X double precision ) + double precision + + + Computes the sum of products of independent times + dependent variables, + sum(X*Y) - sum(X) * sum(Y)/N. + + Yes + + + + + + regr_syy + + regr_syy ( Y double precision, X double precision ) + double precision + + + Computes the sum of squares of the dependent + variable, + sum(Y^2) - sum(Y)^2/N. + + Yes + + + + + + standard deviation + + + stddev + + stddev ( numeric_type ) + double precision + for real or double precision, + otherwise numeric + + + This is a historical alias for stddev_samp. + + Yes + + + + + + standard deviation + population + + + stddev_pop + + stddev_pop ( numeric_type ) + double precision + for real or double precision, + otherwise numeric + + + Computes the population standard deviation of the input values. + + Yes + + + + + + standard deviation + sample + + + stddev_samp + + stddev_samp ( numeric_type ) + double precision + for real or double precision, + otherwise numeric + + + Computes the sample standard deviation of the input values. + + Yes + + + + + + variance + + variance ( numeric_type ) + double precision + for real or double precision, + otherwise numeric + + + This is a historical alias for var_samp. + + Yes + + + + + + variance + population + + + var_pop + + var_pop ( numeric_type ) + double precision + for real or double precision, + otherwise numeric + + + Computes the population variance of the input values (square of the + population standard deviation). + + Yes + + + + + + variance + sample + + + var_samp + + var_samp ( numeric_type ) + double precision + for real or double precision, + otherwise numeric + + + Computes the sample variance of the input values (square of the sample + standard deviation). + + Yes + + + +
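+
+   As a sketch, the least-squares line through the points of a
+   hypothetical table measurements(x, y) (both double precision) can
+   be obtained in one pass:
+
+SELECT regr_slope(y, x)     AS slope,
+       regr_intercept(y, x) AS intercept,
+       regr_r2(y, x)        AS r_squared   -- goodness of fit
+FROM measurements;
+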
+ + + shows some + aggregate functions that use the ordered-set aggregate + syntax. These functions are sometimes referred to as inverse + distribution functions. Their aggregated input is introduced by + ORDER BY, and they may also take a direct + argument that is not aggregated, but is computed only once. + All these functions ignore null values in their aggregated input. + For those that take a fraction parameter, the + fraction value must be between 0 and 1; an error is thrown if not. + However, a null fraction value simply produces a + null result. + + + + ordered-set aggregate + built-in + + + inverse distribution + + + + Ordered-Set Aggregate Functions + + + + + + + Function + + + Description + + Partial Mode + + + + + + + + mode + statistical + + mode () WITHIN GROUP ( ORDER BY anyelement ) + anyelement + + + Computes the mode, the most frequent + value of the aggregated argument (arbitrarily choosing the first one + if there are multiple equally-frequent values). The aggregated + argument must be of a sortable type. + + No + + + + + + percentile + continuous + + percentile_cont ( fraction double precision ) WITHIN GROUP ( ORDER BY double precision ) + double precision + + + percentile_cont ( fraction double precision ) WITHIN GROUP ( ORDER BY interval ) + interval + + + Computes the continuous percentile, a value + corresponding to the specified fraction + within the ordered set of aggregated argument values. This will + interpolate between adjacent input items if needed. + + No + + + + + percentile_cont ( fractions double precision[] ) WITHIN GROUP ( ORDER BY double precision ) + double precision[] + + + percentile_cont ( fractions double precision[] ) WITHIN GROUP ( ORDER BY interval ) + interval[] + + + Computes multiple continuous percentiles. The result is an array of + the same dimensions as the fractions + parameter, with each non-null element replaced by the (possibly + interpolated) value corresponding to that percentile. + + No + + + + + + percentile + discrete + + percentile_disc ( fraction double precision ) WITHIN GROUP ( ORDER BY anyelement ) + anyelement + + + Computes the discrete percentile, the first + value within the ordered set of aggregated argument values whose + position in the ordering equals or exceeds the + specified fraction. The aggregated + argument must be of a sortable type. + + No + + + + + percentile_disc ( fractions double precision[] ) WITHIN GROUP ( ORDER BY anyelement ) + anyarray + + + Computes multiple discrete percentiles. The result is an array of the + same dimensions as the fractions parameter, + with each non-null element replaced by the input value corresponding + to that percentile. + The aggregated argument must be of a sortable type. + + No + + + +
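+
+   For example, the median and 90th percentile of a hypothetical
+   sales(amount) table can be computed in one pass; the cast is shown
+   because percentile_cont operates on double precision or interval:
+
+SELECT percentile_cont(0.5) WITHIN GROUP (ORDER BY amount::float8) AS median,
+       percentile_cont(0.9) WITHIN GROUP (ORDER BY amount::float8) AS p90
+FROM sales;
+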
+ + + hypothetical-set aggregate + built-in + + + + Each of the hypothetical-set aggregates listed in + is associated with a + window function of the same name defined in + . In each case, the aggregate's result + is the value that the associated window function would have + returned for the hypothetical row constructed from + args, if such a row had been added to the sorted + group of rows represented by the sorted_args. + For each of these functions, the list of direct arguments + given in args must match the number and types of + the aggregated arguments given in sorted_args. + Unlike most built-in aggregates, these aggregates are not strict, that is + they do not drop input rows containing nulls. Null values sort according + to the rule specified in the ORDER BY clause. + + + + Hypothetical-Set Aggregate Functions + + + + + + + Function + + + Description + + Partial Mode + + + + + + + + rank + hypothetical + + rank ( args ) WITHIN GROUP ( ORDER BY sorted_args ) + bigint + + + Computes the rank of the hypothetical row, with gaps; that is, the row + number of the first row in its peer group. + + No + + + + + + dense_rank + hypothetical + + dense_rank ( args ) WITHIN GROUP ( ORDER BY sorted_args ) + bigint + + + Computes the rank of the hypothetical row, without gaps; this function + effectively counts peer groups. + + No + + + + + + percent_rank + hypothetical + + percent_rank ( args ) WITHIN GROUP ( ORDER BY sorted_args ) + double precision + + + Computes the relative rank of the hypothetical row, that is + (rank - 1) / (total rows - 1). + The value thus ranges from 0 to 1 inclusive. + + No + + + + + + cume_dist + hypothetical + + cume_dist ( args ) WITHIN GROUP ( ORDER BY sorted_args ) + double precision + + + Computes the cumulative distribution, that is (number of rows + preceding or peers with hypothetical row) / (total rows). The value + thus ranges from 1/N to 1. + + No + + + +
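+
+   For example, to ask what rank a candidate salary of 68000 would
+   receive among the rows of a hypothetical employees table:
+
+-- 1 means the hypothetical row would sort first
+SELECT rank(68000) WITHIN GROUP (ORDER BY salary DESC) AS would_rank
+FROM employees;
+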
+ + + Grouping Operations + + + + + Function + + + Description + + + + + + + + + GROUPING + + GROUPING ( group_by_expression(s) ) + integer + + + Returns a bit mask indicating which GROUP BY + expressions are not included in the current grouping set. + Bits are assigned with the rightmost argument corresponding to the + least-significant bit; each bit is 0 if the corresponding expression + is included in the grouping criteria of the grouping set generating + the current result row, and 1 if it is not included. + + + + +
+ + + The grouping operations shown in + are used in conjunction with + grouping sets (see ) to distinguish + result rows. The arguments to the GROUPING function + are not actually evaluated, but they must exactly match expressions given + in the GROUP BY clause of the associated query level. + For example: + +=> SELECT * FROM items_sold; + make | model | sales +-------+-------+------- + Foo | GT | 10 + Foo | Tour | 20 + Bar | City | 15 + Bar | Sport | 5 +(4 rows) + +=> SELECT make, model, GROUPING(make,model), sum(sales) FROM items_sold GROUP BY ROLLUP(make,model); + make | model | grouping | sum +-------+-------+----------+----- + Foo | GT | 0 | 10 + Foo | Tour | 0 | 20 + Bar | City | 0 | 15 + Bar | Sport | 0 | 5 + Foo | | 1 | 30 + Bar | | 1 | 20 + | | 3 | 50 +(7 rows) + + Here, the grouping value 0 in the + first four rows shows that those have been grouped normally, over both the + grouping columns. The value 1 indicates + that model was not grouped by in the next-to-last two + rows, and the value 3 indicates that + neither make nor model was grouped + by in the last row (which therefore is an aggregate over all the input + rows). + + +
diff --git a/doc/src/sgml/func/func-array.sgml b/doc/src/sgml/func/func-array.sgml new file mode 100644 index 0000000000000..97e4865a5f7d2 --- /dev/null +++ b/doc/src/sgml/func/func-array.sgml @@ -0,0 +1,646 @@ + + Array Functions and Operators + + + shows the specialized operators + available for array types. + In addition to those, the usual comparison operators shown in are available for + arrays. The comparison operators compare the array contents + element-by-element, using the default B-tree comparison function for + the element data type, and sort based on the first difference. + In multidimensional arrays the elements are visited in row-major order + (last subscript varies most rapidly). + If the contents of two arrays are equal but the dimensionality is + different, the first difference in the dimensionality information + determines the sort order. + + + + Array Operators + + + + + Operator + + + Description + + + Example(s) + + + + + + + + anyarray @> anyarray + boolean + + + Does the first array contain the second, that is, does each element + appearing in the second array equal some element of the first array? + (Duplicates are not treated specially, + thus ARRAY[1] and ARRAY[1,1] are + each considered to contain the other.) + + + ARRAY[1,4,3] @> ARRAY[3,1,3] + t + + + + + + anyarray <@ anyarray + boolean + + + Is the first array contained by the second? + + + ARRAY[2,2,7] <@ ARRAY[1,7,4,2,6] + t + + + + + + anyarray && anyarray + boolean + + + Do the arrays overlap, that is, have any elements in common? + + + ARRAY[1,4,3] && ARRAY[2,1] + t + + + + + + anycompatiblearray || anycompatiblearray + anycompatiblearray + + + Concatenates the two arrays. Concatenating a null or empty array is a + no-op; otherwise the arrays must have the same number of dimensions + (as illustrated by the first example) or differ in number of + dimensions by one (as illustrated by the second). + If the arrays are not of identical element types, they will be coerced + to a common type (see ). + + + ARRAY[1,2,3] || ARRAY[4,5,6,7] + {1,2,3,4,5,6,7} + + + ARRAY[1,2,3] || ARRAY[[4,5,6],[7,8,9.9]] + {{1,2,3},{4,5,6},{7,8,9.9}} + + + + + + anycompatible || anycompatiblearray + anycompatiblearray + + + Concatenates an element onto the front of an array (which must be + empty or one-dimensional). + + + 3 || ARRAY[4,5,6] + {3,4,5,6} + + + + + + anycompatiblearray || anycompatible + anycompatiblearray + + + Concatenates an element onto the end of an array (which must be + empty or one-dimensional). + + + ARRAY[4,5,6] || 7 + {4,5,6,7} + + + + +
+ + + See for more details about array operator + behavior. See for more details about + which operators support indexed operations. + + + + shows the functions + available for use with array types. See + for more information and examples of the use of these functions. + + + + Array Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + array_append + + array_append ( anycompatiblearray, anycompatible ) + anycompatiblearray + + + Appends an element to the end of an array (same as + the anycompatiblearray || anycompatible + operator). + + + array_append(ARRAY[1,2], 3) + {1,2,3} + + + + + + + array_cat + + array_cat ( anycompatiblearray, anycompatiblearray ) + anycompatiblearray + + + Concatenates two arrays (same as + the anycompatiblearray || anycompatiblearray + operator). + + + array_cat(ARRAY[1,2,3], ARRAY[4,5]) + {1,2,3,4,5} + + + + + + + array_dims + + array_dims ( anyarray ) + text + + + Returns a text representation of the array's dimensions. + + + array_dims(ARRAY[[1,2,3], [4,5,6]]) + [1:2][1:3] + + + + + + + array_fill + + array_fill ( anyelement, integer[] + , integer[] ) + anyarray + + + Returns an array filled with copies of the given value, having + dimensions of the lengths specified by the second argument. + The optional third argument supplies lower-bound values for each + dimension (which default to all 1). + + + array_fill(11, ARRAY[2,3]) + {{11,11,11},{11,11,11}} + + + array_fill(7, ARRAY[3], ARRAY[2]) + [2:4]={7,7,7} + + + + + + + array_length + + array_length ( anyarray, integer ) + integer + + + Returns the length of the requested array dimension. + (Produces NULL instead of 0 for empty or missing array dimensions.) + + + array_length(array[1,2,3], 1) + 3 + + + array_length(array[]::int[], 1) + NULL + + + array_length(array['text'], 2) + NULL + + + + + + + array_lower + + array_lower ( anyarray, integer ) + integer + + + Returns the lower bound of the requested array dimension. + + + array_lower('[0:2]={1,2,3}'::integer[], 1) + 0 + + + + + + + array_ndims + + array_ndims ( anyarray ) + integer + + + Returns the number of dimensions of the array. + + + array_ndims(ARRAY[[1,2,3], [4,5,6]]) + 2 + + + + + + + array_position + + array_position ( anycompatiblearray, anycompatible , integer ) + integer + + + Returns the subscript of the first occurrence of the second argument + in the array, or NULL if it's not present. + If the third argument is given, the search begins at that subscript. + The array must be one-dimensional. + Comparisons are done using IS NOT DISTINCT FROM + semantics, so it is possible to search for NULL. + + + array_position(ARRAY['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat'], 'mon') + 2 + + + + + + + array_positions + + array_positions ( anycompatiblearray, anycompatible ) + integer[] + + + Returns an array of the subscripts of all occurrences of the second + argument in the array given as first argument. + The array must be one-dimensional. + Comparisons are done using IS NOT DISTINCT FROM + semantics, so it is possible to search for NULL. + NULL is returned only if the array + is NULL; if the value is not found in the array, an + empty array is returned. + + + array_positions(ARRAY['A','A','B','A'], 'A') + {1,2,4} + + + + + + + array_prepend + + array_prepend ( anycompatible, anycompatiblearray ) + anycompatiblearray + + + Prepends an element to the beginning of an array (same as + the anycompatible || anycompatiblearray + operator). 
+ + + array_prepend(1, ARRAY[2,3]) + {1,2,3} + + + + + + + array_remove + + array_remove ( anycompatiblearray, anycompatible ) + anycompatiblearray + + + Removes all elements equal to the given value from the array. + The array must be one-dimensional. + Comparisons are done using IS NOT DISTINCT FROM + semantics, so it is possible to remove NULLs. + + + array_remove(ARRAY[1,2,3,2], 2) + {1,3} + + + + + + + array_replace + + array_replace ( anycompatiblearray, anycompatible, anycompatible ) + anycompatiblearray + + + Replaces each array element equal to the second argument with the + third argument. + + + array_replace(ARRAY[1,2,5,4], 5, 3) + {1,2,3,4} + + + + + + + array_reverse + + array_reverse ( anyarray ) + anyarray + + + Reverses the first dimension of the array. + + + array_reverse(ARRAY[[1,2],[3,4],[5,6]]) + {{5,6},{3,4},{1,2}} + + + + + + + array_sample + + array_sample ( array anyarray, n integer ) + anyarray + + + Returns an array of n items randomly selected + from array. n may not + exceed the length of array's first dimension. + If array is multi-dimensional, + an item is a slice having a given first subscript. + + + array_sample(ARRAY[1,2,3,4,5,6], 3) + {2,6,1} + + + array_sample(ARRAY[[1,2],[3,4],[5,6]], 2) + {{5,6},{1,2}} + + + + + + + array_shuffle + + array_shuffle ( anyarray ) + anyarray + + + Randomly shuffles the first dimension of the array. + + + array_shuffle(ARRAY[[1,2],[3,4],[5,6]]) + {{5,6},{1,2},{3,4}} + + + + + + + array_sort + + array_sort ( + array anyarray + , descending boolean + , nulls_first boolean + ) + anyarray + + + Sorts the first dimension of the array. + The sort order is determined by the default sort ordering of the + array's element type; however, if the element type is collatable, + the collation to use can be specified by adding + a COLLATE clause to + the array argument. + + + If descending is true then sort in + descending order, otherwise ascending order. If omitted, the + default is ascending order. + If nulls_first is true then nulls appear + before non-null values, otherwise nulls appear after non-null + values. + If omitted, nulls_first is taken to have + the same value as descending. + + + array_sort(ARRAY[[2,4],[2,1],[6,5]]) + {{2,1},{2,4},{6,5}} + + + + + + + array_to_string + + array_to_string ( array anyarray, delimiter text , null_string text ) + text + + + Converts each array element to its text representation, and + concatenates those separated by + the delimiter string. + If null_string is given and is + not NULL, then NULL array + entries are represented by that string; otherwise, they are omitted. + See also string_to_array. + + + array_to_string(ARRAY[1, 2, 3, NULL, 5], ',', '*') + 1,2,3,*,5 + + + + + + + array_upper + + array_upper ( anyarray, integer ) + integer + + + Returns the upper bound of the requested array dimension. + + + array_upper(ARRAY[1,8,3,7], 1) + 4 + + + + + + + cardinality + + cardinality ( anyarray ) + integer + + + Returns the total number of elements in the array, or 0 if the array + is empty. + + + cardinality(ARRAY[[1,2],[3,4]]) + 4 + + + + + + + trim_array + + trim_array ( array anyarray, n integer ) + anyarray + + + Trims an array by removing the last n elements. + If the array is multidimensional, only the first dimension is trimmed. + + + trim_array(ARRAY[1,2,3,4,5,6], 2) + {1,2,3,4} + + + + + + + unnest + + unnest ( anyarray ) + setof anyelement + + + Expands an array into a set of rows. + The array's elements are read out in storage order. 
+ + + unnest(ARRAY[1,2]) + + + 1 + 2 + + + + unnest(ARRAY[['foo','bar'],['baz','quux']]) + + + foo + bar + baz + quux + + + + + + + unnest ( anyarray, anyarray , ... ) + setof anyelement, anyelement [, ... ] + + + Expands multiple arrays (possibly of different data types) into a set of + rows. If the arrays are not all the same length then the shorter ones + are padded with NULLs. This form is only allowed + in a query's FROM clause; see . + + + select * from unnest(ARRAY[1,2], ARRAY['foo','bar','baz']) as x(a,b) + + + a | b +---+----- + 1 | foo + 2 | bar + | baz + + + + + +
+
+ 
+   See also the aggregate
+   function array_agg for use with arrays.
+ 
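+
+   When element positions matter, unnest can also be combined with the
+   standard WITH ORDINALITY clause (described with table functions in
+   the queries chapter) to number the returned rows:
+
+SELECT * FROM unnest(ARRAY['a','b','c']) WITH ORDINALITY AS t(elem, idx);
+--  elem | idx
+-- ------+-----
+--  a    |   1
+--  b    |   2
+--  c    |   3
+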
diff --git a/doc/src/sgml/func/func-binarystring.sgml b/doc/src/sgml/func/func-binarystring.sgml new file mode 100644 index 0000000000000..78814ee0685eb --- /dev/null +++ b/doc/src/sgml/func/func-binarystring.sgml @@ -0,0 +1,854 @@ + + Binary String Functions and Operators + + + binary data + functions + + + + This section describes functions and operators for examining and + manipulating binary strings, that is values of type bytea. + Many of these are equivalent, in purpose and syntax, to the + text-string functions described in the previous section. + + + + SQL defines some string functions that use + key words, rather than commas, to separate + arguments. Details are in + . + PostgreSQL also provides versions of these functions + that use the regular function invocation syntax + (see ). + + + + <acronym>SQL</acronym> Binary String Functions and Operators + + + + + Function/Operator + + + Description + + + Example(s) + + + + + + + + + binary string + concatenation + + bytea || bytea + bytea + + + Concatenates the two binary strings. + + + '\x123456'::bytea || '\x789a00bcde'::bytea + \x123456789a00bcde + + + + + + + bit_length + + bit_length ( bytea ) + integer + + + Returns number of bits in the binary string (8 + times the octet_length). + + + bit_length('\x123456'::bytea) + 24 + + + + + + + btrim + + btrim ( bytes bytea, + bytesremoved bytea ) + bytea + + + Removes the longest string containing only bytes appearing in + bytesremoved from the start and end of + bytes. + + + btrim('\x1234567890'::bytea, '\x9012'::bytea) + \x345678 + + + + + + + ltrim + + ltrim ( bytes bytea, + bytesremoved bytea ) + bytea + + + Removes the longest string containing only bytes appearing in + bytesremoved from the start of + bytes. + + + ltrim('\x1234567890'::bytea, '\x9012'::bytea) + \x34567890 + + + + + + + octet_length + + octet_length ( bytea ) + integer + + + Returns number of bytes in the binary string. + + + octet_length('\x123456'::bytea) + 3 + + + + + + + overlay + + overlay ( bytes bytea PLACING newsubstring bytea FROM start integer FOR count integer ) + bytea + + + Replaces the substring of bytes that starts at + the start'th byte and extends + for count bytes + with newsubstring. + If count is omitted, it defaults to the length + of newsubstring. + + + overlay('\x1234567890'::bytea placing '\002\003'::bytea from 2 for 3) + \x12020390 + + + + + + + position + + position ( substring bytea IN bytes bytea ) + integer + + + Returns first starting index of the specified + substring within + bytes, or zero if it's not present. + + + position('\x5678'::bytea in '\x1234567890'::bytea) + 3 + + + + + + + rtrim + + rtrim ( bytes bytea, + bytesremoved bytea ) + bytea + + + Removes the longest string containing only bytes appearing in + bytesremoved from the end of + bytes. + + + rtrim('\x1234567890'::bytea, '\x9012'::bytea) + \x12345678 + + + + + + + substring + + substring ( bytes bytea FROM start integer FOR count integer ) + bytea + + + Extracts the substring of bytes starting at + the start'th byte if that is specified, + and stopping after count bytes if that is + specified. Provide at least one of start + and count. + + + substring('\x1234567890'::bytea from 3 for 2) + \x5678 + + + + + + + trim + + trim ( LEADING | TRAILING | BOTH + bytesremoved bytea FROM + bytes bytea ) + bytea + + + Removes the longest string containing only bytes appearing in + bytesremoved from the start, + end, or both ends (BOTH is the default) + of bytes. 
+ + + trim('\x9012'::bytea from '\x1234567890'::bytea) + \x345678 + + + + + + trim ( LEADING | TRAILING | BOTH FROM + bytes bytea, + bytesremoved bytea ) + bytea + + + This is a non-standard syntax for trim(). + + + trim(both from '\x1234567890'::bytea, '\x9012'::bytea) + \x345678 + + + + +
+ + + Additional binary string manipulation functions are available and + are listed in . Some + of them are used internally to implement the + SQL-standard string functions listed in . + + + + Other Binary String Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + bit_count + + + popcount + bit_count + + bit_count ( bytes bytea ) + bigint + + + Returns the number of bits set in the binary string (also known as + popcount). + + + bit_count('\x1234567890'::bytea) + 15 + + + + + + + crc32 + + crc32 ( bytea ) + bigint + + + Computes the CRC-32 value of the binary string. + + + crc32('abc'::bytea) + 891568578 + + + + + + + crc32c + + crc32c ( bytea ) + bigint + + + Computes the CRC-32C value of the binary string. + + + crc32c('abc'::bytea) + 910901175 + + + + + + + get_bit + + get_bit ( bytes bytea, + n bigint ) + integer + + + Extracts n'th bit + from binary string. + + + get_bit('\x1234567890'::bytea, 30) + 1 + + + + + + + get_byte + + get_byte ( bytes bytea, + n integer ) + integer + + + Extracts n'th byte + from binary string. + + + get_byte('\x1234567890'::bytea, 4) + 144 + + + + + + + length + + + binary string + length + + + length + of a binary string + binary strings, length + + length ( bytea ) + integer + + + Returns the number of bytes in the binary string. + + + length('\x1234567890'::bytea) + 5 + + + + + + length ( bytes bytea, + encoding name ) + integer + + + Returns the number of characters in the binary string, assuming + that it is text in the given encoding. + + + length('jose'::bytea, 'UTF8') + 4 + + + + + + + md5 + + md5 ( bytea ) + text + + + Computes the MD5 hash of + the binary string, with the result written in hexadecimal. + + + md5('Th\000omas'::bytea) + 8ab2d3c9689aaf18&zwsp;b4958c334c82d8b1 + + + + + + + reverse + + reverse ( bytea ) + bytea + + + Reverses the order of the bytes in the binary string. + + + reverse('\xabcd'::bytea) + \xcdab + + + + + + + set_bit + + set_bit ( bytes bytea, + n bigint, + newvalue integer ) + bytea + + + Sets n'th bit in + binary string to newvalue. + + + set_bit('\x1234567890'::bytea, 30, 0) + \x1234563890 + + + + + + + set_byte + + set_byte ( bytes bytea, + n integer, + newvalue integer ) + bytea + + + Sets n'th byte in + binary string to newvalue. + + + set_byte('\x1234567890'::bytea, 4, 64) + \x1234567840 + + + + + + + sha224 + + sha224 ( bytea ) + bytea + + + Computes the SHA-224 hash + of the binary string. + + + sha224('abc'::bytea) + \x23097d223405d8228642a477bda2&zwsp;55b32aadbce4bda0b3f7e36c9da7 + + + + + + + sha256 + + sha256 ( bytea ) + bytea + + + Computes the SHA-256 hash + of the binary string. + + + sha256('abc'::bytea) + \xba7816bf8f01cfea414140de5dae2223&zwsp;b00361a396177a9cb410ff61f20015ad + + + + + + + sha384 + + sha384 ( bytea ) + bytea + + + Computes the SHA-384 hash + of the binary string. + + + sha384('abc'::bytea) + \xcb00753f45a35e8bb5a03d699ac65007&zwsp;272c32ab0eded1631a8b605a43ff5bed&zwsp;8086072ba1e7cc2358baeca134c825a7 + + + + + + + sha512 + + sha512 ( bytea ) + bytea + + + Computes the SHA-512 hash + of the binary string. + + + sha512('abc'::bytea) + \xddaf35a193617abacc417349ae204131&zwsp;12e6fa4e89a97ea20a9eeee64b55d39a&zwsp;2192992a274fc1a836ba3c23a3feebbd&zwsp;454d4423643ce80e2a9ac94fa54ca49f + + + + + + + substr + + substr ( bytes bytea, start integer , count integer ) + bytea + + + Extracts the substring of bytes starting at + the start'th byte, + and extending for count bytes if that is + specified. (Same + as substring(bytes + from start + for count).) 
+ + + substr('\x1234567890'::bytea, 3, 2) + \x5678 + + + + +
+ + + Functions get_byte and set_byte + number the first byte of a binary string as byte 0. + Functions get_bit and set_bit + number bits from the right within each byte; for example bit 0 is the least + significant bit of the first byte, and bit 15 is the most significant bit + of the second byte. + + + + For historical reasons, the function md5 + returns a hex-encoded value of type text whereas the SHA-2 + functions return type bytea. Use the functions + encode + and decode to + convert between the two. For example write encode(sha256('abc'), + 'hex') to get a hex-encoded text representation, + or decode(md5('abc'), 'hex') to get + a bytea value. + + + + + character string + converting to binary string + + + binary string + converting to character string + + Functions for converting strings between different character sets + (encodings), and for representing arbitrary binary data in textual + form, are shown in + . For these + functions, an argument or result of type text is expressed + in the database's default encoding, while arguments or results of + type bytea are in an encoding named by another argument. + + + + Text/Binary String Conversion Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + convert + + convert ( bytes bytea, + src_encoding name, + dest_encoding name ) + bytea + + + Converts a binary string representing text in + encoding src_encoding + to a binary string in encoding dest_encoding + (see for + available conversions). + + + convert('text_in_utf8', 'UTF8', 'LATIN1') + \x746578745f696e5f75746638 + + + + + + + convert_from + + convert_from ( bytes bytea, + src_encoding name ) + text + + + Converts a binary string representing text in + encoding src_encoding + to text in the database encoding + (see for + available conversions). + + + convert_from('text_in_utf8', 'UTF8') + text_in_utf8 + + + + + + + convert_to + + convert_to ( string text, + dest_encoding name ) + bytea + + + Converts a text string (in the database encoding) to a + binary string encoded in encoding dest_encoding + (see for + available conversions). + + + convert_to('some_text', 'UTF8') + \x736f6d655f74657874 + + + + + + + encode + + encode ( bytes bytea, + format text ) + text + + + Encodes binary data into a textual representation; supported + format values are: + base64, + escape, + hex. + + + encode('123\000\001', 'base64') + MTIzAAE= + + + + + + + decode + + decode ( string text, + format text ) + bytea + + + Decodes binary data from a textual representation; supported + format values are the same as + for encode. + + + decode('MTIzAAE=', 'base64') + \x3132330001 + + + + +
+ + + The encode and decode + functions support the following textual formats: + + + + base64 + + base64 format + + + + The base64 format is that + of RFC + 2045 Section 6.8. As per the RFC, encoded lines are + broken at 76 characters. However instead of the MIME CRLF + end-of-line marker, only a newline is used for end-of-line. + The decode function ignores carriage-return, + newline, space, and tab characters. Otherwise, an error is + raised when decode is supplied invalid + base64 data — including when trailing padding is incorrect. + + + + + + escape + + escape format + + + + The escape format converts zero bytes and + bytes with the high bit set into octal escape sequences + (\nnn), and it doubles + backslashes. Other byte values are represented literally. + The decode function will raise an error if a + backslash is not followed by either a second backslash or three + octal digits; it accepts other byte values unchanged. + + + + + + hex + + hex format + + + + The hex format represents each 4 bits of + data as one hexadecimal digit, 0 + through f, writing the higher-order digit of + each byte first. The encode function outputs + the a-f hex digits in lower + case. Because the smallest unit of data is 8 bits, there are + always an even number of characters returned + by encode. + The decode function + accepts the a-f characters in + either upper or lower case. An error is raised + when decode is given invalid hex data + — including when given an odd number of characters. + + + + + + + + In addition, it is possible to cast integral values to and from type + bytea. Casting an integer to bytea produces + 2, 4, or 8 bytes, depending on the width of the integer type. The result + is the two's complement representation of the integer, with the most + significant byte first. Some examples: + +1234::smallint::bytea \x04d2 +cast(1234 as bytea) \x000004d2 +cast(-1234 as bytea) \xfffffb2e +'\x8000'::bytea::smallint -32768 +'\x8000'::bytea::integer 32768 + + Casting a bytea to an integer will raise an error if the + length of the bytea exceeds the width of the integer type. + + + + See also the aggregate function string_agg in + and the large object functions + in . + +
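+
+   Putting the conversion and encoding functions together, a text value
+   can be round-tripped through a hex representation (the sample string
+   is arbitrary; it assumes the database encoding can represent it):
+
+SELECT encode(convert_to('café', 'UTF8'), 'hex');         -- 636166c3a9
+SELECT convert_from(decode('636166c3a9', 'hex'), 'UTF8'); -- café
+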
diff --git a/doc/src/sgml/func/func-bitstring.sgml b/doc/src/sgml/func/func-bitstring.sgml new file mode 100644 index 0000000000000..f03dd63afcc6a --- /dev/null +++ b/doc/src/sgml/func/func-bitstring.sgml @@ -0,0 +1,358 @@ + + Bit String Functions and Operators + + + bit strings + functions + + + + This section describes functions and operators for examining and + manipulating bit strings, that is values of the types + bit and bit varying. (While only + type bit is mentioned in these tables, values of + type bit varying can be used interchangeably.) + Bit strings support the usual comparison operators shown in + , as well as the + operators shown in . + + + + Bit String Operators + + + + + Operator + + + Description + + + Example(s) + + + + + + + + bit || bit + bit + + + Concatenation + + + B'10001' || B'011' + 10001011 + + + + + + bit & bit + bit + + + Bitwise AND (inputs must be of equal length) + + + B'10001' & B'01101' + 00001 + + + + + + bit | bit + bit + + + Bitwise OR (inputs must be of equal length) + + + B'10001' | B'01101' + 11101 + + + + + + bit # bit + bit + + + Bitwise exclusive OR (inputs must be of equal length) + + + B'10001' # B'01101' + 11100 + + + + + + ~ bit + bit + + + Bitwise NOT + + + ~ B'10001' + 01110 + + + + + + bit << integer + bit + + + Bitwise shift left + (string length is preserved) + + + B'10001' << 3 + 01000 + + + + + + bit >> integer + bit + + + Bitwise shift right + (string length is preserved) + + + B'10001' >> 2 + 00100 + + + + +
+ + + Some of the functions available for binary strings are also available + for bit strings, as shown in . + + + + Bit String Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + bit_count + + bit_count ( bit ) + bigint + + + Returns the number of bits set in the bit string (also known as + popcount). + + + bit_count(B'10111') + 4 + + + + + + + bit_length + + bit_length ( bit ) + integer + + + Returns number of bits in the bit string. + + + bit_length(B'10111') + 5 + + + + + + + length + + + bit string + length + + length ( bit ) + integer + + + Returns number of bits in the bit string. + + + length(B'10111') + 5 + + + + + + + octet_length + + octet_length ( bit ) + integer + + + Returns number of bytes in the bit string. + + + octet_length(B'1011111011') + 2 + + + + + + + overlay + + overlay ( bits bit PLACING newsubstring bit FROM start integer FOR count integer ) + bit + + + Replaces the substring of bits that starts at + the start'th bit and extends + for count bits + with newsubstring. + If count is omitted, it defaults to the length + of newsubstring. + + + overlay(B'01010101010101010' placing B'11111' from 2 for 3) + 0111110101010101010 + + + + + + + position + + position ( substring bit IN bits bit ) + integer + + + Returns first starting index of the specified substring + within bits, or zero if it's not present. + + + position(B'010' in B'000001101011') + 8 + + + + + + + substring + + substring ( bits bit FROM start integer FOR count integer ) + bit + + + Extracts the substring of bits starting at + the start'th bit if that is specified, + and stopping after count bits if that is + specified. Provide at least one of start + and count. + + + substring(B'110010111111' from 3 for 2) + 00 + + + + + + + get_bit + + get_bit ( bits bit, + n integer ) + integer + + + Extracts n'th bit + from bit string; the first (leftmost) bit is bit 0. + + + get_bit(B'101010101010101010', 6) + 1 + + + + + + + set_bit + + set_bit ( bits bit, + n integer, + newvalue integer ) + bit + + + Sets n'th bit in + bit string to newvalue; + the first (leftmost) bit is bit 0. + + + set_bit(B'101010101010101010', 6, 0) + 101010001010101010 + + + + +
+ + + In addition, it is possible to cast integral values to and from type + bit. + Casting an integer to bit(n) copies the rightmost + n bits. Casting an integer to a bit string width wider + than the integer itself will sign-extend on the left. + Some examples: + +44::bit(10) 0000101100 +44::bit(3) 100 +cast(-44 as bit(12)) 111111010100 +'1110'::bit(4)::integer 14 + + Note that casting to just bit means casting to + bit(1), and so will deliver only the least significant + bit of the integer. + +
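One practical application of these casts is testing individual flag bits packed into an integer. A minimal sketch, assuming an 8-bit mask is wide enough for the flags in question:

SELECT (44::bit(8) & 4::bit(8)) <> 0::bit(8);
Result: t

Here 44 is 00101100 in eight bits, so the bit corresponding to the value 4 is set.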
diff --git a/doc/src/sgml/func/func-comparison.sgml b/doc/src/sgml/func/func-comparison.sgml new file mode 100644 index 0000000000000..c1205983f8bac --- /dev/null +++ b/doc/src/sgml/func/func-comparison.sgml @@ -0,0 +1,638 @@ + + Comparison Functions and Operators + + + comparison + operators + + + + The usual comparison operators are available, as shown in . + + + + Comparison Operators + + + + Operator + Description + + + + + + + datatype < datatype + boolean + + Less than + + + + + datatype > datatype + boolean + + Greater than + + + + + datatype <= datatype + boolean + + Less than or equal to + + + + + datatype >= datatype + boolean + + Greater than or equal to + + + + + datatype = datatype + boolean + + Equal + + + + + datatype <> datatype + boolean + + Not equal + + + + + datatype != datatype + boolean + + Not equal + + + +
+ + + + <> is the standard SQL notation for not + equal. != is an alias, which is converted + to <> at a very early stage of parsing. + Hence, it is not possible to implement != + and <> operators that do different things. + + + + + These comparison operators are available for all built-in data types + that have a natural ordering, including numeric, string, and date/time + types. In addition, arrays, composite types, and ranges can be compared + if their component data types are comparable. + + + + It is usually possible to compare values of related data + types as well; for example integer > + bigint will work. Some cases of this sort are implemented + directly by cross-type comparison operators, but if no + such operator is available, the parser will coerce the less-general type + to the more-general type and apply the latter's comparison operator. + + + + As shown above, all comparison operators are binary operators that + return values of type boolean. Thus, expressions like + 1 < 2 < 3 are not valid (because there is + no < operator to compare a Boolean value with + 3). Use the BETWEEN predicates + shown below to perform range tests. + + + + There are also some comparison predicates, as shown in . These behave much like + operators, but have special syntax mandated by the SQL standard. + + + + Comparison Predicates + + + + + Predicate + + + Description + + + Example(s) + + + + + + + + datatype BETWEEN datatype AND datatype + boolean + + + Between (inclusive of the range endpoints). + + + 2 BETWEEN 1 AND 3 + t + + + 2 BETWEEN 3 AND 1 + f + + + + + + datatype NOT BETWEEN datatype AND datatype + boolean + + + Not between (the negation of BETWEEN). + + + 2 NOT BETWEEN 1 AND 3 + f + + + + + + datatype BETWEEN SYMMETRIC datatype AND datatype + boolean + + + Between, after sorting the two endpoint values. + + + 2 BETWEEN SYMMETRIC 3 AND 1 + t + + + + + + datatype NOT BETWEEN SYMMETRIC datatype AND datatype + boolean + + + Not between, after sorting the two endpoint values. + + + 2 NOT BETWEEN SYMMETRIC 3 AND 1 + f + + + + + + datatype IS DISTINCT FROM datatype + boolean + + + Not equal, treating null as a comparable value. + + + 1 IS DISTINCT FROM NULL + t (rather than NULL) + + + NULL IS DISTINCT FROM NULL + f (rather than NULL) + + + + + + datatype IS NOT DISTINCT FROM datatype + boolean + + + Equal, treating null as a comparable value. + + + 1 IS NOT DISTINCT FROM NULL + f (rather than NULL) + + + NULL IS NOT DISTINCT FROM NULL + t (rather than NULL) + + + + + + datatype IS NULL + boolean + + + Test whether value is null. + + + 1.5 IS NULL + f + + + + + + datatype IS NOT NULL + boolean + + + Test whether value is not null. + + + 'null' IS NOT NULL + t + + + + + + datatype ISNULL + boolean + + + Test whether value is null (nonstandard syntax). + + + + + + datatype NOTNULL + boolean + + + Test whether value is not null (nonstandard syntax). + + + + + + boolean IS TRUE + boolean + + + Test whether boolean expression yields true. + + + true IS TRUE + t + + + NULL::boolean IS TRUE + f (rather than NULL) + + + + + + boolean IS NOT TRUE + boolean + + + Test whether boolean expression yields false or unknown. + + + true IS NOT TRUE + f + + + NULL::boolean IS NOT TRUE + t (rather than NULL) + + + + + + boolean IS FALSE + boolean + + + Test whether boolean expression yields false. + + + true IS FALSE + f + + + NULL::boolean IS FALSE + f (rather than NULL) + + + + + + boolean IS NOT FALSE + boolean + + + Test whether boolean expression yields true or unknown. 
+ + + true IS NOT FALSE + t + + + NULL::boolean IS NOT FALSE + t (rather than NULL) + + + + + + boolean IS UNKNOWN + boolean + + + Test whether boolean expression yields unknown. + + + true IS UNKNOWN + f + + + NULL::boolean IS UNKNOWN + t (rather than NULL) + + + + + + boolean IS NOT UNKNOWN + boolean + + + Test whether boolean expression yields true or false. + + + true IS NOT UNKNOWN + t + + + NULL::boolean IS NOT UNKNOWN + f (rather than NULL) + + + + +
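To contrast the ordinary equality operator with its null-safe counterpart (see the discussion of IS DISTINCT FROM below):

SELECT 7 = NULL;
Result: NULL
SELECT 7 IS DISTINCT FROM NULL;
Result: t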
+ + + + BETWEEN + + + BETWEEN SYMMETRIC + + The BETWEEN predicate simplifies range tests: + +a BETWEEN x AND y + + is equivalent to + +a >= x AND a <= y + + Notice that BETWEEN treats the endpoint values as included + in the range. + BETWEEN SYMMETRIC is like BETWEEN + except there is no requirement that the argument to the left of + AND be less than or equal to the argument on the right. + If it is not, those two arguments are automatically swapped, so that + a nonempty range is always implied. + + + + The various variants of BETWEEN are implemented in + terms of the ordinary comparison operators, and therefore will work for + any data type(s) that can be compared. + + + + + The use of AND in the BETWEEN + syntax creates an ambiguity with the use of AND as a + logical operator. To resolve this, only a limited set of expression + types are allowed as the second argument of a BETWEEN + clause. If you need to write a more complex sub-expression + in BETWEEN, write parentheses around the + sub-expression. + + + + + + IS DISTINCT FROM + + + IS NOT DISTINCT FROM + + Ordinary comparison operators yield null (signifying unknown), + not true or false, when either input is null. For example, + 7 = NULL yields null, as does 7 <> NULL. When + this behavior is not suitable, use the + IS NOT DISTINCT FROM predicates: + +a IS DISTINCT FROM b +a IS NOT DISTINCT FROM b + + For non-null inputs, IS DISTINCT FROM is + the same as the <> operator. However, if both + inputs are null it returns false, and if only one input is + null it returns true. Similarly, IS NOT DISTINCT + FROM is identical to = for non-null + inputs, but it returns true when both inputs are null, and false when only + one input is null. Thus, these predicates effectively act as though null + were a normal data value, rather than unknown. + + + + + IS NULL + + + IS NOT NULL + + + ISNULL + + + NOTNULL + + To check whether a value is or is not null, use the predicates: + +expression IS NULL +expression IS NOT NULL + + or the equivalent, but nonstandard, predicates: + +expression ISNULL +expression NOTNULL + + null valuecomparing + + + + Do not write + expression = NULL + because NULL is not equal to + NULL. (The null value represents an unknown value, + and it is not known whether two unknown values are equal.) + + + + + Some applications might expect that + expression = NULL + returns true if expression evaluates to + the null value. It is highly recommended that these applications + be modified to comply with the SQL standard. However, if that + cannot be done the + configuration variable is available. If it is enabled, + PostgreSQL will convert x = + NULL clauses to x IS NULL. + + + + + If the expression is row-valued, then + IS NULL is true when the row expression itself is null + or when all the row's fields are null, while + IS NOT NULL is true when the row expression itself is non-null + and all the row's fields are non-null. Because of this behavior, + IS NULL and IS NOT NULL do not always return + inverse results for row-valued expressions; in particular, a row-valued + expression that contains both null and non-null fields will return false + for both tests. 
For example: + + +SELECT ROW(1,2.5,'this is a test') = ROW(1, 3, 'not the same'); + +SELECT ROW(table.*) IS NULL FROM table; -- detect all-null rows + +SELECT ROW(table.*) IS NOT NULL FROM table; -- detect all-non-null rows + +SELECT NOT(ROW(table.*) IS NOT NULL) FROM TABLE; -- detect at least one null in rows + + + In some cases, it may be preferable to + write row IS DISTINCT FROM NULL + or row IS NOT DISTINCT FROM NULL, + which will simply check whether the overall row value is null without any + additional tests on the row fields. + + + + + IS TRUE + + + IS NOT TRUE + + + IS FALSE + + + IS NOT FALSE + + + IS UNKNOWN + + + IS NOT UNKNOWN + + Boolean values can also be tested using the predicates + +boolean_expression IS TRUE +boolean_expression IS NOT TRUE +boolean_expression IS FALSE +boolean_expression IS NOT FALSE +boolean_expression IS UNKNOWN +boolean_expression IS NOT UNKNOWN + + These will always return true or false, never a null value, even when the + operand is null. + A null input is treated as the logical value unknown. + Notice that IS UNKNOWN and IS NOT UNKNOWN are + effectively the same as IS NULL and + IS NOT NULL, respectively, except that the input + expression must be of Boolean type. + + + + Some comparison-related functions are also available, as shown in . + + + + Comparison Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + num_nonnulls + + num_nonnulls ( VARIADIC "any" ) + integer + + + Returns the number of non-null arguments. + + + num_nonnulls(1, NULL, 2) + 2 + + + + + + num_nulls + + num_nulls ( VARIADIC "any" ) + integer + + + Returns the number of null arguments. + + + num_nulls(1, NULL, 2) + 1 + + + + +
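These functions are convenient for checking several columns at once. A minimal sketch (the people table and its columns are hypothetical):

SELECT num_nulls(first_name, middle_name, last_name) AS missing_parts
FROM people;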
+ +
diff --git a/doc/src/sgml/func/func-comparisons.sgml b/doc/src/sgml/func/func-comparisons.sgml new file mode 100644 index 0000000000000..6a6e0bd401920 --- /dev/null +++ b/doc/src/sgml/func/func-comparisons.sgml @@ -0,0 +1,336 @@ + + Row and Array Comparisons + + + IN + + + + NOT IN + + + + ANY + + + + ALL + + + + SOME + + + + composite type + comparison + + + + row-wise comparison + + + + comparison + composite type + + + + comparison + row constructor + + + + IS DISTINCT FROM + + + + IS NOT DISTINCT FROM + + + + This section describes several specialized constructs for making + multiple comparisons between groups of values. These forms are + syntactically related to the subquery forms of the previous section, + but do not involve subqueries. + The forms involving array subexpressions are + PostgreSQL extensions; the rest are + SQL-compliant. + All of the expression forms documented in this section return + Boolean (true/false) results. + + + + <literal>IN</literal> + + +expression IN (value , ...) + + + + The right-hand side is a parenthesized list + of expressions. The result is true if the left-hand expression's + result is equal to any of the right-hand expressions. This is a shorthand + notation for + + +expression = value1 +OR +expression = value2 +OR +... + + + + + Note that if the left-hand expression yields null, or if there are + no equal right-hand values and at least one right-hand expression yields + null, the result of the IN construct will be null, not false. + This is in accordance with SQL's normal rules for Boolean combinations + of null values. + + + + + <literal>NOT IN</literal> + + +expression NOT IN (value , ...) + + + + The right-hand side is a parenthesized list + of expressions. The result is true if the left-hand expression's + result is unequal to all of the right-hand expressions. This is a shorthand + notation for + + +expression <> value1 +AND +expression <> value2 +AND +... + + + + + Note that if the left-hand expression yields null, or if there are + no equal right-hand values and at least one right-hand expression yields + null, the result of the NOT IN construct will be null, not true + as one might naively expect. + This is in accordance with SQL's normal rules for Boolean combinations + of null values. + + + + + x NOT IN y is equivalent to NOT (x IN y) in all + cases. However, null values are much more likely to trip up the novice when + working with NOT IN than when working with IN. + It is best to express your condition positively if possible. + + + + + + <literal>ANY</literal>/<literal>SOME</literal> (array) + + +expression operator ANY (array expression) +expression operator SOME (array expression) + + + + The right-hand side is a parenthesized expression, which must yield an + array value. + The left-hand expression + is evaluated and compared to each element of the array using the + given operator, which must yield a Boolean + result. + The result of ANY is true if any true result is obtained. + The result is false if no true result is found (including the + case where the array has zero elements). + + + + If the array expression yields a null array, the result of + ANY will be null. If the left-hand expression yields null, + the result of ANY is ordinarily null (though a non-strict + comparison operator could possibly yield a different result). + Also, if the right-hand array contains any null elements and no true + comparison result is obtained, the result of ANY + will be null, not false (again, assuming a strict comparison operator). 
+ This is in accordance with SQL's normal rules for Boolean combinations + of null values. + + + + SOME is a synonym for ANY. + + + + + <literal>ALL</literal> (array) + + +expression operator ALL (array expression) + + + + The right-hand side is a parenthesized expression, which must yield an + array value. + The left-hand expression + is evaluated and compared to each element of the array using the + given operator, which must yield a Boolean + result. + The result of ALL is true if all comparisons yield true + (including the case where the array has zero elements). + The result is false if any false result is found. + + + + If the array expression yields a null array, the result of + ALL will be null. If the left-hand expression yields null, + the result of ALL is ordinarily null (though a non-strict + comparison operator could possibly yield a different result). + Also, if the right-hand array contains any null elements and no false + comparison result is obtained, the result of ALL + will be null, not true (again, assuming a strict comparison operator). + This is in accordance with SQL's normal rules for Boolean combinations + of null values. + + + + + Row Constructor Comparison + + +row_constructor operator row_constructor + + + + Each side is a row constructor, + as described in . + The two row constructors must have the same number of fields. + The given operator is applied to each pair + of corresponding fields. (Since the fields could be of different + types, this means that a different specific operator could be selected + for each pair.) + All the selected operators must be members of some B-tree operator + class, or be the negator of an = member of a B-tree + operator class, meaning that row constructor comparison is only + possible when the operator is + =, + <>, + <, + <=, + >, or + >=, + or has semantics similar to one of these. + + + + The = and <> cases work slightly differently + from the others. Two rows are considered + equal if all their corresponding members are non-null and equal; the rows + are unequal if any corresponding members are non-null and unequal; + otherwise the result of the row comparison is unknown (null). + + + + For the <, <=, > and + >= cases, the row elements are compared left-to-right, + stopping as soon as an unequal or null pair of elements is found. + If either of this pair of elements is null, the result of the + row comparison is unknown (null); otherwise comparison of this pair + of elements determines the result. For example, + ROW(1,2,NULL) < ROW(1,3,0) + yields true, not null, because the third pair of elements are not + considered. + + + +row_constructor IS DISTINCT FROM row_constructor + + + + This construct is similar to a <> row comparison, + but it does not yield null for null inputs. Instead, any null value is + considered unequal to (distinct from) any non-null value, and any two + nulls are considered equal (not distinct). Thus the result will + either be true or false, never null. + + + +row_constructor IS NOT DISTINCT FROM row_constructor + + + + This construct is similar to a = row comparison, + but it does not yield null for null inputs. Instead, any null value is + considered unequal to (distinct from) any non-null value, and any two + nulls are considered equal (not distinct). Thus the result will always + be either true or false, never null. 
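A short sketch of these rules side by side:

SELECT ROW(1, NULL) = ROW(1, NULL);
Result: NULL
SELECT ROW(1, NULL) IS NOT DISTINCT FROM ROW(1, NULL);
Result: t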
+ + + + + + Composite Type Comparison + + +record operator record + + + + The SQL specification requires row-wise comparison to return NULL if the + result depends on comparing two NULL values or a NULL and a non-NULL. + PostgreSQL does this only when comparing the + results of two row constructors (as in + ) or comparing a row constructor + to the output of a subquery (as in ). + In other contexts where two composite-type values are compared, two + NULL field values are considered equal, and a NULL is considered larger + than a non-NULL. This is necessary in order to have consistent sorting + and indexing behavior for composite types. + + + + Each side is evaluated and they are compared row-wise. Composite type + comparisons are allowed when the operator is + =, + <>, + <, + <=, + > or + >=, + or has semantics similar to one of these. (To be specific, an operator + can be a row comparison operator if it is a member of a B-tree operator + class, or is the negator of the = member of a B-tree operator + class.) The default behavior of the above operators is the same as for + IS [ NOT ] DISTINCT FROM for row constructors (see + ). + + + + To support matching of rows which include elements without a default + B-tree operator class, the following operators are defined for composite + type comparison: + *=, + *<>, + *<, + *<=, + *>, and + *>=. + These operators compare the internal binary representation of the two + rows. Two rows might have a different binary representation even + though comparisons of the two rows with the equality operator is true. + The ordering of rows under these comparison operators is deterministic + but not otherwise meaningful. These operators are used internally + for materialized views and might be useful for other specialized + purposes such as replication and B-Tree deduplication (see ). They are not intended to be + generally useful for writing queries, though. + + + diff --git a/doc/src/sgml/func/func-conditional.sgml b/doc/src/sgml/func/func-conditional.sgml new file mode 100644 index 0000000000000..7ca53dbf1ab03 --- /dev/null +++ b/doc/src/sgml/func/func-conditional.sgml @@ -0,0 +1,283 @@ + + Conditional Expressions + + + CASE + + + + conditional expression + + + + This section describes the SQL-compliant conditional expressions + available in PostgreSQL. + + + + + If your needs go beyond the capabilities of these conditional + expressions, you might want to consider writing a server-side function + in a more expressive programming language. + + + + + + Although COALESCE, GREATEST, and + LEAST are syntactically similar to functions, they are + not ordinary functions, and thus cannot be used with explicit + VARIADIC array arguments. + + + + + <literal>CASE</literal> + + + The SQL CASE expression is a + generic conditional expression, similar to if/else statements in + other programming languages: + + +CASE WHEN condition THEN result + WHEN ... + ELSE result +END + + + CASE clauses can be used wherever + an expression is valid. Each condition is an + expression that returns a boolean result. If the condition's + result is true, the value of the CASE expression is the + result that follows the condition, and the + remainder of the CASE expression is not processed. If the + condition's result is not true, any subsequent WHEN clauses + are examined in the same manner. If no WHEN + condition yields true, the value of the + CASE expression is the result of the + ELSE clause. If the ELSE clause is + omitted and no condition is true, the result is null. 
+ + + + An example: + +SELECT * FROM test; + + a +--- + 1 + 2 + 3 + + +SELECT a, + CASE WHEN a=1 THEN 'one' + WHEN a=2 THEN 'two' + ELSE 'other' + END + FROM test; + + a | case +---+------- + 1 | one + 2 | two + 3 | other + + + + + The data types of all the result + expressions must be convertible to a single output type. + See for more details. + + + + There is a simple form of CASE expression + that is a variant of the general form above: + + +CASE expression + WHEN value THEN result + WHEN ... + ELSE result +END + + + The first + expression is computed, then compared to + each of the value expressions in the + WHEN clauses until one is found that is equal to it. If + no match is found, the result of the + ELSE clause (or a null value) is returned. This is similar + to the switch statement in C. + + + + The example above can be written using the simple + CASE syntax: + +SELECT a, + CASE a WHEN 1 THEN 'one' + WHEN 2 THEN 'two' + ELSE 'other' + END + FROM test; + + a | case +---+------- + 1 | one + 2 | two + 3 | other + + + + + A CASE expression does not evaluate any subexpressions + that are not needed to determine the result. For example, this is a + possible way of avoiding a division-by-zero failure: + +SELECT ... WHERE CASE WHEN x <> 0 THEN y/x > 1.5 ELSE false END; + + + + + + As described in , there are various + situations in which subexpressions of an expression are evaluated at + different times, so that the principle that CASE + evaluates only necessary subexpressions is not ironclad. For + example a constant 1/0 subexpression will usually result in + a division-by-zero failure at planning time, even if it's within + a CASE arm that would never be entered at run time. + + + + + + <literal>COALESCE</literal> + + + COALESCE + + + + NVL + + + + IFNULL + + + +COALESCE(value , ...) + + + + The COALESCE function returns the first of its + arguments that is not null. Null is returned only if all arguments + are null. It is often used to substitute a default value for + null values when data is retrieved for display, for example: + +SELECT COALESCE(description, short_description, '(none)') ... + + This returns description if it is not null, otherwise + short_description if it is not null, otherwise (none). + + + + The arguments must all be convertible to a common data type, which + will be the type of the result (see + for details). + + + + Like a CASE expression, COALESCE only + evaluates the arguments that are needed to determine the result; + that is, arguments to the right of the first non-null argument are + not evaluated. This SQL-standard function provides capabilities similar + to NVL and IFNULL, which are used in some other + database systems. + + + + + <literal>NULLIF</literal> + + + NULLIF + + + +NULLIF(value1, value2) + + + + The NULLIF function returns a null value if + value1 equals value2; + otherwise it returns value1. + This can be used to perform the inverse operation of the + COALESCE example given above: + +SELECT NULLIF(value, '(none)') ... + + In this example, if value is (none), + null is returned, otherwise the value of value + is returned. + + + + The two arguments must be of comparable types. + To be specific, they are compared exactly as if you had + written value1 + = value2, so there must be a + suitable = operator available. + + + + The result has the same type as the first argument — but there is + a subtlety. 
What is actually returned is the first argument of the + implied = operator, and in some cases that will have + been promoted to match the second argument's type. For + example, NULLIF(1, 2.2) yields numeric, + because there is no integer = + numeric operator, + only numeric = numeric. + + + + + + <literal>GREATEST</literal> and <literal>LEAST</literal> + + + GREATEST + + + LEAST + + + +GREATEST(value , ...) + + +LEAST(value , ...) + + + + The GREATEST and LEAST functions select the + largest or smallest value from a list of any number of expressions. + The expressions must all be convertible to a common data type, which + will be the type of the result + (see for details). + + + + NULL values in the argument list are ignored. The result will be NULL + only if all the expressions evaluate to NULL. (This is a deviation from + the SQL standard. According to the standard, the return value is NULL if + any argument is NULL. Some other databases behave this way.) + + + diff --git a/doc/src/sgml/func/func-datetime.sgml b/doc/src/sgml/func/func-datetime.sgml new file mode 100644 index 0000000000000..8cd7150b0d313 --- /dev/null +++ b/doc/src/sgml/func/func-datetime.sgml @@ -0,0 +1,2236 @@ + + Date/Time Functions and Operators + + + shows the available + functions for date/time value processing, with details appearing in + the following subsections. illustrates the behaviors of + the basic arithmetic operators (+, + *, etc.). For formatting functions, refer to + . You should be familiar with + the background information on date/time data types from . + + + + In addition, the usual comparison operators shown in + are available for the + date/time types. Dates and timestamps (with or without time zone) are + all comparable, while times (with or without time zone) and intervals + can only be compared to other values of the same data type. When + comparing a timestamp without time zone to a timestamp with time zone, + the former value is assumed to be given in the time zone specified by + the configuration parameter, and is + rotated to UTC for comparison to the latter value (which is already + in UTC internally). Similarly, a date value is assumed to represent + midnight in the TimeZone zone when comparing it + to a timestamp. + + + + All the functions and operators described below that take time or timestamp + inputs actually come in two variants: one that takes time with time zone or timestamp + with time zone, and one that takes time without time zone or timestamp without time zone. + For brevity, these variants are not shown separately. Also, the + + and * operators come in commutative pairs (for + example both date + integer + and integer + date); we show + only one of each such pair. 
+ + + + Date/Time Operators + + + + + + Operator + + + Description + + + Example(s) + + + + + + + + date + integer + date + + + Add a number of days to a date + + + date '2001-09-28' + 7 + 2001-10-05 + + + + + + date + interval + timestamp + + + Add an interval to a date + + + date '2001-09-28' + interval '1 hour' + 2001-09-28 01:00:00 + + + + + + date + time + timestamp + + + Add a time-of-day to a date + + + date '2001-09-28' + time '03:00' + 2001-09-28 03:00:00 + + + + + + interval + interval + interval + + + Add intervals + + + interval '1 day' + interval '1 hour' + 1 day 01:00:00 + + + + + + timestamp + interval + timestamp + + + Add an interval to a timestamp + + + timestamp '2001-09-28 01:00' + interval '23 hours' + 2001-09-29 00:00:00 + + + + + + time + interval + time + + + Add an interval to a time + + + time '01:00' + interval '3 hours' + 04:00:00 + + + + + + - interval + interval + + + Negate an interval + + + - interval '23 hours' + -23:00:00 + + + + + + date - date + integer + + + Subtract dates, producing the number of days elapsed + + + date '2001-10-01' - date '2001-09-28' + 3 + + + + + + date - integer + date + + + Subtract a number of days from a date + + + date '2001-10-01' - 7 + 2001-09-24 + + + + + + date - interval + timestamp + + + Subtract an interval from a date + + + date '2001-09-28' - interval '1 hour' + 2001-09-27 23:00:00 + + + + + + time - time + interval + + + Subtract times + + + time '05:00' - time '03:00' + 02:00:00 + + + + + + time - interval + time + + + Subtract an interval from a time + + + time '05:00' - interval '2 hours' + 03:00:00 + + + + + + timestamp - interval + timestamp + + + Subtract an interval from a timestamp + + + timestamp '2001-09-28 23:00' - interval '23 hours' + 2001-09-28 00:00:00 + + + + + + interval - interval + interval + + + Subtract intervals + + + interval '1 day' - interval '1 hour' + 1 day -01:00:00 + + + + + + timestamp - timestamp + interval + + + Subtract timestamps (converting 24-hour intervals into days, + similarly to justify_hours()) + + + timestamp '2001-09-29 03:00' - timestamp '2001-07-27 12:00' + 63 days 15:00:00 + + + + + + interval * double precision + interval + + + Multiply an interval by a scalar + + + interval '1 second' * 900 + 00:15:00 + + + interval '1 day' * 21 + 21 days + + + interval '1 hour' * 3.5 + 03:30:00 + + + + + + interval / double precision + interval + + + Divide an interval by a scalar + + + interval '1 hour' / 1.5 + 00:40:00 + + + + +
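For instance, scalar multiplication and timestamp subtraction behave as described above (results shown as PostgreSQL prints them):

SELECT interval '1 day' * 2.5;
Result: 2 days 12:00:00
SELECT timestamp '2001-09-29 03:00' - timestamp '2001-09-28 00:00';
Result: 1 day 03:00:00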
+ + + Date/Time Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + age + + age ( timestamp, timestamp ) + interval + + + Subtract arguments, producing a symbolic result that + uses years and months, rather than just days + + + age(timestamp '2001-04-10', timestamp '1957-06-13') + 43 years 9 mons 27 days + + + + + + age ( timestamp ) + interval + + + Subtract argument from current_date (at midnight) + + + age(timestamp '1957-06-13') + 62 years 6 mons 10 days + + + + + + + clock_timestamp + + clock_timestamp ( ) + timestamp with time zone + + + Current date and time (changes during statement execution); + see + + + clock_timestamp() + 2019-12-23 14:39:53.662522-05 + + + + + + + current_date + + current_date + date + + + Current date; see + + + current_date + 2019-12-23 + + + + + + + current_time + + current_time + time with time zone + + + Current time of day; see + + + current_time + 14:39:53.662522-05 + + + + + + current_time ( integer ) + time with time zone + + + Current time of day, with limited precision; + see + + + current_time(2) + 14:39:53.66-05 + + + + + + + current_timestamp + + current_timestamp + timestamp with time zone + + + Current date and time (start of current transaction); + see + + + current_timestamp + 2019-12-23 14:39:53.662522-05 + + + + + + current_timestamp ( integer ) + timestamp with time zone + + + Current date and time (start of current transaction), with limited precision; + see + + + current_timestamp(0) + 2019-12-23 14:39:53-05 + + + + + + + date_add + + date_add ( timestamp with time zone, interval , text ) + timestamp with time zone + + + Add an interval to a timestamp with time + zone, computing times of day and daylight-savings adjustments + according to the time zone named by the third argument, or the + current setting if that is omitted. + The form with two arguments is equivalent to the timestamp with + time zone + interval operator. + + + date_add('2021-10-31 00:00:00+02'::timestamptz, '1 day'::interval, 'Europe/Warsaw') + 2021-10-31 23:00:00+00 + + + + + + date_bin ( interval, timestamp, timestamp ) + timestamp + + + Bin input into specified interval aligned with specified origin; see + + + date_bin('15 minutes', timestamp '2001-02-16 20:38:40', timestamp '2001-02-16 20:05:00') + 2001-02-16 20:35:00 + + + + + + + date_part + + date_part ( text, timestamp ) + double precision + + + Get timestamp subfield (equivalent to extract); + see + + + date_part('hour', timestamp '2001-02-16 20:38:40') + 20 + + + + + + date_part ( text, interval ) + double precision + + + Get interval subfield (equivalent to extract); + see + + + date_part('month', interval '2 years 3 months') + 3 + + + + + + + date_subtract + + date_subtract ( timestamp with time zone, interval , text ) + timestamp with time zone + + + Subtract an interval from a timestamp with time + zone, computing times of day and daylight-savings adjustments + according to the time zone named by the third argument, or the + current setting if that is omitted. + The form with two arguments is equivalent to the timestamp with + time zone - interval operator. 
+ + + date_subtract('2021-11-01 00:00:00+01'::timestamptz, '1 day'::interval, 'Europe/Warsaw') + 2021-10-30 22:00:00+00 + + + + + + + date_trunc + + date_trunc ( text, timestamp ) + timestamp + + + Truncate to specified precision; see + + + date_trunc('hour', timestamp '2001-02-16 20:38:40') + 2001-02-16 20:00:00 + + + + + + date_trunc ( text, timestamp with time zone, text ) + timestamp with time zone + + + Truncate to specified precision in the specified time zone; see + + + + date_trunc('day', timestamptz '2001-02-16 20:38:40+00', 'Australia/Sydney') + 2001-02-16 13:00:00+00 + + + + + + date_trunc ( text, interval ) + interval + + + Truncate to specified precision; see + + + + date_trunc('hour', interval '2 days 3 hours 40 minutes') + 2 days 03:00:00 + + + + + + + extract + + extract ( field from timestamp ) + numeric + + + Get timestamp subfield; see + + + extract(hour from timestamp '2001-02-16 20:38:40') + 20 + + + + + + extract ( field from interval ) + numeric + + + Get interval subfield; see + + + extract(month from interval '2 years 3 months') + 3 + + + + + + + isfinite + + isfinite ( date ) + boolean + + + Test for finite date (not +/-infinity) + + + isfinite(date '2001-02-16') + true + + + + + + isfinite ( timestamp ) + boolean + + + Test for finite timestamp (not +/-infinity) + + + isfinite(timestamp 'infinity') + false + + + + + + isfinite ( interval ) + boolean + + + Test for finite interval (not +/-infinity) + + + isfinite(interval '4 hours') + true + + + + + + + justify_days + + justify_days ( interval ) + interval + + + Adjust interval, converting 30-day time periods to months + + + justify_days(interval '1 year 65 days') + 1 year 2 mons 5 days + + + + + + + justify_hours + + justify_hours ( interval ) + interval + + + Adjust interval, converting 24-hour time periods to days + + + justify_hours(interval '50 hours 10 minutes') + 2 days 02:10:00 + + + + + + + justify_interval + + justify_interval ( interval ) + interval + + + Adjust interval using justify_days + and justify_hours, with additional sign + adjustments + + + justify_interval(interval '1 mon -1 hour') + 29 days 23:00:00 + + + + + + + localtime + + localtime + time + + + Current time of day; + see + + + localtime + 14:39:53.662522 + + + + + + localtime ( integer ) + time + + + Current time of day, with limited precision; + see + + + localtime(0) + 14:39:53 + + + + + + + localtimestamp + + localtimestamp + timestamp + + + Current date and time (start of current transaction); + see + + + localtimestamp + 2019-12-23 14:39:53.662522 + + + + + + localtimestamp ( integer ) + timestamp + + + Current date and time (start of current + transaction), with limited precision; + see + + + localtimestamp(2) + 2019-12-23 14:39:53.66 + + + + + + + make_date + + make_date ( year int, + month int, + day int ) + date + + + Create date from year, month and day fields + (negative years signify BC) + + + make_date(2013, 7, 15) + 2013-07-15 + + + + + + make_interval + + make_interval ( years int + , months int + , weeks int + , days int + , hours int + , mins int + , secs double precision + ) + interval + + + Create interval from years, months, weeks, days, hours, minutes and + seconds fields, each of which can default to zero + + + make_interval(days => 10) + 10 days + + + + + + + make_time + + make_time ( hour int, + min int, + sec double precision ) + time + + + Create time from hour, minute and seconds fields + + + make_time(8, 15, 23.5) + 08:15:23.5 + + + + + + + make_timestamp + + make_timestamp ( year int, + month int, + day int, 
+ hour int, + min int, + sec double precision ) + timestamp + + + Create timestamp from year, month, day, hour, minute and seconds fields + (negative years signify BC) + + + make_timestamp(2013, 7, 15, 8, 15, 23.5) + 2013-07-15 08:15:23.5 + + + + + + + make_timestamptz + + make_timestamptz ( year int, + month int, + day int, + hour int, + min int, + sec double precision + , timezone text ) + timestamp with time zone + + + Create timestamp with time zone from year, month, day, hour, minute + and seconds fields (negative years signify BC). + If timezone is not + specified, the current time zone is used; the examples assume the + session time zone is Europe/London + + + make_timestamptz(2013, 7, 15, 8, 15, 23.5) + 2013-07-15 08:15:23.5+01 + + + make_timestamptz(2013, 7, 15, 8, 15, 23.5, 'America/New_York') + 2013-07-15 13:15:23.5+01 + + + + + + + now + + now ( ) + timestamp with time zone + + + Current date and time (start of current transaction); + see + + + now() + 2019-12-23 14:39:53.662522-05 + + + + + + + random + + random ( min date, max date ) + date + + + random ( min timestamp, max timestamp ) + timestamp + + + random ( min timestamptz, max timestamptz ) + timestamptz + + + Returns a random value in the range + min <= x <= max. + + + Note that these functions use the same pseudo-random number generator + as the functions listed in , + and respond in the same way to calling + setseed(). + + + random('1979-02-08'::date,'2025-07-03'::date) + 1983-04-21 + + + random('2000-01-01'::timestamptz, now()) + 2015-09-27 09:11:33.732707+00 + + + + + + + statement_timestamp + + statement_timestamp ( ) + timestamp with time zone + + + Current date and time (start of current statement); + see + + + statement_timestamp() + 2019-12-23 14:39:53.662522-05 + + + + + + + timeofday + + timeofday ( ) + text + + + Current date and time + (like clock_timestamp, but as a text string); + see + + + timeofday() + Mon Dec 23 14:39:53.662522 2019 EST + + + + + + + transaction_timestamp + + transaction_timestamp ( ) + timestamp with time zone + + + Current date and time (start of current transaction); + see + + + transaction_timestamp() + 2019-12-23 14:39:53.662522-05 + + + + + + + to_timestamp + + to_timestamp ( double precision ) + timestamp with time zone + + + Convert Unix epoch (seconds since 1970-01-01 00:00:00+00) to + timestamp with time zone + + + to_timestamp(1284352323) + 2010-09-13 04:32:03+00 + + + + +
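As a small sketch combining two of these functions (the input values are chosen arbitrarily):

SELECT make_interval(weeks => 2, hours => 3);
Result: 14 days 03:00:00
SELECT date_bin('30 minutes', timestamp '2024-05-01 10:44:00', timestamp '2024-05-01 00:00:00');
Result: 2024-05-01 10:30:00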
+ + + + OVERLAPS + + In addition to these functions, the SQL OVERLAPS operator is + supported: + +(start1, end1) OVERLAPS (start2, end2) +(start1, length1) OVERLAPS (start2, length2) + + This expression yields true when two time periods (defined by their + endpoints) overlap, false when they do not overlap. The endpoints + can be specified as pairs of dates, times, or time stamps; or as + a date, time, or time stamp followed by an interval. When a pair + of values is provided, either the start or the end can be written + first; OVERLAPS automatically takes the earlier value + of the pair as the start. Each time period is considered to + represent the half-open interval start <= + time < end, unless + start and end are equal in which case it + represents that single time instant. This means for instance that two + time periods with only an endpoint in common do not overlap. + + + +SELECT (DATE '2001-02-16', DATE '2001-12-21') OVERLAPS + (DATE '2001-10-30', DATE '2002-10-30'); +Result: true +SELECT (DATE '2001-02-16', INTERVAL '100 days') OVERLAPS + (DATE '2001-10-30', DATE '2002-10-30'); +Result: false +SELECT (DATE '2001-10-29', DATE '2001-10-30') OVERLAPS + (DATE '2001-10-30', DATE '2001-10-31'); +Result: false +SELECT (DATE '2001-10-30', DATE '2001-10-30') OVERLAPS + (DATE '2001-10-30', DATE '2001-10-31'); +Result: true + + + + When adding an interval value to (or subtracting an + interval value from) a timestamp + or timestamp with time zone value, the months, days, and + microseconds fields of the interval value are handled in turn. + First, a nonzero months field advances or decrements the date of the + timestamp by the indicated number of months, keeping the day of month the + same unless it would be past the end of the new month, in which case the + last day of that month is used. (For example, March 31 plus 1 month + becomes April 30, but March 31 plus 2 months becomes May 31.) + Then the days field advances or decrements the date of the timestamp by + the indicated number of days. In both these steps the local time of day + is kept the same. Finally, if there is a nonzero microseconds field, it + is added or subtracted literally. + When doing arithmetic on a timestamp with time zone value in + a time zone that recognizes DST, this means that adding or subtracting + (say) interval '1 day' does not necessarily have the + same result as adding or subtracting interval '24 + hours'. + For example, with the session time zone set + to America/Denver: + +SELECT timestamp with time zone '2005-04-02 12:00:00-07' + interval '1 day'; +Result: 2005-04-03 12:00:00-06 +SELECT timestamp with time zone '2005-04-02 12:00:00-07' + interval '24 hours'; +Result: 2005-04-03 13:00:00-06 + + This happens because an hour was skipped due to a change in daylight saving + time at 2005-04-03 02:00:00 in time zone + America/Denver. + + + + Note there can be ambiguity in the months field returned by + age because different months have different numbers of + days. PostgreSQL's approach uses the month from the + earlier of the two dates when calculating partial months. For example, + age('2004-06-01', '2004-04-30') uses April to yield + 1 mon 1 day, while using May would yield 1 mon 2 + days because May has 31 days, while April has only 30. + + + + Subtraction of dates and timestamps can also be complex. 
One conceptually + simple way to perform subtraction is to convert each value to a number + of seconds using EXTRACT(EPOCH FROM ...), then subtract the + results; this produces the + number of seconds between the two values. This will adjust + for the number of days in each month, timezone changes, and daylight + saving time adjustments. Subtraction of date or timestamp + values with the - operator + returns the number of days (24-hours) and hours/minutes/seconds + between the values, making the same adjustments. The age + function returns years, months, days, and hours/minutes/seconds, + performing field-by-field subtraction and then adjusting for negative + field values. The following queries illustrate the differences in these + approaches. The sample results were produced with timezone + = 'US/Eastern'; there is a daylight saving time change between the + two dates used: + + + +SELECT EXTRACT(EPOCH FROM timestamptz '2013-07-01 12:00:00') - + EXTRACT(EPOCH FROM timestamptz '2013-03-01 12:00:00'); +Result: 10537200.000000 +SELECT (EXTRACT(EPOCH FROM timestamptz '2013-07-01 12:00:00') - + EXTRACT(EPOCH FROM timestamptz '2013-03-01 12:00:00')) + / 60 / 60 / 24; +Result: 121.9583333333333333 +SELECT timestamptz '2013-07-01 12:00:00' - timestamptz '2013-03-01 12:00:00'; +Result: 121 days 23:00:00 +SELECT age(timestamptz '2013-07-01 12:00:00', timestamptz '2013-03-01 12:00:00'); +Result: 4 mons + + + + <function>EXTRACT</function>, <function>date_part</function> + + + date_part + + + extract + + + +EXTRACT(field FROM source) + + + + The extract function retrieves subfields + such as year or hour from date/time values. + source must be a value expression of + type timestamp, date, time, + or interval. (Timestamps and times can be with or + without time zone.) + field is an identifier or + string that selects what field to extract from the source value. + Not all fields are valid for every input data type; for example, fields + smaller than a day cannot be extracted from a date, while + fields of a day or more cannot be extracted from a time. + The extract function returns values of type + numeric. + + + + The following are valid field names: + + + + + century + + + The century; for interval values, the year field + divided by 100 + + + +SELECT EXTRACT(CENTURY FROM TIMESTAMP '2000-12-16 12:21:13'); +Result: 20 +SELECT EXTRACT(CENTURY FROM TIMESTAMP '2001-02-16 20:38:40'); +Result: 21 +SELECT EXTRACT(CENTURY FROM DATE '0001-01-01 AD'); +Result: 1 +SELECT EXTRACT(CENTURY FROM DATE '0001-12-31 BC'); +Result: -1 +SELECT EXTRACT(CENTURY FROM INTERVAL '2001 years'); +Result: 20 + + + + + + day + + + The day of the month (1–31); for interval + values, the number of days + + + +SELECT EXTRACT(DAY FROM TIMESTAMP '2001-02-16 20:38:40'); +Result: 16 +SELECT EXTRACT(DAY FROM INTERVAL '40 days 1 minute'); +Result: 40 + + + + + + + decade + + + The year field divided by 10 + + + +SELECT EXTRACT(DECADE FROM TIMESTAMP '2001-02-16 20:38:40'); +Result: 200 + + + + + + dow + + + The day of the week as Sunday (0) to + Saturday (6) + + + +SELECT EXTRACT(DOW FROM TIMESTAMP '2001-02-16 20:38:40'); +Result: 5 + + + Note that extract's day of the week numbering + differs from that of the to_char(..., + 'D') function. 
+ + + + + + + doy + + + The day of the year (1–365/366) + + + +SELECT EXTRACT(DOY FROM TIMESTAMP '2001-02-16 20:38:40'); +Result: 47 + + + + + + epoch + + + For timestamp with time zone values, the + number of seconds since 1970-01-01 00:00:00 UTC (negative for + timestamps before that); + for date and timestamp values, the + nominal number of seconds since 1970-01-01 00:00:00, + without regard to timezone or daylight-savings rules; + for interval values, the total number + of seconds in the interval + + + +SELECT EXTRACT(EPOCH FROM TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40.12-08'); +Result: 982384720.120000 +SELECT EXTRACT(EPOCH FROM TIMESTAMP '2001-02-16 20:38:40.12'); +Result: 982355920.120000 +SELECT EXTRACT(EPOCH FROM INTERVAL '5 days 3 hours'); +Result: 442800.000000 + + + + You can convert an epoch value back to a timestamp with time zone + with to_timestamp: + + +SELECT to_timestamp(982384720.12); +Result: 2001-02-17 04:38:40.12+00 + + + + Beware that applying to_timestamp to an epoch + extracted from a date or timestamp value + could produce a misleading result: the result will effectively + assume that the original value had been given in UTC, which might + not be the case. + + + + + + hour + + + The hour field (0–23 in timestamps, unrestricted in + intervals) + + + +SELECT EXTRACT(HOUR FROM TIMESTAMP '2001-02-16 20:38:40'); +Result: 20 + + + + + + isodow + + + The day of the week as Monday (1) to + Sunday (7) + + + +SELECT EXTRACT(ISODOW FROM TIMESTAMP '2001-02-18 20:38:40'); +Result: 7 + + + This is identical to dow except for Sunday. This + matches the ISO 8601 day of the week numbering. + + + + + + + isoyear + + + The ISO 8601 week-numbering year that the date + falls in + + + +SELECT EXTRACT(ISOYEAR FROM DATE '2006-01-01'); +Result: 2005 +SELECT EXTRACT(ISOYEAR FROM DATE '2006-01-02'); +Result: 2006 + + + + Each ISO 8601 week-numbering year begins with the + Monday of the week containing the 4th of January, so in early + January or late December the ISO year may be + different from the Gregorian year. See the week + field for more information. + + + + + + julian + + + The Julian Date corresponding to the + date or timestamp. Timestamps + that are not local midnight result in a fractional value. See + for more information. + + + +SELECT EXTRACT(JULIAN FROM DATE '2006-01-01'); +Result: 2453737 +SELECT EXTRACT(JULIAN FROM TIMESTAMP '2006-01-01 12:00'); +Result: 2453737.50000000000000000000 + + + + + + microseconds + + + The seconds field, including fractional parts, multiplied by 1 + 000 000; note that this includes full seconds + + + +SELECT EXTRACT(MICROSECONDS FROM TIME '17:12:28.5'); +Result: 28500000 + + + + + + millennium + + + The millennium; for interval values, the year field + divided by 1000 + + + +SELECT EXTRACT(MILLENNIUM FROM TIMESTAMP '2001-02-16 20:38:40'); +Result: 3 +SELECT EXTRACT(MILLENNIUM FROM INTERVAL '2001 years'); +Result: 2 + + + + Years in the 1900s are in the second millennium. + The third millennium started January 1, 2001. + + + + + + milliseconds + + + The seconds field, including fractional parts, multiplied by + 1000. Note that this includes full seconds. 
+ + + +SELECT EXTRACT(MILLISECONDS FROM TIME '17:12:28.5'); +Result: 28500.000 + + + + + + minute + + + The minutes field (0–59) + + + +SELECT EXTRACT(MINUTE FROM TIMESTAMP '2001-02-16 20:38:40'); +Result: 38 + + + + + + month + + + The number of the month within the year (1–12); + for interval values, the number of months modulo 12 + (0–11) + + + +SELECT EXTRACT(MONTH FROM TIMESTAMP '2001-02-16 20:38:40'); +Result: 2 +SELECT EXTRACT(MONTH FROM INTERVAL '2 years 3 months'); +Result: 3 +SELECT EXTRACT(MONTH FROM INTERVAL '2 years 13 months'); +Result: 1 + + + + + + quarter + + + The quarter of the year (1–4) that the date is in; + for interval values, the month field divided by 3 + plus 1 + + + +SELECT EXTRACT(QUARTER FROM TIMESTAMP '2001-02-16 20:38:40'); +Result: 1 +SELECT EXTRACT(QUARTER FROM INTERVAL '1 year 6 months'); +Result: 3 + + + + + + second + + + The seconds field, including any fractional seconds + + + +SELECT EXTRACT(SECOND FROM TIMESTAMP '2001-02-16 20:38:40'); +Result: 40.000000 +SELECT EXTRACT(SECOND FROM TIME '17:12:28.5'); +Result: 28.500000 + + + + + timezone + + + The time zone offset from UTC, measured in seconds. Positive values + correspond to time zones east of UTC, negative values to + zones west of UTC. (Technically, + PostgreSQL does not use UTC because + leap seconds are not handled.) + + + + + + timezone_hour + + + The hour component of the time zone offset + + + + + + timezone_minute + + + The minute component of the time zone offset + + + + + + week + + + The number of the ISO 8601 week-numbering week of + the year. By definition, ISO weeks start on Mondays and the first + week of a year contains January 4 of that year. In other words, the + first Thursday of a year is in week 1 of that year. + + + In the ISO week-numbering system, it is possible for early-January + dates to be part of the 52nd or 53rd week of the previous year, and for + late-December dates to be part of the first week of the next year. + For example, 2005-01-01 is part of the 53rd week of year + 2004, and 2006-01-01 is part of the 52nd week of year + 2005, while 2012-12-31 is part of the first week of 2013. + It's recommended to use the isoyear field together with + week to get consistent results. + + + + For interval values, the week field is simply the number + of integral days divided by 7. + + + +SELECT EXTRACT(WEEK FROM TIMESTAMP '2001-02-16 20:38:40'); +Result: 7 +SELECT EXTRACT(WEEK FROM INTERVAL '13 days 24 hours'); +Result: 1 + + + + + + year + + + The year field. Keep in mind there is no 0 AD, so subtracting + BC years from AD years should be done with care. + + + +SELECT EXTRACT(YEAR FROM TIMESTAMP '2001-02-16 20:38:40'); +Result: 2001 + + + + + + + + + When processing an interval value, + the extract function produces field values that + match the interpretation used by the interval output function. This + can produce surprising results if one starts with a non-normalized + interval representation, for example: + +SELECT INTERVAL '80 minutes'; +Result: 01:20:00 +SELECT EXTRACT(MINUTES FROM INTERVAL '80 minutes'); +Result: 20 + + + + + + When the input value is +/-Infinity, extract returns + +/-Infinity for monotonically-increasing fields (epoch, + julian, year, isoyear, + decade, century, and millennium + for timestamp inputs; epoch, hour, + day, year, decade, + century, and millennium for + interval inputs). + For other fields, NULL is returned. PostgreSQL + versions before 9.6 returned zero for all cases of infinite input. 
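For example, per the field lists above, year is monotonically increasing for timestamp inputs while month is not:

SELECT EXTRACT(YEAR FROM TIMESTAMP 'infinity');
Result: Infinity
SELECT EXTRACT(MONTH FROM TIMESTAMP 'infinity');
Result: NULL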
+ + + + + The extract function is primarily intended + for computational processing. For formatting date/time values for + display, see . + + + + The date_part function is modeled on the traditional + Ingres equivalent to the + SQL-standard function extract: + +date_part('field', source) + + Note that here the field parameter needs to + be a string value, not a name. The valid field names for + date_part are the same as for + extract. + For historical reasons, the date_part function + returns values of type double precision. This can result in + a loss of precision in certain uses. Using extract + is recommended instead. + + + +SELECT date_part('day', TIMESTAMP '2001-02-16 20:38:40'); +Result: 16 +SELECT date_part('hour', INTERVAL '4 hours 3 minutes'); +Result: 4 + + + + + + <function>date_trunc</function> + + + date_trunc + + + + The function date_trunc is conceptually + similar to the trunc function for numbers. + + + + +date_trunc(field, source , time_zone ) + + source is a value expression of type + timestamp, timestamp with time zone, + or interval. + (Values of type date and + time are cast automatically to timestamp or + interval, respectively.) + field selects to which precision to + truncate the input value. The return value is likewise of type + timestamp, timestamp with time zone, + or interval, + and it has all fields that are less significant than the + selected one set to zero (or one, for day and month). + + + + Valid values for field are: + + microseconds + milliseconds + second + minute + hour + day + week + month + quarter + year + decade + century + millennium + + + + + When the input value is of type timestamp with time zone, + the truncation is performed with respect to a particular time zone; + for example, truncation to day produces a value that + is midnight in that zone. By default, truncation is done with respect + to the current setting, but the + optional time_zone argument can be provided + to specify a different time zone. The time zone name can be specified + in any of the ways described in . + + + + A time zone cannot be specified when processing timestamp without + time zone or interval inputs. These are always + taken at face value. + + + + Examples (assuming the local time zone is America/New_York): + +SELECT date_trunc('hour', TIMESTAMP '2001-02-16 20:38:40'); +Result: 2001-02-16 20:00:00 +SELECT date_trunc('year', TIMESTAMP '2001-02-16 20:38:40'); +Result: 2001-01-01 00:00:00 +SELECT date_trunc('day', TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40+00'); +Result: 2001-02-16 00:00:00-05 +SELECT date_trunc('day', TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40+00', 'Australia/Sydney'); +Result: 2001-02-16 08:00:00-05 +SELECT date_trunc('hour', INTERVAL '3 days 02:47:33'); +Result: 3 days 02:00:00 + + + + + + <function>date_bin</function> + + + date_bin + + + + The function date_bin bins the input + timestamp into the specified interval (the stride) + aligned with a specified origin. + + + + +date_bin(stride, source, origin) + + source is a value expression of type + timestamp or timestamp with time zone. (Values + of type date are cast automatically to + timestamp.) stride is a value + expression of type interval. The return value is likewise + of type timestamp or timestamp with time zone, + and it marks the beginning of the bin into which the + source is placed. 
+ + + + Examples: + +SELECT date_bin('15 minutes', TIMESTAMP '2020-02-11 15:44:17', TIMESTAMP '2001-01-01'); +Result: 2020-02-11 15:30:00 +SELECT date_bin('15 minutes', TIMESTAMP '2020-02-11 15:44:17', TIMESTAMP '2001-01-01 00:02:30'); +Result: 2020-02-11 15:32:30 + + + + + In the case of full units (1 minute, 1 hour, etc.), it gives the same result as + the analogous date_trunc call, but the difference is + that date_bin can truncate to an arbitrary interval. + + + + The stride interval must be greater than zero and + cannot contain units of month or larger. + + + + + <literal>AT TIME ZONE</literal> and <literal>AT LOCAL</literal> + + + time zone + conversion + + + + AT TIME ZONE + + + + AT LOCAL + + + + The AT TIME ZONE operator converts time + stamp without time zone to/from + time stamp with time zone, and + time with time zone values to different time + zones. shows its + variants. + + + + <literal>AT TIME ZONE</literal> and <literal>AT LOCAL</literal> Variants + + + + + Operator + + + Description + + + Example(s) + + + + + + + + timestamp without time zone AT TIME ZONE zone + timestamp with time zone + + + Converts given time stamp without time zone to + time stamp with time zone, assuming the given + value is in the named time zone. + + + timestamp '2001-02-16 20:38:40' at time zone 'America/Denver' + 2001-02-17 03:38:40+00 + + + + + + timestamp without time zone AT LOCAL + timestamp with time zone + + + Converts given time stamp without time zone to + time stamp with the session's + TimeZone value as time zone. + + + timestamp '2001-02-16 20:38:40' at local + 2001-02-17 03:38:40+00 + + + + + + timestamp with time zone AT TIME ZONE zone + timestamp without time zone + + + Converts given time stamp with time zone to + time stamp without time zone, as the time would + appear in that zone. + + + timestamp with time zone '2001-02-16 20:38:40-05' at time zone 'America/Denver' + 2001-02-16 18:38:40 + + + + + + timestamp with time zone AT LOCAL + timestamp without time zone + + + Converts given time stamp with time zone to + time stamp without time zone, as the time would + appear with the session's TimeZone value as time zone. + + + timestamp with time zone '2001-02-16 20:38:40-05' at local + 2001-02-16 18:38:40 + + + + + + time with time zone AT TIME ZONE zone + time with time zone + + + Converts given time with time zone to a new time + zone. Since no date is supplied, this uses the currently active UTC + offset for the named destination zone. + + + time with time zone '05:34:17-05' at time zone 'UTC' + 10:34:17+00 + + + + + + time with time zone AT LOCAL + time with time zone + + + Converts given time with time zone to a new time + zone. Since no date is supplied, this uses the currently active UTC + offset for the session's TimeZone value. + + + Assuming the session's TimeZone is set to UTC: + + + time with time zone '05:34:17-05' at local + 10:34:17+00 + + + + +
+ + + In these expressions, the desired time zone zone can be + specified either as a text value (e.g., 'America/Los_Angeles') + or as an interval (e.g., INTERVAL '-08:00'). + In the text case, a time zone name can be specified in any of the ways + described in . + The interval case is only useful for zones that have fixed offsets from + UTC, so it is not very common in practice. + + + + The syntax AT LOCAL may be used as shorthand for + AT TIME ZONE local, where + local is the session's + TimeZone value. + + + + Examples (assuming the current setting + is America/Los_Angeles): + +SELECT TIMESTAMP '2001-02-16 20:38:40' AT TIME ZONE 'America/Denver'; +Result: 2001-02-16 19:38:40-08 +SELECT TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40-05' AT TIME ZONE 'America/Denver'; +Result: 2001-02-16 18:38:40 +SELECT TIMESTAMP '2001-02-16 20:38:40' AT TIME ZONE 'Asia/Tokyo' AT TIME ZONE 'America/Chicago'; +Result: 2001-02-16 05:38:40 +SELECT TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40-05' AT LOCAL; +Result: 2001-02-16 17:38:40 +SELECT TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40-05' AT TIME ZONE '+05'; +Result: 2001-02-16 20:38:40 +SELECT TIME WITH TIME ZONE '20:38:40-05' AT LOCAL; +Result: 17:38:40 + + The first example adds a time zone to a value that lacks it, and + displays the value using the current TimeZone + setting. The second example shifts the time stamp with time zone value + to the specified time zone, and returns the value without a time zone. + This allows storage and display of values different from the current + TimeZone setting. The third example converts + Tokyo time to Chicago time. The fourth example shifts the time stamp + with time zone value to the time zone currently specified by the + TimeZone setting and returns the value without a + time zone. The fifth example demonstrates that the sign in a POSIX-style + time zone specification has the opposite meaning of the sign in an + ISO-8601 datetime literal, as described in + and . + + + + The sixth example is a cautionary tale. Due to the fact that there is no + date associated with the input value, the conversion is made using the + current date of the session. Therefore, this static example may show a wrong + result depending on the time of the year it is viewed because + 'America/Los_Angeles' observes Daylight Savings Time. + + + + The function timezone(zone, + timestamp) is equivalent to the SQL-conforming construct + timestamp AT TIME ZONE + zone. + + + + The function timezone(zone, + time) is equivalent to the SQL-conforming construct + time AT TIME ZONE + zone. + + + + The function timezone(timestamp) + is equivalent to the SQL-conforming construct timestamp + AT LOCAL. + + + + The function timezone(time) + is equivalent to the SQL-conforming construct time + AT LOCAL. + +
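+
+   As an illustrative sketch of the function-style spelling (not one of the
+   examples above; it likewise assumes the session's TimeZone setting is
+   America/New_York... rather, America/Los_Angeles as stated earlier), the
+   two forms below are interchangeable:
+
+SELECT timezone('America/Denver', TIMESTAMP '2001-02-16 20:38:40');
+Result: 2001-02-16 19:38:40-08
+SELECT TIMESTAMP '2001-02-16 20:38:40' AT TIME ZONE 'America/Denver';
+Result: 2001-02-16 19:38:40-08
+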
+ + + Current Date/Time + + + date + current + + + + time + current + + + + PostgreSQL provides a number of functions + that return values related to the current date and time. These + SQL-standard functions all return values based on the start time of + the current transaction: + +CURRENT_DATE +CURRENT_TIME +CURRENT_TIMESTAMP +CURRENT_TIME(precision) +CURRENT_TIMESTAMP(precision) +LOCALTIME +LOCALTIMESTAMP +LOCALTIME(precision) +LOCALTIMESTAMP(precision) + + + + + CURRENT_TIME and + CURRENT_TIMESTAMP deliver values with time zone; + LOCALTIME and + LOCALTIMESTAMP deliver values without time zone. + + + + CURRENT_TIME, + CURRENT_TIMESTAMP, + LOCALTIME, and + LOCALTIMESTAMP + can optionally take + a precision parameter, which causes the result to be rounded + to that many fractional digits in the seconds field. Without a precision parameter, + the result is given to the full available precision. + + + + Some examples: + +SELECT CURRENT_TIME; +Result: 14:39:53.662522-05 +SELECT CURRENT_DATE; +Result: 2019-12-23 +SELECT CURRENT_TIMESTAMP; +Result: 2019-12-23 14:39:53.662522-05 +SELECT CURRENT_TIMESTAMP(2); +Result: 2019-12-23 14:39:53.66-05 +SELECT LOCALTIMESTAMP; +Result: 2019-12-23 14:39:53.662522 + + + + + Since these functions return + the start time of the current transaction, their values do not + change during the transaction. This is considered a feature: + the intent is to allow a single transaction to have a consistent + notion of the current time, so that multiple + modifications within the same transaction bear the same + time stamp. + + + + + Other database systems might advance these values more + frequently. + + + + + PostgreSQL also provides functions that + return the start time of the current statement, as well as the actual + current time at the instant the function is called. The complete list + of non-SQL-standard time functions is: + +transaction_timestamp() +statement_timestamp() +clock_timestamp() +timeofday() +now() + + + + + transaction_timestamp() is equivalent to + CURRENT_TIMESTAMP, but is named to clearly reflect + what it returns. + statement_timestamp() returns the start time of the current + statement (more specifically, the time of receipt of the latest command + message from the client). + statement_timestamp() and transaction_timestamp() + return the same value during the first statement of a transaction, but might + differ during subsequent statements. + clock_timestamp() returns the actual current time, and + therefore its value changes even within a single SQL statement. + timeofday() is a historical + PostgreSQL function. Like + clock_timestamp(), it returns the actual current time, + but as a formatted text string rather than a timestamp + with time zone value. + now() is a traditional PostgreSQL + equivalent to transaction_timestamp(). + + + + All the date/time data types also accept the special literal value + now to specify the current date and time (again, + interpreted as the transaction start time). Thus, + the following three all return the same result: + +SELECT CURRENT_TIMESTAMP; +SELECT now(); +SELECT TIMESTAMP 'now'; -- but see tip below + + + + + + Do not use the third form when specifying a value to be evaluated later, + for example in a DEFAULT clause for a table column. + The system will convert now + to a timestamp as soon as the constant is parsed, so that when + the default value is needed, + the time of the table creation would be used! 
The first two + forms will not be evaluated until the default value is used, + because they are function calls. Thus they will give the desired + behavior of defaulting to the time of row insertion. + (See also .) + + + + + + Delaying Execution + + + pg_sleep + + + pg_sleep_for + + + pg_sleep_until + + + sleep + + + delay + + + + The following functions are available to delay execution of the server + process: + +pg_sleep ( double precision ) +pg_sleep_for ( interval ) +pg_sleep_until ( timestamp with time zone ) + + + pg_sleep makes the current session's process + sleep until the given number of seconds have + elapsed. Fractional-second delays can be specified. + pg_sleep_for is a convenience function to + allow the sleep time to be specified as an interval. + pg_sleep_until is a convenience function for when + a specific wake-up time is desired. + For example: + + +SELECT pg_sleep(1.5); +SELECT pg_sleep_for('5 minutes'); +SELECT pg_sleep_until('tomorrow 03:00'); + + + + + + The effective resolution of the sleep interval is platform-specific; + 0.01 seconds is a common value. The sleep delay will be at least as long + as specified. It might be longer depending on factors such as server load. + In particular, pg_sleep_until is not guaranteed to + wake up exactly at the specified time, but it will not wake up any earlier. + + + + + + Make sure that your session does not hold more locks than necessary + when calling pg_sleep or its variants. Otherwise + other sessions might have to wait for your sleeping process, slowing down + the entire system. + + + + +
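+
+   Because these functions sleep without ending the current transaction,
+   they can be combined with the current-time functions described earlier
+   to observe the difference between transaction time and wall-clock time.
+   A minimal sketch (the exact timestamps will of course vary):
+
+BEGIN;
+SELECT transaction_timestamp();  -- fixed for the whole transaction
+SELECT pg_sleep(1.5);
+SELECT transaction_timestamp(),  -- same value as before the sleep
+       clock_timestamp();        -- roughly 1.5 seconds later
+COMMIT;
+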
diff --git a/doc/src/sgml/func/func-enum.sgml b/doc/src/sgml/func/func-enum.sgml new file mode 100644 index 0000000000000..6227afe4057ba --- /dev/null +++ b/doc/src/sgml/func/func-enum.sgml @@ -0,0 +1,121 @@ + + Enum Support Functions + + + For enum types (described in ), + there are several functions that allow cleaner programming without + hard-coding particular values of an enum type. + These are listed in . The examples + assume an enum type created as: + + +CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple'); + + + + + + Enum Support Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + enum_first + + enum_first ( anyenum ) + anyenum + + + Returns the first value of the input enum type. + + + enum_first(null::rainbow) + red + + + + + + enum_last + + enum_last ( anyenum ) + anyenum + + + Returns the last value of the input enum type. + + + enum_last(null::rainbow) + purple + + + + + + enum_range + + enum_range ( anyenum ) + anyarray + + + Returns all values of the input enum type in an ordered array. + + + enum_range(null::rainbow) + {red,orange,yellow,&zwsp;green,blue,purple} + + + + + enum_range ( anyenum, anyenum ) + anyarray + + + Returns the range between the two given enum values, as an ordered + array. The values must be from the same enum type. If the first + parameter is null, the result will start with the first value of + the enum type. + If the second parameter is null, the result will end with the last + value of the enum type. + + + enum_range('orange'::rainbow, 'green'::rainbow) + {orange,yellow,green} + + + enum_range(NULL, 'green'::rainbow) + {red,orange,&zwsp;yellow,green} + + + enum_range('orange'::rainbow, NULL) + {orange,yellow,green,&zwsp;blue,purple} + + + + +
+ + + Notice that except for the two-argument form of enum_range, + these functions disregard the specific value passed to them; they care + only about its declared data type. Either null or a specific value of + the type can be passed, with the same result. It is more common to + apply these functions to a table column or function argument than to + a hardwired type name as used in the examples. + +
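+
+   For instance, given a hypothetical table paint with a
+   column color of type rainbow, the last
+   value of the enum can be selected without hard-coding it (an
+   illustrative sketch):
+
+SELECT * FROM paint WHERE color = enum_last(color);
+
+   Here enum_last disregards the particular
+   color value in each row and looks only at the column's
+   declared type, so the query returns the rows whose color is
+   purple.
+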
diff --git a/doc/src/sgml/func/func-event-triggers.sgml b/doc/src/sgml/func/func-event-triggers.sgml new file mode 100644 index 0000000000000..9f3f51e9f5133 --- /dev/null +++ b/doc/src/sgml/func/func-event-triggers.sgml @@ -0,0 +1,332 @@ + + Event Trigger Functions + + + PostgreSQL provides these helper functions + to retrieve information from event triggers. + + + + For more information about event triggers, + see . + + + + Capturing Changes at Command End + + + pg_event_trigger_ddl_commands + + + +pg_event_trigger_ddl_commands () setof record + + + + pg_event_trigger_ddl_commands returns a list of + DDL commands executed by each user action, + when invoked in a function attached to a + ddl_command_end event trigger. If called in any other + context, an error is raised. + pg_event_trigger_ddl_commands returns one row for each + base command executed; some commands that are a single SQL sentence + may return more than one row. This function returns the following + columns: + + + + + + Name + Type + Description + + + + + + classid + oid + OID of catalog the object belongs in + + + objid + oid + OID of the object itself + + + objsubid + integer + Sub-object ID (e.g., attribute number for a column) + + + command_tag + text + Command tag + + + object_type + text + Type of the object + + + schema_name + text + + Name of the schema the object belongs in, if any; otherwise NULL. + No quoting is applied. + + + + object_identity + text + + Text rendering of the object identity, schema-qualified. Each + identifier included in the identity is quoted if necessary. + + + + in_extension + boolean + True if the command is part of an extension script + + + command + pg_ddl_command + + A complete representation of the command, in internal format. + This cannot be output directly, but it can be passed to other + functions to obtain different pieces of information about the + command. + + + + + + + + + + Processing Objects Dropped by a DDL Command + + + pg_event_trigger_dropped_objects + + + +pg_event_trigger_dropped_objects () setof record + + + + pg_event_trigger_dropped_objects returns a list of all objects + dropped by the command in whose sql_drop event it is called. + If called in any other context, an error is raised. + This function returns the following columns: + + + + + + Name + Type + Description + + + + + + classid + oid + OID of catalog the object belonged in + + + objid + oid + OID of the object itself + + + objsubid + integer + Sub-object ID (e.g., attribute number for a column) + + + original + boolean + True if this was one of the root object(s) of the deletion + + + normal + boolean + + True if there was a normal dependency relationship + in the dependency graph leading to this object + + + + is_temporary + boolean + + True if this was a temporary object + + + + object_type + text + Type of the object + + + schema_name + text + + Name of the schema the object belonged in, if any; otherwise NULL. + No quoting is applied. + + + + object_name + text + + Name of the object, if the combination of schema and name can be + used as a unique identifier for the object; otherwise NULL. + No quoting is applied, and name is never schema-qualified. + + + + object_identity + text + + Text rendering of the object identity, schema-qualified. Each + identifier included in the identity is quoted if necessary. 
+ + + + address_names + text[] + + An array that, together with object_type and + address_args, can be used by + the pg_get_object_address function to + recreate the object address in a remote server containing an + identically named object of the same kind. + + + + address_args + text[] + + Complement for address_names + + + + + + + + + The pg_event_trigger_dropped_objects function can be used + in an event trigger like this: + +CREATE FUNCTION test_event_trigger_for_drops() + RETURNS event_trigger LANGUAGE plpgsql AS $$ +DECLARE + obj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + RAISE NOTICE '% dropped object: % %.% %', + tg_tag, + obj.object_type, + obj.schema_name, + obj.object_name, + obj.object_identity; + END LOOP; +END; +$$; +CREATE EVENT TRIGGER test_event_trigger_for_drops + ON sql_drop + EXECUTE FUNCTION test_event_trigger_for_drops(); + + + + + + Handling a Table Rewrite Event + + + The functions shown in + + provide information about a table for which a + table_rewrite event has just been called. + If called in any other context, an error is raised. + + + + Table Rewrite Information Functions + + + + + Function + + + Description + + + + + + + + + pg_event_trigger_table_rewrite_oid + + pg_event_trigger_table_rewrite_oid () + oid + + + Returns the OID of the table about to be rewritten. + + + + + + + pg_event_trigger_table_rewrite_reason + + pg_event_trigger_table_rewrite_reason () + integer + + + Returns a code explaining the reason(s) for rewriting. The value is + a bitmap built from the following values: 1 + (the table has changed its persistence), 2 + (default value of a column has changed), 4 + (a column has a new data type) and 8 + (the table access method has changed). + + + + +
+ + + These functions can be used in an event trigger like this: + +CREATE FUNCTION test_event_trigger_table_rewrite_oid() + RETURNS event_trigger + LANGUAGE plpgsql AS +$$ +BEGIN + RAISE NOTICE 'rewriting table % for reason %', + pg_event_trigger_table_rewrite_oid()::regclass, + pg_event_trigger_table_rewrite_reason(); +END; +$$; + +CREATE EVENT TRIGGER test_table_rewrite_oid + ON table_rewrite + EXECUTE FUNCTION test_event_trigger_table_rewrite_oid(); + + +
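+
+   A ddl_command_end trigger can be written in the same
+   style. The following sketch (the function and trigger names are
+   arbitrary, not part of the server API) logs each command reported by
+   pg_event_trigger_ddl_commands:
+
+CREATE FUNCTION log_ddl_commands()
+  RETURNS event_trigger LANGUAGE plpgsql AS $$
+DECLARE
+    obj record;
+BEGIN
+    FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands()
+    LOOP
+        RAISE NOTICE 'command: % on % %',
+            obj.command_tag,
+            obj.object_type,
+            obj.object_identity;
+    END LOOP;
+END;
+$$;
+CREATE EVENT TRIGGER log_ddl_commands
+  ON ddl_command_end
+  EXECUTE FUNCTION log_ddl_commands();
+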
+
diff --git a/doc/src/sgml/func/func-formatting.sgml b/doc/src/sgml/func/func-formatting.sgml new file mode 100644 index 0000000000000..df05e5c167691 --- /dev/null +++ b/doc/src/sgml/func/func-formatting.sgml @@ -0,0 +1,1197 @@ + + Data Type Formatting Functions + + + formatting + + + + The PostgreSQL formatting functions + provide a powerful set of tools for converting various data types + (date/time, integer, floating point, numeric) to formatted strings + and for converting from formatted strings to specific data types. + lists them. + These functions all follow a common calling convention: the first + argument is the value to be formatted and the second argument is a + template that defines the output or input format. + + + + Formatting Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + to_char + + to_char ( timestamp, text ) + text + + + to_char ( timestamp with time zone, text ) + text + + + Converts time stamp to string according to the given format. + + + to_char(timestamp '2002-04-20 17:31:12.66', 'HH12:MI:SS') + 05:31:12 + + + + + + to_char ( interval, text ) + text + + + Converts interval to string according to the given format. + + + to_char(interval '15h 2m 12s', 'HH24:MI:SS') + 15:02:12 + + + + + + to_char ( numeric_type, text ) + text + + + Converts number to string according to the given format; available + for integer, bigint, numeric, + real, double precision. + + + to_char(125, '999') + 125 + + + to_char(125.8::real, '999D9') + 125.8 + + + to_char(-125.8, '999D99S') + 125.80- + + + + + + + to_date + + to_date ( text, text ) + date + + + Converts string to date according to the given format. + + + to_date('05 Dec 2000', 'DD Mon YYYY') + 2000-12-05 + + + + + + + to_number + + to_number ( text, text ) + numeric + + + Converts string to numeric according to the given format. + + + to_number('12,454.8-', '99G999D9S') + -12454.8 + + + + + + + to_timestamp + + to_timestamp ( text, text ) + timestamp with time zone + + + Converts string to time stamp according to the given format. + (See also to_timestamp(double precision) in + .) + + + to_timestamp('05 Dec 2000', 'DD Mon YYYY') + 2000-12-05 00:00:00-05 + + + + +
+ + + + to_timestamp and to_date + exist to handle input formats that cannot be converted by + simple casting. For most standard date/time formats, simply casting the + source string to the required data type works, and is much easier. + Similarly, to_number is unnecessary for standard numeric + representations. + + + + + In a to_char output template string, there are certain + patterns that are recognized and replaced with appropriately-formatted + data based on the given value. Any text that is not a template pattern is + simply copied verbatim. Similarly, in an input template string (for the + other functions), template patterns identify the values to be supplied by + the input data string. If there are characters in the template string + that are not template patterns, the corresponding characters in the input + data string are simply skipped over (whether or not they are equal to the + template string characters). + + + + shows the + template patterns available for formatting date and time values. + + + + Template Patterns for Date/Time Formatting + + + + + + Pattern + Description + + + + + HH + hour of day (01–12) + + + HH12 + hour of day (01–12) + + + HH24 + hour of day (00–23) + + + MI + minute (00–59) + + + SS + second (00–59) + + + MS + millisecond (000–999) + + + US + microsecond (000000–999999) + + + FF1 + tenth of second (0–9) + + + FF2 + hundredth of second (00–99) + + + FF3 + millisecond (000–999) + + + FF4 + tenth of a millisecond (0000–9999) + + + FF5 + hundredth of a millisecond (00000–99999) + + + FF6 + microsecond (000000–999999) + + + SSSS, SSSSS + seconds past midnight (0–86399) + + + AM, am, + PM or pm + meridiem indicator (without periods) + + + A.M., a.m., + P.M. or p.m. + meridiem indicator (with periods) + + + Y,YYY + year (4 or more digits) with comma + + + YYYY + year (4 or more digits) + + + YYY + last 3 digits of year + + + YY + last 2 digits of year + + + Y + last digit of year + + + IYYY + ISO 8601 week-numbering year (4 or more digits) + + + IYY + last 3 digits of ISO 8601 week-numbering year + + + IY + last 2 digits of ISO 8601 week-numbering year + + + I + last digit of ISO 8601 week-numbering year + + + BC, bc, + AD or ad + era indicator (without periods) + + + B.C., b.c., + A.D. or a.d. 
+ era indicator (with periods) + + + MONTH + full upper case month name (blank-padded to 9 chars) + + + Month + full capitalized month name (blank-padded to 9 chars) + + + month + full lower case month name (blank-padded to 9 chars) + + + MON + abbreviated upper case month name (3 chars in English, localized lengths vary) + + + Mon + abbreviated capitalized month name (3 chars in English, localized lengths vary) + + + mon + abbreviated lower case month name (3 chars in English, localized lengths vary) + + + MM + month number (01–12) + + + DAY + full upper case day name (blank-padded to 9 chars) + + + Day + full capitalized day name (blank-padded to 9 chars) + + + day + full lower case day name (blank-padded to 9 chars) + + + DY + abbreviated upper case day name (3 chars in English, localized lengths vary) + + + Dy + abbreviated capitalized day name (3 chars in English, localized lengths vary) + + + dy + abbreviated lower case day name (3 chars in English, localized lengths vary) + + + DDD + day of year (001–366) + + + IDDD + day of ISO 8601 week-numbering year (001–371; day 1 of the year is Monday of the first ISO week) + + + DD + day of month (01–31) + + + D + day of the week, Sunday (1) to Saturday (7) + + + ID + ISO 8601 day of the week, Monday (1) to Sunday (7) + + + W + week of month (1–5) (the first week starts on the first day of the month) + + + WW + week number of year (1–53) (the first week starts on the first day of the year) + + + IW + week number of ISO 8601 week-numbering year (01–53; the first Thursday of the year is in week 1) + + + CC + century (2 digits) (the twenty-first century starts on 2001-01-01) + + + J + Julian Date (integer days since November 24, 4714 BC at local + midnight; see ) + + + Q + quarter + + + RM + month in upper case Roman numerals (I–XII; I=January) + + + rm + month in lower case Roman numerals (i–xii; i=January) + + + TZ + upper case time-zone abbreviation + + + tz + lower case time-zone abbreviation + + + TZH + time-zone hours + + + TZM + time-zone minutes + + + OF + time-zone offset from UTC (HH + or HH:MM) + + + +
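+
+   As an illustration of combining several of these patterns (a sketch, not
+   part of the reference tables):
+
+SELECT to_char(TIMESTAMP '2001-02-16 20:38:40', 'Dy, DD Mon YYYY HH24:MI:SS');
+Result: Fri, 16 Feb 2001 20:38:40
+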
+ + + Modifiers can be applied to any template pattern to alter its + behavior. For example, FMMonth + is the Month pattern with the + FM modifier. + shows the + modifier patterns for date/time formatting. + + + + Template Pattern Modifiers for Date/Time Formatting + + + + Modifier + Description + Example + + + + + FM prefix + fill mode (suppress leading zeroes and padding blanks) + FMMonth + + + TH suffix + upper case ordinal number suffix + DDTH, e.g., 12TH + + + th suffix + lower case ordinal number suffix + DDth, e.g., 12th + + + FX prefix + fixed format global option (see usage notes) + FX Month DD Day + + + TM prefix + translation mode (use localized day and month names based on + ) + TMMonth + + + SP suffix + spell mode (not implemented) + DDSP + + + +
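+
+   For example, the FM modifier suppresses the padding that
+   the Month pattern would otherwise produce; the first
+   result below is blank-padded to nine characters, the second is not
+   (an illustrative comparison):
+
+SELECT to_char(DATE '2001-02-16', 'Month');
+Result: February 
+SELECT to_char(DATE '2001-02-16', 'FMMonth');
+Result: February
+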
+ + + Usage notes for date/time formatting: + + + + + FM suppresses leading zeroes and trailing blanks + that would otherwise be added to make the output of a pattern be + fixed-width. In PostgreSQL, + FM modifies only the next specification, while in + Oracle FM affects all subsequent + specifications, and repeated FM modifiers + toggle fill mode on and off. + + + + + + TM suppresses trailing blanks whether or + not FM is specified. + + + + + + to_timestamp and to_date + ignore letter case in the input; so for + example MON, Mon, + and mon all accept the same strings. When using + the TM modifier, case-folding is done according to + the rules of the function's input collation (see + ). + + + + + + to_timestamp and to_date + skip multiple blank spaces at the beginning of the input string and + around date and time values unless the FX option is used. For example, + to_timestamp(' 2000    JUN', 'YYYY MON') and + to_timestamp('2000 - JUN', 'YYYY-MON') work, but + to_timestamp('2000    JUN', 'FXYYYY MON') returns an error + because to_timestamp expects only a single space. + FX must be specified as the first item in + the template. + + + + + + A separator (a space or non-letter/non-digit character) in the template string of + to_timestamp and to_date + matches any single separator in the input string or is skipped, + unless the FX option is used. + For example, to_timestamp('2000JUN', 'YYYY///MON') and + to_timestamp('2000/JUN', 'YYYY MON') work, but + to_timestamp('2000//JUN', 'YYYY/MON') + returns an error because the number of separators in the input string + exceeds the number of separators in the template. + + + If FX is specified, a separator in the template string + matches exactly one character in the input string. But note that the + input string character is not required to be the same as the separator from the template string. + For example, to_timestamp('2000/JUN', 'FXYYYY MON') + works, but to_timestamp('2000/JUN', 'FXYYYY  MON') + returns an error because the second space in the template string consumes + the letter J from the input string. + + + + + + A TZH template pattern can match a signed number. + Without the FX option, minus signs may be ambiguous, + and could be interpreted as a separator. + This ambiguity is resolved as follows: If the number of separators before + TZH in the template string is less than the number of + separators before the minus sign in the input string, the minus sign + is interpreted as part of TZH. + Otherwise, the minus sign is considered to be a separator between values. + For example, to_timestamp('2000 -10', 'YYYY TZH') matches + -10 to TZH, but + to_timestamp('2000 -10', 'YYYY  TZH') + matches 10 to TZH. + + + + + + Ordinary text is allowed in to_char + templates and will be output literally. You can put a substring + in double quotes to force it to be interpreted as literal text + even if it contains template patterns. For example, in + '"Hello Year "YYYY', the YYYY + will be replaced by the year data, but the single Y in Year + will not be. + In to_date, to_number, + and to_timestamp, literal text and double-quoted + strings result in skipping the number of characters contained in the + string; for example "XX" skips two input characters + (whether or not they are XX). + + + + Prior to PostgreSQL 12, it was possible to + skip arbitrary text in the input string using non-letter or non-digit + characters. For example, + to_timestamp('2000y6m1d', 'yyyy-MM-DD') used to + work. Now you can only use letter characters for this purpose. 
For example, + to_timestamp('2000y6m1d', 'yyyytMMtDDt') and + to_timestamp('2000y6m1d', 'yyyy"y"MM"m"DD"d"') + skip y, m, and + d. + + + + + + + If you want to have a double quote in the output you must + precede it with a backslash, for example '\"YYYY + Month\"'. + Backslashes are not otherwise special outside of double-quoted + strings. Within a double-quoted string, a backslash causes the + next character to be taken literally, whatever it is (but this + has no special effect unless the next character is a double quote + or another backslash). + + + + + + In to_timestamp and to_date, + if the year format specification is less than four digits, e.g., + YYY, and the supplied year is less than four digits, + the year will be adjusted to be nearest to the year 2020, e.g., + 95 becomes 1995. + + + + + + In to_timestamp and to_date, + negative years are treated as signifying BC. If you write both a + negative year and an explicit BC field, you get AD + again. An input of year zero is treated as 1 BC. + + + + + + In to_timestamp and to_date, + the YYYY conversion has a restriction when + processing years with more than 4 digits. You must + use some non-digit character or template after YYYY, + otherwise the year is always interpreted as 4 digits. For example + (with the year 20000): + to_date('200001130', 'YYYYMMDD') will be + interpreted as a 4-digit year; instead use a non-digit + separator after the year, like + to_date('20000-1130', 'YYYY-MMDD') or + to_date('20000Nov30', 'YYYYMonDD'). + + + + + + In to_timestamp and to_date, + the CC (century) field is accepted but ignored + if there is a YYY, YYYY or + Y,YYY field. If CC is used with + YY or Y then the result is + computed as that year in the specified century. If the century is + specified but the year is not, the first year of the century + is assumed. + + + + + + In to_timestamp and to_date, + weekday names or numbers (DAY, D, + and related field types) are accepted but are ignored for purposes of + computing the result. The same is true for quarter + (Q) fields. + + + + + + In to_timestamp and to_date, + an ISO 8601 week-numbering date (as distinct from a Gregorian date) + can be specified in one of two ways: + + + + Year, week number, and weekday: for + example to_date('2006-42-4', 'IYYY-IW-ID') + returns the date 2006-10-19. + If you omit the weekday it is assumed to be 1 (Monday). + + + + + Year and day of year: for example to_date('2006-291', + 'IYYY-IDDD') also returns 2006-10-19. + + + + + + Attempting to enter a date using a mixture of ISO 8601 week-numbering + fields and Gregorian date fields is nonsensical, and will cause an + error. In the context of an ISO 8601 week-numbering year, the + concept of a month or day of month has no + meaning. In the context of a Gregorian year, the ISO week has no + meaning. + + + + While to_date will reject a mixture of + Gregorian and ISO week-numbering date + fields, to_char will not, since output format + specifications like YYYY-MM-DD (IYYY-IDDD) can be + useful. But avoid writing something like IYYY-MM-DD; + that would yield surprising results near the start of the year. + (See for more + information.) + + + + + + + In to_timestamp, millisecond + (MS) or microsecond (US) + fields are used as the + seconds digits after the decimal point. For example + to_timestamp('12.3', 'SS.MS') is not 3 milliseconds, + but 300, because the conversion treats it as 12 + 0.3 seconds. + So, for the format SS.MS, the input values + 12.3, 12.30, + and 12.300 specify the + same number of milliseconds. 
To get three milliseconds, one must write + 12.003, which the conversion treats as + 12 + 0.003 = 12.003 seconds. + + + + Here is a more + complex example: + to_timestamp('15:12:02.020.001230', 'HH24:MI:SS.MS.US') + is 15 hours, 12 minutes, and 2 seconds + 20 milliseconds + + 1230 microseconds = 2.021230 seconds. + + + + + + to_char(..., 'ID')'s day of the week numbering + matches the extract(isodow from ...) function, but + to_char(..., 'D')'s does not match + extract(dow from ...)'s day numbering. + + + + + + to_char(interval) formats HH and + HH12 as shown on a 12-hour clock, for example zero hours + and 36 hours both output as 12, while HH24 + outputs the full hour value, which can exceed 23 in + an interval value. + + + + + + + + shows the + template patterns available for formatting numeric values. + + + + Template Patterns for Numeric Formatting + + + + + + Pattern + Description + + + + + 9 + digit position (can be dropped if insignificant) + + + 0 + digit position (will not be dropped, even if insignificant) + + + . (period) + decimal point + + + , (comma) + group (thousands) separator + + + PR + negative value in angle brackets + + + S + sign anchored to number (uses locale) + + + L + currency symbol (uses locale) + + + D + decimal point (uses locale) + + + G + group separator (uses locale) + + + MI + minus sign in specified position (if number < 0) + + + PL + plus sign in specified position (if number > 0) + + + SG + plus/minus sign in specified position + + + RN or rn + Roman numeral (values between 1 and 3999) + + + TH or th + ordinal number suffix + + + V + shift specified number of digits (see notes) + + + EEEE + exponent for scientific notation + + + +
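+
+   The difference between the 9 and 0 digit
+   positions (see the first usage note below) can be seen in this
+   illustrative comparison; 0999 forces the leading zeroes
+   to be printed, while 9999 pads with spaces, and one extra
+   column is reserved for a sign in both cases:
+
+SELECT to_char(42, '9999');
+Result:    42
+SELECT to_char(42, '0999');
+Result:  0042
+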
+ + + Usage notes for numeric formatting: + + + + + 0 specifies a digit position that will always be printed, + even if it contains a leading/trailing zero. 9 also + specifies a digit position, but if it is a leading zero then it will + be replaced by a space, while if it is a trailing zero and fill mode + is specified then it will be deleted. (For to_number(), + these two pattern characters are equivalent.) + + + + + + If the format provides fewer fractional digits than the number being + formatted, to_char() will round the number to + the specified number of fractional digits. + + + + + + The pattern characters S, L, D, + and G represent the sign, currency symbol, decimal point, + and thousands separator characters defined by the current locale + (see + and ). The pattern characters period + and comma represent those exact characters, with the meanings of + decimal point and thousands separator, regardless of locale. + + + + + + If no explicit provision is made for a sign + in to_char()'s pattern, one column will be reserved for + the sign, and it will be anchored to (appear just left of) the + number. If S appears just left of some 9's, + it will likewise be anchored to the number. + + + + + + A sign formatted using SG, PL, or + MI is not anchored to + the number; for example, + to_char(-12, 'MI9999') produces '-  12' + but to_char(-12, 'S9999') produces '  -12'. + (The Oracle implementation does not allow the use of + MI before 9, but rather + requires that 9 precede + MI.) + + + + + + TH does not convert values less than zero + and does not convert fractional numbers. + + + + + + PL, SG, and + TH are PostgreSQL + extensions. + + + + + + In to_number, if non-data template patterns such + as L or TH are used, the + corresponding number of input characters are skipped, whether or not + they match the template pattern, unless they are data characters + (that is, digits, sign, decimal point, or comma). For + example, TH would skip two non-data characters. + + + + + + V with to_char + multiplies the input values by + 10^n, where + n is the number of digits following + V. V with + to_number divides in a similar manner. + The V can be thought of as marking the position + of an implicit decimal point in the input or output string. + to_char and to_number + do not support the use of + V combined with a decimal point + (e.g., 99.9V99 is not allowed). + + + + + + EEEE (scientific notation) cannot be used in + combination with any of the other formatting patterns or + modifiers other than digit and decimal point patterns, and must be at the end of the format string + (e.g., 9.99EEEE is a valid pattern). + + + + + + In to_number(), the RN + pattern converts Roman numerals (in standard form) to numbers. + Input is case-insensitive, so RN + and rn are equivalent. RN + cannot be used in combination with any other formatting patterns or + modifiers except FM, which is applicable only + in to_char() and is ignored + in to_number(). + + + + + + + Certain modifiers can be applied to any template pattern to alter its + behavior. For example, FM99.99 + is the 99.99 pattern with the + FM modifier. + shows the + modifier patterns for numeric formatting. + + + + Template Pattern Modifiers for Numeric Formatting + + + + Modifier + Description + Example + + + + + FM prefix + fill mode (suppress trailing zeroes and padding blanks) + FM99.99 + + + TH suffix + upper case ordinal number suffix + 999TH + + + th suffix + lower case ordinal number suffix + 999th + + + +
+ + + shows some + examples of the use of the to_char function. + + + + <function>to_char</function> Examples + + + + Expression + Result + + + + + to_char(current_timestamp, 'Day, DD  HH12:MI:SS') + 'Tuesday  , 06  05:39:18' + + + to_char(current_timestamp, 'FMDay, FMDD  HH12:MI:SS') + 'Tuesday, 6  05:39:18' + + + to_char(current_timestamp AT TIME ZONE + 'UTC', 'YYYY-MM-DD"T"HH24:MI:SS"Z"') + '2022-12-06T05:39:18Z', + ISO 8601 extended format + + + to_char(-0.1, '99.99') + '  -.10' + + + to_char(-0.1, 'FM9.99') + '-.1' + + + to_char(-0.1, 'FM90.99') + '-0.1' + + + to_char(0.1, '0.9') + ' 0.1' + + + to_char(12, '9990999.9') + '    0012.0' + + + to_char(12, 'FM9990999.9') + '0012.' + + + to_char(485, '999') + ' 485' + + + to_char(-485, '999') + '-485' + + + to_char(485, '9 9 9') + ' 4 8 5' + + + to_char(1485, '9,999') + ' 1,485' + + + to_char(1485, '9G999') + ' 1 485' + + + to_char(148.5, '999.999') + ' 148.500' + + + to_char(148.5, 'FM999.999') + '148.5' + + + to_char(148.5, 'FM999.990') + '148.500' + + + to_char(148.5, '999D999') + ' 148,500' + + + to_char(3148.5, '9G999D999') + ' 3 148,500' + + + to_char(-485, '999S') + '485-' + + + to_char(-485, '999MI') + '485-' + + + to_char(485, '999MI') + '485 ' + + + to_char(485, 'FM999MI') + '485' + + + to_char(485, 'PL999') + '+485' + + + to_char(485, 'SG999') + '+485' + + + to_char(-485, 'SG999') + '-485' + + + to_char(-485, '9SG99') + '4-85' + + + to_char(-485, '999PR') + '<485>' + + + to_char(485, 'L999') + 'DM 485' + + + to_char(485, 'RN') + '        CDLXXXV' + + + to_char(485, 'FMRN') + 'CDLXXXV' + + + to_char(5.2, 'FMRN') + 'V' + + + to_char(482, '999th') + ' 482nd' + + + to_char(485, '"Good number:"999') + 'Good number: 485' + + + to_char(485.8, '"Pre:"999" Post:" .999') + 'Pre: 485 Post: .800' + + + to_char(12, '99V999') + ' 12000' + + + to_char(12.4, '99V999') + ' 12400' + + + to_char(12.45, '99V9') + ' 125' + + + to_char(0.0004859, '9.99EEEE') + ' 4.86e-04' + + + +
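+
+   to_number applies the same templates in the reverse
+   direction. For instance, matching the usage note above about non-data
+   patterns, the TH suffix consumes the two ordinal-suffix
+   characters of the input (an illustrative sketch):
+
+SELECT to_number('42nd', '99TH');
+Result: 42
+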
+ +
diff --git a/doc/src/sgml/func/func-geometry.sgml b/doc/src/sgml/func/func-geometry.sgml new file mode 100644 index 0000000000000..ba203af3bd289 --- /dev/null +++ b/doc/src/sgml/func/func-geometry.sgml @@ -0,0 +1,1261 @@ + + Geometric Functions and Operators + + + The geometric types point, box, + lseg, line, path, + polygon, and circle have a large set of + native support functions and operators, shown in , , and . + + + + Geometric Operators + + + + + Operator + + + Description + + + Example(s) + + + + + + + + geometric_type + point + geometric_type + + + Adds the coordinates of the second point to those of each + point of the first argument, thus performing translation. + Available for point, box, path, + circle. + + + box '(1,1),(0,0)' + point '(2,0)' + (3,1),(2,0) + + + + + + path + path + path + + + Concatenates two open paths (returns NULL if either path is closed). + + + path '[(0,0),(1,1)]' + path '[(2,2),(3,3),(4,4)]' + [(0,0),(1,1),(2,2),(3,3),(4,4)] + + + + + + geometric_type - point + geometric_type + + + Subtracts the coordinates of the second point from those + of each point of the first argument, thus performing translation. + Available for point, box, path, + circle. + + + box '(1,1),(0,0)' - point '(2,0)' + (-1,1),(-2,0) + + + + + + geometric_type * point + geometric_type + + + Multiplies each point of the first argument by the second + point (treating a point as being a complex number + represented by real and imaginary parts, and performing standard + complex multiplication). If one interprets + the second point as a vector, this is equivalent to + scaling the object's size and distance from the origin by the length + of the vector, and rotating it counterclockwise around the origin by + the vector's angle from the x axis. + Available for point, box,Rotating a + box with these operators only moves its corner points: the box is + still considered to have sides parallel to the axes. Hence the box's + size is not preserved, as a true rotation would do. + path, circle. + + + path '((0,0),(1,0),(1,1))' * point '(3.0,0)' + ((0,0),(3,0),(3,3)) + + + path '((0,0),(1,0),(1,1))' * point(cosd(45), sind(45)) + ((0,0),&zwsp;(0.7071067811865475,0.7071067811865475),&zwsp;(0,1.414213562373095)) + + + + + + geometric_type / point + geometric_type + + + Divides each point of the first argument by the second + point (treating a point as being a complex number + represented by real and imaginary parts, and performing standard + complex division). If one interprets + the second point as a vector, this is equivalent to + scaling the object's size and distance from the origin down by the + length of the vector, and rotating it clockwise around the origin by + the vector's angle from the x axis. + Available for point, box, path, + circle. + + + path '((0,0),(1,0),(1,1))' / point '(2.0,0)' + ((0,0),(0.5,0),(0.5,0.5)) + + + path '((0,0),(1,0),(1,1))' / point(cosd(45), sind(45)) + ((0,0),&zwsp;(0.7071067811865476,-0.7071067811865476),&zwsp;(1.4142135623730951,0)) + + + + + + @-@ geometric_type + double precision + + + Computes the total length. + Available for lseg, path. + + + @-@ path '[(0,0),(1,0),(1,1)]' + 2 + + + + + + @@ geometric_type + point + + + Computes the center point. + Available for box, lseg, + polygon, circle. + + + @@ box '(2,2),(0,0)' + (1,1) + + + + + + # geometric_type + integer + + + Returns the number of points. + Available for path, polygon. 
+ + + # path '((1,0),(0,1),(-1,0))' + 3 + + + + + + geometric_type # geometric_type + point + + + Computes the point of intersection, or NULL if there is none. + Available for lseg, line. + + + lseg '[(0,0),(1,1)]' # lseg '[(1,0),(0,1)]' + (0.5,0.5) + + + + + + box # box + box + + + Computes the intersection of two boxes, or NULL if there is none. + + + box '(2,2),(-1,-1)' # box '(1,1),(-2,-2)' + (1,1),(-1,-1) + + + + + + geometric_type ## geometric_type + point + + + Computes the closest point to the first object on the second object. + Available for these pairs of types: + (point, box), + (point, lseg), + (point, line), + (lseg, box), + (lseg, lseg), + (line, lseg). + + + point '(0,0)' ## lseg '[(2,0),(0,2)]' + (1,1) + + + + + + geometric_type <-> geometric_type + double precision + + + Computes the distance between the objects. + Available for all seven geometric types, for all combinations + of point with another geometric type, and for + these additional pairs of types: + (box, lseg), + (lseg, line), + (polygon, circle) + (and the commutator cases). + + + circle '<(0,0),1>' <-> circle '<(5,0),1>' + 3 + + + + + + geometric_type @> geometric_type + boolean + + + Does first object contain second? + Available for these pairs of types: + (box, point), + (box, box), + (path, point), + (polygon, point), + (polygon, polygon), + (circle, point), + (circle, circle). + + + circle '<(0,0),2>' @> point '(1,1)' + t + + + + + + geometric_type <@ geometric_type + boolean + + + Is first object contained in or on second? + Available for these pairs of types: + (point, box), + (point, lseg), + (point, line), + (point, path), + (point, polygon), + (point, circle), + (box, box), + (lseg, box), + (lseg, line), + (polygon, polygon), + (circle, circle). + + + point '(1,1)' <@ circle '<(0,0),2>' + t + + + + + + geometric_type && geometric_type + boolean + + + Do these objects overlap? (One point in common makes this true.) + Available for box, polygon, + circle. + + + box '(1,1),(0,0)' && box '(2,2),(0,0)' + t + + + + + + geometric_type << geometric_type + boolean + + + Is first object strictly left of second? + Available for point, box, + polygon, circle. + + + circle '<(0,0),1>' << circle '<(5,0),1>' + t + + + + + + geometric_type >> geometric_type + boolean + + + Is first object strictly right of second? + Available for point, box, + polygon, circle. + + + circle '<(5,0),1>' >> circle '<(0,0),1>' + t + + + + + + geometric_type &< geometric_type + boolean + + + Does first object not extend to the right of second? + Available for box, polygon, + circle. + + + box '(1,1),(0,0)' &< box '(2,2),(0,0)' + t + + + + + + geometric_type &> geometric_type + boolean + + + Does first object not extend to the left of second? + Available for box, polygon, + circle. + + + box '(3,3),(0,0)' &> box '(2,2),(0,0)' + t + + + + + + geometric_type <<| geometric_type + boolean + + + Is first object strictly below second? + Available for point, box, polygon, + circle. + + + box '(3,3),(0,0)' <<| box '(5,5),(3,4)' + t + + + + + + geometric_type |>> geometric_type + boolean + + + Is first object strictly above second? + Available for point, box, polygon, + circle. + + + box '(5,5),(3,4)' |>> box '(3,3),(0,0)' + t + + + + + + geometric_type &<| geometric_type + boolean + + + Does first object not extend above second? + Available for box, polygon, + circle. + + + box '(1,1),(0,0)' &<| box '(2,2),(0,0)' + t + + + + + + geometric_type |&> geometric_type + boolean + + + Does first object not extend below second? 
+ Available for box, polygon, + circle. + + + box '(3,3),(0,0)' |&> box '(2,2),(0,0)' + t + + + + + + box <^ box + boolean + + + Is first object below second (allows edges to touch)? + + + box '((1,1),(0,0))' <^ box '((2,2),(1,1))' + t + + + + + + box >^ box + boolean + + + Is first object above second (allows edges to touch)? + + + box '((2,2),(1,1))' >^ box '((1,1),(0,0))' + t + + + + + + geometric_type ?# geometric_type + boolean + + + Do these objects intersect? + Available for these pairs of types: + (box, box), + (lseg, box), + (lseg, lseg), + (lseg, line), + (line, box), + (line, line), + (path, path). + + + lseg '[(-1,0),(1,0)]' ?# box '(2,2),(-2,-2)' + t + + + + + + ?- line + boolean + + + ?- lseg + boolean + + + Is line horizontal? + + + ?- lseg '[(-1,0),(1,0)]' + t + + + + + + point ?- point + boolean + + + Are points horizontally aligned (that is, have same y coordinate)? + + + point '(1,0)' ?- point '(0,0)' + t + + + + + + ?| line + boolean + + + ?| lseg + boolean + + + Is line vertical? + + + ?| lseg '[(-1,0),(1,0)]' + f + + + + + + point ?| point + boolean + + + Are points vertically aligned (that is, have same x coordinate)? + + + point '(0,1)' ?| point '(0,0)' + t + + + + + + line ?-| line + boolean + + + lseg ?-| lseg + boolean + + + Are lines perpendicular? + + + lseg '[(0,0),(0,1)]' ?-| lseg '[(0,0),(1,0)]' + t + + + + + + line ?|| line + boolean + + + lseg ?|| lseg + boolean + + + Are lines parallel? + + + lseg '[(-1,0),(1,0)]' ?|| lseg '[(-1,2),(1,2)]' + t + + + + + + geometric_type ~= geometric_type + boolean + + + Are these objects the same? + Available for point, box, + polygon, circle. + + + polygon '((0,0),(1,1))' ~= polygon '((1,1),(0,0))' + t + + + + +
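+
+   As the notes following this table explain, ~= tests
+   whether two objects are the same, while = compares
+   areas for some types. An illustrative sketch with two distinct boxes
+   of equal area:
+
+SELECT box '(2,2),(0,0)' = box '(3,1),(1,-1)';
+Result: t
+SELECT box '(2,2),(0,0)' ~= box '(3,1),(1,-1)';
+Result: f
+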
+ + + + Note that the same as operator, ~=, + represents the usual notion of equality for the point, + box, polygon, and circle types. + Some of the geometric types also have an = operator, but + = compares for equal areas only. + The other scalar comparison operators (<= and so + on), where available for these types, likewise compare areas. + + + + + + Before PostgreSQL 14, the point + is strictly below/above comparison operators point + <<| point and point + |>> point were respectively + called <^ and >^. These + names are still available, but are deprecated and will eventually be + removed. + + + + + Geometric Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + area + + area ( geometric_type ) + double precision + + + Computes area. + Available for box, path, circle. + A path input must be closed, else NULL is returned. + Also, if the path is self-intersecting, the result may be + meaningless. + + + area(box '(2,2),(0,0)') + 4 + + + + + + + center + + center ( geometric_type ) + point + + + Computes center point. + Available for box, circle. + + + center(box '(1,2),(0,0)') + (0.5,1) + + + + + + + diagonal + + diagonal ( box ) + lseg + + + Extracts box's diagonal as a line segment + (same as lseg(box)). + + + diagonal(box '(1,2),(0,0)') + [(1,2),(0,0)] + + + + + + + diameter + + diameter ( circle ) + double precision + + + Computes diameter of circle. + + + diameter(circle '<(0,0),2>') + 4 + + + + + + + height + + height ( box ) + double precision + + + Computes vertical size of box. + + + height(box '(1,2),(0,0)') + 2 + + + + + + + isclosed + + isclosed ( path ) + boolean + + + Is path closed? + + + isclosed(path '((0,0),(1,1),(2,0))') + t + + + + + + + isopen + + isopen ( path ) + boolean + + + Is path open? + + + isopen(path '[(0,0),(1,1),(2,0)]') + t + + + + + + + length + + length ( geometric_type ) + double precision + + + Computes the total length. + Available for lseg, path. + + + length(path '((-1,0),(1,0))') + 4 + + + + + + + npoints + + npoints ( geometric_type ) + integer + + + Returns the number of points. + Available for path, polygon. + + + npoints(path '[(0,0),(1,1),(2,0)]') + 3 + + + + + + + pclose + + pclose ( path ) + path + + + Converts path to closed form. + + + pclose(path '[(0,0),(1,1),(2,0)]') + ((0,0),(1,1),(2,0)) + + + + + + + popen + + popen ( path ) + path + + + Converts path to open form. + + + popen(path '((0,0),(1,1),(2,0))') + [(0,0),(1,1),(2,0)] + + + + + + + radius + + radius ( circle ) + double precision + + + Computes radius of circle. + + + radius(circle '<(0,0),2>') + 2 + + + + + + + slope + + slope ( point, point ) + double precision + + + Computes slope of a line drawn through the two points. + + + slope(point '(0,0)', point '(2,1)') + 0.5 + + + + + + + width + + width ( box ) + double precision + + + Computes horizontal size of box. + + + width(box '(1,2),(0,0)') + 1 + + + + +
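+
+   Several of these functions compose naturally; for example (an
+   illustrative sketch), closing an open path and checking the result:
+
+SELECT isclosed(pclose(path '[(0,0),(1,1),(2,0)]'));
+Result: t
+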
+ + + Geometric Type Conversion Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + box + + box ( circle ) + box + + + Computes box inscribed within the circle. + + + box(circle '<(0,0),2>') + (1.414213562373095,1.414213562373095),&zwsp;(-1.414213562373095,-1.414213562373095) + + + + + + box ( point ) + box + + + Converts point to empty box. + + + box(point '(1,0)') + (1,0),(1,0) + + + + + + box ( point, point ) + box + + + Converts any two corner points to box. + + + box(point '(0,1)', point '(1,0)') + (1,1),(0,0) + + + + + + box ( polygon ) + box + + + Computes bounding box of polygon. + + + box(polygon '((0,0),(1,1),(2,0))') + (2,1),(0,0) + + + + + + + bound_box + + bound_box ( box, box ) + box + + + Computes bounding box of two boxes. + + + bound_box(box '(1,1),(0,0)', box '(4,4),(3,3)') + (4,4),(0,0) + + + + + + + circle + + circle ( box ) + circle + + + Computes smallest circle enclosing box. + + + circle(box '(1,1),(0,0)') + <(0.5,0.5),0.7071067811865476> + + + + + + circle ( point, double precision ) + circle + + + Constructs circle from center and radius. + + + circle(point '(0,0)', 2.0) + <(0,0),2> + + + + + + circle ( polygon ) + circle + + + Converts polygon to circle. The circle's center is the mean of the + positions of the polygon's points, and the radius is the average + distance of the polygon's points from that center. + + + circle(polygon '((0,0),(1,3),(2,0))') + <(1,1),1.6094757082487299> + + + + + + + line + + line ( point, point ) + line + + + Converts two points to the line through them. + + + line(point '(-1,0)', point '(1,0)') + {0,-1,0} + + + + + + + lseg + + lseg ( box ) + lseg + + + Extracts box's diagonal as a line segment. + + + lseg(box '(1,0),(-1,0)') + [(1,0),(-1,0)] + + + + + + lseg ( point, point ) + lseg + + + Constructs line segment from two endpoints. + + + lseg(point '(-1,0)', point '(1,0)') + [(-1,0),(1,0)] + + + + + + + path + + path ( polygon ) + path + + + Converts polygon to a closed path with the same list of points. + + + path(polygon '((0,0),(1,1),(2,0))') + ((0,0),(1,1),(2,0)) + + + + + + + point + + point ( double precision, double precision ) + point + + + Constructs point from its coordinates. + + + point(23.4, -44.5) + (23.4,-44.5) + + + + + + point ( box ) + point + + + Computes center of box. + + + point(box '(1,0),(-1,0)') + (0,0) + + + + + + point ( circle ) + point + + + Computes center of circle. + + + point(circle '<(0,0),2>') + (0,0) + + + + + + point ( lseg ) + point + + + Computes center of line segment. + + + point(lseg '[(-1,0),(1,0)]') + (0,0) + + + + + + point ( polygon ) + point + + + Computes center of polygon (the mean of the + positions of the polygon's points). + + + point(polygon '((0,0),(1,1),(2,0))') + (1,0.3333333333333333) + + + + + + + polygon + + polygon ( box ) + polygon + + + Converts box to a 4-point polygon. + + + polygon(box '(1,1),(0,0)') + ((0,0),(0,1),(1,1),(1,0)) + + + + + + polygon ( circle ) + polygon + + + Converts circle to a 12-point polygon. 
+ + + polygon(circle '<(0,0),2>') + ((-2,0),&zwsp;(-1.7320508075688774,0.9999999999999999),&zwsp;(-1.0000000000000002,1.7320508075688772),&zwsp;(-1.2246063538223773e-16,2),&zwsp;(0.9999999999999996,1.7320508075688774),&zwsp;(1.732050807568877,1.0000000000000007),&zwsp;(2,2.4492127076447545e-16),&zwsp;(1.7320508075688776,-0.9999999999999994),&zwsp;(1.0000000000000009,-1.7320508075688767),&zwsp;(3.673819061467132e-16,-2),&zwsp;(-0.9999999999999987,-1.732050807568878),&zwsp;(-1.7320508075688767,-1.0000000000000009)) + + + + + + polygon ( integer, circle ) + polygon + + + Converts circle to an n-point polygon. + + + polygon(4, circle '<(3,0),1>') + ((2,0),&zwsp;(3,1),&zwsp;(4,1.2246063538223773e-16),&zwsp;(3,-1)) + + + + + + polygon ( path ) + polygon + + + Converts closed path to a polygon with the same list of points. + + + polygon(path '((0,0),(1,1),(2,0))') + ((0,0),(1,1),(2,0)) + + + + + +
+ + + It is possible to access the two component numbers of a point + as though the point were an array with indexes 0 and 1. For example, if + t.p is a point column then + SELECT p[0] FROM t retrieves the X coordinate and + UPDATE t SET p[1] = ... changes the Y coordinate. + In the same way, a value of type box or lseg can be treated + as an array of two point values. + + +
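+
+   For example (a minimal sketch; the VALUES subquery merely
+   supplies a point-typed column to subscript):
+
+SELECT p[0] FROM (VALUES (point '(3.5,7)')) AS t(p);
+Result: 3.5
+SELECT p[1] FROM (VALUES (point '(3.5,7)')) AS t(p);
+Result: 7
+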
diff --git a/doc/src/sgml/func/func-info.sgml b/doc/src/sgml/func/func-info.sgml new file mode 100644 index 0000000000000..c393832d94c64 --- /dev/null +++ b/doc/src/sgml/func/func-info.sgml @@ -0,0 +1,3800 @@ + + System Information Functions and Operators + + + The functions described in this section are used to obtain various + information about a PostgreSQL installation. + + + + Session Information Functions + + + shows several + functions that extract session and system information. + + + + In addition to the functions listed in this section, there are a number of + functions related to the statistics system that also provide system + information. See for more + information. + + + + Session Information Functions + + + + + Function + + + Description + + + + + + + + + current_catalog + + current_catalog + name + + + + current_database + + current_database () + name + + + Returns the name of the current database. (Databases are + called catalogs in the SQL standard, + so current_catalog is the standard's + spelling.) + + + + + + + current_query + + current_query () + text + + + Returns the text of the currently executing query, as submitted + by the client (which might contain more than one statement). + + + + + + + current_role + + current_role + name + + + This is equivalent to current_user. + + + + + + + current_schema + + + schema + current + + current_schema + name + + + current_schema () + name + + + Returns the name of the schema that is first in the search path (or a + null value if the search path is empty). This is the schema that will + be used for any tables or other named objects that are created without + specifying a target schema. + + + + + + + current_schemas + + + search path + current + + current_schemas ( include_implicit boolean ) + name[] + + + Returns an array of the names of all schemas presently in the + effective search path, in their priority order. (Items in the current + setting that do not correspond to + existing, searchable schemas are omitted.) If the Boolean argument + is true, then implicitly-searched system schemas + such as pg_catalog are included in the result. + + + + + + + current_user + + + user + current + + current_user + name + + + Returns the user name of the current execution context. + + + + + + + inet_client_addr + + inet_client_addr () + inet + + + Returns the IP address of the current client, + or NULL if the current connection is via a + Unix-domain socket. + + + + + + + inet_client_port + + inet_client_port () + integer + + + Returns the IP port number of the current client, + or NULL if the current connection is via a + Unix-domain socket. + + + + + + + inet_server_addr + + inet_server_addr () + inet + + + Returns the IP address on which the server accepted the current + connection, + or NULL if the current connection is via a + Unix-domain socket. + + + + + + + inet_server_port + + inet_server_port () + integer + + + Returns the IP port number on which the server accepted the current + connection, + or NULL if the current connection is via a + Unix-domain socket. + + + + + + + pg_backend_pid + + pg_backend_pid () + integer + + + Returns the process ID of the server process attached to the current + session. + + + + + + + pg_blocking_pids + + pg_blocking_pids ( integer ) + integer[] + + + Returns an array of the process ID(s) of the sessions that are + blocking the server process with the specified process ID from + acquiring a lock, or an empty array if there is no such server process + or it is not blocked. 
+ + + One server process blocks another if it either holds a lock that + conflicts with the blocked process's lock request (hard block), or is + waiting for a lock that would conflict with the blocked process's lock + request and is ahead of it in the wait queue (soft block). When using + parallel queries the result always lists client-visible process IDs + (that is, pg_backend_pid results) even if the + actual lock is held or awaited by a child worker process. As a result + of that, there may be duplicated PIDs in the result. Also note that + when a prepared transaction holds a conflicting lock, it will be + represented by a zero process ID. + + + Frequent calls to this function could have some impact on database + performance, because it needs exclusive access to the lock manager's + shared state for a short time. + + + + + + + pg_conf_load_time + + pg_conf_load_time () + timestamp with time zone + + + Returns the time when the server configuration files were last loaded. + If the current session was alive at the time, this will be the time + when the session itself re-read the configuration files (so the + reading will vary a little in different sessions). Otherwise it is + the time when the postmaster process re-read the configuration files. + + + + + + + pg_current_logfile + + + Logging + pg_current_logfile function + + + current_logfiles + and the pg_current_logfile function + + + Logging + current_logfiles file and the pg_current_logfile + function + + pg_current_logfile ( text ) + text + + + Returns the path name of the log file currently in use by the logging + collector. The path includes the + directory and the individual log file name. The result + is NULL if the logging collector is disabled. + When multiple log files exist, each in a different + format, pg_current_logfile without an argument + returns the path of the file having the first format found in the + ordered list: stderr, + csvlog, jsonlog. + NULL is returned if no log file has any of these + formats. + To request information about a specific log file format, supply + either csvlog, jsonlog or + stderr as the + value of the optional parameter. The result is NULL + if the log format requested is not configured in + . + The result reflects the contents of + the current_logfiles file. + + + This function is restricted to superusers and roles with privileges of + the pg_monitor role by default, but other users can + be granted EXECUTE to run the function. + + + + + + + pg_get_loaded_modules + + pg_get_loaded_modules () + setof record + ( module_name text, + version text, + file_name text ) + + + Returns a list of the loadable modules that are loaded into the + current server session. The module_name + and version fields are NULL unless the + module author supplied values for them using + the PG_MODULE_MAGIC_EXT macro. + The file_name field gives the file + name of the module (shared library). + + + + + + + pg_my_temp_schema + + pg_my_temp_schema () + oid + + + Returns the OID of the current session's temporary schema, or zero if + it has none (because it has not created any temporary tables). + + + + + + + pg_is_other_temp_schema + + pg_is_other_temp_schema ( oid ) + boolean + + + Returns true if the given OID is the OID of another session's + temporary schema. (This can be useful, for example, to exclude other + sessions' temporary tables from a catalog display.) 
+ + + + + + + pg_jit_available + + pg_jit_available () + boolean + + + Returns true if a JIT compiler extension is + available (see ) and the + configuration parameter is set to + on. + + + + + + + pg_numa_available + + pg_numa_available () + boolean + + + Returns true if the server has been compiled with NUMA support. + + + + + + + pg_listening_channels + + pg_listening_channels () + setof text + + + Returns the set of names of asynchronous notification channels that + the current session is listening to. + + + + + + + pg_notification_queue_usage + + pg_notification_queue_usage () + double precision + + + Returns the fraction (0–1) of the asynchronous notification + queue's maximum size that is currently occupied by notifications that + are waiting to be processed. + See and + for more information. + + + + + + + pg_postmaster_start_time + + pg_postmaster_start_time () + timestamp with time zone + + + Returns the time when the server started. + + + + + + + pg_safe_snapshot_blocking_pids + + pg_safe_snapshot_blocking_pids ( integer ) + integer[] + + + Returns an array of the process ID(s) of the sessions that are blocking + the server process with the specified process ID from acquiring a safe + snapshot, or an empty array if there is no such server process or it + is not blocked. + + + A session running a SERIALIZABLE transaction blocks + a SERIALIZABLE READ ONLY DEFERRABLE transaction + from acquiring a snapshot until the latter determines that it is safe + to avoid taking any predicate locks. See + for more information about + serializable and deferrable transactions. + + + Frequent calls to this function could have some impact on database + performance, because it needs access to the predicate lock manager's + shared state for a short time. + + + + + + + pg_trigger_depth + + pg_trigger_depth () + integer + + + Returns the current nesting level + of PostgreSQL triggers (0 if not called, + directly or indirectly, from inside a trigger). + + + + + + + session_user + + session_user + name + + + Returns the session user's name. + + + + + + + system_user + + system_user + text + + + Returns the authentication method and the identity (if any) that the + user presented during the authentication cycle before they were + assigned a database role. It is represented as + auth_method:identity or + NULL if the user has not been authenticated (for + example if Trust authentication has + been used). + + + + + + + user + + user + name + + + This is equivalent to current_user. + + + + +
+ + + + current_catalog, + current_role, + current_schema, + current_user, + session_user, + and user have special syntactic status + in SQL: they must be called without trailing + parentheses. In PostgreSQL, parentheses can optionally be used with + current_schema, but not with the others. + + + + + The session_user is normally the user who initiated + the current database connection; but superusers can change this setting + with . + The current_user is the user identifier + that is applicable for permission checking. Normally it is equal + to the session user, but it can be changed with + . + It also changes during the execution of + functions with the attribute SECURITY DEFINER. + In Unix parlance, the session user is the real user and + the current user is the effective user. + current_role and user are + synonyms for current_user. (The SQL standard draws + a distinction between current_role + and current_user, but PostgreSQL + does not, since it unifies users and roles into a single kind of entity.) + + +
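+ For example, a quick way to inspect the current session (a minimal
+ illustration; the values returned naturally vary with the session and
+ the connection type):
+
+-- who am I, and how am I connected? (output varies per session)
+SELECT current_user, session_user, pg_backend_pid(), inet_client_addr();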
+ + + Access Privilege Inquiry Functions + + + privilege + querying + + + + lists functions that + allow querying object access privileges programmatically. + (See for more information about + privileges.) + In these functions, the user whose privileges are being inquired about + can be specified by name or by OID + (pg_authid.oid), or if + the name is given as public then the privileges of the + PUBLIC pseudo-role are checked. Also, the user + argument can be omitted entirely, in which case + the current_user is assumed. + The object that is being inquired about can be specified either by name or + by OID, too. When specifying by name, a schema name can be included if + relevant. + The access privilege of interest is specified by a text string, which must + evaluate to one of the appropriate privilege keywords for the object's type + (e.g., SELECT). Optionally, WITH GRANT + OPTION can be added to a privilege type to test whether the + privilege is held with grant option. Also, multiple privilege types can be + listed separated by commas, in which case the result will be true if any of + the listed privileges is held. (Case of the privilege string is not + significant, and extra whitespace is allowed between but not within + privilege names.) + Some examples: + +SELECT has_table_privilege('myschema.mytable', 'select'); +SELECT has_table_privilege('joe', 'mytable', 'INSERT, SELECT WITH GRANT OPTION'); + + + + + Access Privilege Inquiry Functions + + + + + Function + + + Description + + + + + + + + + has_any_column_privilege + + has_any_column_privilege ( + user name or oid, + table text or oid, + privilege text ) + boolean + + + Does user have privilege for any column of table? + This succeeds either if the privilege is held for the whole table, or + if there is a column-level grant of the privilege for at least one + column. + Allowable privilege types are + SELECT, INSERT, + UPDATE, and REFERENCES. + + + + + + + has_column_privilege + + has_column_privilege ( + user name or oid, + table text or oid, + column text or smallint, + privilege text ) + boolean + + + Does user have privilege for the specified table column? + This succeeds either if the privilege is held for the whole table, or + if there is a column-level grant of the privilege for the column. + The column can be specified by name or by attribute number + (pg_attribute.attnum). + Allowable privilege types are + SELECT, INSERT, + UPDATE, and REFERENCES. + + + + + + + has_database_privilege + + has_database_privilege ( + user name or oid, + database text or oid, + privilege text ) + boolean + + + Does user have privilege for database? + Allowable privilege types are + CREATE, + CONNECT, + TEMPORARY, and + TEMP (which is equivalent to + TEMPORARY). + + + + + + + has_foreign_data_wrapper_privilege + + has_foreign_data_wrapper_privilege ( + user name or oid, + fdw text or oid, + privilege text ) + boolean + + + Does user have privilege for foreign-data wrapper? + The only allowable privilege type is USAGE. + + + + + + + has_function_privilege + + has_function_privilege ( + user name or oid, + function text or oid, + privilege text ) + boolean + + + Does user have privilege for function? + The only allowable privilege type is EXECUTE. + + + When specifying a function by name rather than by OID, the allowed + input is the same as for the regprocedure data type (see + ). 
+ An example is: + +SELECT has_function_privilege('joeuser', 'myfunc(int, text)', 'execute'); + + + + + + + + has_language_privilege + + has_language_privilege ( + user name or oid, + language text or oid, + privilege text ) + boolean + + + Does user have privilege for language? + The only allowable privilege type is USAGE. + + + + + + + has_largeobject_privilege + + has_largeobject_privilege ( + user name or oid, + largeobject oid, + privilege text ) + boolean + + + Does user have privilege for large object? + Allowable privilege types are + SELECT and UPDATE. + + + + + + + has_parameter_privilege + + has_parameter_privilege ( + user name or oid, + parameter text, + privilege text ) + boolean + + + Does user have privilege for configuration parameter? + The parameter name is case-insensitive. + Allowable privilege types are SET + and ALTER SYSTEM. + + + + + + + has_schema_privilege + + has_schema_privilege ( + user name or oid, + schema text or oid, + privilege text ) + boolean + + + Does user have privilege for schema? + Allowable privilege types are + CREATE and + USAGE. + + + + + + + has_sequence_privilege + + has_sequence_privilege ( + user name or oid, + sequence text or oid, + privilege text ) + boolean + + + Does user have privilege for sequence? + Allowable privilege types are + USAGE, + SELECT, and + UPDATE. + + + + + + + has_server_privilege + + has_server_privilege ( + user name or oid, + server text or oid, + privilege text ) + boolean + + + Does user have privilege for foreign server? + The only allowable privilege type is USAGE. + + + + + + + has_table_privilege + + has_table_privilege ( + user name or oid, + table text or oid, + privilege text ) + boolean + + + Does user have privilege for table? + Allowable privilege types + are SELECT, INSERT, + UPDATE, DELETE, + TRUNCATE, REFERENCES, + TRIGGER, and MAINTAIN. + + + + + + + has_tablespace_privilege + + has_tablespace_privilege ( + user name or oid, + tablespace text or oid, + privilege text ) + boolean + + + Does user have privilege for tablespace? + The only allowable privilege type is CREATE. + + + + + + + has_type_privilege + + has_type_privilege ( + user name or oid, + type text or oid, + privilege text ) + boolean + + + Does user have privilege for data type? + The only allowable privilege type is USAGE. + When specifying a type by name rather than by OID, the allowed input + is the same as for the regtype data type (see + ). + + + + + + + pg_has_role + + pg_has_role ( + user name or oid, + role text or oid, + privilege text ) + boolean + + + Does user have privilege for role? + Allowable privilege types are + MEMBER, USAGE, + and SET. + MEMBER denotes direct or indirect membership in + the role without regard to what specific privileges may be conferred. + USAGE denotes whether the privileges of the role + are immediately available without doing SET ROLE, + while SET denotes whether it is possible to change + to the role using the SET ROLE command. + WITH ADMIN OPTION or WITH GRANT + OPTION can be added to any of these privilege types to + test whether the ADMIN privilege is held (all + six spellings test the same thing). + This function does not allow the special case of + setting user to public, + because the PUBLIC pseudo-role can never be a member of real roles. + + + + + + + row_security_active + + row_security_active ( + table text or oid ) + boolean + + + Is row-level security active for the specified table in the context of + the current user and current environment? + + + + +
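+ The user argument can also be omitted, in which case the privileges of
+ the current user are tested. For example (pg_monitor is one of the
+ predefined roles, so this works on any installation):
+
+-- current_user is assumed when the user argument is omitted
+SELECT has_database_privilege(current_database(), 'CONNECT');
+-- is the current user a direct or indirect member of pg_monitor?
+SELECT pg_has_role('pg_monitor', 'MEMBER');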
+ + + shows the operators + available for the aclitem type, which is the catalog + representation of access privileges. See + for information about how to read access privilege values. + + + + <type>aclitem</type> Operators + + + + + Operator + + + Description + + + Example(s) + + + + + + + + + aclitemeq + + aclitem = aclitem + boolean + + + Are aclitems equal? (Notice that + type aclitem lacks the usual set of comparison + operators; it has only equality. In turn, aclitem + arrays can only be compared for equality.) + + + 'calvin=r*w/hobbes'::aclitem = 'calvin=r*w*/hobbes'::aclitem + f + + + + + + + aclcontains + + aclitem[] @> aclitem + boolean + + + Does array contain the specified privileges? (This is true if there + is an array entry that matches the aclitem's grantee and + grantor, and has at least the specified set of privileges.) + + + '{calvin=r*w/hobbes,hobbes=r*w*/postgres}'::aclitem[] @> 'calvin=r*/hobbes'::aclitem + t + + + + + + aclitem[] ~ aclitem + boolean + + + This is a deprecated alias for @>. + + + '{calvin=r*w/hobbes,hobbes=r*w*/postgres}'::aclitem[] ~ 'calvin=r*/hobbes'::aclitem + t + + + + +
+ + + shows some additional + functions to manage the aclitem type. + + + + <type>aclitem</type> Functions + + + + + Function + + + Description + + + + + + + + + acldefault + + acldefault ( + type "char", + ownerId oid ) + aclitem[] + + + Constructs an aclitem array holding the default access + privileges for an object of type type belonging + to the role with OID ownerId. This represents + the access privileges that will be assumed when an object's + ACL entry is null. (The default access privileges + are described in .) + The type parameter must be one of + 'c' for COLUMN, + 'r' for TABLE and table-like objects, + 's' for SEQUENCE, + 'd' for DATABASE, + 'f' for FUNCTION or PROCEDURE, + 'l' for LANGUAGE, + 'L' for LARGE OBJECT, + 'n' for SCHEMA, + 'p' for PARAMETER, + 't' for TABLESPACE, + 'F' for FOREIGN DATA WRAPPER, + 'S' for FOREIGN SERVER, + or + 'T' for TYPE or DOMAIN. + + + + + + + aclexplode + + aclexplode ( aclitem[] ) + setof record + ( grantor oid, + grantee oid, + privilege_type text, + is_grantable boolean ) + + + Returns the aclitem array as a set of rows. + If the grantee is the pseudo-role PUBLIC, it is represented by zero in + the grantee column. Each granted privilege is + represented as SELECT, INSERT, + etc (see for a full list). + Note that each privilege is broken out as a separate row, so + only one keyword appears in the privilege_type + column. + + + + + + + makeaclitem + + makeaclitem ( + grantee oid, + grantor oid, + privileges text, + is_grantable boolean ) + aclitem + + + Constructs an aclitem with the given properties. + privileges is a comma-separated list of + privilege names such as SELECT, + INSERT, etc, all of which are set in the + result. (Case of the privilege string is not significant, and + extra whitespace is allowed between but not within privilege + names.) + + + + +
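+ For instance, aclexplode can expand the ACL stored in a catalog column
+ into one row per granted privilege. A sketch, assuming a hypothetical
+ table mytable (the query returns no rows while the table's ACL is still
+ null, that is, before any explicit GRANT):
+
+-- mytable is hypothetical; a grantee of 0 denotes PUBLIC
+SELECT grantee::regrole AS grantee, privilege_type, is_grantable
+FROM pg_class, aclexplode(relacl)
+WHERE oid = 'mytable'::regclass;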
+ +
+ + + Schema Visibility Inquiry Functions + + + shows functions that + determine whether a certain object is visible in the + current schema search path. + For example, a table is said to be visible if its + containing schema is in the search path and no table of the same + name appears earlier in the search path. This is equivalent to the + statement that the table can be referenced by name without explicit + schema qualification. Thus, to list the names of all visible tables: + +SELECT relname FROM pg_class WHERE pg_table_is_visible(oid); + + For functions and operators, an object in the search path is said to be + visible if there is no object of the same name and argument data + type(s) earlier in the path. For operator classes and families, + both the name and the associated index access method are considered. + + + + search path + object visibility + + + + Schema Visibility Inquiry Functions + + + + + Function + + + Description + + + + + + + + + pg_collation_is_visible + + pg_collation_is_visible ( collation oid ) + boolean + + + Is collation visible in search path? + + + + + + + pg_conversion_is_visible + + pg_conversion_is_visible ( conversion oid ) + boolean + + + Is conversion visible in search path? + + + + + + + pg_function_is_visible + + pg_function_is_visible ( function oid ) + boolean + + + Is function visible in search path? + (This also works for procedures and aggregates.) + + + + + + + pg_opclass_is_visible + + pg_opclass_is_visible ( opclass oid ) + boolean + + + Is operator class visible in search path? + + + + + + + pg_operator_is_visible + + pg_operator_is_visible ( operator oid ) + boolean + + + Is operator visible in search path? + + + + + + + pg_opfamily_is_visible + + pg_opfamily_is_visible ( opclass oid ) + boolean + + + Is operator family visible in search path? + + + + + + + pg_statistics_obj_is_visible + + pg_statistics_obj_is_visible ( stat oid ) + boolean + + + Is statistics object visible in search path? + + + + + + + pg_table_is_visible + + pg_table_is_visible ( table oid ) + boolean + + + Is table visible in search path? + (This works for all types of relations, including views, materialized + views, indexes, sequences and foreign tables.) + + + + + + + pg_ts_config_is_visible + + pg_ts_config_is_visible ( config oid ) + boolean + + + Is text search configuration visible in search path? + + + + + + + pg_ts_dict_is_visible + + pg_ts_dict_is_visible ( dict oid ) + boolean + + + Is text search dictionary visible in search path? + + + + + + + pg_ts_parser_is_visible + + pg_ts_parser_is_visible ( parser oid ) + boolean + + + Is text search parser visible in search path? + + + + + + + pg_ts_template_is_visible + + pg_ts_template_is_visible ( template oid ) + boolean + + + Is text search template visible in search path? + + + + + + + pg_type_is_visible + + pg_type_is_visible ( type oid ) + boolean + + + Is type (or domain) visible in search path? + + + + +
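+ For example, to check whether the built-in function lower(text) would
+ be found without schema qualification (normally true, since pg_catalog
+ is implicitly searched):
+
+SELECT pg_function_is_visible('lower(text)'::regprocedure);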
+ + + All these functions require object OIDs to identify the object to be + checked. If you want to test an object by name, it is convenient to use + the OID alias types (regclass, regtype, + regprocedure, regoperator, regconfig, + or regdictionary), + for example: + +SELECT pg_type_is_visible('myschema.widget'::regtype); + + Note that it would not make much sense to test a non-schema-qualified + type name in this way — if the name can be recognized at all, it must be visible. + + +
+ + + System Catalog Information Functions + + + lists functions that + extract information from the system catalogs. + + + + System Catalog Information Functions + + + + + Function + + + Description + + + + + + + + + format_type + + format_type ( type oid, typemod integer ) + text + + + Returns the SQL name for a data type that is identified by its type + OID and possibly a type modifier. Pass NULL for the type modifier if + no specific modifier is known. + + + + + + + pg_basetype + + pg_basetype ( regtype ) + regtype + + + Returns the OID of the base type of a domain identified by its + type OID. If the argument is the OID of a non-domain type, + returns the argument as-is. Returns NULL if the argument is + not a valid type OID. If there's a chain of domain dependencies, + it will recurse until finding the base type. + + + Assuming CREATE DOMAIN mytext AS text: + + + pg_basetype('mytext'::regtype) + text + + + + + + + pg_char_to_encoding + + pg_char_to_encoding ( encoding name ) + integer + + + Converts the supplied encoding name into an integer representing the + internal identifier used in some system catalog tables. + Returns -1 if an unknown encoding name is provided. + + + + + + + pg_encoding_to_char + + pg_encoding_to_char ( encoding integer ) + name + + + Converts the integer used as the internal identifier of an encoding in some + system catalog tables into a human-readable string. + Returns an empty string if an invalid encoding number is provided. + + + + + + + pg_get_catalog_foreign_keys + + pg_get_catalog_foreign_keys () + setof record + ( fktable regclass, + fkcols text[], + pktable regclass, + pkcols text[], + is_array boolean, + is_opt boolean ) + + + Returns a set of records describing the foreign key relationships + that exist within the PostgreSQL system + catalogs. + The fktable column contains the name of the + referencing catalog, and the fkcols column + contains the name(s) of the referencing column(s). Similarly, + the pktable column contains the name of the + referenced catalog, and the pkcols column + contains the name(s) of the referenced column(s). + If is_array is true, the last referencing + column is an array, each of whose elements should match some entry + in the referenced catalog. + If is_opt is true, the referencing column(s) + are allowed to contain zeroes instead of a valid reference. + + + + + + + pg_get_constraintdef + + pg_get_constraintdef ( constraint oid , pretty boolean ) + text + + + Reconstructs the creating command for a constraint. + (This is a decompiled reconstruction, not the original text + of the command.) + + + + + + + pg_get_expr + + pg_get_expr ( expr pg_node_tree, relation oid , pretty boolean ) + text + + + Decompiles the internal form of an expression stored in the system + catalogs, such as the default value for a column. If the expression + might contain Vars, specify the OID of the relation they refer to as + the second parameter; if no Vars are expected, passing zero is + sufficient. + + + + + + + pg_get_functiondef + + pg_get_functiondef ( func oid ) + text + + + Reconstructs the creating command for a function or procedure. + (This is a decompiled reconstruction, not the original text + of the command.) + The result is a complete CREATE OR REPLACE FUNCTION + or CREATE OR REPLACE PROCEDURE statement. 
+ + + + + + + pg_get_function_arguments + + pg_get_function_arguments ( func oid ) + text + + + Reconstructs the argument list of a function or procedure, in the form + it would need to appear in within CREATE FUNCTION + (including default values). + + + + + + + pg_get_function_identity_arguments + + pg_get_function_identity_arguments ( func oid ) + text + + + Reconstructs the argument list necessary to identify a function or + procedure, in the form it would need to appear in within commands such + as ALTER FUNCTION. This form omits default values. + + + + + + + pg_get_function_result + + pg_get_function_result ( func oid ) + text + + + Reconstructs the RETURNS clause of a function, in + the form it would need to appear in within CREATE + FUNCTION. Returns NULL for a procedure. + + + + + + + pg_get_indexdef + + pg_get_indexdef ( index oid , column integer, pretty boolean ) + text + + + Reconstructs the creating command for an index. + (This is a decompiled reconstruction, not the original text + of the command.) If column is supplied and is + not zero, only the definition of that column is reconstructed. + + + + + + + pg_get_keywords + + pg_get_keywords () + setof record + ( word text, + catcode "char", + barelabel boolean, + catdesc text, + baredesc text ) + + + Returns a set of records describing the SQL keywords recognized by the + server. The word column contains the + keyword. The catcode column contains a + category code: U for an unreserved + keyword, C for a keyword that can be a column + name, T for a keyword that can be a type or + function name, or R for a fully reserved keyword. + The barelabel column + contains true if the keyword can be used as + a bare column label in SELECT lists, + or false if it can only be used + after AS. + The catdesc column contains a + possibly-localized string describing the keyword's category. + The baredesc column contains a + possibly-localized string describing the keyword's column label status. + + + + + + + pg_get_partkeydef + + pg_get_partkeydef ( table oid ) + text + + + Reconstructs the definition of a partitioned table's partition + key, in the form it would have in the PARTITION + BY clause of CREATE TABLE. + (This is a decompiled reconstruction, not the original text + of the command.) + + + + + + + pg_get_ruledef + + pg_get_ruledef ( rule oid , pretty boolean ) + text + + + Reconstructs the creating command for a rule. + (This is a decompiled reconstruction, not the original text + of the command.) + + + + + + + pg_get_serial_sequence + + pg_get_serial_sequence ( table text, column text ) + text + + + Returns the name of the sequence associated with a column, + or NULL if no sequence is associated with the column. + If the column is an identity column, the associated sequence is the + sequence internally created for that column. + For columns created using one of the serial types + (serial, smallserial, bigserial), + it is the sequence created for that serial column definition. + In the latter case, the association can be modified or removed + with ALTER SEQUENCE OWNED BY. + (This function probably should have been + called pg_get_owned_sequence; its current name + reflects the fact that it has historically been used with serial-type + columns.) The first parameter is a table name with optional + schema, and the second parameter is a column name. Because the first + parameter potentially contains both schema and table names, it is + parsed per usual SQL rules, meaning it is lower-cased by default. 
+ The second parameter, being just a column name, is treated literally + and so has its case preserved. The result is suitably formatted + for passing to the sequence functions (see + ). + + + A typical use is in reading the current value of the sequence for an + identity or serial column, for example: + +SELECT currval(pg_get_serial_sequence('sometable', 'id')); + + + + + + + + pg_get_statisticsobjdef + + pg_get_statisticsobjdef ( statobj oid ) + text + + + Reconstructs the creating command for an extended statistics object. + (This is a decompiled reconstruction, not the original text + of the command.) + + + + + + + pg_get_triggerdef + +pg_get_triggerdef ( trigger oid , pretty boolean ) + text + + + Reconstructs the creating command for a trigger. + (This is a decompiled reconstruction, not the original text + of the command.) + + + + + + + pg_get_userbyid + + pg_get_userbyid ( role oid ) + name + + + Returns a role's name given its OID. + + + + + + + pg_get_viewdef + + pg_get_viewdef ( view oid , pretty boolean ) + text + + + Reconstructs the underlying SELECT command for a + view or materialized view. (This is a decompiled reconstruction, not + the original text of the command.) + + + + + + pg_get_viewdef ( view oid, wrap_column integer ) + text + + + Reconstructs the underlying SELECT command for a + view or materialized view. (This is a decompiled reconstruction, not + the original text of the command.) In this form of the function, + pretty-printing is always enabled, and long lines are wrapped to try + to keep them shorter than the specified number of columns. + + + + + + pg_get_viewdef ( view text , pretty boolean ) + text + + + Reconstructs the underlying SELECT command for a + view or materialized view, working from a textual name for the view + rather than its OID. (This is deprecated; use the OID variant + instead.) + + + + + + + pg_index_column_has_property + + pg_index_column_has_property ( index regclass, column integer, property text ) + boolean + + + Tests whether an index column has the named property. + Common index column properties are listed in + . + (Note that extension access methods can define additional property + names for their indexes.) + NULL is returned if the property name is not known + or does not apply to the particular object, or if the OID or column + number does not identify a valid object. + + + + + + + pg_index_has_property + + pg_index_has_property ( index regclass, property text ) + boolean + + + Tests whether an index has the named property. + Common index properties are listed in + . + (Note that extension access methods can define additional property + names for their indexes.) + NULL is returned if the property name is not known + or does not apply to the particular object, or if the OID does not + identify a valid object. + + + + + + + pg_indexam_has_property + + pg_indexam_has_property ( am oid, property text ) + boolean + + + Tests whether an index access method has the named property. + Access method properties are listed in + . + NULL is returned if the property name is not known + or does not apply to the particular object, or if the OID does not + identify a valid object. + + + + + + + pg_options_to_table + + pg_options_to_table ( options_array text[] ) + setof record + ( option_name text, + option_value text ) + + + Returns the set of storage options represented by a value from + pg_class.reloptions or + pg_attribute.attoptions. 
+ + + + + + + pg_settings_get_flags + + pg_settings_get_flags ( guc text ) + text[] + + + Returns an array of the flags associated with the given GUC, or + NULL if it does not exist. The result is + an empty array if the GUC exists but there are no flags to show. + Only the most useful flags listed in + are exposed. + + + + + + + pg_tablespace_databases + + pg_tablespace_databases ( tablespace oid ) + setof oid + + + Returns the set of OIDs of databases that have objects stored in the + specified tablespace. If this function returns any rows, the + tablespace is not empty and cannot be dropped. To identify the specific + objects populating the tablespace, you will need to connect to the + database(s) identified by pg_tablespace_databases + and query their pg_class catalogs. + + + + + + + pg_tablespace_location + + pg_tablespace_location ( tablespace oid ) + text + + + Returns the file system path that this tablespace is located in. + + + + + + + pg_typeof + + pg_typeof ( "any" ) + regtype + + + Returns the OID of the data type of the value that is passed to it. + This can be helpful for troubleshooting or dynamically constructing + SQL queries. The function is declared as + returning regtype, which is an OID alias type (see + ); this means that it is the same as an + OID for comparison purposes but displays as a type name. + + + pg_typeof(33) + integer + + + + + + + COLLATION FOR + + COLLATION FOR ( "any" ) + text + + + Returns the name of the collation of the value that is passed to it. + The value is quoted and schema-qualified if necessary. If no + collation was derived for the argument expression, + then NULL is returned. If the argument is not of a + collatable data type, then an error is raised. + + + collation for ('foo'::text) + "default" + + + collation for ('foo' COLLATE "de_DE") + "de_DE" + + + + + + + to_regclass + + to_regclass ( text ) + regclass + + + Translates a textual relation name to its OID. A similar result is + obtained by casting the string to type regclass (see + ); however, this function will return + NULL rather than throwing an error if the name is + not found. + + + + + + + to_regdatabase + + to_regdatabase ( text ) + regdatabase + + + Translates a textual database name to its OID. A similar result is + obtained by casting the string to type regdatabase (see + ); however, this function will return + NULL rather than throwing an error if the name is + not found. + + + + + + + to_regcollation + + to_regcollation ( text ) + regcollation + + + Translates a textual collation name to its OID. A similar result is + obtained by casting the string to type regcollation (see + ); however, this function will return + NULL rather than throwing an error if the name is + not found. + + + + + + + to_regnamespace + + to_regnamespace ( text ) + regnamespace + + + Translates a textual schema name to its OID. A similar result is + obtained by casting the string to type regnamespace (see + ); however, this function will return + NULL rather than throwing an error if the name is + not found. + + + + + + + to_regoper + + to_regoper ( text ) + regoper + + + Translates a textual operator name to its OID. A similar result is + obtained by casting the string to type regoper (see + ); however, this function will return + NULL rather than throwing an error if the name is + not found or is ambiguous. + + + + + + + to_regoperator + + to_regoperator ( text ) + regoperator + + + Translates a textual operator name (with parameter types) to its OID. 
A similar result is + obtained by casting the string to type regoperator (see + ); however, this function will return + NULL rather than throwing an error if the name is + not found. + + + + + + + to_regproc + + to_regproc ( text ) + regproc + + + Translates a textual function or procedure name to its OID. A similar result is + obtained by casting the string to type regproc (see + ); however, this function will return + NULL rather than throwing an error if the name is + not found or is ambiguous. + + + + + + + to_regprocedure + + to_regprocedure ( text ) + regprocedure + + + Translates a textual function or procedure name (with argument types) to its OID. A similar result is + obtained by casting the string to type regprocedure (see + ); however, this function will return + NULL rather than throwing an error if the name is + not found. + + + + + + + to_regrole + + to_regrole ( text ) + regrole + + + Translates a textual role name to its OID. A similar result is + obtained by casting the string to type regrole (see + ); however, this function will return + NULL rather than throwing an error if the name is + not found. + + + + + + + to_regtype + + to_regtype ( text ) + regtype + + + Parses a string of text, extracts a potential type name from it, + and translates that name into a type OID. A syntax error in the + string will result in an error; but if the string is a + syntactically valid type name that happens not to be found in the + catalogs, the result is NULL. A similar result + is obtained by casting the string to type regtype + (see ), except that that will throw + error for name not found. + + + + + + + to_regtypemod + + to_regtypemod ( text ) + integer + + + Parses a string of text, extracts a potential type name from it, + and translates its type modifier, if any. A syntax error in the + string will result in an error; but if the string is a + syntactically valid type name that happens not to be found in the + catalogs, the result is NULL. The result is + -1 if no type modifier is present. + + + to_regtypemod can be combined with + to produce appropriate inputs for + , allowing a string representing a + type name to be canonicalized. + + + format_type(to_regtype('varchar(32)'), to_regtypemod('varchar(32)')) + character varying(32) + + + + +
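+ Two further examples, sketched against the built-in pg_tables view so
+ that they work on any installation (the nonexistent relation name in
+ the first query is arbitrary):
+
+-- NULL rather than an error for a name that is not found
+SELECT to_regclass('no_such_table_anywhere');
+-- decompiled, pretty-printed definition of a system view
+SELECT pg_get_viewdef('pg_tables'::regclass, true);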
+ + + Most of the functions that reconstruct (decompile) database objects + have an optional pretty flag, which + if true causes the result to + be pretty-printed. Pretty-printing suppresses unnecessary + parentheses and adds whitespace for legibility. + The pretty-printed format is more readable, but the default format + is more likely to be interpreted the same way by future versions of + PostgreSQL; so avoid using pretty-printed output + for dump purposes. Passing false for + the pretty parameter yields the same result as + omitting the parameter. + + + + Index Column Properties + + + + + NameDescription + + + + asc + Does the column sort in ascending order on a forward scan? + + + + desc + Does the column sort in descending order on a forward scan? + + + + nulls_first + Does the column sort with nulls first on a forward scan? + + + + nulls_last + Does the column sort with nulls last on a forward scan? + + + + orderable + Does the column possess any defined sort ordering? + + + + distance_orderable + Can the column be scanned in order by a distance + operator, for example ORDER BY col <-> constant ? + + + + returnable + Can the column value be returned by an index-only scan? + + + + search_array + Does the column natively support col = ANY(array) + searches? + + + + search_nulls + Does the column support IS NULL and + IS NOT NULL searches? + + + + +
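+ For example, assuming a hypothetical index mytable_pkey, its first
+ column and the index as a whole can be probed like this:
+
+-- mytable_pkey is a hypothetical index name
+SELECT pg_index_column_has_property('mytable_pkey'::regclass, 1, 'asc');
+SELECT pg_index_has_property('mytable_pkey'::regclass, 'clusterable');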
+ + + Index Properties + + + + + NameDescription + + + + clusterable + Can the index be used in a CLUSTER command? + + + + index_scan + Does the index support plain (non-bitmap) scans? + + + + bitmap_scan + Does the index support bitmap scans? + + + + backward_scan + Can the scan direction be changed in mid-scan (to + support FETCH BACKWARD on a cursor without + needing materialization)? + + + + +
+ + + Index Access Method Properties + + + + + NameDescription + + + + can_order + Does the access method support ASC, + DESC and related keywords in + CREATE INDEX? + + + + can_unique + Does the access method support unique indexes? + + + + can_multi_col + Does the access method support indexes with multiple columns? + + + + can_exclude + Does the access method support exclusion constraints? + + + + can_include + Does the access method support the INCLUDE + clause of CREATE INDEX? + + + + +
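+ To survey these properties across the installed index access methods
+ (pg_am is the system catalog of access methods; amtype = 'i' selects
+ the index access methods):
+
+SELECT amname,
+       pg_indexam_has_property(oid, 'can_order')  AS can_order,
+       pg_indexam_has_property(oid, 'can_unique') AS can_unique
+FROM pg_am
+WHERE amtype = 'i';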
+ + + GUC Flags + + + + + FlagDescription + + + + EXPLAIN + Parameters with this flag are included in + EXPLAIN (SETTINGS) commands. + + + + NO_SHOW_ALL + Parameters with this flag are excluded from + SHOW ALL commands. + + + + NO_RESET + Parameters with this flag do not support + RESET commands. + + + + NO_RESET_ALL + Parameters with this flag are excluded from + RESET ALL commands. + + + + NOT_IN_SAMPLE + Parameters with this flag are not included in + postgresql.conf by default. + + + + RUNTIME_COMPUTED + Parameters with this flag are runtime-computed ones. + + + + +
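+ For example, to list the runtime-computed parameters on a server (the
+ exact result set depends on the server version):
+
+SELECT name
+FROM pg_settings
+WHERE 'RUNTIME_COMPUTED' = ANY (pg_settings_get_flags(name));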
+ +
+ + + Object Information and Addressing Functions + + + lists functions related to + database object identification and addressing. + + + + Object Information and Addressing Functions + + + + + Function + + + Description + + + + + + + + + pg_get_acl + + pg_get_acl ( classid oid, objid oid, objsubid integer ) + aclitem[] + + + Returns the ACL for a database object, specified + by catalog OID, object OID and sub-object ID. This function returns + NULL values for undefined objects. + + + + + + + pg_describe_object + + pg_describe_object ( classid oid, objid oid, objsubid integer ) + text + + + Returns a textual description of a database object identified by + catalog OID, object OID, and sub-object ID (such as a column number + within a table; the sub-object ID is zero when referring to a whole + object). This description is intended to be human-readable, and might + be translated, depending on server configuration. This is especially + useful to determine the identity of an object referenced in the + pg_depend catalog. This function returns + NULL values for undefined objects. + + + + + + + pg_identify_object + + pg_identify_object ( classid oid, objid oid, objsubid integer ) + record + ( type text, + schema text, + name text, + identity text ) + + + Returns a row containing enough information to uniquely identify the + database object specified by catalog OID, object OID and sub-object + ID. + This information is intended to be machine-readable, and is never + translated. + type identifies the type of database object; + schema is the schema name that the object + belongs in, or NULL for object types that do not + belong to schemas; + name is the name of the object, quoted if + necessary, if the name (along with schema name, if pertinent) is + sufficient to uniquely identify the object, + otherwise NULL; + identity is the complete object identity, with + the precise format depending on object type, and each name within the + format being schema-qualified and quoted as necessary. Undefined + objects are identified with NULL values. + + + + + + + pg_identify_object_as_address + + pg_identify_object_as_address ( classid oid, objid oid, objsubid integer ) + record + ( type text, + object_names text[], + object_args text[] ) + + + Returns a row containing enough information to uniquely identify the + database object specified by catalog OID, object OID and sub-object + ID. + The returned information is independent of the current server, that + is, it could be used to identify an identically named object in + another server. + type identifies the type of database object; + object_names and + object_args + are text arrays that together form a reference to the object. + These three values can be passed + to pg_get_object_address to obtain the internal + address of the object. + + + + + + + pg_get_object_address + + pg_get_object_address ( type text, object_names text[], object_args text[] ) + record + ( classid oid, + objid oid, + objsubid integer ) + + + Returns a row containing enough information to uniquely identify the + database object specified by a type code and object name and argument + arrays. + The returned values are the ones that would be used in system catalogs + such as pg_depend; they can be passed to + other system functions such as pg_describe_object + or pg_identify_object. + classid is the OID of the system catalog + containing the object; + objid is the OID of the object itself, and + objsubid is the sub-object ID, or zero if none. 
+ This function is the inverse + of pg_identify_object_as_address. + Undefined objects are identified with NULL values. + + + + +
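+ For example, the following round trip uses the built-in pg_tables view:
+ pg_identify_object_as_address produces a server-independent address,
+ and pg_get_object_address converts it back into catalog OIDs (the
+ commented output is illustrative):
+
+SELECT * FROM pg_identify_object_as_address(
+    'pg_class'::regclass, 'pg_tables'::regclass, 0);
+-- e.g. type = view, object_names = {pg_catalog,pg_tables}, object_args = {}
+SELECT * FROM pg_get_object_address('view', '{pg_catalog,pg_tables}', '{}');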
+ + + pg_get_acl is useful for retrieving and inspecting + the privileges associated with database objects without looking at + specific catalogs. For example, to retrieve all the granted privileges + on objects in the current database: + +postgres=# SELECT + (pg_identify_object(s.classid,s.objid,s.objsubid)).*, + pg_catalog.pg_get_acl(s.classid,s.objid,s.objsubid) AS acl +FROM pg_catalog.pg_shdepend AS s +JOIN pg_catalog.pg_database AS d + ON d.datname = current_database() AND + d.oid = s.dbid +JOIN pg_catalog.pg_authid AS a + ON a.oid = s.refobjid AND + s.refclassid = 'pg_authid'::regclass +WHERE s.deptype = 'a'; +-[ RECORD 1 ]----------------------------------------- +type | table +schema | public +name | testtab +identity | public.testtab +acl | {postgres=arwdDxtm/postgres,foo=r/postgres} + + + +
+ + + Comment Information Functions + + + comment + about database objects + + + + The functions shown in + extract comments previously stored with the + command. A null value is returned if no + comment could be found for the specified parameters. + + + + Comment Information Functions + + + + + Function + + + Description + + + + + + + + + col_description + + col_description ( table oid, column integer ) + text + + + Returns the comment for a table column, which is specified by the OID + of its table and its column number. + (obj_description cannot be used for table + columns, since columns do not have OIDs of their own.) + + + + + + + obj_description + + obj_description ( object oid, catalog name ) + text + + + Returns the comment for a database object specified by its OID and the + name of the containing system catalog. For + example, obj_description(123456, 'pg_class') would + retrieve the comment for the table with OID 123456. + + + + + + obj_description ( object oid ) + text + + + Returns the comment for a database object specified by its OID alone. + This is deprecated since there is no guarantee + that OIDs are unique across different system catalogs; therefore, the + wrong comment might be returned. + + + + + + + shobj_description + + shobj_description ( object oid, catalog name ) + text + + + Returns the comment for a shared database object specified by its OID + and the name of the containing system catalog. This is just + like obj_description except that it is used for + retrieving comments on shared objects (that is, databases, roles, and + tablespaces). Some system catalogs are global to all databases within + each cluster, and the descriptions for objects in them are stored + globally as well. + + + + +
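+ For example, assuming a hypothetical table mytable:
+
+COMMENT ON TABLE mytable IS 'Orders received via the web shop.';
+SELECT obj_description('mytable'::regclass, 'pg_class');  -- the table's comment
+SELECT col_description('mytable'::regclass, 1);           -- comment on its first column, if any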
+ +
+ + + Data Validity Checking Functions + + + The functions shown in + can be helpful for checking validity of proposed input data. + + + Data Validity Checking Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + pg_input_is_valid + + pg_input_is_valid ( + string text, + type text + ) + boolean + + + Tests whether the given string is valid + input for the specified data type, returning true or false. + + + This function will only work as desired if the data type's input + function has been updated to report invalid input as + a soft error. Otherwise, invalid input will abort + the transaction, just as if the string had been cast to the type + directly. + + + pg_input_is_valid('42', 'integer') + t + + + pg_input_is_valid('42000000000', 'integer') + f + + + pg_input_is_valid('1234.567', 'numeric(7,4)') + f + + + + + + pg_input_error_info + + pg_input_error_info ( + string text, + type text + ) + record + ( message text, + detail text, + hint text, + sql_error_code text ) + + + Tests whether the given string is valid + input for the specified data type; if not, returns the details of + the error that would have been thrown. If the input is valid, the + results are NULL. The inputs are the same as + for pg_input_is_valid. + + + This function will only work as desired if the data type's input + function has been updated to report invalid input as + a soft error. Otherwise, invalid input will abort + the transaction, just as if the string had been cast to the type + directly. + + + SELECT * FROM pg_input_error_info('42000000000', 'integer') + + + message | detail | hint | sql_error_code +------------------------------------------------------+--------+------+---------------- + value "42000000000" is out of range for type integer | | | 22003 + + + + + +
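+ A typical use is screening untrusted text before casting it, for
+ instance while cleaning up an import. A sketch, assuming a hypothetical
+ staging table with a text column raw:
+
+-- rows whose raw value could not be cast to date without an error
+SELECT raw FROM staging WHERE NOT pg_input_is_valid(raw, 'date');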
+ +
+ + + Transaction ID and Snapshot Information Functions + + + The functions shown in + provide server transaction information in an exportable form. The main + use of these functions is to determine which transactions were committed + between two snapshots. + + + + Transaction ID and Snapshot Information Functions + + + + + Function + + + Description + + + + + + + + + age + + age ( xid ) + integer + + + Returns the number of transactions between the supplied + transaction id and the current transaction counter. + + + + + + + mxid_age + + mxid_age ( xid ) + integer + + + Returns the number of multixacts IDs between the supplied + multixact ID and the current multixacts counter. + + + + + + + pg_current_xact_id + + pg_current_xact_id () + xid8 + + + Returns the current transaction's ID. It will assign a new one if the + current transaction does not have one already (because it has not + performed any database updates); see for details. If executed in a + subtransaction, this will return the top-level transaction ID; + see for details. + + + + + + + pg_current_xact_id_if_assigned + + pg_current_xact_id_if_assigned () + xid8 + + + Returns the current transaction's ID, or NULL if no + ID is assigned yet. (It's best to use this variant if the transaction + might otherwise be read-only, to avoid unnecessary consumption of an + XID.) + If executed in a subtransaction, this will return the top-level + transaction ID. + + + + + + + pg_xact_status + + pg_xact_status ( xid8 ) + text + + + Reports the commit status of a recent transaction. + The result is one of in progress, + committed, or aborted, + provided that the transaction is recent enough that the system retains + the commit status of that transaction. + If it is old enough that no references to the transaction survive in + the system and the commit status information has been discarded, the + result is NULL. + Applications might use this function, for example, to determine + whether their transaction committed or aborted after the application + and database server become disconnected while + a COMMIT is in progress. + Note that prepared transactions are reported as in + progress; applications must check pg_prepared_xacts + if they need to determine whether a transaction ID belongs to a + prepared transaction. + + + + + + + pg_current_snapshot + + pg_current_snapshot () + pg_snapshot + + + Returns a current snapshot, a data structure + showing which transaction IDs are now in-progress. + Only top-level transaction IDs are included in the snapshot; + subtransaction IDs are not shown; see + for details. + + + + + + + pg_snapshot_xip + + pg_snapshot_xip ( pg_snapshot ) + setof xid8 + + + Returns the set of in-progress transaction IDs contained in a snapshot. + + + + + + + pg_snapshot_xmax + + pg_snapshot_xmax ( pg_snapshot ) + xid8 + + + Returns the xmax of a snapshot. + + + + + + + pg_snapshot_xmin + + pg_snapshot_xmin ( pg_snapshot ) + xid8 + + + Returns the xmin of a snapshot. + + + + + + + pg_visible_in_snapshot + + pg_visible_in_snapshot ( xid8, pg_snapshot ) + boolean + + + Is the given transaction ID visible according + to this snapshot (that is, was it completed before the snapshot was + taken)? Note that this function will not give the correct answer for + a subtransaction ID (subxid); see for + details. + + + + + + + pg_get_multixact_members + + pg_get_multixact_members ( multixid xid ) + setof record + ( xid xid, + mode text ) + + + Returns the transaction ID and lock mode for each member of the + specified multixact ID. 
The lock modes forupd, + fornokeyupd, sh, and + keysh correspond to the row-level locks + FOR UPDATE, FOR NO KEY UPDATE, + FOR SHARE, and FOR KEY SHARE, + respectively, as described in . Two + additional modes are specific to multixacts: + nokeyupd, used by updates that do not modify key + columns, and upd, used by updates or deletes that + modify key columns. + + + + +
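+ For example, the difference between the two pg_current_xact_id variants
+ can be observed within a single transaction:
+
+BEGIN;
+SELECT pg_current_xact_id_if_assigned();  -- NULL: no XID assigned yet
+SELECT pg_current_xact_id();              -- assigns an XID and returns it
+SELECT pg_current_xact_id_if_assigned();  -- now returns that same XID
+COMMIT;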
+ + + The internal transaction ID type xid is 32 bits wide and + wraps around every 4 billion transactions. However, + the functions shown in , except + age, mxid_age, and + pg_get_multixact_members, use a + 64-bit type xid8 that does not wrap around during the life + of an installation and can be converted to xid by casting if + required; see for details. + The data type pg_snapshot stores information about + transaction ID visibility at a particular moment in time. Its components + are described in . + pg_snapshot's textual representation is + xmin:xmax:xip_list. + For example 10:20:10,14,15 means + xmin=10, xmax=20, xip_list=10, 14, 15. + + + + Snapshot Components + + + + + + Name + Description + + + + + + xmin + + Lowest transaction ID that was still active. All transaction IDs + less than xmin are either committed and visible, + or rolled back and dead. + + + + + xmax + + One past the highest completed transaction ID. All transaction IDs + greater than or equal to xmax had not yet + completed as of the time of the snapshot, and thus are invisible. + + + + + xip_list + + Transactions in progress at the time of the snapshot. A transaction + ID that is xmin <= X < + xmax and not in this list was already completed at the time + of the snapshot, and thus is either visible or dead according to its + commit status. This list does not include the transaction IDs of + subtransactions (subxids). + + + + +
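+ Using the textual representation above, visibility checks can be
+ illustrated directly:
+
+SELECT pg_visible_in_snapshot('11'::xid8, '10:20:10,14,15'::pg_snapshot);
+-- true: transaction 11 had completed when the snapshot was taken
+SELECT pg_visible_in_snapshot('14'::xid8, '10:20:10,14,15'::pg_snapshot);
+-- false: transaction 14 was still in progress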
+ + + In releases of PostgreSQL before 13 there was + no xid8 type, so variants of these functions were provided + that used bigint to represent a 64-bit XID, with a + correspondingly distinct snapshot data type txid_snapshot. + These older functions have txid in their names. They + are still supported for backward compatibility, but may be removed from a + future release. See . + + + + Deprecated Transaction ID and Snapshot Information Functions + + + + + Function + + + Description + + + + + + + + + + txid_current + + txid_current () + bigint + + + See pg_current_xact_id(). + + + + + + + txid_current_if_assigned + + txid_current_if_assigned () + bigint + + + See pg_current_xact_id_if_assigned(). + + + + + + + txid_current_snapshot + + txid_current_snapshot () + txid_snapshot + + + See pg_current_snapshot(). + + + + + + + txid_snapshot_xip + + txid_snapshot_xip ( txid_snapshot ) + setof bigint + + + See pg_snapshot_xip(). + + + + + + + txid_snapshot_xmax + + txid_snapshot_xmax ( txid_snapshot ) + bigint + + + See pg_snapshot_xmax(). + + + + + + + txid_snapshot_xmin + + txid_snapshot_xmin ( txid_snapshot ) + bigint + + + See pg_snapshot_xmin(). + + + + + + + txid_visible_in_snapshot + + txid_visible_in_snapshot ( bigint, txid_snapshot ) + boolean + + + See pg_visible_in_snapshot(). + + + + + + + txid_status + + txid_status ( bigint ) + text + + + See pg_xact_status(). + + + + +
+ +
+ + + Committed Transaction Information Functions + + + The functions shown in + provide information about when past transactions were committed. + They only provide useful data when the + configuration option is + enabled, and only for transactions that were committed after it was + enabled. Commit timestamp information is routinely removed during + vacuum. + + + + Committed Transaction Information Functions + + + + + Function + + + Description + + + + + + + + + pg_xact_commit_timestamp + + pg_xact_commit_timestamp ( xid ) + timestamp with time zone + + + Returns the commit timestamp of a transaction. + + + + + + + pg_xact_commit_timestamp_origin + + pg_xact_commit_timestamp_origin ( xid ) + record + ( timestamp timestamp with time zone, + roident oid) + + + Returns the commit timestamp and replication origin of a transaction. + + + + + + + pg_last_committed_xact + + pg_last_committed_xact () + record + ( xid xid, + timestamp timestamp with time zone, + roident oid ) + + + Returns the transaction ID, commit timestamp and replication origin + of the latest committed transaction. + + + + +
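+ For example, the commit time of the transaction that last wrote a row
+ can be recovered through the row's xmin system column. A sketch,
+ assuming a hypothetical table mytable and that track_commit_timestamp
+ was already on when the row was written:
+
+-- returns NULL for transactions with no recorded timestamp
+SELECT pg_xact_commit_timestamp(xmin), * FROM mytable LIMIT 1;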
+ +
+ + + Control Data Functions + + + The functions shown in + print information initialized during initdb, such + as the catalog version. They also show information about write-ahead + logging and checkpoint processing. This information is cluster-wide, + not specific to any one database. These functions provide most of the same + information, from the same source, as the + application. + + + + Control Data Functions + + + + + Function + + + Description + + + + + + + + + pg_control_checkpoint + + pg_control_checkpoint () + record + + + Returns information about current checkpoint state, as shown in + . + + + + + + + pg_control_system + + pg_control_system () + record + + + Returns information about current control file state, as shown in + . + + + + + + + pg_control_init + + pg_control_init () + record + + + Returns information about cluster initialization state, as shown in + . + + + + + + + pg_control_recovery + + pg_control_recovery () + record + + + Returns information about recovery state, as shown in + . + + + + +
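+ For example, to inspect the current timeline and checkpoint location on
+ a running server (the available columns are listed in the tables
+ below):
+
+SELECT timeline_id, checkpoint_lsn, redo_lsn, checkpoint_time
+FROM pg_control_checkpoint();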
+ + + <function>pg_control_checkpoint</function> Output Columns + + + + Column Name + Data Type + + + + + + + checkpoint_lsn + pg_lsn + + + + redo_lsn + pg_lsn + + + + redo_wal_file + text + + + + timeline_id + integer + + + + prev_timeline_id + integer + + + + full_page_writes + boolean + + + + next_xid + text + + + + next_oid + oid + + + + next_multixact_id + xid + + + + next_multi_offset + xid + + + + oldest_xid + xid + + + + oldest_xid_dbid + oid + + + + oldest_active_xid + xid + + + + oldest_multi_xid + xid + + + + oldest_multi_dbid + oid + + + + oldest_commit_ts_xid + xid + + + + newest_commit_ts_xid + xid + + + + checkpoint_time + timestamp with time zone + + + + +
+ + + <function>pg_control_system</function> Output Columns + + + + Column Name + Data Type + + + + + + + pg_control_version + integer + + + + catalog_version_no + integer + + + + system_identifier + bigint + + + + pg_control_last_modified + timestamp with time zone + + + + +
+ + + <function>pg_control_init</function> Output Columns + + + + Column Name + Data Type + + + + + + + max_data_alignment + integer + + + + database_block_size + integer + + + + blocks_per_segment + integer + + + + wal_block_size + integer + + + + bytes_per_wal_segment + integer + + + + max_identifier_length + integer + + + + max_index_columns + integer + + + + max_toast_chunk_size + integer + + + + large_object_chunk_size + integer + + + + float8_pass_by_value + boolean + + + + data_page_checksum_version + integer + + + + default_char_signedness + boolean + + + + +
+ + + <function>pg_control_recovery</function> Output Columns + + + + Column Name + Data Type + + + + + + + min_recovery_end_lsn + pg_lsn + + + + min_recovery_end_timeline + integer + + + + backup_start_lsn + pg_lsn + + + + backup_end_lsn + pg_lsn + + + + end_of_backup_record_required + boolean + + + + +
+ +
+ + + Version Information Functions + + The functions shown in + print version information. + + + Version Information Functions + + + + + Function + + + Description + + + + + + + + + version + + version () + text + + + Returns a string describing the PostgreSQL + server's version. You can also get this information from + , or for a machine-readable + version use . Software + developers should use server_version_num (available + since 8.2) or instead of + parsing the text version. + + + + + + + unicode_version + + unicode_version () + text + + + Returns a string representing the version of Unicode used by + PostgreSQL. + + + + + + icu_unicode_version + + icu_unicode_version () + text + + + Returns a string representing the version of Unicode used by ICU, if + the server was built with ICU support; otherwise returns + NULL. + + + +
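+ For example (the commented values are illustrative only):
+
+SELECT version();                              -- human-readable description
+SELECT current_setting('server_version_num');  -- machine-readable, e.g. 170000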
+ +
+ + + WAL Summarization Information Functions + + The functions shown in + print information about the status of WAL summarization. + See . + + + WAL Summarization Information Functions + + + + + Function + + + Description + + + + + + + + + pg_available_wal_summaries + + pg_available_wal_summaries () + setof record + ( tli bigint, + start_lsn pg_lsn, + end_lsn pg_lsn ) + + + Returns information about the WAL summary files present in the + data directory, under pg_wal/summaries. + One row will be returned per WAL summary file. Each file summarizes + WAL on the indicated TLI within the indicated LSN range. This function + might be useful to determine whether enough WAL summaries are present + on the server to take an incremental backup based on some prior + backup whose start LSN is known. + + + + + + + pg_wal_summary_contents + + pg_wal_summary_contents ( tli bigint, start_lsn pg_lsn, end_lsn pg_lsn ) + setof record + ( relfilenode oid, + reltablespace oid, + reldatabase oid, + relforknumber smallint, + relblocknumber bigint, + is_limit_block boolean ) + + + Returns information about the contents of a single WAL summary file + identified by TLI and starting and ending LSNs. Each row with + is_limit_block false indicates that the block + identified by the remaining output columns was modified by at least + one WAL record within the range of records summarized by this file. + Each row with is_limit_block true indicates either + that (a) the relation fork was truncated to the length given by + relblocknumber within the relevant range of WAL + records or (b) that the relation fork was created or dropped within + the relevant range of WAL records; in such cases, + relblocknumber will be zero. + + + + + + + pg_get_wal_summarizer_state + + pg_get_wal_summarizer_state () + record + ( summarized_tli bigint, + summarized_lsn pg_lsn, + pending_lsn pg_lsn, + summarizer_pid int ) + + + Returns information about the progress of the WAL summarizer. If the + WAL summarizer has never run since the instance was started, then + summarized_tli and summarized_lsn + will be 0 and 0/00000000 respectively; + otherwise, they will be the TLI and ending LSN of the last WAL summary + file written to disk. If the WAL summarizer is currently running, + pending_lsn will be the ending LSN of the last + record that it has consumed, which must always be greater than or + equal to summarized_lsn; if the WAL summarizer is + not running, it will be equal to summarized_lsn. + summarizer_pid is the PID of the WAL summarizer + process, if it is running, and otherwise NULL. + + + As a special exception, the WAL summarizer will refuse to generate + WAL summary files if run on WAL generated under + wal_level=minimal, since such summaries would be + unsafe to use as the basis for an incremental backup. In this case, + the fields above will continue to advance as if summaries were being + generated, but nothing will be written to disk. Once the summarizer + reaches WAL generated while wal_level was set + to replica or higher, it will resume writing + summaries to disk. + + + + +
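+ For example, assuming WAL summarization is enabled
+ (summarize_wal = on), the newest summary on disk can be located with:
+
+SELECT tli, start_lsn, end_lsn
+FROM pg_available_wal_summaries()
+ORDER BY end_lsn DESC
+LIMIT 1;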
+ +
+ +
diff --git a/doc/src/sgml/func/func-json.sgml b/doc/src/sgml/func/func-json.sgml new file mode 100644 index 0000000000000..91f98a345d445 --- /dev/null +++ b/doc/src/sgml/func/func-json.sgml @@ -0,0 +1,3945 @@ + + JSON Functions and Operators + + + JSON + functions and operators + + + SQL/JSON + functions and expressions + + + + This section describes: + + + + + functions and operators for processing and creating JSON data + + + + + the SQL/JSON path language + + + + + the SQL/JSON query functions + + + + + + + To provide native support for JSON data types within the SQL environment, + PostgreSQL implements the + SQL/JSON data model. + This model comprises sequences of items. Each item can hold SQL scalar + values, with an additional SQL/JSON null value, and composite data structures + that use JSON arrays and objects. The model is a formalization of the implied + data model in the JSON specification + RFC 7159. + + + + SQL/JSON allows you to handle JSON data alongside regular SQL data, + with transaction support, including: + + + + + Uploading JSON data into the database and storing it in + regular SQL columns as character or binary strings. + + + + + Generating JSON objects and arrays from relational data. + + + + + Querying JSON data using SQL/JSON query functions and + SQL/JSON path language expressions. + + + + + + + To learn more about the SQL/JSON standard, see + . For details on JSON types + supported in PostgreSQL, + see . + + + + Processing and Creating JSON Data + + + shows the operators that + are available for use with JSON data types (see ). + In addition, the usual comparison operators shown in are available for + jsonb, though not for json. The comparison + operators follow the ordering rules for B-tree operations outlined in + . + See also for the aggregate + function json_agg which aggregates record + values as JSON, the aggregate function + json_object_agg which aggregates pairs of values + into a JSON object, and their jsonb equivalents, + jsonb_agg and jsonb_object_agg. + + + + <type>json</type> and <type>jsonb</type> Operators + + + + + Operator + + + Description + + + Example(s) + + + + + + + + json -> integer + json + + + jsonb -> integer + jsonb + + + Extracts n'th element of JSON array + (array elements are indexed from zero, but negative integers count + from the end). + + + '[{"a":"foo"},{"b":"bar"},{"c":"baz"}]'::json -> 2 + {"c":"baz"} + + + '[{"a":"foo"},{"b":"bar"},{"c":"baz"}]'::json -> -3 + {"a":"foo"} + + + + + + json -> text + json + + + jsonb -> text + jsonb + + + Extracts JSON object field with the given key. + + + '{"a": {"b":"foo"}}'::json -> 'a' + {"b":"foo"} + + + + + + json ->> integer + text + + + jsonb ->> integer + text + + + Extracts n'th element of JSON array, + as text. + + + '[1,2,3]'::json ->> 2 + 3 + + + + + + json ->> text + text + + + jsonb ->> text + text + + + Extracts JSON object field with the given key, as text. + + + '{"a":1,"b":2}'::json ->> 'b' + 2 + + + + + + json #> text[] + json + + + jsonb #> text[] + jsonb + + + Extracts JSON sub-object at the specified path, where path elements + can be either field keys or array indexes. + + + '{"a": {"b": ["foo","bar"]}}'::json #> '{a,b,1}' + "bar" + + + + + + json #>> text[] + text + + + jsonb #>> text[] + text + + + Extracts JSON sub-object at the specified path as text. + + + '{"a": {"b": ["foo","bar"]}}'::json #>> '{a,b,1}' + bar + + + + +
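These operators can be chained to drill into a document, switching to the text-returning form only at the final step, for example:

=> SELECT ('{"items": [{"sku": "a1"}, {"sku": "b2"}]}'::jsonb -> 'items' -> 1) ->> 'sku';
 ?column?
----------
 b2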
+ + + + The field/element/path extraction operators return NULL, rather than + failing, if the JSON input does not have the right structure to match + the request; for example if no such key or array element exists. + + + + + Some further operators exist only for jsonb, as shown + in . + + describes how these operators can be used to effectively search indexed + jsonb data. + + + + Additional <type>jsonb</type> Operators + + + + + Operator + + + Description + + + Example(s) + + + + + + + + jsonb @> jsonb + boolean + + + Does the first JSON value contain the second? + (See for details about containment.) + + + '{"a":1, "b":2}'::jsonb @> '{"b":2}'::jsonb + t + + + + + + jsonb <@ jsonb + boolean + + + Is the first JSON value contained in the second? + + + '{"b":2}'::jsonb <@ '{"a":1, "b":2}'::jsonb + t + + + + + + jsonb ? text + boolean + + + Does the text string exist as a top-level key or array element within + the JSON value? + + + '{"a":1, "b":2}'::jsonb ? 'b' + t + + + '["a", "b", "c"]'::jsonb ? 'b' + t + + + + + + jsonb ?| text[] + boolean + + + Do any of the strings in the text array exist as top-level keys or + array elements? + + + '{"a":1, "b":2, "c":3}'::jsonb ?| array['b', 'd'] + t + + + + + + jsonb ?& text[] + boolean + + + Do all of the strings in the text array exist as top-level keys or + array elements? + + + '["a", "b", "c"]'::jsonb ?& array['a', 'b'] + t + + + + + + jsonb || jsonb + jsonb + + + Concatenates two jsonb values. + Concatenating two arrays generates an array containing all the + elements of each input. Concatenating two objects generates an + object containing the union of their + keys, taking the second object's value when there are duplicate keys. + All other cases are treated by converting a non-array input into a + single-element array, and then proceeding as for two arrays. + Does not operate recursively: only the top-level array or object + structure is merged. + + + '["a", "b"]'::jsonb || '["a", "d"]'::jsonb + ["a", "b", "a", "d"] + + + '{"a": "b"}'::jsonb || '{"c": "d"}'::jsonb + {"a": "b", "c": "d"} + + + '[1, 2]'::jsonb || '3'::jsonb + [1, 2, 3] + + + '{"a": "b"}'::jsonb || '42'::jsonb + [{"a": "b"}, 42] + + + To append an array to another array as a single entry, wrap it + in an additional layer of array, for example: + + + '[1, 2]'::jsonb || jsonb_build_array('[3, 4]'::jsonb) + [1, 2, [3, 4]] + + + + + + jsonb - text + jsonb + + + Deletes a key (and its value) from a JSON object, or matching string + value(s) from a JSON array. + + + '{"a": "b", "c": "d"}'::jsonb - 'a' + {"c": "d"} + + + '["a", "b", "c", "b"]'::jsonb - 'b' + ["a", "c"] + + + + + + jsonb - text[] + jsonb + + + Deletes all matching keys or array elements from the left operand. + + + '{"a": "b", "c": "d"}'::jsonb - '{a,c}'::text[] + {} + + + + + + jsonb - integer + jsonb + + + Deletes the array element with specified index (negative + integers count from the end). Throws an error if JSON value + is not an array. + + + '["a", "b"]'::jsonb - 1 + ["a"] + + + + + + jsonb #- text[] + jsonb + + + Deletes the field or array element at the specified path, where path + elements can be either field keys or array indexes. + + + '["a", {"b":1}]'::jsonb #- '{1,b}' + ["a", {}] + + + + + + jsonb @? jsonpath + boolean + + + Does JSON path return any item for the specified JSON value? + (This is useful only with SQL-standard JSON path expressions, not + predicate check + expressions, since those always return a value.) + + + '{"a":[1,2,3,4,5]}'::jsonb @? '$.a[*] ? 
(@ > 2)' + t + + + + + + jsonb @@ jsonpath + boolean + + + Returns the result of a JSON path predicate check for the + specified JSON value. + (This is useful only + with predicate + check expressions, not SQL-standard JSON path expressions, + since it will return NULL if the path result is + not a single boolean value.) + + + '{"a":[1,2,3,4,5]}'::jsonb @@ '$.a[*] > 2' + t + + + + +
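In queries, the containment operator is typically used to filter rows; a sketch, assuming a hypothetical table docs with a jsonb column body (given a suitable GIN index, @> can be evaluated using the index, as described in the indexing documentation mentioned above):

SELECT * FROM docs WHERE body @> '{"status": "active"}'::jsonb;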
+ + + + The jsonpath operators @? + and @@ suppress the following errors: missing object + field or array element, unexpected JSON item type, datetime and numeric + errors. The jsonpath-related functions described below can + also be told to suppress these types of errors. This behavior might be + helpful when searching JSON document collections of varying structure. + + + + + shows the functions that are + available for constructing json and jsonb values. + Some functions in this table have a RETURNING clause, + which specifies the data type returned. It must be one of json, + jsonb, bytea, a character string type (text, + char, or varchar), or a type + for which there is a cast from json to that type. + By default, the json type is returned. + + + + JSON Creation Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + to_json + + to_json ( anyelement ) + json + + + + to_jsonb + + to_jsonb ( anyelement ) + jsonb + + + Converts any SQL value to json or jsonb. + Arrays and composites are converted recursively to arrays and + objects (multidimensional arrays become arrays of arrays in JSON). + Otherwise, if there is a cast from the SQL data type + to json, the cast function will be used to perform the + conversion; + + For example, the extension has a cast + from hstore to json, so that + hstore values converted via the JSON creation functions + will be represented as JSON objects, not as primitive string values. + + + otherwise, a scalar JSON value is produced. For any scalar other than + a number, a Boolean, or a null value, the text representation will be + used, with escaping as necessary to make it a valid JSON string value. + + + to_json('Fred said "Hi."'::text) + "Fred said \"Hi.\"" + + + to_jsonb(row(42, 'Fred said "Hi."'::text)) + {"f1": 42, "f2": "Fred said \"Hi.\""} + + + + + + + array_to_json + + array_to_json ( anyarray , boolean ) + json + + + Converts an SQL array to a JSON array. The behavior is the same + as to_json except that line feeds will be added + between top-level array elements if the optional boolean parameter is + true. + + + array_to_json('{{1,5},{99,100}}'::int[]) + [[1,5],[99,100]] + + + + + + + json_array + json_array ( + { value_expression FORMAT JSON } , ... + { NULL | ABSENT } ON NULL + RETURNING data_type FORMAT JSON ENCODING UTF8 ) + + + json_array ( + query_expression + RETURNING data_type FORMAT JSON ENCODING UTF8 ) + + + Constructs a JSON array from either a series of + value_expression parameters or from the results + of query_expression, + which must be a SELECT query returning a single column. If + ABSENT ON NULL is specified, NULL values are ignored. + This is always the case if a + query_expression is used. + + + json_array(1,true,json '{"a":null}') + [1, true, {"a":null}] + + + json_array(SELECT * FROM (VALUES(1),(2)) t) + [1, 2] + + + + + + + row_to_json + + row_to_json ( record , boolean ) + json + + + Converts an SQL composite value to a JSON object. The behavior is the + same as to_json except that line feeds will be + added between top-level elements if the optional boolean parameter is + true. + + + row_to_json(row(1,'foo')) + {"f1":1,"f2":"foo"} + + + + + + + json_build_array + + json_build_array ( VARIADIC "any" ) + json + + + + jsonb_build_array + + jsonb_build_array ( VARIADIC "any" ) + jsonb + + + Builds a possibly-heterogeneously-typed JSON array out of a variadic + argument list. Each argument is converted as + per to_json or to_jsonb.
+ + + json_build_array(1, 2, 'foo', 4, 5) + [1, 2, "foo", 4, 5] + + + + + + + json_build_object + + json_build_object ( VARIADIC "any" ) + json + + + + jsonb_build_object + + jsonb_build_object ( VARIADIC "any" ) + jsonb + + + Builds a JSON object out of a variadic argument list. By convention, + the argument list consists of alternating keys and values. Key + arguments are coerced to text; value arguments are converted as + per to_json or to_jsonb. + + + json_build_object('foo', 1, 2, row(3,'bar')) + {"foo" : 1, "2" : {"f1":3,"f2":"bar"}} + + + + + + json_object + json_object ( + { key_expression { VALUE | ':' } + value_expression FORMAT JSON ENCODING UTF8 }, ... + { NULL | ABSENT } ON NULL + { WITH | WITHOUT } UNIQUE KEYS + RETURNING data_type FORMAT JSON ENCODING UTF8 ) + + + Constructs a JSON object of all the key/value pairs given, + or an empty object if none are given. + key_expression is a scalar expression + defining the JSON key, which is + converted to the text type. + It cannot be NULL nor can it + belong to a type that has a cast to the json type. + If WITH UNIQUE KEYS is specified, there must not + be any duplicate key_expression. + Any pair for which the value_expression + evaluates to NULL is omitted from the output + if ABSENT ON NULL is specified; + if NULL ON NULL is specified or the clause + omitted, the key is included with value NULL. + + + json_object('code' VALUE 'P123', 'title': 'Jaws') + {"code" : "P123", "title" : "Jaws"} + + + + + + + json_object + + json_object ( text[] ) + json + + + + jsonb_object + + jsonb_object ( text[] ) + jsonb + + + Builds a JSON object out of a text array. The array must have either + exactly one dimension with an even number of members, in which case + they are taken as alternating key/value pairs, or two dimensions + such that each inner array has exactly two elements, which + are taken as a key/value pair. All values are converted to JSON + strings. + + + json_object('{a, 1, b, "def", c, 3.5}') + {"a" : "1", "b" : "def", "c" : "3.5"} + + json_object('{{a, 1}, {b, "def"}, {c, 3.5}}') + {"a" : "1", "b" : "def", "c" : "3.5"} + + + + + + json_object ( keys text[], values text[] ) + json + + + jsonb_object ( keys text[], values text[] ) + jsonb + + + This form of json_object takes keys and values + pairwise from separate text arrays. Otherwise it is identical to + the one-argument form. + + + json_object('{a,b}', '{1,2}') + {"a": "1", "b": "2"} + + + + + + json constructor + json ( + expression + FORMAT JSON ENCODING UTF8 + { WITH | WITHOUT } UNIQUE KEYS ) + json + + + Converts a given expression specified as text or + bytea string (in UTF8 encoding) into a JSON + value. If expression is NULL, an + SQL null value is returned. + If WITH UNIQUE is specified, the + expression must not contain any duplicate + object keys. + + + json('{"a":123, "b":[true,"foo"], "a":"bar"}') + {"a":123, "b":[true,"foo"], "a":"bar"} + + + + + + + json_scalar + json_scalar ( expression ) + + + Converts a given SQL scalar value into a JSON scalar value. + If the input is NULL, an SQL null is returned. If + the input is a number or a boolean value, a corresponding JSON number + or boolean value is returned. For any other value, a JSON string is + returned. + + + json_scalar(123.45) + 123.45 + + + json_scalar(CURRENT_TIMESTAMP) + "2022-05-10T10:51:04.62128-04:00" + + + + + + json_serialize ( + expression FORMAT JSON ENCODING UTF8 + RETURNING data_type FORMAT JSON ENCODING UTF8 ) + + + Converts an SQL/JSON expression into a character or binary string.
The + expression can be of any JSON type, any + character string type, or bytea in UTF8 encoding. + The returned type used in RETURNING can be any + character string type or bytea. The default is + text. + + + json_serialize('{ "a" : 1 } ' RETURNING bytea) + \x7b20226122203a2031207d20 + + + + +
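To make the effect of the ON NULL clauses described above concrete, the two spellings can be compared directly:

=> SELECT json_array(1, NULL, 2 ABSENT ON NULL);
 json_array
------------
 [1, 2]

=> SELECT json_array(1, NULL, 2 NULL ON NULL);
  json_array
--------------
 [1, null, 2]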
+ + + details SQL/JSON + facilities for testing JSON. + + + + SQL/JSON Testing Functions + + + + + Function signature + + + Description + + + Example(s) + + + + + + + IS JSON + expression IS NOT JSON + { VALUE | SCALAR | ARRAY | OBJECT } + { WITH | WITHOUT } UNIQUE KEYS + + + This predicate tests whether expression can be + parsed as JSON, possibly of a specified type. + If SCALAR or ARRAY or + OBJECT is specified, the + test is whether or not the JSON is of that particular type. If + WITH UNIQUE KEYS is specified, then any object in the + expression is also tested to see if it + has duplicate keys. + + + +SELECT js, + js IS JSON "json?", + js IS JSON SCALAR "scalar?", + js IS JSON OBJECT "object?", + js IS JSON ARRAY "array?" +FROM (VALUES + ('123'), ('"abc"'), ('{"a": "b"}'), ('[1,2]'),('abc')) foo(js); + js | json? | scalar? | object? | array? +------------+-------+---------+---------+-------- + 123 | t | t | f | f + "abc" | t | t | f | f + {"a": "b"} | t | f | t | f + [1,2] | t | f | f | t + abc | f | f | f | f + + + + +SELECT js, + js IS JSON OBJECT "object?", + js IS JSON ARRAY "array?", + js IS JSON ARRAY WITH UNIQUE KEYS "array w. UK?", + js IS JSON ARRAY WITHOUT UNIQUE KEYS "array w/o UK?" +FROM (VALUES ('[{"a":"1"}, + {"b":"2","b":"3"}]')) foo(js); +-[ RECORD 1 ]-+-------------------- +js | [{"a":"1"}, + + | {"b":"2","b":"3"}] +object? | f +array? | t +array w. UK? | f +array w/o UK? | t + + + + + +
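Because IS JSON is an ordinary predicate, it can also be used to validate data at write time; a minimal sketch, assuming a hypothetical table that stores raw text payloads:

CREATE TABLE raw_events (
    payload text CHECK (payload IS JSON OBJECT)
);

INSERT INTO raw_events VALUES ('{"type": "click"}');  -- accepted
INSERT INTO raw_events VALUES ('not json');           -- rejected by the constraint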
+ + + shows the functions that + are available for processing json and jsonb values. + + + + JSON Processing Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + json_array_elements + + json_array_elements ( json ) + setof json + + + + jsonb_array_elements + + jsonb_array_elements ( jsonb ) + setof jsonb + + + Expands the top-level JSON array into a set of JSON values. + + + select * from json_array_elements('[1,true, [2,false]]') + + + value +----------- + 1 + true + [2,false] + + + + + + + + json_array_elements_text + + json_array_elements_text ( json ) + setof text + + + + jsonb_array_elements_text + + jsonb_array_elements_text ( jsonb ) + setof text + + + Expands the top-level JSON array into a set of text values. + + + select * from json_array_elements_text('["foo", "bar"]') + + + value +----------- + foo + bar + + + + + + + + json_array_length + + json_array_length ( json ) + integer + + + + jsonb_array_length + + jsonb_array_length ( jsonb ) + integer + + + Returns the number of elements in the top-level JSON array. + + + json_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]') + 5 + + + jsonb_array_length('[]') + 0 + + + + + + + json_each + + json_each ( json ) + setof record + ( key text, + value json ) + + + + jsonb_each + + jsonb_each ( jsonb ) + setof record + ( key text, + value jsonb ) + + + Expands the top-level JSON object into a set of key/value pairs. + + + select * from json_each('{"a":"foo", "b":"bar"}') + + + key | value +-----+------- + a | "foo" + b | "bar" + + + + + + + + json_each_text + + json_each_text ( json ) + setof record + ( key text, + value text ) + + + + jsonb_each_text + + jsonb_each_text ( jsonb ) + setof record + ( key text, + value text ) + + + Expands the top-level JSON object into a set of key/value pairs. + The returned values will be of + type text. + + + select * from json_each_text('{"a":"foo", "b":"bar"}') + + + key | value +-----+------- + a | foo + b | bar + + + + + + + + json_extract_path + + json_extract_path ( from_json json, VARIADIC path_elems text[] ) + json + + + + jsonb_extract_path + + jsonb_extract_path ( from_json jsonb, VARIADIC path_elems text[] ) + jsonb + + + Extracts JSON sub-object at the specified path. + (This is functionally equivalent to the #> + operator, but writing the path out as a variadic list can be more + convenient in some cases.) + + + json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"foo"}}', 'f4', 'f6') + "foo" + + + + + + + json_extract_path_text + + json_extract_path_text ( from_json json, VARIADIC path_elems text[] ) + text + + + + jsonb_extract_path_text + + jsonb_extract_path_text ( from_json jsonb, VARIADIC path_elems text[] ) + text + + + Extracts JSON sub-object at the specified path as text. + (This is functionally equivalent to the #>> + operator.) + + + json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"foo"}}', 'f4', 'f6') + foo + + + + + + + json_object_keys + + json_object_keys ( json ) + setof text + + + + jsonb_object_keys + + jsonb_object_keys ( jsonb ) + setof text + + + Returns the set of keys in the top-level JSON object. 
+ + + select * from json_object_keys('{"f1":"abc","f2":{"f3":"a", "f4":"b"}}') + + + json_object_keys +------------------ + f1 + f2 + + + + + + + + json_populate_record + + json_populate_record ( base anyelement, from_json json ) + anyelement + + + + jsonb_populate_record + + jsonb_populate_record ( base anyelement, from_json jsonb ) + anyelement + + + Expands the top-level JSON object to a row having the composite type + of the base argument. The JSON object + is scanned for fields whose names match column names of the output row + type, and their values are inserted into those columns of the output. + (Fields that do not correspond to any output column name are ignored.) + In typical use, the value of base is just + NULL, which means that any output columns that do + not match any object field will be filled with nulls. However, + if base isn't NULL then + the values it contains will be used for unmatched columns. + + + To convert a JSON value to the SQL type of an output column, the + following rules are applied in sequence: + + + + A JSON null value is converted to an SQL null in all cases. + + + + + If the output column is of type json + or jsonb, the JSON value is just reproduced exactly. + + + + + If the output column is a composite (row) type, and the JSON value + is a JSON object, the fields of the object are converted to columns + of the output row type by recursive application of these rules. + + + + + Likewise, if the output column is an array type and the JSON value + is a JSON array, the elements of the JSON array are converted to + elements of the output array by recursive application of these + rules. + + + + + Otherwise, if the JSON value is a string, the contents of the + string are fed to the input conversion function for the column's + data type. + + + + + Otherwise, the ordinary text representation of the JSON value is + fed to the input conversion function for the column's data type. + + + + + + While the example below uses a constant JSON value, typical use would + be to reference a json or jsonb column + laterally from another table in the query's FROM + clause. Writing json_populate_record in + the FROM clause is good practice, since all of the + extracted columns are available for use without duplicate function + calls. + + + create type subrowtype as (d int, e text); + create type myrowtype as (a int, b text[], c subrowtype); + + + select * from json_populate_record(null::myrowtype, + '{"a": 1, "b": ["2", "a b"], "c": {"d": 4, "e": "a b c"}, "x": "foo"}') + + + a | b | c +---+-----------+------------- + 1 | {2,"a b"} | (4,"a b c") + + + + + + + + jsonb_populate_record_valid + + jsonb_populate_record_valid ( base anyelement, from_json json ) + boolean + + + Function for testing jsonb_populate_record. Returns + true if the input jsonb_populate_record + would finish without an error for the given input JSON object; that is, it's + valid input, false otherwise. 
+ + + create type jsb_char2 as (a char(2)); + + + select jsonb_populate_record_valid(NULL::jsb_char2, '{"a": "aaa"}'); + + + jsonb_populate_record_valid +----------------------------- + f +(1 row) + + + select * from jsonb_populate_record(NULL::jsb_char2, '{"a": "aaa"}') q; + + +ERROR: value too long for type character(2) + + select jsonb_populate_record_valid(NULL::jsb_char2, '{"a": "aa"}'); + + + jsonb_populate_record_valid +----------------------------- + t +(1 row) + + + select * from jsonb_populate_record(NULL::jsb_char2, '{"a": "aa"}') q; + + + a +---- + aa +(1 row) + + + + + + + + json_populate_recordset + + json_populate_recordset ( base anyelement, from_json json ) + setof anyelement + + + + jsonb_populate_recordset + + jsonb_populate_recordset ( base anyelement, from_json jsonb ) + setof anyelement + + + Expands the top-level JSON array of objects to a set of rows having + the composite type of the base argument. + Each element of the JSON array is processed as described above + for json[b]_populate_record. + + + create type twoints as (a int, b int); + + + select * from json_populate_recordset(null::twoints, '[{"a":1,"b":2}, {"a":3,"b":4}]') + + + a | b +---+--- + 1 | 2 + 3 | 4 + + + + + + + + json_to_record + + json_to_record ( json ) + record + + + + jsonb_to_record + + jsonb_to_record ( jsonb ) + record + + + Expands the top-level JSON object to a row having the composite type + defined by an AS clause. (As with all functions + returning record, the calling query must explicitly + define the structure of the record with an AS + clause.) The output record is filled from fields of the JSON object, + in the same way as described above + for json[b]_populate_record. Since there is no + input record value, unmatched columns are always filled with nulls. + + + create type myrowtype as (a int, b text); + + + select * from json_to_record('{"a":1,"b":[1,2,3],"c":[1,2,3],"e":"bar","r": {"a": 123, "b": "a b c"}}') as x(a int, b text, c int[], d text, r myrowtype) + + + a | b | c | d | r +---+---------+---------+---+--------------- + 1 | [1,2,3] | {1,2,3} | | (123,"a b c") + + + + + + + + json_to_recordset + + json_to_recordset ( json ) + setof record + + + + jsonb_to_recordset + + jsonb_to_recordset ( jsonb ) + setof record + + + Expands the top-level JSON array of objects to a set of rows having + the composite type defined by an AS clause. (As + with all functions returning record, the calling query + must explicitly define the structure of the record with + an AS clause.) Each element of the JSON array is + processed as described above + for json[b]_populate_record. + + + select * from json_to_recordset('[{"a":1,"b":"foo"}, {"a":"2","c":"bar"}]') as x(a int, b text) + + + a | b +---+----- + 1 | foo + 2 | + + + + + + + + jsonb_set + + jsonb_set ( target jsonb, path text[], new_value jsonb , create_if_missing boolean ) + jsonb + + + Returns target + with the item designated by path + replaced by new_value, or with + new_value added if + create_if_missing is true (which is the + default) and the item designated by path + does not exist. + All earlier steps in the path must exist, or + the target is returned unchanged. + As with the path oriented operators, negative integers that + appear in the path count from the end + of JSON arrays. + If the last path step is an array index that is out of range, + and create_if_missing is true, the new + value is added at the beginning of the array if the index is negative, + or at the end of the array if it is positive. 
+ + + jsonb_set('[{"f1":1,"f2":null},2,null,3]', '{0,f1}', '[2,3,4]', false) + [{"f1": [2, 3, 4], "f2": null}, 2, null, 3] + + + jsonb_set('[{"f1":1,"f2":null},2]', '{0,f3}', '[2,3,4]') + [{"f1": 1, "f2": null, "f3": [2, 3, 4]}, 2] + + + + + + + jsonb_set_lax + + jsonb_set_lax ( target jsonb, path text[], new_value jsonb , create_if_missing boolean , null_value_treatment text ) + jsonb + + + If new_value is not NULL, + behaves identically to jsonb_set. Otherwise behaves + according to the value + of null_value_treatment which must be one + of 'raise_exception', + 'use_json_null', 'delete_key', or + 'return_target'. The default is + 'use_json_null'. + + + jsonb_set_lax('[{"f1":1,"f2":null},2,null,3]', '{0,f1}', null) + [{"f1": null, "f2": null}, 2, null, 3] + + + jsonb_set_lax('[{"f1":99,"f2":null},2]', '{0,f3}', null, true, 'return_target') + [{"f1": 99, "f2": null}, 2] + + + + + + + jsonb_insert + + jsonb_insert ( target jsonb, path text[], new_value jsonb , insert_after boolean ) + jsonb + + + Returns target + with new_value inserted. If the item + designated by the path is an array + element, new_value will be inserted before + that item if insert_after is false (which + is the default), or after it + if insert_after is true. If the item + designated by the path is an object + field, new_value will be inserted only if + the object does not already contain that key. + All earlier steps in the path must exist, or + the target is returned unchanged. + As with the path oriented operators, negative integers that + appear in the path count from the end + of JSON arrays. + If the last path step is an array index that is out of range, the new + value is added at the beginning of the array if the index is negative, + or at the end of the array if it is positive. + + + jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '"new_value"') + {"a": [0, "new_value", 1, 2]} + + + jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '"new_value"', true) + {"a": [0, 1, "new_value", 2]} + + + + + + + json_strip_nulls + + json_strip_nulls ( target json ,strip_in_arrays boolean ) + json + + + + jsonb_strip_nulls + + jsonb_strip_nulls ( target jsonb ,strip_in_arrays boolean ) + jsonb + + + Deletes all object fields that have null values from the given JSON + value, recursively. + If strip_in_arrays is true (the default is false), + null array elements are also stripped. + Otherwise they are not stripped. Bare null values are never stripped. + + + json_strip_nulls('[{"f1":1, "f2":null}, 2, null, 3]') + [{"f1":1},2,null,3] + + + jsonb_strip_nulls('[1,2,null,3,4]', true); + [1,2,3,4] + + + + + + + + jsonb_path_exists + + jsonb_path_exists ( target jsonb, path jsonpath , vars jsonb , silent boolean ) + boolean + + + Checks whether the JSON path returns any item for the specified JSON + value. + (This is useful only with SQL-standard JSON path expressions, not + predicate check + expressions, since those always return a value.) + If the vars argument is specified, it must + be a JSON object, and its fields provide named values to be + substituted into the jsonpath expression. + If the silent argument is specified and + is true, the function suppresses the same errors + as the @? and @@ operators do. + + + jsonb_path_exists('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2, "max":4}') + t + + + + + + + jsonb_path_match + + jsonb_path_match ( target jsonb, path jsonpath , vars jsonb , silent boolean ) + boolean + + + Returns the SQL boolean result of a JSON path predicate check + for the specified JSON value. 
+ (This is useful only + with predicate + check expressions, not SQL-standard JSON path expressions, + since it will either fail or return NULL if the + path result is not a single boolean value.) + The optional vars + and silent arguments act the same as + for jsonb_path_exists. + + + jsonb_path_match('{"a":[1,2,3,4,5]}', 'exists($.a[*] ? (@ >= $min && @ <= $max))', '{"min":2, "max":4}') + t + + + + + + + jsonb_path_query + + jsonb_path_query ( target jsonb, path jsonpath , vars jsonb , silent boolean ) + setof jsonb + + + Returns all JSON items returned by the JSON path for the specified + JSON value. + For SQL-standard JSON path expressions it returns the JSON + values selected from target. + For predicate + check expressions it returns the result of the predicate + check: true, false, + or null. + The optional vars + and silent arguments act the same as + for jsonb_path_exists. + + + select * from jsonb_path_query('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2, "max":4}') + + + jsonb_path_query +------------------ + 2 + 3 + 4 + + + + + + + + jsonb_path_query_array + + jsonb_path_query_array ( target jsonb, path jsonpath , vars jsonb , silent boolean ) + jsonb + + + Returns all JSON items returned by the JSON path for the specified + JSON value, as a JSON array. + The parameters are the same as + for jsonb_path_query. + + + jsonb_path_query_array('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2, "max":4}') + [2, 3, 4] + + + + + + + jsonb_path_query_first + + jsonb_path_query_first ( target jsonb, path jsonpath , vars jsonb , silent boolean ) + jsonb + + + Returns the first JSON item returned by the JSON path for the + specified JSON value, or NULL if there are no + results. + The parameters are the same as + for jsonb_path_query. + + + jsonb_path_query_first('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2, "max":4}') + 2 + + + + + + + jsonb_path_exists_tz + + jsonb_path_exists_tz ( target jsonb, path jsonpath , vars jsonb , silent boolean ) + boolean + + + + jsonb_path_match_tz + + jsonb_path_match_tz ( target jsonb, path jsonpath , vars jsonb , silent boolean ) + boolean + + + + jsonb_path_query_tz + + jsonb_path_query_tz ( target jsonb, path jsonpath , vars jsonb , silent boolean ) + setof jsonb + + + + jsonb_path_query_array_tz + + jsonb_path_query_array_tz ( target jsonb, path jsonpath , vars jsonb , silent boolean ) + jsonb + + + + jsonb_path_query_first_tz + + jsonb_path_query_first_tz ( target jsonb, path jsonpath , vars jsonb , silent boolean ) + jsonb + + + These functions act like their counterparts described above without + the _tz suffix, except that these functions support + comparisons of date/time values that require timezone-aware + conversions. The example below requires interpretation of the + date-only value 2015-08-02 as a timestamp with time + zone, so the result depends on the current + setting. Due to this dependency, these + functions are marked as stable, which means these functions cannot be + used in indexes. Their counterparts are immutable, and so can be used + in indexes; but they will throw errors if asked to make such + comparisons. + + + jsonb_path_exists_tz('["2015-08-01 12:00:00-05"]', '$[*] ? (@.datetime() < "2015-08-02".datetime())') + t + + + + + + + jsonb_pretty + + jsonb_pretty ( jsonb ) + text + + + Converts the given JSON value to pretty-printed, indented text. 
+ + + jsonb_pretty('[{"f1":1,"f2":null}, 2]') + + +[ + { + "f1": 1, + "f2": null + }, + 2 +] + + + + + + + + json_typeof + + json_typeof ( json ) + text + + + + jsonb_typeof + + jsonb_typeof ( jsonb ) + text + + + Returns the type of the top-level JSON value as a text string. + Possible types are + object, array, + string, number, + boolean, and null. + (The null result should not be confused + with an SQL NULL; see the examples.) + + + json_typeof('-123.4') + number + + + json_typeof('null'::json) + null + + + json_typeof(NULL::json) IS NULL + t + + + + +
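The set-returning functions above are most often applied laterally to a table column, as noted for json_populate_record; a sketch, assuming a hypothetical table orders(id int, items jsonb):

SELECT o.id, item->>'sku' AS sku, (item->>'qty')::int AS qty
FROM orders AS o
     CROSS JOIN LATERAL jsonb_array_elements(o.items) AS item;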
+
+ + + The SQL/JSON Path Language + + + SQL/JSON path language + + + + SQL/JSON path expressions specify item(s) to be retrieved + from a JSON value, similarly to XPath expressions used + for access to XML content. In PostgreSQL, + path expressions are implemented as the jsonpath + data type and can use any elements described in + . + + + + JSON query functions and operators + pass the provided path expression to the path engine + for evaluation. If the expression matches the queried JSON data, + the corresponding JSON item, or set of items, is returned. + If there is no match, the result will be NULL, + false, or an error, depending on the function. + Path expressions are written in the SQL/JSON path language + and can include arithmetic expressions and functions. + + + + A path expression consists of a sequence of elements allowed + by the jsonpath data type. + The path expression is normally evaluated from left to right, but + you can use parentheses to change the order of operations. + If the evaluation is successful, a sequence of JSON items is produced, + and the evaluation result is returned to the JSON query function + that completes the specified computation. + + + + To refer to the JSON value being queried (the + context item), use the $ variable + in the path expression. The first element of a path must always + be $. It can be followed by one or more + accessor operators, + which go down the JSON structure level by level to retrieve sub-items + of the context item. Each accessor operator acts on the + result(s) of the previous evaluation step, producing zero, one, or more + output items from each input item. + + + + For example, suppose you have some JSON data from a GPS tracker that you + would like to parse, such as: + +SELECT '{ + "track": { + "segments": [ + { + "location": [ 47.763, 13.4034 ], + "start time": "2018-10-14 10:05:14", + "HR": 73 + }, + { + "location": [ 47.706, 13.2635 ], + "start time": "2018-10-14 10:39:21", + "HR": 135 + } + ] + } +}' AS json \gset + + (The above example can be copied-and-pasted + into psql to set things up for the following + examples. Then psql will + expand :'json' into a suitably-quoted string + constant containing the JSON value.) + + + + To retrieve the available track segments, you need to use the + .key accessor + operator to descend through surrounding JSON objects, for example: + +=> select jsonb_path_query(:'json', '$.track.segments'); + jsonb_path_query +-----------------------------------------------------------&zwsp;-----------------------------------------------------------&zwsp;--------------------------------------------- + [{"HR": 73, "location": [47.763, 13.4034], "start time": "2018-10-14 10:05:14"}, {"HR": 135, "location": [47.706, 13.2635], "start time": "2018-10-14 10:39:21"}] + + + + + To retrieve the contents of an array, you typically use the + [*] operator. 
+ The following example will return the location coordinates for all + the available track segments: + +=> select jsonb_path_query(:'json', '$.track.segments[*].location'); + jsonb_path_query +------------------- + [47.763, 13.4034] + [47.706, 13.2635] + + Here we started with the whole JSON input value ($), + then the .track accessor selected the JSON object + associated with the "track" object key, then + the .segments accessor selected the JSON array + associated with the "segments" key within that + object, then the [*] accessor selected each element + of that array (producing a series of items), then + the .location accessor selected the JSON array + associated with the "location" key within each of + those objects. In this example, each of those objects had + a "location" key; but if any of them did not, + the .location accessor would have simply produced no + output for that input item. + + + + To return the coordinates of the first segment only, you can + specify the corresponding subscript in the [] + accessor operator. Recall that JSON array indexes are 0-relative: + +=> select jsonb_path_query(:'json', '$.track.segments[0].location'); + jsonb_path_query +------------------- + [47.763, 13.4034] + + + + + The result of each path evaluation step can be processed + by one or more of the jsonpath operators and methods + listed in . + Each method name must be preceded by a dot. For example, + you can get the size of an array: + +=> select jsonb_path_query(:'json', '$.track.segments.size()'); + jsonb_path_query +------------------ + 2 + + More examples of using jsonpath operators + and methods within path expressions appear below in + . + + + + A path can also contain + filter expressions that work similarly to the + WHERE clause in SQL. A filter expression begins with + a question mark and provides a condition in parentheses: + + +? (condition) + + + + + Filter expressions must be written just after the path evaluation step + to which they should apply. The result of that step is filtered to include + only those items that satisfy the provided condition. SQL/JSON defines + three-valued logic, so the condition can + produce true, false, + or unknown. The unknown value + plays the same role as SQL NULL and can be tested + for with the is unknown predicate. Further path + evaluation steps use only those items for which the filter expression + returned true. + + + + The functions and operators that can be used in filter expressions are + listed in . Within a + filter expression, the @ variable denotes the value + being considered (i.e., one result of the preceding path step). You can + write accessor operators after @ to retrieve component + items. + + + + For example, suppose you would like to retrieve all heart rate values higher + than 130. You can achieve this as follows: + +=> select jsonb_path_query(:'json', '$.track.segments[*].HR ? (@ > 130)'); + jsonb_path_query +------------------ + 135 + + + + + To get the start times of segments with such values, you have to + filter out irrelevant segments before selecting the start times, so the + filter expression is applied to the previous step, and the path used + in the condition is different: + +=> select jsonb_path_query(:'json', '$.track.segments[*] ? (@.HR > 130)."start time"'); + jsonb_path_query +----------------------- + "2018-10-14 10:39:21" + + + + + You can use several filter expressions in sequence, if required. 
+ The following example selects start times of all segments that + contain locations with relevant coordinates and high heart rate values: + +=> select jsonb_path_query(:'json', '$.track.segments[*] ? (@.location[1] < 13.4) ? (@.HR > 130)."start time"'); + jsonb_path_query +----------------------- + "2018-10-14 10:39:21" + + + + + Using filter expressions at different nesting levels is also allowed. + The following example first filters all segments by location, and then + returns high heart rate values for these segments, if available: + +=> select jsonb_path_query(:'json', '$.track.segments[*] ? (@.location[1] < 13.4).HR ? (@ > 130)'); + jsonb_path_query +------------------ + 135 + + + + + You can also nest filter expressions within each other. + This example returns the size of the track if it contains any + segments with high heart rate values, or an empty sequence otherwise: + +=> select jsonb_path_query(:'json', '$.track ? (exists(@.segments[*] ? (@.HR > 130))).segments.size()'); + jsonb_path_query +------------------ + 2 + + + + + Deviations from the SQL Standard + + PostgreSQL's implementation of the SQL/JSON path + language has the following deviations from the SQL/JSON standard. + + + + Boolean Predicate Check Expressions + + As an extension to the SQL standard, + a PostgreSQL path expression can be a + Boolean predicate, whereas the SQL standard allows predicates only within + filters. While SQL-standard path expressions return the relevant + element(s) of the queried JSON value, predicate check expressions + return the single three-valued jsonb result of the + predicate: true, + false, or null. + For example, we could write this SQL-standard filter expression: + +=> select jsonb_path_query(:'json', '$.track.segments ?(@[*].HR > 130)'); + jsonb_path_query +-----------------------------------------------------------&zwsp;---------------------- + {"HR": 135, "location": [47.706, 13.2635], "start time": "2018-10-14 10:39:21"} + + The similar predicate check expression simply + returns true, indicating that a match exists: + +=> select jsonb_path_query(:'json', '$.track.segments[*].HR > 130'); + jsonb_path_query +------------------ + true + + + + + + Predicate check expressions are required in the + @@ operator (and the + jsonb_path_match function), and should not be used + with the @? operator (or the + jsonb_path_exists function). + + + + + + Regular Expression Interpretation + + There are minor differences in the interpretation of regular + expression patterns used in like_regex filters, as + described in . + + + + + + Strict and Lax Modes + + When you query JSON data, the path expression may not match the + actual JSON data structure. An attempt to access a non-existent + member of an object or element of an array is defined as a + structural error. SQL/JSON path expressions have two modes + of handling structural errors: + + + + + + lax (default) — the path engine implicitly adapts + the queried data to the specified path. + Any structural errors that cannot be fixed as described below + are suppressed, producing no match. + + + + + strict — if a structural error occurs, an error is raised. + + + + + + Lax mode facilitates matching of a JSON document and path + expression when the JSON data does not conform to the expected schema. + If an operand does not match the requirements of a particular operation, + it can be automatically wrapped as an SQL/JSON array, or unwrapped by + converting its elements into an SQL/JSON sequence before performing + the operation. 
Also, comparison operators automatically unwrap their + operands in lax mode, so you can compare SQL/JSON arrays + out-of-the-box. An array of size 1 is considered equal to its sole element. + Automatic unwrapping is not performed when: + + + + The path expression contains type() or + size() methods that return the type + and the number of elements in the array, respectively. + + + + + The queried JSON data contain nested arrays. In this case, only + the outermost array is unwrapped, while all the inner arrays + remain unchanged. Thus, implicit unwrapping can only go one + level down within each path evaluation step. + + + + + + + For example, when querying the GPS data listed above, you can + abstract from the fact that it stores an array of segments + when using lax mode: + +=> select jsonb_path_query(:'json', 'lax $.track.segments.location'); + jsonb_path_query +------------------- + [47.763, 13.4034] + [47.706, 13.2635] + + + + + In strict mode, the specified path must exactly match the structure of + the queried JSON document, so using this path + expression will cause an error: + +=> select jsonb_path_query(:'json', 'strict $.track.segments.location'); +ERROR: jsonpath member accessor can only be applied to an object + + To get the same result as in lax mode, you have to explicitly unwrap the + segments array: + +=> select jsonb_path_query(:'json', 'strict $.track.segments[*].location'); + jsonb_path_query +------------------- + [47.763, 13.4034] + [47.706, 13.2635] + + + + + The unwrapping behavior of lax mode can lead to surprising results. For + instance, the following query using the .** accessor + selects every HR value twice: + +=> select jsonb_path_query(:'json', 'lax $.**.HR'); + jsonb_path_query +------------------ + 73 + 135 + 73 + 135 + + This happens because the .** accessor selects both + the segments array and each of its elements, while + the .HR accessor automatically unwraps arrays when + using lax mode. To avoid surprising results, we recommend using + the .** accessor only in strict mode. The + following query selects each HR value just once: + +=> select jsonb_path_query(:'json', 'strict $.**.HR'); + jsonb_path_query +------------------ + 73 + 135 + + + + + The unwrapping of arrays can also lead to unexpected results. Consider this + example, which selects all the location arrays: + +=> select jsonb_path_query(:'json', 'lax $.track.segments[*].location'); + jsonb_path_query +------------------- + [47.763, 13.4034] + [47.706, 13.2635] +(2 rows) + + As expected it returns the full arrays. But applying a filter expression + causes the arrays to be unwrapped to evaluate each item, returning only the + items that match the expression: + +=> select jsonb_path_query(:'json', 'lax $.track.segments[*].location ?(@[*] > 15)'); + jsonb_path_query +------------------ + 47.763 + 47.706 +(2 rows) + + This despite the fact that the full arrays are selected by the path + expression. Use strict mode to restore selecting the arrays: + +=> select jsonb_path_query(:'json', 'strict $.track.segments[*].location ?(@[*] > 15)'); + jsonb_path_query +------------------- + [47.763, 13.4034] + [47.706, 13.2635] +(2 rows) + + + + + + SQL/JSON Path Operators and Methods + + + shows the operators and + methods available in jsonpath. Note that while the unary + operators and methods can be applied to multiple values resulting from a + preceding path step, the binary operators (addition etc.) can only be + applied to single values. 
In lax mode, methods applied to an array will be + executed for each value in the array. The exceptions are + .type() and .size(), which apply to + the array itself. + + + + <type>jsonpath</type> Operators and Methods + + + + + Operator/Method + + + Description + + + Example(s) + + + + + + + + number + number + number + + + Addition + + + jsonb_path_query('[2]', '$[0] + 3') + 5 + + + + + + + number + number + + + Unary plus (no operation); unlike addition, this can iterate over + multiple values + + + jsonb_path_query_array('{"x": [2,3,4]}', '+ $.x') + [2, 3, 4] + + + + + + number - number + number + + + Subtraction + + + jsonb_path_query('[2]', '7 - $[0]') + 5 + + + + + + - number + number + + + Negation; unlike subtraction, this can iterate over + multiple values + + + jsonb_path_query_array('{"x": [2,3,4]}', '- $.x') + [-2, -3, -4] + + + + + + number * number + number + + + Multiplication + + + jsonb_path_query('[4]', '2 * $[0]') + 8 + + + + + + number / number + number + + + Division + + + jsonb_path_query('[8.5]', '$[0] / 2') + 4.2500000000000000 + + + + + + number % number + number + + + Modulo (remainder) + + + jsonb_path_query('[32]', '$[0] % 10') + 2 + + + + + + value . type() + string + + + Type of the JSON item (see json_typeof) + + + jsonb_path_query_array('[1, "2", {}]', '$[*].type()') + ["number", "string", "object"] + + + + + + value . size() + number + + + Size of the JSON item (number of array elements, or 1 if not an + array) + + + jsonb_path_query('{"m": [11, 15]}', '$.m.size()') + 2 + + + + + + value . boolean() + boolean + + + Boolean value converted from a JSON boolean, number, or string + + + jsonb_path_query_array('[1, "yes", false]', '$[*].boolean()') + [true, true, false] + + + + + + value . string() + string + + + String value converted from a JSON boolean, number, string, or + datetime + + + jsonb_path_query_array('[1.23, "xyz", false]', '$[*].string()') + ["1.23", "xyz", "false"] + + + jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp().string()') + "2023-08-15T12:34:56" + + + + + + value . double() + number + + + Approximate floating-point number converted from a JSON number or + string + + + jsonb_path_query('{"len": "1.9"}', '$.len.double() * 2') + 3.8 + + + + + + number . ceiling() + number + + + Nearest integer greater than or equal to the given number + + + jsonb_path_query('{"h": 1.3}', '$.h.ceiling()') + 2 + + + + + + number . floor() + number + + + Nearest integer less than or equal to the given number + + + jsonb_path_query('{"h": 1.7}', '$.h.floor()') + 1 + + + + + + number . abs() + number + + + Absolute value of the given number + + + jsonb_path_query('{"z": -0.3}', '$.z.abs()') + 0.3 + + + + + + value . bigint() + bigint + + + Big integer value converted from a JSON number or string + + + jsonb_path_query('{"len": "9876543219"}', '$.len.bigint()') + 9876543219 + + + + + + value . decimal( [ precision [ , scale ] ] ) + decimal + + + Rounded decimal value converted from a JSON number or string + (precision and scale must be + integer values) + + + jsonb_path_query('1234.5678', '$.decimal(6, 2)') + 1234.57 + + + + + + value . integer() + integer + + + Integer value converted from a JSON number or string + + + jsonb_path_query('{"len": "12345"}', '$.len.integer()') + 12345 + + + + + + value . number() + numeric + + + Numeric value converted from a JSON number or string + + + jsonb_path_query('{"len": "123.45"}', '$.len.number()') + 123.45 + + + + + + string . 
datetime() + datetime_type + (see note) + + + Date/time value converted from a string + + + jsonb_path_query('["2015-8-1", "2015-08-12"]', '$[*] ? (@.datetime() < "2015-08-2".datetime())') + "2015-8-1" + + + + + + string . datetime(template) + datetime_type + (see note) + + + Date/time value converted from a string using the + specified to_timestamp template + + + jsonb_path_query_array('["12:30", "18:40"]', '$[*].datetime("HH24:MI")') + ["12:30:00", "18:40:00"] + + + + + + string . date() + date + + + Date value converted from a string + + + jsonb_path_query('"2023-08-15"', '$.date()') + "2023-08-15" + + + + + + string . time() + time without time zone + + + Time without time zone value converted from a string + + + jsonb_path_query('"12:34:56"', '$.time()') + "12:34:56" + + + + + + string . time(precision) + time without time zone + + + Time without time zone value converted from a string, with fractional + seconds adjusted to the given precision + + + jsonb_path_query('"12:34:56.789"', '$.time(2)') + "12:34:56.79" + + + + + + string . time_tz() + time with time zone + + + Time with time zone value converted from a string + + + jsonb_path_query('"12:34:56 +05:30"', '$.time_tz()') + "12:34:56+05:30" + + + + + + string . time_tz(precision) + time with time zone + + + Time with time zone value converted from a string, with fractional + seconds adjusted to the given precision + + + jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(2)') + "12:34:56.79+05:30" + + + + + + string . timestamp() + timestamp without time zone + + + Timestamp without time zone value converted from a string + + + jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp()') + "2023-08-15T12:34:56" + + + + + + string . timestamp(precision) + timestamp without time zone + + + Timestamp without time zone value converted from a string, with + fractional seconds adjusted to the given precision + + + jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(2)') + "2023-08-15T12:34:56.79" + + + + + + string . timestamp_tz() + timestamp with time zone + + + Timestamp with time zone value converted from a string + + + jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp_tz()') + "2023-08-15T12:34:56+05:30" + + + + + + string . timestamp_tz(precision) + timestamp with time zone + + + Timestamp with time zone value converted from a string, with fractional + seconds adjusted to the given precision + + + jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(2)') + "2023-08-15T12:34:56.79+05:30" + + + + + + object . keyvalue() + array + + + The object's key-value pairs, represented as an array of objects + containing three fields: "key", + "value", and "id"; + "id" is a unique identifier of the object the + key-value pair belongs to + + + jsonb_path_query_array('{"x": "20", "y": 32}', '$.keyvalue()') + [{"id": 0, "key": "x", "value": "20"}, {"id": 0, "key": "y", "value": 32}] + + + + +
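Methods can be chained within a single path expression; for example, converting string readings to numbers and then rounding up:

=> select jsonb_path_query_array('{"readings": ["1.5", "2.5"]}', '$.readings[*].double().ceiling()');
 jsonb_path_query_array
------------------------
 [2, 3]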
+ + + + The result type of the datetime() and + datetime(template) + methods can be date, timetz, time, + timestamptz, or timestamp. + Both methods determine their result type dynamically. + + + The datetime() method sequentially tries to + match its input string to the ISO formats + for date, timetz, time, + timestamptz, and timestamp. It stops on + the first matching format and emits the corresponding data type. + + + The datetime(template) + method determines the result type according to the fields used in the + provided template string. + + + The datetime() and + datetime(template) methods + use the same parsing rules as the to_timestamp SQL + function does (see ), with three + exceptions. First, these methods don't allow unmatched template + patterns. Second, only the following separators are allowed in the + template string: minus sign, period, solidus (slash), comma, apostrophe, + semicolon, colon and space. Third, separators in the template string + must exactly match the input string. + + + If different date/time types need to be compared, an implicit cast is + applied. A date value can be cast to timestamp + or timestamptz, timestamp can be cast to + timestamptz, and time to timetz. + However, all but the first of these conversions depend on the current + setting, and thus can only be performed + within timezone-aware jsonpath functions. Similarly, other + date/time-related methods that convert strings to date/time types + also do this casting, which may involve the current + setting. Therefore, these conversions can + also only be performed within timezone-aware jsonpath + functions. + + + + + shows the available + filter expression elements. + + + + <type>jsonpath</type> Filter Expression Elements + + + + + Predicate/Value + + + Description + + + Example(s) + + + + + + + + value == value + boolean + + + Equality comparison (this, and the other comparison operators, work on + all JSON scalar values) + + + jsonb_path_query_array('[1, "a", 1, 3]', '$[*] ? (@ == 1)') + [1, 1] + + + jsonb_path_query_array('[1, "a", 1, 3]', '$[*] ? (@ == "a")') + ["a"] + + + + + + value != value + boolean + + + value <> value + boolean + + + Non-equality comparison + + + jsonb_path_query_array('[1, 2, 1, 3]', '$[*] ? (@ != 1)') + [2, 3] + + + jsonb_path_query_array('["a", "b", "c"]', '$[*] ? (@ <> "b")') + ["a", "c"] + + + + + + value < value + boolean + + + Less-than comparison + + + jsonb_path_query_array('[1, 2, 3]', '$[*] ? (@ < 2)') + [1] + + + + + + value <= value + boolean + + + Less-than-or-equal-to comparison + + + jsonb_path_query_array('["a", "b", "c"]', '$[*] ? (@ <= "b")') + ["a", "b"] + + + + + + value > value + boolean + + + Greater-than comparison + + + jsonb_path_query_array('[1, 2, 3]', '$[*] ? (@ > 2)') + [3] + + + + + + value >= value + boolean + + + Greater-than-or-equal-to comparison + + + jsonb_path_query_array('[1, 2, 3]', '$[*] ? (@ >= 2)') + [2, 3] + + + + + + true + boolean + + + JSON constant true + + + jsonb_path_query('[{"name": "John", "parent": false}, {"name": "Chris", "parent": true}]', '$[*] ? (@.parent == true)') + {"name": "Chris", "parent": true} + + + + + + false + boolean + + + JSON constant false + + + jsonb_path_query('[{"name": "John", "parent": false}, {"name": "Chris", "parent": true}]', '$[*] ? 
(@.parent == false)') + {"name": "John", "parent": false} + + + + + + null + value + + + JSON constant null (note that, unlike in SQL, + comparison to null works normally) + + + jsonb_path_query('[{"name": "Mary", "job": null}, {"name": "Michael", "job": "driver"}]', '$[*] ? (@.job == null) .name') + "Mary" + + + + + + boolean && boolean + boolean + + + Boolean AND + + + jsonb_path_query('[1, 3, 7]', '$[*] ? (@ > 1 && @ < 5)') + 3 + + + + + + boolean || boolean + boolean + + + Boolean OR + + + jsonb_path_query('[1, 3, 7]', '$[*] ? (@ < 1 || @ > 5)') + 7 + + + + + + ! boolean + boolean + + + Boolean NOT + + + jsonb_path_query('[1, 3, 7]', '$[*] ? (!(@ < 5))') + 7 + + + + + + boolean is unknown + boolean + + + Tests whether a Boolean condition is unknown. + + + jsonb_path_query('[-1, 2, 7, "foo"]', '$[*] ? ((@ > 0) is unknown)') + "foo" + + + + + + string like_regex string flag string + boolean + + + Tests whether the first operand matches the regular expression + given by the second operand, optionally with modifications + described by a string of flag characters (see + ). + + + jsonb_path_query_array('["abc", "abd", "aBdC", "abdacb", "babc"]', '$[*] ? (@ like_regex "^ab.*c")') + ["abc", "abdacb"] + + + jsonb_path_query_array('["abc", "abd", "aBdC", "abdacb", "babc"]', '$[*] ? (@ like_regex "^ab.*c" flag "i")') + ["abc", "aBdC", "abdacb"] + + + + + + string starts with string + boolean + + + Tests whether the second operand is an initial substring of the first + operand. + + + jsonb_path_query('["John Smith", "Mary Stone", "Bob Johnson"]', '$[*] ? (@ starts with "John")') + "John Smith" + + + + + + exists ( path_expression ) + boolean + + + Tests whether a path expression matches at least one SQL/JSON item. + Returns unknown if the path expression would result + in an error; the second example uses this to avoid a no-such-key error + in strict mode. + + + jsonb_path_query('{"x": [1, 2], "y": [2, 4]}', 'strict $.* ? (exists (@ ? (@[*] > 2)))') + [2, 4] + + + jsonb_path_query_array('{"value": 41}', 'strict $ ? (exists (@.name)) .name') + [] + + + + +
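These predicates can be combined freely within a single filter; for example, guarding a key access with exists before testing its value:

=> select jsonb_path_query_array('[{"name": "Ann"}, {"name": "Bob"}, {}]', '$[*] ? (exists(@.name) && @.name starts with "A")');
 jsonb_path_query_array
------------------------
 [{"name": "Ann"}]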
+ +
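+
+   
+    As an illustration of the datetime() comparison rules
+    described above, the following query selects only the dates that fall
+    before a given bound. (This example is illustrative; because only
+    date values are involved, no time-zone-aware function is required.)
+
+jsonb_path_query_array('["2015-08-01", "2015-08-12", "2015-08-30"]',
+                       '$[*] ? (@.datetime() < "2015-08-15".datetime())')
+ ["2015-08-01", "2015-08-12"]
+
+   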
+ + + SQL/JSON Regular Expressions + + + LIKE_REGEX + in SQL/JSON + + + + SQL/JSON path expressions allow matching text to a regular expression + with the like_regex filter. For example, the + following SQL/JSON path query would case-insensitively match all + strings in an array that start with an English vowel: + +$[*] ? (@ like_regex "^[aeiou]" flag "i") + + + + + The optional flag string may include one or more of + the characters + i for case-insensitive match, + m to allow ^ + and $ to match at newlines, + s to allow . to match a newline, + and q to quote the whole pattern (reducing the + behavior to a simple substring match). + + + + The SQL/JSON standard borrows its definition for regular expressions + from the LIKE_REGEX operator, which in turn uses the + XQuery standard. PostgreSQL does not currently support the + LIKE_REGEX operator. Therefore, + the like_regex filter is implemented using the + POSIX regular expression engine described in + . This leads to various minor + discrepancies from standard SQL/JSON behavior, which are cataloged in + . + Note, however, that the flag-letter incompatibilities described there + do not apply to SQL/JSON, as it translates the XQuery flag letters to + match what the POSIX engine expects. + + + + Keep in mind that the pattern argument of like_regex + is a JSON path string literal, written according to the rules given in + . This means in particular that any + backslashes you want to use in the regular expression must be doubled. + For example, to match string values of the root document that contain + only digits: + +$.* ? (@ like_regex "^\\d+$") + + + +
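+
+   
+    For instance, a complete SQL invocation of the digits-only pattern
+    above could look like this (an illustrative call; the backslashes are
+    doubled inside the single-quoted SQL literal, as explained above):
+
+jsonb_path_query_array('["100", "42 pears", "2500"]', '$[*] ? (@ like_regex "^\\d+$")')
+ ["100", "2500"]
+
+   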
+ + + SQL/JSON Query Functions + + SQL/JSON functions JSON_EXISTS(), + JSON_QUERY(), and JSON_VALUE() + described in can be used + to query JSON documents. Each of these functions apply a + path_expression (an SQL/JSON path query) to a + context_item (the document). See + for more details on what + the path_expression can contain. The + path_expression can also reference variables, + whose values are specified with their respective names in the + PASSING clause that is supported by each function. + context_item can be a jsonb value + or a character string that can be successfully cast to jsonb. + + + + SQL/JSON Query Functions + + + + + Function signature + + + Description + + + Example(s) + + + + + + + json_exists + +JSON_EXISTS ( +context_item, path_expression + PASSING { value AS varname } , ... +{ TRUE | FALSE | UNKNOWN | ERROR } ON ERROR ) boolean + + + + + + Returns true if the SQL/JSON path_expression + applied to the context_item yields any + items, false otherwise. + + + + + The ON ERROR clause specifies the behavior if + an error occurs during path_expression + evaluation. Specifying ERROR will cause an error to + be thrown with the appropriate message. Other options include + returning boolean values FALSE or + TRUE or the value UNKNOWN which + is actually an SQL NULL. The default when no ON ERROR + clause is specified is to return the boolean value + FALSE. + + + + + Examples: + + + JSON_EXISTS(jsonb '{"key1": [1,2,3]}', 'strict $.key1[*] ? (@ > $x)' PASSING 2 AS x) + t + + + JSON_EXISTS(jsonb '{"a": [1,2,3]}', 'lax $.a[5]' ERROR ON ERROR) + f + + + JSON_EXISTS(jsonb '{"a": [1,2,3]}', 'strict $.a[5]' ERROR ON ERROR) + + +ERROR: jsonpath array subscript is out of bounds + + + + + + json_query + +JSON_QUERY ( +context_item, path_expression + PASSING { value AS varname } , ... + RETURNING data_type FORMAT JSON ENCODING UTF8 + { WITHOUT | WITH { CONDITIONAL | UNCONDITIONAL } } ARRAY WRAPPER + { KEEP | OMIT } QUOTES ON SCALAR STRING + { ERROR | NULL | EMPTY { ARRAY | OBJECT } | DEFAULT expression } ON EMPTY + { ERROR | NULL | EMPTY { ARRAY | OBJECT } | DEFAULT expression } ON ERROR ) jsonb + + + + + + Returns the result of applying the SQL/JSON + path_expression to the + context_item. + + + + + By default, the result is returned as a value of type jsonb, + though the RETURNING clause can be used to return + as some other type to which it can be successfully coerced. + + + + + If the path expression may return multiple values, it might be necessary + to wrap those values using the WITH WRAPPER clause to + make it a valid JSON string, because the default behavior is to not wrap + them, as if WITHOUT WRAPPER were specified. The + WITH WRAPPER clause is by default taken to mean + WITH UNCONDITIONAL WRAPPER, which means that even a + single result value will be wrapped. To apply the wrapper only when + multiple values are present, specify WITH CONDITIONAL WRAPPER. + Getting multiple values in result will be treated as an error if + WITHOUT WRAPPER is specified. + + + + + If the result is a scalar string, by default, the returned value will + be surrounded by quotes, making it a valid JSON value. It can be made + explicit by specifying KEEP QUOTES. Conversely, + quotes can be omitted by specifying OMIT QUOTES. + To ensure that the result is a valid JSON value, OMIT QUOTES + cannot be specified when WITH WRAPPER is also + specified. + + + + + The ON EMPTY clause specifies the behavior if + evaluating path_expression yields an empty + set. 
The ON ERROR clause specifies the behavior + if an error occurs when evaluating path_expression, + when coercing the result value to the RETURNING type, + or when evaluating the ON EMPTY expression if the + path_expression evaluation returns an empty + set. + + + + + For both ON EMPTY and ON ERROR, + specifying ERROR will cause an error to be thrown with + the appropriate message. Other options include returning an SQL NULL, an + empty array (EMPTY ARRAY), + an empty object (EMPTY OBJECT), or a user-specified + expression (DEFAULT expression) + that can be coerced to jsonb or the type specified in RETURNING. + The default when ON EMPTY or ON ERROR + is not specified is to return an SQL NULL value. + + + + + Examples: + + + JSON_QUERY(jsonb '[1,[2,3],null]', 'lax $[*][$off]' PASSING 1 AS off WITH CONDITIONAL WRAPPER) + 3 + + + JSON_QUERY(jsonb '{"a": "[1, 2]"}', 'lax $.a' OMIT QUOTES) + [1, 2] + + + JSON_QUERY(jsonb '{"a": "[1, 2]"}', 'lax $.a' RETURNING int[] OMIT QUOTES ERROR ON ERROR) + + +ERROR: malformed array literal: "[1, 2]" +DETAIL: Missing "]" after array dimensions. + + + + + + + json_value + +JSON_VALUE ( +context_item, path_expression + PASSING { value AS varname } , ... + RETURNING data_type + { ERROR | NULL | DEFAULT expression } ON EMPTY + { ERROR | NULL | DEFAULT expression } ON ERROR ) text + + + + + + Returns the result of applying the SQL/JSON + path_expression to the + context_item. + + + + + Only use JSON_VALUE() if the extracted value is + expected to be a single SQL/JSON scalar item; + getting multiple values will be treated as an error. If you expect that + extracted value might be an object or an array, use the + JSON_QUERY function instead. + + + + + By default, the result, which must be a single scalar value, is + returned as a value of type text, though the + RETURNING clause can be used to return as some + other type to which it can be successfully coerced. + + + + + The ON ERROR and ON EMPTY + clauses have similar semantics as mentioned in the description of + JSON_QUERY, except the set of values returned in + lieu of throwing an error is different. + + + + + Note that scalar strings returned by JSON_VALUE + always have their quotes removed, equivalent to specifying + OMIT QUOTES in JSON_QUERY. + + + + + Examples: + + + JSON_VALUE(jsonb '"123.45"', '$' RETURNING float) + 123.45 + + + JSON_VALUE(jsonb '"03:04 2015-02-01"', '$.datetime("HH24:MI YYYY-MM-DD")' RETURNING date) + 2015-02-01 + + + JSON_VALUE(jsonb '[1,2]', 'strict $[$off]' PASSING 1 as off) + 2 + + + JSON_VALUE(jsonb '[1,2]', 'strict $[*]' DEFAULT 9 ON ERROR) + 9 + + + + + +
+ + + The context_item expression is converted to + jsonb by an implicit cast if the expression is not already of + type jsonb. Note, however, that any parsing errors that occur + during that conversion are thrown unconditionally, that is, are not + handled according to the (specified or implicit) ON ERROR + clause. + + + + + JSON_VALUE() returns an SQL NULL if + path_expression returns a JSON + null, whereas JSON_QUERY() returns + the JSON null as is. + + +
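+
+  
+   The difference described in the second note can be observed directly
+   (illustrative queries):
+
+SELECT JSON_QUERY(jsonb '{"a": null}', '$.a');
+ json_query
+------------
+ null
+(1 row)
+
+SELECT JSON_VALUE(jsonb '{"a": null}', '$.a');
+ json_value
+------------
+
+(1 row)
+
+  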
+ + + JSON_TABLE + + json_table + + + + JSON_TABLE is an SQL/JSON function which + queries JSON data + and presents the results as a relational view, which can be accessed as a + regular SQL table. You can use JSON_TABLE inside + the FROM clause of a SELECT, + UPDATE, or DELETE and as data source + in a MERGE statement. + + + + Taking JSON data as input, JSON_TABLE uses a JSON path + expression to extract a part of the provided data to use as a + row pattern for the constructed view. Each SQL/JSON + value given by the row pattern serves as source for a separate row in the + constructed view. + + + + To split the row pattern into columns, JSON_TABLE + provides the COLUMNS clause that defines the + schema of the created view. For each column, a separate JSON path expression + can be specified to be evaluated against the row pattern to get an SQL/JSON + value that will become the value for the specified column in a given output + row. + + + + JSON data stored at a nested level of the row pattern can be extracted using + the NESTED PATH clause. Each + NESTED PATH clause can be used to generate one or more + columns using the data from a nested level of the row pattern. Those + columns can be specified using a COLUMNS clause that + looks similar to the top-level COLUMNS clause. Rows constructed from + NESTED COLUMNS are called child rows and are joined + against the row constructed from the columns specified in the parent + COLUMNS clause to get the row in the final view. Child + columns themselves may contain a NESTED PATH + specification thus allowing to extract data located at arbitrary nesting + levels. Columns produced by multiple NESTED PATHs at the + same level are considered to be siblings of each + other and their rows after joining with the parent row are combined using + UNION. + + + + The rows produced by JSON_TABLE are laterally + joined to the row that generated them, so you do not have to explicitly join + the constructed view with the original table holding JSON + data. + + + + The syntax is: + + + +JSON_TABLE ( + context_item, path_expression AS json_path_name PASSING { value AS varname } , ... + COLUMNS ( json_table_column , ... ) + { ERROR | EMPTY ARRAY} ON ERROR +) + + +where json_table_column is: + + name FOR ORDINALITY + | name type + FORMAT JSON ENCODING UTF8 + PATH path_expression + { WITHOUT | WITH { CONDITIONAL | UNCONDITIONAL } } ARRAY WRAPPER + { KEEP | OMIT } QUOTES ON SCALAR STRING + { ERROR | NULL | EMPTY { ARRAY | OBJECT } | DEFAULT expression } ON EMPTY + { ERROR | NULL | EMPTY { ARRAY | OBJECT } | DEFAULT expression } ON ERROR + | name type EXISTS PATH path_expression + { ERROR | TRUE | FALSE | UNKNOWN } ON ERROR + | NESTED PATH path_expression AS json_path_name COLUMNS ( json_table_column , ... ) + + + + Each syntax element is described below in more detail. + + + + + + context_item, path_expression AS json_path_name PASSING { value AS varname } , ... + + + + The context_item specifies the input document + to query, the path_expression is an SQL/JSON + path expression defining the query, and json_path_name + is an optional name for the path_expression. + The optional PASSING clause provides data values for + the variables mentioned in the path_expression. + The result of the input data evaluation using the aforementioned elements + is called the row pattern, which is used as the + source for row values in the constructed view. + + + + + + + COLUMNS ( json_table_column , ... ) + + + + + The COLUMNS clause defining the schema of the + constructed view. 
In this clause, you can specify each column to be + filled with an SQL/JSON value obtained by applying a JSON path expression + against the row pattern. json_table_column has + the following variants: + + + + + + name FOR ORDINALITY + + + + Adds an ordinality column that provides sequential row numbering starting + from 1. Each NESTED PATH (see below) gets its own + counter for any nested ordinality columns. + + + + + + + name type + FORMAT JSON ENCODING UTF8 + PATH path_expression + + + + Inserts an SQL/JSON value obtained by applying + path_expression against the row pattern into + the view's output row after coercing it to specified + type. + + + Specifying FORMAT JSON makes it explicit that you + expect the value to be a valid json object. It only + makes sense to specify FORMAT JSON if + type is one of bpchar, + bytea, character varying, name, + json, jsonb, text, or a domain over + these types. + + + Optionally, you can specify WRAPPER and + QUOTES clauses to format the output. Note that + specifying OMIT QUOTES overrides + FORMAT JSON if also specified, because unquoted + literals do not constitute valid json values. + + + Optionally, you can use ON EMPTY and + ON ERROR clauses to specify whether to throw the error + or return the specified value when the result of JSON path evaluation is + empty and when an error occurs during JSON path evaluation or when + coercing the SQL/JSON value to the specified type, respectively. The + default for both is to return a NULL value. + + + + This clause is internally turned into and has the same semantics as + JSON_VALUE or JSON_QUERY. + The latter if the specified type is not a scalar type or if either of + FORMAT JSON, WRAPPER, or + QUOTES clause is present. + + + + + + + + name type + EXISTS PATH path_expression + + + + Inserts a boolean value obtained by applying + path_expression against the row pattern + into the view's output row after coercing it to specified + type. + + + The value corresponds to whether applying the PATH + expression to the row pattern yields any values. + + + The specified type should have a cast from the + boolean type. + + + Optionally, you can use ON ERROR to specify whether to + throw the error or return the specified value when an error occurs during + JSON path evaluation or when coercing SQL/JSON value to the specified + type. The default is to return a boolean value + FALSE. + + + + This clause is internally turned into and has the same semantics as + JSON_EXISTS. + + + + + + + + NESTED PATH path_expression AS json_path_name + COLUMNS ( json_table_column , ... ) + + + + + Extracts SQL/JSON values from nested levels of the row pattern, + generates one or more columns as defined by the COLUMNS + subclause, and inserts the extracted SQL/JSON values into those + columns. The json_table_column + expression in the COLUMNS subclause uses the same + syntax as in the parent COLUMNS clause. + + + + The NESTED PATH syntax is recursive, + so you can go down multiple nested levels by specifying several + NESTED PATH subclauses within each other. + It allows to unnest the hierarchy of JSON objects and arrays + in a single function invocation rather than chaining several + JSON_TABLE expressions in an SQL statement. + + + + + + + + In each variant of json_table_column described + above, if the PATH clause is omitted, path expression + $.name is used, where + name is the provided column name. + + + + + + + + + AS json_path_name + + + + + The optional json_path_name serves as an + identifier of the provided path_expression. 
+ The name must be unique and distinct from the column names. + + + + + + + { ERROR | EMPTY } ON ERROR + + + + + The optional ON ERROR can be used to specify how to + handle errors when evaluating the top-level + path_expression. Use ERROR + if you want the errors to be thrown and EMPTY to + return an empty table, that is, a table containing 0 rows. Note that + this clause does not affect the errors that occur when evaluating + columns, for which the behavior depends on whether the + ON ERROR clause is specified against a given column. + + + + + + Examples + + + In the examples that follow, the following table containing JSON data + will be used: + + +CREATE TABLE my_films ( js jsonb ); + +INSERT INTO my_films VALUES ( +'{ "favorites" : [ + { "kind" : "comedy", "films" : [ + { "title" : "Bananas", + "director" : "Woody Allen"}, + { "title" : "The Dinner Game", + "director" : "Francis Veber" } ] }, + { "kind" : "horror", "films" : [ + { "title" : "Psycho", + "director" : "Alfred Hitchcock" } ] }, + { "kind" : "thriller", "films" : [ + { "title" : "Vertigo", + "director" : "Alfred Hitchcock" } ] }, + { "kind" : "drama", "films" : [ + { "title" : "Yojimbo", + "director" : "Akira Kurosawa" } ] } + ] }'); + + + + + The following query shows how to use JSON_TABLE to + turn the JSON objects in the my_films table + to a view containing columns for the keys kind, + title, and director contained in + the original JSON along with an ordinality column: + + +SELECT jt.* FROM + my_films, + JSON_TABLE (js, '$.favorites[*]' COLUMNS ( + id FOR ORDINALITY, + kind text PATH '$.kind', + title text PATH '$.films[*].title' WITH WRAPPER, + director text PATH '$.films[*].director' WITH WRAPPER)) AS jt; + + + + id | kind | title | director +----+----------+--------------------------------+---------------------------------- + 1 | comedy | ["Bananas", "The Dinner Game"] | ["Woody Allen", "Francis Veber"] + 2 | horror | ["Psycho"] | ["Alfred Hitchcock"] + 3 | thriller | ["Vertigo"] | ["Alfred Hitchcock"] + 4 | drama | ["Yojimbo"] | ["Akira Kurosawa"] +(4 rows) + + + + + The following is a modified version of the above query to show the + usage of PASSING arguments in the filter specified in + the top-level JSON path expression and the various options for the + individual columns: + + +SELECT jt.* FROM + my_films, + JSON_TABLE (js, '$.favorites[*] ? (@.films[*].director == $filter)' + PASSING 'Alfred Hitchcock' AS filter + COLUMNS ( + id FOR ORDINALITY, + kind text PATH '$.kind', + title text FORMAT JSON PATH '$.films[*].title' OMIT QUOTES, + director text PATH '$.films[*].director' KEEP QUOTES)) AS jt; + + + + id | kind | title | director +----+----------+---------+-------------------- + 1 | horror | Psycho | "Alfred Hitchcock" + 2 | thriller | Vertigo | "Alfred Hitchcock" +(2 rows) + + + + + The following is a modified version of the above query to show the usage + of NESTED PATH for populating title and director + columns, illustrating how they are joined to the parent columns id and + kind: + + +SELECT jt.* FROM + my_films, + JSON_TABLE ( js, '$.favorites[*] ? 
(@.films[*].director == $filter)' + PASSING 'Alfred Hitchcock' AS filter + COLUMNS ( + id FOR ORDINALITY, + kind text PATH '$.kind', + NESTED PATH '$.films[*]' COLUMNS ( + title text FORMAT JSON PATH '$.title' OMIT QUOTES, + director text PATH '$.director' KEEP QUOTES))) AS jt; + + + + id | kind | title | director +----+----------+---------+-------------------- + 1 | horror | Psycho | "Alfred Hitchcock" + 2 | thriller | Vertigo | "Alfred Hitchcock" +(2 rows) + + + + + + The following is the same query but without the filter in the root + path: + + +SELECT jt.* FROM + my_films, + JSON_TABLE ( js, '$.favorites[*]' + COLUMNS ( + id FOR ORDINALITY, + kind text PATH '$.kind', + NESTED PATH '$.films[*]' COLUMNS ( + title text FORMAT JSON PATH '$.title' OMIT QUOTES, + director text PATH '$.director' KEEP QUOTES))) AS jt; + + + + id | kind | title | director +----+----------+-----------------+-------------------- + 1 | comedy | Bananas | "Woody Allen" + 1 | comedy | The Dinner Game | "Francis Veber" + 2 | horror | Psycho | "Alfred Hitchcock" + 3 | thriller | Vertigo | "Alfred Hitchcock" + 4 | drama | Yojimbo | "Akira Kurosawa" +(5 rows) + + + + + + The following shows another query using a different JSON + object as input. It shows the UNION "sibling join" between + NESTED paths $.movies[*] and + $.books[*] and also the usage of + FOR ORDINALITY column at NESTED + levels (columns movie_id, book_id, + and author_id): + + +SELECT * FROM JSON_TABLE ( +'{"favorites": + [{"movies": + [{"name": "One", "director": "John Doe"}, + {"name": "Two", "director": "Don Joe"}], + "books": + [{"name": "Mystery", "authors": [{"name": "Brown Dan"}]}, + {"name": "Wonder", "authors": [{"name": "Jun Murakami"}, {"name":"Craig Doe"}]}] +}]}'::json, '$.favorites[*]' +COLUMNS ( + user_id FOR ORDINALITY, + NESTED '$.movies[*]' + COLUMNS ( + movie_id FOR ORDINALITY, + mname text PATH '$.name', + director text), + NESTED '$.books[*]' + COLUMNS ( + book_id FOR ORDINALITY, + bname text PATH '$.name', + NESTED '$.authors[*]' + COLUMNS ( + author_id FOR ORDINALITY, + author_name text PATH '$.name')))); + + + + user_id | movie_id | mname | director | book_id | bname | author_id | author_name +---------+----------+-------+----------+---------+---------+-----------+-------------- + 1 | 1 | One | John Doe | | | | + 1 | 2 | Two | Don Joe | | | | + 1 | | | | 1 | Mystery | 1 | Brown Dan + 1 | | | | 2 | Wonder | 1 | Jun Murakami + 1 | | | | 2 | Wonder | 2 | Craig Doe +(5 rows) + + + + +
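+
+  
+   As a further illustration, an EXISTS PATH column
+   (described above) can flag, for each category in
+   my_films, whether it contains a film directed by
+   Alfred Hitchcock. (The column name has_hitchcock
+   below is arbitrary.)
+
+SELECT jt.* FROM
+ my_films,
+ JSON_TABLE (js, '$.favorites[*]' COLUMNS (
+   kind text PATH '$.kind',
+   has_hitchcock boolean EXISTS PATH '$.films[*] ? (@.director == "Alfred Hitchcock")')) AS jt;
+
+
+   kind   | has_hitchcock
+----------+---------------
+ comedy   | f
+ horror   | t
+ thriller | t
+ drama    | f
+(4 rows)
+
+  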
diff --git a/doc/src/sgml/func/func-logical.sgml b/doc/src/sgml/func/func-logical.sgml new file mode 100644 index 0000000000000..65e50e65a8117 --- /dev/null +++ b/doc/src/sgml/func/func-logical.sgml @@ -0,0 +1,146 @@ + + Logical Operators + + + operator + logical + + + + Boolean + operators + operators, logical + + + + The usual logical operators are available: + + + AND (operator) + + + + OR (operator) + + + + NOT (operator) + + + + conjunction + + + + disjunction + + + + negation + + + +boolean AND boolean boolean +boolean OR boolean boolean +NOT boolean boolean + + + SQL uses a three-valued logic system with true, + false, and null, which represents unknown. + Observe the following truth tables: + + + + + + a + b + a AND b + a OR b + + + + + + TRUE + TRUE + TRUE + TRUE + + + + TRUE + FALSE + FALSE + TRUE + + + + TRUE + NULL + NULL + TRUE + + + + FALSE + FALSE + FALSE + FALSE + + + + FALSE + NULL + FALSE + NULL + + + + NULL + NULL + NULL + NULL + + + + + + + + + + a + NOT a + + + + + + TRUE + FALSE + + + + FALSE + TRUE + + + + NULL + NULL + + + + + + + + The operators AND and OR are + commutative, that is, you can switch the left and right operands + without affecting the result. (However, it is not guaranteed that + the left operand is evaluated before the right operand. See for more information about the + order of evaluation of subexpressions.) + + diff --git a/doc/src/sgml/func/func-matching.sgml b/doc/src/sgml/func/func-matching.sgml new file mode 100644 index 0000000000000..ebe0b22c8f60f --- /dev/null +++ b/doc/src/sgml/func/func-matching.sgml @@ -0,0 +1,2487 @@ + + Pattern Matching + + + pattern matching + + + + There are three separate approaches to pattern matching provided + by PostgreSQL: the traditional + SQL LIKE operator, the + more recent SIMILAR TO operator (added in + SQL:1999), and POSIX-style regular + expressions. Aside from the basic does this string match + this pattern? operators, functions are available to extract + or replace matching substrings and to split a string at matching + locations. + + + + + If you have pattern matching needs that go beyond this, + consider writing a user-defined function in Perl or Tcl. + + + + + + While most regular-expression searches can be executed very quickly, + regular expressions can be contrived that take arbitrary amounts of + time and memory to process. Be wary of accepting regular-expression + search patterns from hostile sources. If you must do so, it is + advisable to impose a statement timeout. + + + + Searches using SIMILAR TO patterns have the same + security hazards, since SIMILAR TO provides many + of the same capabilities as POSIX-style regular + expressions. + + + + LIKE searches, being much simpler than the other + two options, are safer to use with possibly-hostile pattern sources. + + + + + SIMILAR TO and POSIX-style regular + expressions do not support nondeterministic collations. If required, use + LIKE or apply a different collation to the expression + to work around this limitation. + + + + <function>LIKE</function> + + + LIKE + + + +string LIKE pattern ESCAPE escape-character +string NOT LIKE pattern ESCAPE escape-character + + + + The LIKE expression returns true if the + string matches the supplied + pattern. (As + expected, the NOT LIKE expression returns + false if LIKE returns true, and vice versa. + An equivalent expression is + NOT (string LIKE + pattern).) 
+ + + + If pattern does not contain percent + signs or underscores, then the pattern only represents the string + itself; in that case LIKE acts like the + equals operator. An underscore (_) in + pattern stands for (matches) any single + character; a percent sign (%) matches any sequence + of zero or more characters. + + + + Some examples: + +'abc' LIKE 'abc' true +'abc' LIKE 'a%' true +'abc' LIKE '_b_' true +'abc' LIKE 'c' false + + + + + LIKE pattern matching supports nondeterministic + collations (see ), such as + case-insensitive collations or collations that, say, ignore punctuation. + So with a case-insensitive collation, one could have: + +'AbC' LIKE 'abc' COLLATE case_insensitive true +'AbC' LIKE 'a%' COLLATE case_insensitive true + + With collations that ignore certain characters or in general that consider + strings of different lengths equal, the semantics can become a bit more + complicated. Consider these examples: + +'.foo.' LIKE 'foo' COLLATE ign_punct true +'.foo.' LIKE 'f_o' COLLATE ign_punct true +'.foo.' LIKE '_oo' COLLATE ign_punct false + + The way the matching works is that the pattern is partitioned into + sequences of wildcards and non-wildcard strings (wildcards being + _ and %). For example, the pattern + f_o is partitioned into f, _, o, the + pattern _oo is partitioned into _, + oo. The input string matches the pattern if it can be + partitioned in such a way that the wildcards match one character or any + number of characters respectively and the non-wildcard partitions are + equal under the applicable collation. So for example, '.foo.' + LIKE 'f_o' COLLATE ign_punct is true because one can partition + .foo. into .f, o, o., and then + '.f' = 'f' COLLATE ign_punct, 'o' + matches the _ wildcard, and 'o.' = 'o' COLLATE + ign_punct. But '.foo.' LIKE '_oo' COLLATE + ign_punct is false because .foo. cannot be + partitioned in a way that the first character is any character and the + rest of the string compares equal to oo. (Note that + the single-character wildcard always matches exactly one character, + independent of the collation. So in this example, the + _ would match ., but then the rest + of the input string won't match the rest of the pattern.) + + + + LIKE pattern matching always covers the entire + string. Therefore, if it's desired to match a sequence anywhere within + a string, the pattern must start and end with a percent sign. + + + + To match a literal underscore or percent sign without matching + other characters, the respective character in + pattern must be + preceded by the escape character. The default escape + character is the backslash but a different one can be selected by + using the ESCAPE clause. To match the escape + character itself, write two escape characters. + + + + + If you have turned off, + any backslashes you write in literal string constants will need to be + doubled. See for more information. + + + + + It's also possible to select no escape character by writing + ESCAPE ''. This effectively disables the + escape mechanism, which makes it impossible to turn off the + special meaning of underscore and percent signs in the pattern. + + + + According to the SQL standard, omitting ESCAPE + means there is no escape character (rather than defaulting to a + backslash), and a zero-length ESCAPE value is + disallowed. PostgreSQL's behavior in + this regard is therefore slightly nonstandard. + + + + The key word ILIKE can be used instead of + LIKE to make the match case-insensitive according to the + active locale. 
(But this does not support nondeterministic collations.) + This is not in the SQL standard but is a + PostgreSQL extension. + + + + The operator ~~ is equivalent to + LIKE, and ~~* corresponds to + ILIKE. There are also + !~~ and !~~* operators that + represent NOT LIKE and NOT + ILIKE, respectively. All of these operators are + PostgreSQL-specific. You may see these + operator names in EXPLAIN output and similar + places, since the parser actually translates LIKE + et al. to these operators. + + + + The phrases LIKE, ILIKE, + NOT LIKE, and NOT ILIKE are + generally treated as operators + in PostgreSQL syntax; for example they can + be used in expression + operator ANY + (subquery) constructs, although + an ESCAPE clause cannot be included there. In some + obscure cases it may be necessary to use the underlying operator names + instead. + + + + Also see the starts-with operator ^@ and the + corresponding starts_with() function, which are + useful in cases where simply matching the beginning of a string is + needed. + + + + + + <function>SIMILAR TO</function> Regular Expressions + + + regular expression + + + + + SIMILAR TO + + + substring + + + +string SIMILAR TO pattern ESCAPE escape-character +string NOT SIMILAR TO pattern ESCAPE escape-character + + + + The SIMILAR TO operator returns true or + false depending on whether its pattern matches the given string. + It is similar to LIKE, except that it + interprets the pattern using the SQL standard's definition of a + regular expression. SQL regular expressions are a curious cross + between LIKE notation and common (POSIX) regular + expression notation. + + + + Like LIKE, the SIMILAR TO + operator succeeds only if its pattern matches the entire string; + this is unlike common regular expression behavior where the pattern + can match any part of the string. + Also like + LIKE, SIMILAR TO uses + _ and % as wildcard characters denoting + any single character and any string, respectively (these are + comparable to . and .* in POSIX regular + expressions). + + + + In addition to these facilities borrowed from LIKE, + SIMILAR TO supports these pattern-matching + metacharacters borrowed from POSIX regular expressions: + + + + + | denotes alternation (either of two alternatives). + + + + + * denotes repetition of the previous item zero + or more times. + + + + + + denotes repetition of the previous item one + or more times. + + + + + ? denotes repetition of the previous item zero + or one time. + + + + + {m} denotes repetition + of the previous item exactly m times. + + + + + {m,} denotes repetition + of the previous item m or more times. + + + + + {m,n} + denotes repetition of the previous item at least m and + not more than n times. + + + + + Parentheses () can be used to group items into + a single logical item. + + + + + A bracket expression [...] specifies a character + class, just as in POSIX regular expressions. + + + + + Notice that the period (.) is not a metacharacter + for SIMILAR TO. + + + + As with LIKE, a backslash disables the special + meaning of any of these metacharacters. A different escape character + can be specified with ESCAPE, or the escape + capability can be disabled by writing ESCAPE ''. + + + + According to the SQL standard, omitting ESCAPE + means there is no escape character (rather than defaulting to a + backslash), and a zero-length ESCAPE value is + disallowed. PostgreSQL's behavior in + this regard is therefore slightly nonstandard. 
+ + + + Another nonstandard extension is that following the escape character + with a letter or digit provides access to the escape sequences + defined for POSIX regular expressions; see + , + , and + below. + + + + Some examples: + +'abc' SIMILAR TO 'abc' true +'abc' SIMILAR TO 'a' false +'abc' SIMILAR TO '%(b|d)%' true +'abc' SIMILAR TO '(b|c)%' false +'-abc-' SIMILAR TO '%\mabc\M%' true +'xabcy' SIMILAR TO '%\mabc\M%' false + + + + + The substring function with three parameters + provides extraction of a substring that matches an SQL + regular expression pattern. The function can be written according + to standard SQL syntax: + +substring(string similar pattern escape escape-character) + + or using the now obsolete SQL:1999 syntax: + +substring(string from pattern for escape-character) + + or as a plain three-argument function: + +substring(string, pattern, escape-character) + + As with SIMILAR TO, the + specified pattern must match the entire data string, or else the + function fails and returns null. To indicate the part of the + pattern for which the matching data sub-string is of interest, + the pattern should contain + two occurrences of the escape character followed by a double quote + ("). + The text matching the portion of the pattern + between these separators is returned when the match is successful. + + + + The escape-double-quote separators actually + divide substring's pattern into three independent + regular expressions; for example, a vertical bar (|) + in any of the three sections affects only that section. Also, the first + and third of these regular expressions are defined to match the smallest + possible amount of text, not the largest, when there is any ambiguity + about how much of the data string matches which pattern. (In POSIX + parlance, the first and third regular expressions are forced to be + non-greedy.) + + + + As an extension to the SQL standard, PostgreSQL + allows there to be just one escape-double-quote separator, in which case + the third regular expression is taken as empty; or no separators, in which + case the first and third regular expressions are taken as empty. + + + + Some examples, with #" delimiting the return string: + +substring('foobar' similar '%#"o_b#"%' escape '#') oob +substring('foobar' similar '#"o_b#"%' escape '#') NULL + + + + + + <acronym>POSIX</acronym> Regular Expressions + + + regular expression + pattern matching + + + substring + + + regexp_count + + + regexp_instr + + + regexp_like + + + regexp_match + + + regexp_matches + + + regexp_replace + + + regexp_split_to_table + + + regexp_split_to_array + + + regexp_substr + + + + lists the available + operators for pattern matching using POSIX regular expressions. + + + + Regular Expression Match Operators + + + + + + Operator + + + Description + + + Example(s) + + + + + + + + text ~ text + boolean + + + String matches regular expression, case sensitively + + + 'thomas' ~ 't.*ma' + t + + + + + + text ~* text + boolean + + + String matches regular expression, case-insensitively + + + 'thomas' ~* 'T.*ma' + t + + + + + + text !~ text + boolean + + + String does not match regular expression, case sensitively + + + 'thomas' !~ 't.*max' + t + + + + + + text !~* text + boolean + + + String does not match regular expression, case-insensitively + + + 'thomas' !~* 'T.*ma' + f + + + + +
+ + + POSIX regular expressions provide a more + powerful means for pattern matching than the LIKE and + SIMILAR TO operators. + Many Unix tools such as egrep, + sed, or awk use a pattern + matching language that is similar to the one described here. + + + + A regular expression is a character sequence that is an + abbreviated definition of a set of strings (a regular + set). A string is said to match a regular expression + if it is a member of the regular set described by the regular + expression. As with LIKE, pattern characters + match string characters exactly unless they are special characters + in the regular expression language — but regular expressions use + different special characters than LIKE does. + Unlike LIKE patterns, a + regular expression is allowed to match anywhere within a string, unless + the regular expression is explicitly anchored to the beginning or + end of the string. + + + + Some examples: + +'abcd' ~ 'bc' true +'abcd' ~ 'a.c' true — dot matches any character +'abcd' ~ 'a.*d' true — * repeats the preceding pattern item +'abcd' ~ '(b|x)' true — | means OR, parentheses group +'abcd' ~ '^a' true — ^ anchors to start of string +'abcd' ~ '^(b|c)' false — would match except for anchoring + + + + + The POSIX pattern language is described in much + greater detail below. + + + + The substring function with two parameters, + substring(string from + pattern), provides extraction of a + substring + that matches a POSIX regular expression pattern. It returns null if + there is no match, otherwise the first portion of the text that matched the + pattern. But if the pattern contains any parentheses, the portion + of the text that matched the first parenthesized subexpression (the + one whose left parenthesis comes first) is + returned. You can put parentheses around the whole expression + if you want to use parentheses within it without triggering this + exception. If you need parentheses in the pattern before the + subexpression you want to extract, see the non-capturing parentheses + described below. + + + + Some examples: + +substring('foobar' from 'o.b') oob +substring('foobar' from 'o(.)b') o + + + + + The regexp_count function counts the number of + places where a POSIX regular expression pattern matches a string. + It has the syntax + regexp_count(string, + pattern + , start + , flags + ). + pattern is searched for + in string, normally from the beginning of + the string, but if the start parameter is + provided then beginning from that character index. + The flags parameter is an optional text + string containing zero or more single-letter flags that change the + function's behavior. For example, including i in + flags specifies case-insensitive matching. + Supported flags are described in + . + + + + Some examples: + +regexp_count('ABCABCAXYaxy', 'A.') 3 +regexp_count('ABCABCAXYaxy', 'A.', 1, 'i') 4 + + + + + The regexp_instr function returns the starting or + ending position of the N'th match of a + POSIX regular expression pattern to a string, or zero if there is no + such match. It has the syntax + regexp_instr(string, + pattern + , start + , N + , endoption + , flags + , subexpr + ). + pattern is searched for + in string, normally from the beginning of + the string, but if the start parameter is + provided then beginning from that character index. + If N is specified + then the N'th match of the pattern + is located, otherwise the first match is located. 
+ If the endoption parameter is omitted or + specified as zero, the function returns the position of the first + character of the match. Otherwise, endoption + must be one, and the function returns the position of the character + following the match. + The flags parameter is an optional text + string containing zero or more single-letter flags that change the + function's behavior. Supported flags are described + in . + For a pattern containing parenthesized + subexpressions, subexpr is an integer + indicating which subexpression is of interest: the result identifies + the position of the substring matching that subexpression. + Subexpressions are numbered in the order of their leading parentheses. + When subexpr is omitted or zero, the result + identifies the position of the whole match regardless of + parenthesized subexpressions. + + + + Some examples: + +regexp_instr('number of your street, town zip, FR', '[^,]+', 1, 2) + 23 +regexp_instr(string=>'ABCDEFGHI', pattern=>'(c..)(...)', start=>1, "N"=>1, endoption=>0, flags=>'i', subexpr=>2) + 6 + + + + + The regexp_like function checks whether a match + of a POSIX regular expression pattern occurs within a string, + returning boolean true or false. It has the syntax + regexp_like(string, + pattern + , flags ). + The flags parameter is an optional text + string containing zero or more single-letter flags that change the + function's behavior. Supported flags are described + in . + This function has the same results as the ~ + operator if no flags are specified. If only the i + flag is specified, it has the same results as + the ~* operator. + + + + Some examples: + +regexp_like('Hello World', 'world') false +regexp_like('Hello World', 'world', 'i') true + + + + + The regexp_match function returns a text array of + matching substring(s) within the first match of a POSIX + regular expression pattern to a string. It has the syntax + regexp_match(string, + pattern , flags ). + If there is no match, the result is NULL. + If a match is found, and the pattern contains no + parenthesized subexpressions, then the result is a single-element text + array containing the substring matching the whole pattern. + If a match is found, and the pattern contains + parenthesized subexpressions, then the result is a text array + whose n'th element is the substring matching + the n'th parenthesized subexpression of + the pattern (not counting non-capturing + parentheses; see below for details). + The flags parameter is an optional text string + containing zero or more single-letter flags that change the function's + behavior. Supported flags are described + in . + + + + Some examples: + +SELECT regexp_match('foobarbequebaz', 'bar.*que'); + regexp_match +-------------- + {barbeque} +(1 row) + +SELECT regexp_match('foobarbequebaz', '(bar)(beque)'); + regexp_match +-------------- + {bar,beque} +(1 row) + + + + + + In the common case where you just want the whole matching substring + or NULL for no match, the best solution is to + use regexp_substr(). + However, regexp_substr() only exists + in PostgreSQL version 15 and up. When + working in older versions, you can extract the first element + of regexp_match()'s result, for example: + +SELECT (regexp_match('foobarbequebaz', 'bar.*que'))[1]; + regexp_match +-------------- + barbeque +(1 row) + + + + + + The regexp_matches function returns a set of text arrays + of matching substring(s) within matches of a POSIX regular + expression pattern to a string. It has the same syntax as + regexp_match. 
+ This function returns no rows if there is no match, one row if there is + a match and the g flag is not given, or N + rows if there are N matches and the g flag + is given. Each returned row is a text array containing the whole + matched substring or the substrings matching parenthesized + subexpressions of the pattern, just as described above + for regexp_match. + regexp_matches accepts all the flags shown + in , plus + the g flag which commands it to return all matches, not + just the first one. + + + + Some examples: + +SELECT regexp_matches('foo', 'not there'); + regexp_matches +---------------- +(0 rows) + +SELECT regexp_matches('foobarbequebazilbarfbonk', '(b[^b]+)(b[^b]+)', 'g'); + regexp_matches +---------------- + {bar,beque} + {bazil,barf} +(2 rows) + + + + + + In most cases regexp_matches() should be used with + the g flag, since if you only want the first match, it's + easier and more efficient to use regexp_match(). + However, regexp_match() only exists + in PostgreSQL version 10 and up. When working in older + versions, a common trick is to place a regexp_matches() + call in a sub-select, for example: + +SELECT col1, (SELECT regexp_matches(col2, '(bar)(beque)')) FROM tab; + + This produces a text array if there's a match, or NULL if + not, the same as regexp_match() would do. Without the + sub-select, this query would produce no output at all for table rows + without a match, which is typically not the desired behavior. + + + + + The regexp_replace function provides substitution of + new text for substrings that match POSIX regular expression patterns. + It has the syntax + regexp_replace(string, + pattern, replacement + , flags ) + or + regexp_replace(string, + pattern, replacement, + start + , N + , flags ). + The source string is returned unchanged if + there is no match to the pattern. If there is a + match, the string is returned with the + replacement string substituted for the matching + substring. The replacement string can contain + \n, where n is 1 + through 9, to indicate that the source substring matching the + n'th parenthesized subexpression of the pattern should be + inserted, and it can contain \& to indicate that the + substring matching the entire pattern should be inserted. Write + \\ if you need to put a literal backslash in the replacement + text. + pattern is searched for + in string, normally from the beginning of + the string, but if the start parameter is + provided then beginning from that character index. + By default, only the first match of the pattern is replaced. + If N is specified and is greater than zero, + then the N'th match of the pattern + is replaced. + If the g flag is given, or + if N is specified and is zero, then all + matches at or after the start position are + replaced. (The g flag is ignored + when N is specified.) + The flags parameter is an optional text + string containing zero or more single-letter flags that change the + function's behavior. Supported flags (though + not g) are + described in . 
+ + + + Some examples: + +regexp_replace('foobarbaz', 'b..', 'X') + fooXbaz +regexp_replace('foobarbaz', 'b..', 'X', 'g') + fooXX +regexp_replace('foobarbaz', 'b(..)', 'X\1Y', 'g') + fooXarYXazY +regexp_replace('A PostgreSQL function', 'a|e|i|o|u', 'X', 1, 0, 'i') + X PXstgrXSQL fXnctXXn +regexp_replace(string=>'A PostgreSQL function', pattern=>'a|e|i|o|u', replacement=>'X', start=>1, "N"=>3, flags=>'i') + A PostgrXSQL function + + + + + The regexp_split_to_table function splits a string using a POSIX + regular expression pattern as a delimiter. It has the syntax + regexp_split_to_table(string, pattern + , flags ). + If there is no match to the pattern, the function returns the + string. If there is at least one match, for each match it returns + the text from the end of the last match (or the beginning of the string) + to the beginning of the match. When there are no more matches, it + returns the text from the end of the last match to the end of the string. + The flags parameter is an optional text string containing + zero or more single-letter flags that change the function's behavior. + regexp_split_to_table supports the flags described in + . + + + + The regexp_split_to_array function behaves the same as + regexp_split_to_table, except that regexp_split_to_array + returns its result as an array of text. It has the syntax + regexp_split_to_array(string, pattern + , flags ). + The parameters are the same as for regexp_split_to_table. + + + + Some examples: + +SELECT foo FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', '\s+') AS foo; + foo +------- + the + quick + brown + fox + jumps + over + the + lazy + dog +(9 rows) + +SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', '\s+'); + regexp_split_to_array +----------------------------------------------- + {the,quick,brown,fox,jumps,over,the,lazy,dog} +(1 row) + +SELECT foo FROM regexp_split_to_table('the quick brown fox', '\s*') AS foo; + foo +----- + t + h + e + q + u + i + c + k + b + r + o + w + n + f + o + x +(16 rows) + + + + + As the last example demonstrates, the regexp split functions ignore + zero-length matches that occur at the start or end of the string + or immediately after a previous match. This is contrary to the strict + definition of regexp matching that is implemented by + the other regexp functions, but is usually the most convenient behavior + in practice. Other software systems such as Perl use similar definitions. + + + + The regexp_substr function returns the substring + that matches a POSIX regular expression pattern, + or NULL if there is no match. It has the syntax + regexp_substr(string, + pattern + , start + , N + , flags + , subexpr + ). + pattern is searched for + in string, normally from the beginning of + the string, but if the start parameter is + provided then beginning from that character index. + If N is specified + then the N'th match of the pattern + is returned, otherwise the first match is returned. + The flags parameter is an optional text + string containing zero or more single-letter flags that change the + function's behavior. Supported flags are described + in . + For a pattern containing parenthesized + subexpressions, subexpr is an integer + indicating which subexpression is of interest: the result is the + substring matching that subexpression. + Subexpressions are numbered in the order of their leading parentheses. + When subexpr is omitted or zero, the result + is the whole match regardless of parenthesized subexpressions. 
+ + + + Some examples: + +regexp_substr('number of your street, town zip, FR', '[^,]+', 1, 2) + town zip +regexp_substr('ABCDEFGHI', '(c..)(...)', 1, 1, 'i', 2) + FGH + + + + + + + Regular Expression Details + + + PostgreSQL's regular expressions are implemented + using a software package written by Henry Spencer. Much of + the description of regular expressions below is copied verbatim from his + manual. + + + + Regular expressions (REs), as defined in + POSIX 1003.2, come in two forms: + extended REs or EREs + (roughly those of egrep), and + basic REs or BREs + (roughly those of ed). + PostgreSQL supports both forms, and + also implements some extensions + that are not in the POSIX standard, but have become widely used + due to their availability in programming languages such as Perl and Tcl. + REs using these non-POSIX extensions are called + advanced REs or AREs + in this documentation. AREs are almost an exact superset of EREs, + but BREs have several notational incompatibilities (as well as being + much more limited). + We first describe the ARE and ERE forms, noting features that apply + only to AREs, and then describe how BREs differ. + + + + + PostgreSQL always initially presumes that a regular + expression follows the ARE rules. However, the more limited ERE or + BRE rules can be chosen by prepending an embedded option + to the RE pattern, as described in . + This can be useful for compatibility with applications that expect + exactly the POSIX 1003.2 rules. + + + + + A regular expression is defined as one or more + branches, separated by + |. It matches anything that matches one of the + branches. + + + + A branch is zero or more quantified atoms or + constraints, concatenated. + It matches a match for the first, followed by a match for the second, etc.; + an empty branch matches the empty string. + + + + A quantified atom is an atom possibly followed + by a single quantifier. + Without a quantifier, it matches a match for the atom. + With a quantifier, it can match some number of matches of the atom. + An atom can be any of the possibilities + shown in . + The possible quantifiers and their meanings are shown in + . + + + + A constraint matches an empty string, but matches only when + specific conditions are met. A constraint can be used where an atom + could be used, except it cannot be followed by a quantifier. + The simple constraints are shown in + ; + some more constraints are described later. + + + + + Regular Expression Atoms + + + + + + + Atom + Description + + + + + + (re) + (where re is any regular expression) + matches a match for + re, with the match noted for possible reporting + + + + (?:re) + as above, but the match is not noted for reporting + (a non-capturing set of parentheses) + (AREs only) + + + + . + matches any single character + + + + [chars] + a bracket expression, + matching any one of the chars (see + for more detail) + + + + \k + (where k is a non-alphanumeric character) + matches that character taken as an ordinary character, + e.g., \\ matches a backslash character + + + + \c + where c is alphanumeric + (possibly followed by other characters) + is an escape, see + (AREs only; in EREs and BREs, this matches c) + + + + { + when followed by a character other than a digit, + matches the left-brace character {; + when followed by a digit, it is the beginning of a + bound (see below) + + + + x + where x is a single character with no other + significance, matches that character + + + +
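+
+  
+   As an illustration of non-capturing parentheses: only capturing groups
+   contribute elements to regexp_match's result array
+   (illustrative calls):
+
+regexp_match('foobarbequebaz', '(bar)(beque)') {bar,beque}
+regexp_match('foobarbequebaz', '(?:bar)(beque)') {beque}
+
+  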
+ + + An RE cannot end with a backslash (\). + + + + + If you have turned off, + any backslashes you write in literal string constants will need to be + doubled. See for more information. + + + + + Regular Expression Quantifiers + + + + + + + Quantifier + Matches + + + + + + * + a sequence of 0 or more matches of the atom + + + + + + a sequence of 1 or more matches of the atom + + + + ? + a sequence of 0 or 1 matches of the atom + + + + {m} + a sequence of exactly m matches of the atom + + + + {m,} + a sequence of m or more matches of the atom + + + + + {m,n} + a sequence of m through n + (inclusive) matches of the atom; m cannot exceed + n + + + + *? + non-greedy version of * + + + + +? + non-greedy version of + + + + + ?? + non-greedy version of ? + + + + {m}? + non-greedy version of {m} + + + + {m,}? + non-greedy version of {m,} + + + + + {m,n}? + non-greedy version of {m,n} + + + +
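+
+  
+   For example, the non-greedy form of * prefers the
+   shortest possible match (an illustrative comparison):
+
+regexp_match('xyxxz', 'x.*x') {xyxx}
+regexp_match('xyxxz', 'x.*?x') {xyx}
+
+  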
+ + + The forms using {...} + are known as bounds. + The numbers m and n within a bound are + unsigned decimal integers with permissible values from 0 to 255 inclusive. + + + + Non-greedy quantifiers (available in AREs only) match the + same possibilities as their corresponding normal (greedy) + counterparts, but prefer the smallest number rather than the largest + number of matches. + See for more detail. + + + + + A quantifier cannot immediately follow another quantifier, e.g., + ** is invalid. + A quantifier cannot + begin an expression or subexpression or follow + ^ or |. + + + + + Regular Expression Constraints + + + + + + + Constraint + Description + + + + + + ^ + matches at the beginning of the string + + + + $ + matches at the end of the string + + + + (?=re) + positive lookahead matches at any point + where a substring matching re begins + (AREs only) + + + + (?!re) + negative lookahead matches at any point + where no substring matching re begins + (AREs only) + + + + (?<=re) + positive lookbehind matches at any point + where a substring matching re ends + (AREs only) + + + + (?<!re) + negative lookbehind matches at any point + where no substring matching re ends + (AREs only) + + + +
+ + + Lookahead and lookbehind constraints cannot contain back + references (see ), + and all parentheses within them are considered non-capturing. + +
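+
+  
+   For example (illustrative; these constructs are available in AREs
+   only):
+
+'postgres' ~ 'post(?=gres)' true
+substring('PostgreSQL 17' from '(?<=PostgreSQL )[0-9]+') 17
+
+  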
+ + + Bracket Expressions + + + A bracket expression is a list of + characters enclosed in []. It normally matches + any single character from the list (but see below). If the list + begins with ^, it matches any single character + not from the rest of the list. + If two characters + in the list are separated by -, this is + shorthand for the full range of characters between those two + (inclusive) in the collating sequence, + e.g., [0-9] in ASCII matches + any decimal digit. It is illegal for two ranges to share an + endpoint, e.g., a-c-e. Ranges are very + collating-sequence-dependent, so portable programs should avoid + relying on them. + + + + To include a literal ] in the list, make it the + first character (after ^, if that is used). To + include a literal -, make it the first or last + character, or the second endpoint of a range. To use a literal + - as the first endpoint of a range, enclose it + in [. and .] to make it a + collating element (see below). With the exception of these characters, + some combinations using [ + (see next paragraphs), and escapes (AREs only), all other special + characters lose their special significance within a bracket expression. + In particular, \ is not special when following + ERE or BRE rules, though it is special (as introducing an escape) + in AREs. + + + + Within a bracket expression, a collating element (a character, a + multiple-character sequence that collates as if it were a single + character, or a collating-sequence name for either) enclosed in + [. and .] stands for the + sequence of characters of that collating element. The sequence is + treated as a single element of the bracket expression's list. This + allows a bracket + expression containing a multiple-character collating element to + match more than one character, e.g., if the collating sequence + includes a ch collating element, then the RE + [[.ch.]]*c matches the first five characters of + chchcc. + + + + + PostgreSQL currently does not support multi-character collating + elements. This information describes possible future behavior. + + + + + Within a bracket expression, a collating element enclosed in + [= and =] is an equivalence + class, standing for the sequences of characters of all collating + elements equivalent to that one, including itself. (If there are + no other equivalent collating elements, the treatment is as if the + enclosing delimiters were [. and + .].) For example, if o and + ^ are the members of an equivalence class, then + [[=o=]], [[=^=]], and + [o^] are all synonymous. An equivalence class + cannot be an endpoint of a range. + + + + Within a bracket expression, the name of a character class + enclosed in [: and :] stands + for the list of all characters belonging to that class. A character + class cannot be used as an endpoint of a range. + The POSIX standard defines these character class + names: + alnum (letters and numeric digits), + alpha (letters), + blank (space and tab), + cntrl (control characters), + digit (numeric digits), + graph (printable characters except space), + lower (lower-case letters), + print (printable characters including space), + punct (punctuation), + space (any white space), + upper (upper-case letters), + and xdigit (hexadecimal digits). + The behavior of these standard character classes is generally + consistent across platforms for characters in the 7-bit ASCII set. 
+ Whether a given non-ASCII character is considered to belong to one + of these classes depends on the collation + that is used for the regular-expression function or operator + (see ), or by default on the + database's LC_CTYPE locale setting (see + ). The classification of non-ASCII + characters can vary across platforms even in similarly-named + locales. (But the C locale never considers any + non-ASCII characters to belong to any of these classes.) + In addition to these standard character + classes, PostgreSQL defines + the word character class, which is the same as + alnum plus the underscore (_) + character, and + the ascii character class, which contains exactly + the 7-bit ASCII set. + + + + There are two special cases of bracket expressions: the bracket + expressions [[:<:]] and + [[:>:]] are constraints, + matching empty strings at the beginning + and end of a word respectively. A word is defined as a sequence + of word characters that is neither preceded nor followed by word + characters. A word character is any character belonging to the + word character class, that is, any letter, digit, + or underscore. This is an extension, compatible with but not + specified by POSIX 1003.2, and should be used with + caution in software intended to be portable to other systems. + The constraint escapes described below are usually preferable; they + are no more standard, but are easier to type. + + + + + Regular Expression Escapes + + + Escapes are special sequences beginning with \ + followed by an alphanumeric character. Escapes come in several varieties: + character entry, class shorthands, constraint escapes, and back references. + A \ followed by an alphanumeric character but not constituting + a valid escape is illegal in AREs. + In EREs, there are no escapes: outside a bracket expression, + a \ followed by an alphanumeric character merely stands for + that character as an ordinary character, and inside a bracket expression, + \ is an ordinary character. + (The latter is the one actual incompatibility between EREs and AREs.) + + + + Character-entry escapes exist to make it easier to specify + non-printing and other inconvenient characters in REs. They are + shown in . + + + + Class-shorthand escapes provide shorthands for certain + commonly-used character classes. They are + shown in . + + + + A constraint escape is a constraint, + matching the empty string if specific conditions are met, + written as an escape. They are + shown in . + + + + A back reference (\n) matches the + same string matched by the previous parenthesized subexpression specified + by the number n + (see ). For example, + ([bc])\1 matches bb or cc + but not bc or cb. + The subexpression must entirely precede the back reference in the RE. + Subexpressions are numbered in the order of their leading parentheses. + Non-capturing parentheses do not define subexpressions. + The back reference considers only the string characters matched by the + referenced subexpression, not any constraints contained in it. For + example, (^\d)\1 will match 22. 
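+ 
+ For example (arbitrary sample strings):
+
+SELECT 'bb' ~ '([bc])\1';
+Result: t
+SELECT 'bc' ~ '([bc])\1';
+Result: f
+SELECT regexp_match('abcabc', '(a.c)\1');
+Result: {abc}
+ 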
+ + + + Regular Expression Character-Entry Escapes + + + + + + + Escape + Description + + + + + + \a + alert (bell) character, as in C + + + + \b + backspace, as in C + + + + \B + synonym for backslash (\) to help reduce the need for backslash + doubling + + + + \cX + (where X is any character) the character whose + low-order 5 bits are the same as those of + X, and whose other bits are all zero + + + + \e + the character whose collating-sequence name + is ESC, + or failing that, the character with octal value 033 + + + + \f + form feed, as in C + + + + \n + newline, as in C + + + + \r + carriage return, as in C + + + + \t + horizontal tab, as in C + + + + \uwxyz + (where wxyz is exactly four hexadecimal digits) + the character whose hexadecimal value is + 0xwxyz + + + + + \Ustuvwxyz + (where stuvwxyz is exactly eight hexadecimal + digits) + the character whose hexadecimal value is + 0xstuvwxyz + + + + + \v + vertical tab, as in C + + + + \xhhh + (where hhh is any sequence of hexadecimal + digits) + the character whose hexadecimal value is + 0xhhh + (a single character no matter how many hexadecimal digits are used) + + + + + \0 + the character whose value is 0 (the null byte) + + + + \xy + (where xy is exactly two octal digits, + and is not a back reference) + the character whose octal value is + 0xy + + + + \xyz + (where xyz is exactly three octal digits, + and is not a back reference) + the character whose octal value is + 0xyz + + + +
+ + + Hexadecimal digits are 0-9, + a-f, and A-F. + Octal digits are 0-7. + + + + Numeric character-entry escapes specifying values outside the ASCII range + (0–127) have meanings dependent on the database encoding. When the + encoding is UTF-8, escape values are equivalent to Unicode code points, + for example \u1234 means the character U+1234. + For other multibyte encodings, character-entry escapes usually just + specify the concatenation of the byte values for the character. If the + escape value does not correspond to any legal character in the database + encoding, no error will be raised, but it will never match any data. + + + + The character-entry escapes are always taken as ordinary characters. + For example, \135 is ] in ASCII, but + \135 does not terminate a bracket expression. + + + + Regular Expression Class-Shorthand Escapes + + + + + + + Escape + Description + + + + + + \d + matches any digit, like + [[:digit:]] + + + + \s + matches any whitespace character, like + [[:space:]] + + + + \w + matches any word character, like + [[:word:]] + + + + \D + matches any non-digit, like + [^[:digit:]] + + + + \S + matches any non-whitespace character, like + [^[:space:]] + + + + \W + matches any non-word character, like + [^[:word:]] + + + +
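+ 
+ For example (assuming a UTF-8 database, so that \u
+ escapes denote Unicode code points):
+
+SELECT 'naïve' ~ '\u00EF';
+Result: t
+SELECT regexp_replace('a1 b2 c3', '\d', '#', 'g');
+Result: a# b# c#
+ 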
+ + + The class-shorthand escapes also work within bracket expressions, + although the definitions shown above are not quite syntactically + valid in that context. + For example, [a-c\d] is equivalent to + [a-c[:digit:]]. + + + + Regular Expression Constraint Escapes + + + + + + + Escape + Description + + + + + + \A + matches only at the beginning of the string + (see for how this differs from + ^) + + + + \m + matches only at the beginning of a word + + + + \M + matches only at the end of a word + + + + \y + matches only at the beginning or end of a word + + + + \Y + matches only at a point that is not the beginning or end of a + word + + + + \Z + matches only at the end of the string + (see for how this differs from + $) + + + +
+ + + A word is defined as in the specification of + [[:<:]] and [[:>:]] above. + Constraint escapes are illegal within bracket expressions. + + + + Regular Expression Back References + + + + + + + Escape + Description + + + + + + \m + (where m is a nonzero digit) + a back reference to the m'th subexpression + + + + \mnn + (where m is a nonzero digit, and + nn is some more digits, and the decimal value + mnn is not greater than the number of closing capturing + parentheses seen so far) + a back reference to the mnn'th subexpression + + + +
+ + + + There is an inherent ambiguity between octal character-entry + escapes and back references, which is resolved by the following heuristics, + as hinted at above. + A leading zero always indicates an octal escape. + A single non-zero digit, not followed by another digit, + is always taken as a back reference. + A multi-digit sequence not starting with a zero is taken as a back + reference if it comes after a suitable subexpression + (i.e., the number is in the legal range for a back reference), + and otherwise is taken as octal. + + +
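+ 
+ For example, the word-related constraint escapes behave as follows
+ (arbitrary sample strings):
+
+SELECT regexp_replace('cat concatenate cat', '\mcat\M', 'dog', 'g');
+Result: dog concatenate dog
+SELECT regexp_count('one two three', '\y\w+\y');
+Result: 3
+ 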
+ + + Regular Expression Metasyntax + + + In addition to the main syntax described above, there are some special + forms and miscellaneous syntactic facilities available. + + + + An RE can begin with one of two special director prefixes. + If an RE begins with ***:, + the rest of the RE is taken as an ARE. (This normally has no effect in + PostgreSQL, since REs are assumed to be AREs; + but it does have an effect if ERE or BRE mode had been specified by + the flags parameter to a regex function.) + If an RE begins with ***=, + the rest of the RE is taken to be a literal string, + with all characters considered ordinary characters. + + + + An ARE can begin with embedded options: + a sequence (?xyz) + (where xyz is one or more alphabetic characters) + specifies options affecting the rest of the RE. + These options override any previously determined options — + in particular, they can override the case-sensitivity behavior implied by + a regex operator, or the flags parameter to a regex + function. + The available option letters are + shown in . + Note that these same option letters are used in the flags + parameters of regex functions. + + + + ARE Embedded-Option Letters + + + + + + + Option + Description + + + + + + b + rest of RE is a BRE + + + + c + case-sensitive matching (overrides operator type) + + + + e + rest of RE is an ERE + + + + i + case-insensitive matching (see + ) (overrides operator type) + + + + m + historical synonym for n + + + + n + newline-sensitive matching (see + ) + + + + p + partial newline-sensitive matching (see + ) + + + + q + rest of RE is a literal (quoted) string, all ordinary + characters + + + + s + non-newline-sensitive matching (default) + + + + t + tight syntax (default; see below) + + + + w + inverse partial newline-sensitive (weird) matching + (see ) + + + + x + expanded syntax (see below) + + + +
+ + + Embedded options take effect at the ) terminating the sequence. + They can appear only at the start of an ARE (after the + ***: director if any). + + + + In addition to the usual (tight) RE syntax, in which all + characters are significant, there is an expanded syntax, + available by specifying the embedded x option. + In the expanded syntax, + white-space characters in the RE are ignored, as are + all characters between a # + and the following newline (or the end of the RE). This + permits paragraphing and commenting a complex RE. + There are three exceptions to that basic rule: + + + + + a white-space character or # preceded by \ is + retained + + + + + white space or # within a bracket expression is retained + + + + + white space and comments cannot appear within multi-character symbols, + such as (?: + + + + + For this purpose, white-space characters are blank, tab, newline, and + any character that belongs to the space character class. + + + + Finally, in an ARE, outside bracket expressions, the sequence + (?#ttt) + (where ttt is any text not containing a )) + is a comment, completely ignored. + Again, this is not allowed between the characters of + multi-character symbols, like (?:. + Such comments are more a historical artifact than a useful facility, + and their use is deprecated; use the expanded syntax instead. + + + + None of these metasyntax extensions is available if + an initial ***= director + has specified that the user's input be treated as a literal string + rather than as an RE. + +
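+ 
+ For example (an illustrative pattern), an embedded option can force
+ case-insensitive matching, and the expanded syntax allows a pattern
+ to be spread out and commented:
+
+SELECT 'FooBar' ~ '(?i)foobar';
+Result: t
+SELECT regexp_match('2024-01-15', '(?x)
+    (\d{4})    # year
+    - (\d{2})  # month
+    - (\d{2})  # day
+');
+Result: {2024,01,15}
+ 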
+ + + Regular Expression Matching Rules + + + In the event that an RE could match more than one substring of a given + string, the RE matches the one starting earliest in the string. + If the RE could match more than one substring starting at that point, + either the longest possible match or the shortest possible match will + be taken, depending on whether the RE is greedy or + non-greedy. + + + + Whether an RE is greedy or not is determined by the following rules: + + + + Most atoms, and all constraints, have no greediness attribute (because + they cannot match variable amounts of text anyway). + + + + + Adding parentheses around an RE does not change its greediness. + + + + + A quantified atom with a fixed-repetition quantifier + ({m} + or + {m}?) + has the same greediness (possibly none) as the atom itself. + + + + + A quantified atom with other normal quantifiers (including + {m,n} + with m equal to n) + is greedy (prefers longest match). + + + + + A quantified atom with a non-greedy quantifier (including + {m,n}? + with m equal to n) + is non-greedy (prefers shortest match). + + + + + A branch — that is, an RE that has no top-level + | operator — has the same greediness as the first + quantified atom in it that has a greediness attribute. + + + + + An RE consisting of two or more branches connected by the + | operator is always greedy. + + + + + + + The above rules associate greediness attributes not only with individual + quantified atoms, but with branches and entire REs that contain quantified + atoms. What that means is that the matching is done in such a way that + the branch, or whole RE, matches the longest or shortest possible + substring as a whole. Once the length of the entire match + is determined, the part of it that matches any particular subexpression + is determined on the basis of the greediness attribute of that + subexpression, with subexpressions starting earlier in the RE taking + priority over ones starting later. + + + + An example of what this means: + +SELECT SUBSTRING('XY1234Z', 'Y*([0-9]{1,3})'); +Result: 123 +SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); +Result: 1 + + In the first case, the RE as a whole is greedy because Y* + is greedy. It can match beginning at the Y, and it matches + the longest possible string starting there, i.e., Y123. + The output is the parenthesized part of that, or 123. + In the second case, the RE as a whole is non-greedy because Y*? + is non-greedy. It can match beginning at the Y, and it matches + the shortest possible string starting there, i.e., Y1. + The subexpression [0-9]{1,3} is greedy but it cannot change + the decision as to the overall match length; so it is forced to match + just 1. + + + + In short, when an RE contains both greedy and non-greedy subexpressions, + the total match length is either as long as possible or as short as + possible, according to the attribute assigned to the whole RE. The + attributes assigned to the subexpressions only affect how much of that + match they are allowed to eat relative to each other. + + + + The quantifiers {1,1} and {1,1}? + can be used to force greediness or non-greediness, respectively, + on a subexpression or a whole RE. + This is useful when you need the whole RE to have a greediness attribute + different from what's deduced from its elements. As an example, + suppose that we are trying to separate a string containing some digits + into the digits and the parts before and after them. 
We might try to + do that like this: + +SELECT regexp_match('abc01234xyz', '(.*)(\d+)(.*)'); +Result: {abc0123,4,xyz} + + That didn't work: the first .* is greedy so + it eats as much as it can, leaving the \d+ to + match at the last possible place, the last digit. We might try to fix + that by making it non-greedy: + +SELECT regexp_match('abc01234xyz', '(.*?)(\d+)(.*)'); +Result: {abc,0,""} + + That didn't work either, because now the RE as a whole is non-greedy + and so it ends the overall match as soon as possible. We can get what + we want by forcing the RE as a whole to be greedy: + +SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); +Result: {abc,01234,xyz} + + Controlling the RE's overall greediness separately from its components' + greediness allows great flexibility in handling variable-length patterns. + + + + When deciding what is a longer or shorter match, + match lengths are measured in characters, not collating elements. + An empty string is considered longer than no match at all. + For example: + bb* + matches the three middle characters of abbbc; + (week|wee)(night|knights) + matches all ten characters of weeknights; + when (.*).* + is matched against abc the parenthesized subexpression + matches all three characters; and when + (a*)* is matched against bc + both the whole RE and the parenthesized + subexpression match an empty string. + + + + If case-independent matching is specified, + the effect is much as if all case distinctions had vanished from the + alphabet. + When an alphabetic that exists in multiple cases appears as an + ordinary character outside a bracket expression, it is effectively + transformed into a bracket expression containing both cases, + e.g., x becomes [xX]. + When it appears inside a bracket expression, all case counterparts + of it are added to the bracket expression, e.g., + [x] becomes [xX] + and [^x] becomes [^xX]. + + + + If newline-sensitive matching is specified, . + and bracket expressions using ^ + will never match the newline character + (so that matches will not cross lines unless the RE + explicitly includes a newline) + and ^ and $ + will match the empty string after and before a newline + respectively, in addition to matching at beginning and end of string + respectively. + But the ARE escapes \A and \Z + continue to match beginning or end of string only. + Also, the character class shorthands \D + and \W will match a newline regardless of this mode. + (Before PostgreSQL 14, they did not match + newlines when in newline-sensitive mode. + Write [^[:digit:]] + or [^[:word:]] to get the old behavior.) + + + + If partial newline-sensitive matching is specified, + this affects . and bracket expressions + as with newline-sensitive matching, but not ^ + and $. + + + + If inverse partial newline-sensitive matching is specified, + this affects ^ and $ + as with newline-sensitive matching, but not . + and bracket expressions. + This isn't very useful but is provided for symmetry. + + + + + Limits and Compatibility + + + No particular limit is imposed on the length of REs in this + implementation. However, + programs intended to be highly portable should not employ REs longer + than 256 bytes, + as a POSIX-compliant implementation can refuse to accept such REs. + + + + The only feature of AREs that is actually incompatible with + POSIX EREs is that \ does not lose its special + significance inside bracket expressions. 
+ All other ARE features use syntax which is illegal or has
+ undefined or unspecified effects in POSIX EREs;
+ the *** syntax of directors likewise is outside the POSIX
+ syntax for both BREs and EREs.
+ 
+ 
+ 
+ Many of the ARE extensions are borrowed from Perl, but some have
+ been changed to clean them up, and a few Perl extensions are not present.
+ Incompatibilities of note include \b, \B,
+ the lack of special treatment for a trailing newline,
+ the addition of complemented bracket expressions to the things
+ affected by newline-sensitive matching,
+ the restrictions on parentheses and back references in lookahead/lookbehind
+ constraints, and the longest/shortest-match (rather than first-match)
+ matching semantics.
+ 
+ 
+ 
+ 
+ Basic Regular Expressions
+ 
+ 
+ BREs differ from EREs in several respects.
+ In BREs, |, +, and ?
+ are ordinary characters and there is no equivalent
+ for their functionality.
+ The delimiters for bounds are
+ \{ and \},
+ with { and }
+ by themselves ordinary characters.
+ The parentheses for nested subexpressions are
+ \( and \),
+ with ( and ) by themselves ordinary characters.
+ ^ is an ordinary character except at the beginning of the
+ RE or the beginning of a parenthesized subexpression,
+ $ is an ordinary character except at the end of the
+ RE or the end of a parenthesized subexpression,
+ and * is an ordinary character if it appears at the beginning
+ of the RE or the beginning of a parenthesized subexpression
+ (after a possible leading ^).
+ Finally, single-digit back references are available, and
+ \< and \>
+ are synonyms for
+ [[:<:]] and [[:>:]]
+ respectively; no other escapes are available in BREs.
+ 
+ 
+ 
+ 
+ 
+ 
+ Differences from SQL Standard and XQuery
+ 
+ 
+ LIKE_REGEX
+ 
+ 
+ 
+ OCCURRENCES_REGEX
+ 
+ 
+ 
+ POSITION_REGEX
+ 
+ 
+ 
+ SUBSTRING_REGEX
+ 
+ 
+ 
+ TRANSLATE_REGEX
+ 
+ 
+ 
+ XQuery regular expressions
+ 
+ 
+ 
+ Since SQL:2008, the SQL standard includes regular expression operators
+ and functions that perform pattern
+ matching according to the XQuery regular expression
+ standard:
+ 
+ LIKE_REGEX
+ OCCURRENCES_REGEX
+ POSITION_REGEX
+ SUBSTRING_REGEX
+ TRANSLATE_REGEX
+ 
+ PostgreSQL does not currently implement these
+ operators and functions. You can get approximately equivalent
+ functionality in each case as shown in . (Various optional clauses on
+ both sides have been omitted in this table.)
+ 
+ 
+ 
+ Regular Expression Functions Equivalencies
+ 
+ 
+ 
+ 
+ SQL standard
+ PostgreSQL
+ 
+ 
+ 
+ 
+ 
+ string LIKE_REGEX pattern
+ regexp_like(string, pattern) or string ~ pattern
+ 
+ 
+ 
+ OCCURRENCES_REGEX(pattern IN string)
+ regexp_count(string, pattern)
+ 
+ 
+ 
+ POSITION_REGEX(pattern IN string)
+ regexp_instr(string, pattern)
+ 
+ 
+ 
+ SUBSTRING_REGEX(pattern IN string)
+ regexp_substr(string, pattern)
+ 
+ 
+ 
+ TRANSLATE_REGEX(pattern IN string WITH replacement)
+ regexp_replace(string, pattern, replacement)
+ 
+ 
+ 
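+ 
+ For example, using the PostgreSQL equivalents from the table above
+ (arbitrary sample strings):
+
+SELECT regexp_like('abc', 'a.c');
+Result: t
+SELECT regexp_count('ababab', 'ab');
+Result: 3
+SELECT regexp_instr('abcdef', 'c.');
+Result: 3
+SELECT regexp_substr('abcdef', 'c..');
+Result: cde
+SELECT regexp_replace('abc', 'b', 'X');
+Result: aXc
+ 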
+ + + Regular expression functions similar to those provided by PostgreSQL are + also available in a number of other SQL implementations, whereas the + SQL-standard functions are not as widely implemented. Some of the + details of the regular expression syntax will likely differ in each + implementation. + + + + The SQL-standard operators and functions use XQuery regular expressions, + which are quite close to the ARE syntax described above. + Notable differences between the existing POSIX-based + regular-expression feature and XQuery regular expressions include: + + + + + XQuery character class subtraction is not supported. An example of + this feature is using the following to match only English + consonants: [a-z-[aeiou]]. + + + + + XQuery character class shorthands \c, + \C, \i, + and \I are not supported. + + + + + XQuery character class elements + using \p{UnicodeProperty} or the + inverse \P{UnicodeProperty} are not supported. + + + + + POSIX interprets character classes such as \w + (see ) + according to the prevailing locale (which you can control by + attaching a COLLATE clause to the operator or + function). XQuery specifies these classes by reference to Unicode + character properties, so equivalent behavior is obtained only with + a locale that follows the Unicode rules. + + + + + The SQL standard (not XQuery itself) attempts to cater for more + variants of newline than POSIX does. The + newline-sensitive matching options described above consider only + ASCII NL (\n) to be a newline, but SQL would have + us treat CR (\r), CRLF (\r\n) + (a Windows-style newline), and some Unicode-only characters like + LINE SEPARATOR (U+2028) as newlines as well. + Notably, . and \s should + count \r\n as one character not two according to + SQL. + + + + + Of the character-entry escapes described in + , + XQuery supports only \n, \r, + and \t. + + + + + XQuery does not support + the [:name:] syntax + for character classes within bracket expressions. + + + + + XQuery does not have lookahead or lookbehind constraints, + nor any of the constraint escapes described in + . + + + + + The metasyntax forms described in + do not exist in XQuery. + + + + + The regular expression flag letters defined by XQuery are + related to but not the same as the option letters for POSIX + (). While the + i and q options behave the + same, others do not: + + + + XQuery's s (allow dot to match newline) + and m (allow ^ + and $ to match at newlines) flags provide + access to the same behaviors as + POSIX's n, p + and w flags, but they + do not match the behavior of + POSIX's s and m flags. + Note in particular that dot-matches-newline is the default + behavior in POSIX but not XQuery. + + + + + XQuery's x (ignore whitespace in pattern) flag + is noticeably different from POSIX's expanded-mode flag. + POSIX's x flag also + allows # to begin a comment in the pattern, + and POSIX will not ignore a whitespace character after a + backslash. + + + + + + + + +
+
+
diff --git a/doc/src/sgml/func/func-math.sgml b/doc/src/sgml/func/func-math.sgml new file mode 100644 index 0000000000000..9dcf97e7c9e06 --- /dev/null +++ b/doc/src/sgml/func/func-math.sgml @@ -0,0 +1,1616 @@ + + Mathematical Functions and Operators + + + Mathematical operators are provided for many + PostgreSQL types. For types without + standard mathematical conventions + (e.g., date/time types) we + describe the actual behavior in subsequent sections. + + + + shows the mathematical + operators that are available for the standard numeric types. + Unless otherwise noted, operators shown as + accepting numeric_type are available for all + the types smallint, integer, + bigint, numeric, real, + and double precision. + Operators shown as accepting integral_type + are available for the types smallint, integer, + and bigint. + Except where noted, each form of an operator returns the same data type + as its argument(s). Calls involving multiple argument data types, such + as integer + numeric, + are resolved by using the type appearing later in these lists. + + + + Mathematical Operators + + + + + + Operator + + + Description + + + Example(s) + + + + + + + + numeric_type + numeric_type + numeric_type + + + Addition + + + 2 + 3 + 5 + + + + + + + numeric_type + numeric_type + + + Unary plus (no operation) + + + + 3.5 + 3.5 + + + + + + numeric_type - numeric_type + numeric_type + + + Subtraction + + + 2 - 3 + -1 + + + + + + - numeric_type + numeric_type + + + Negation + + + - (-4) + 4 + + + + + + numeric_type * numeric_type + numeric_type + + + Multiplication + + + 2 * 3 + 6 + + + + + + numeric_type / numeric_type + numeric_type + + + Division (for integral types, division truncates the result towards + zero) + + + 5.0 / 2 + 2.5000000000000000 + + + 5 / 2 + 2 + + + (-5) / 2 + -2 + + + + + + numeric_type % numeric_type + numeric_type + + + Modulo (remainder); available for smallint, + integer, bigint, and numeric + + + 5 % 4 + 1 + + + + + + numeric ^ numeric + numeric + + + double precision ^ double precision + double precision + + + Exponentiation + + + 2 ^ 3 + 8 + + + Unlike typical mathematical practice, multiple uses of + ^ will associate left to right by default: + + + 2 ^ 3 ^ 3 + 512 + + + 2 ^ (3 ^ 3) + 134217728 + + + + + + |/ double precision + double precision + + + Square root + + + |/ 25.0 + 5 + + + + + + ||/ double precision + double precision + + + Cube root + + + ||/ 64.0 + 4 + + + + + + @ numeric_type + numeric_type + + + Absolute value + + + @ -5.0 + 5.0 + + + + + + integral_type & integral_type + integral_type + + + Bitwise AND + + + 91 & 15 + 11 + + + + + + integral_type | integral_type + integral_type + + + Bitwise OR + + + 32 | 3 + 35 + + + + + + integral_type # integral_type + integral_type + + + Bitwise exclusive OR + + + 17 # 5 + 20 + + + + + + ~ integral_type + integral_type + + + Bitwise NOT + + + ~1 + -2 + + + + + + integral_type << integer + integral_type + + + Bitwise shift left + + + 1 << 4 + 16 + + + + + + integral_type >> integer + integral_type + + + Bitwise shift right + + + 8 >> 2 + 2 + + + + + +
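+ 
+ For example, a mixed-type calculation is resolved to the type
+ appearing later in the lists above (here, numeric;
+ pg_typeof is used to show the result type):
+
+SELECT 2 + 4.5;
+Result: 6.5
+SELECT pg_typeof(2 + 4.5);
+Result: numeric
+ 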
+ + + shows the available + mathematical functions. + Many of these functions are provided in multiple forms with different + argument types. + Except where noted, any given form of a function returns the same + data type as its argument(s); cross-type cases are resolved in the + same way as explained above for operators. + The functions working with double precision data are mostly + implemented on top of the host system's C library; accuracy and behavior in + boundary cases can therefore vary depending on the host system. + + + + Mathematical Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + abs + + abs ( numeric_type ) + numeric_type + + + Absolute value + + + abs(-17.4) + 17.4 + + + + + + + cbrt + + cbrt ( double precision ) + double precision + + + Cube root + + + cbrt(64.0) + 4 + + + + + + + ceil + + ceil ( numeric ) + numeric + + + ceil ( double precision ) + double precision + + + Nearest integer greater than or equal to argument + + + ceil(42.2) + 43 + + + ceil(-42.8) + -42 + + + + + + + ceiling + + ceiling ( numeric ) + numeric + + + ceiling ( double precision ) + double precision + + + Nearest integer greater than or equal to argument (same + as ceil) + + + ceiling(95.3) + 96 + + + + + + + degrees + + degrees ( double precision ) + double precision + + + Converts radians to degrees + + + degrees(0.5) + 28.64788975654116 + + + + + + + div + + div ( y numeric, + x numeric ) + numeric + + + Integer quotient of y/x + (truncates towards zero) + + + div(9, 4) + 2 + + + + + + + erf + + erf ( double precision ) + double precision + + + Error function + + + erf(1.0) + 0.8427007929497149 + + + + + + + erfc + + erfc ( double precision ) + double precision + + + Complementary error function (1 - erf(x), without + loss of precision for large inputs) + + + erfc(1.0) + 0.15729920705028513 + + + + + + + exp + + exp ( numeric ) + numeric + + + exp ( double precision ) + double precision + + + Exponential (e raised to the given power) + + + exp(1.0) + 2.7182818284590452 + + + + + + + factorial + + factorial ( bigint ) + numeric + + + Factorial + + + factorial(5) + 120 + + + + + + + floor + + floor ( numeric ) + numeric + + + floor ( double precision ) + double precision + + + Nearest integer less than or equal to argument + + + floor(42.8) + 42 + + + floor(-42.8) + -43 + + + + + + + gamma + + gamma ( double precision ) + double precision + + + Gamma function + + + gamma(0.5) + 1.772453850905516 + + + gamma(6) + 120 + + + + + + + gcd + + gcd ( numeric_type, numeric_type ) + numeric_type + + + Greatest common divisor (the largest positive number that divides both + inputs with no remainder); returns 0 if both inputs + are zero; available for integer, bigint, + and numeric + + + gcd(1071, 462) + 21 + + + + + + + lcm + + lcm ( numeric_type, numeric_type ) + numeric_type + + + Least common multiple (the smallest strictly positive number that is + an integral multiple of both inputs); returns 0 if + either input is zero; available for integer, + bigint, and numeric + + + lcm(1071, 462) + 23562 + + + + + + + lgamma + + lgamma ( double precision ) + double precision + + + Natural logarithm of the absolute value of the gamma function + + + lgamma(1000) + 5905.220423209181 + + + + + + + ln + + ln ( numeric ) + numeric + + + ln ( double precision ) + double precision + + + Natural logarithm + + + ln(2.0) + 0.6931471805599453 + + + + + + + log + + log ( numeric ) + numeric + + + log ( double precision ) + double precision + + + Base 10 logarithm + + + log(100) + 2 + + + + + + 
+ log10 + + log10 ( numeric ) + numeric + + + log10 ( double precision ) + double precision + + + Base 10 logarithm (same as log) + + + log10(1000) + 3 + + + + + + log ( b numeric, + x numeric ) + numeric + + + Logarithm of x to base b + + + log(2.0, 64.0) + 6.0000000000000000 + + + + + + + min_scale + + min_scale ( numeric ) + integer + + + Minimum scale (number of fractional decimal digits) needed + to represent the supplied value precisely + + + min_scale(8.4100) + 2 + + + + + + + mod + + mod ( y numeric_type, + x numeric_type ) + numeric_type + + + Remainder of y/x; + available for smallint, integer, + bigint, and numeric + + + mod(9, 4) + 1 + + + + + + + pi + + pi ( ) + double precision + + + Approximate value of π + + + pi() + 3.141592653589793 + + + + + + + power + + power ( a numeric, + b numeric ) + numeric + + + power ( a double precision, + b double precision ) + double precision + + + a raised to the power of b + + + power(9, 3) + 729 + + + + + + + radians + + radians ( double precision ) + double precision + + + Converts degrees to radians + + + radians(45.0) + 0.7853981633974483 + + + + + + + round + + round ( numeric ) + numeric + + + round ( double precision ) + double precision + + + Rounds to nearest integer. For numeric, ties are + broken by rounding away from zero. For double precision, + the tie-breaking behavior is platform dependent, but + round to nearest even is the most common rule. + + + round(42.4) + 42 + + + + + + round ( v numeric, s integer ) + numeric + + + Rounds v to s decimal + places. Ties are broken by rounding away from zero. + + + round(42.4382, 2) + 42.44 + + + round(1234.56, -1) + 1230 + + + + + + + scale + + scale ( numeric ) + integer + + + Scale of the argument (the number of decimal digits in the fractional part) + + + scale(8.4100) + 4 + + + + + + + sign + + sign ( numeric ) + numeric + + + sign ( double precision ) + double precision + + + Sign of the argument (-1, 0, or +1) + + + sign(-8.4) + -1 + + + + + + + sqrt + + sqrt ( numeric ) + numeric + + + sqrt ( double precision ) + double precision + + + Square root + + + sqrt(2) + 1.4142135623730951 + + + + + + + trim_scale + + trim_scale ( numeric ) + numeric + + + Reduces the value's scale (number of fractional decimal digits) by + removing trailing zeroes + + + trim_scale(8.4100) + 8.41 + + + + + + + trunc + + trunc ( numeric ) + numeric + + + trunc ( double precision ) + double precision + + + Truncates to integer (towards zero) + + + trunc(42.8) + 42 + + + trunc(-42.8) + -42 + + + + + + trunc ( v numeric, s integer ) + numeric + + + Truncates v to s + decimal places + + + trunc(42.4382, 2) + 42.43 + + + + + + + width_bucket + + width_bucket ( operand numeric, low numeric, high numeric, count integer ) + integer + + + width_bucket ( operand double precision, low double precision, high double precision, count integer ) + integer + + + Returns the number of the bucket in + which operand falls in a histogram + having count equal-width buckets spanning the + range low to high. + The buckets have inclusive lower bounds and exclusive upper bounds. + Returns 0 for an input less + than low, + or count+1 for an input + greater than or equal to high. + If low > high, + the behavior is mirror-reversed, with bucket 1 + now being the one just below low, and the + inclusive bounds now being on the upper side. 
+ + + width_bucket(5.35, 0.024, 10.06, 5) + 3 + + + width_bucket(9, 10, 0, 10) + 2 + + + + + + width_bucket ( operand anycompatible, thresholds anycompatiblearray ) + integer + + + Returns the number of the bucket in + which operand falls given an array listing the + inclusive lower bounds of the buckets. + Returns 0 for an input less than the first lower + bound. operand and the array elements can be + of any type having standard comparison operators. + The thresholds array must be + sorted, smallest first, or unexpected results will be + obtained. + + + width_bucket(now(), array['yesterday', 'today', 'tomorrow']::timestamptz[]) + 2 + + + + +
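+ 
+ For example, with five equal-width buckets spanning 0 to 100,
+ out-of-range inputs fall into the two overflow buckets:
+
+SELECT width_bucket(42, 0, 100, 5);
+Result: 3
+SELECT width_bucket(-5, 0, 100, 5);
+Result: 0
+SELECT width_bucket(100, 0, 100, 5);
+Result: 6
+ 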
+ + + shows functions for + generating random numbers. + + + + Random Functions + + + + + + Function + + + Description + + + Example(s) + + + + + + + + + random + + random ( ) + double precision + + + Returns a random value in the range 0.0 <= x < 1.0 + + + random() + 0.897124072839091 + + + + + + + random + + random ( min integer, max integer ) + integer + + + random ( min bigint, max bigint ) + bigint + + + random ( min numeric, max numeric ) + numeric + + + Returns a random value in the range + min <= x <= max. + For type numeric, the result will have the same number of + fractional decimal digits as min or + max, whichever has more. + + + random(1, 10) + 7 + + + random(-0.499, 0.499) + 0.347 + + + + + + + random_normal + + + random_normal ( + mean double precision + , stddev double precision ) + double precision + + + Returns a random value from the normal distribution with the given + parameters; mean defaults to 0.0 + and stddev defaults to 1.0 + + + random_normal(0.0, 1.0) + 0.051285419 + + + + + + + setseed + + setseed ( double precision ) + void + + + Sets the seed for subsequent random() and + random_normal() calls; + argument must be between -1.0 and 1.0, inclusive + + + setseed(0.12345) + + + + +
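+ 
+ For example (outputs vary between runs unless the seed has been
+ fixed with setseed(); the value shown is illustrative):
+
+SELECT random(1, 6);
+Result: 4
+ 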
+ + + The random() and random_normal() + functions listed in and + use a + deterministic pseudo-random number generator. + It is fast but not suitable for cryptographic + applications; see the module for a more + secure alternative. + If setseed() is called, the series of results of + subsequent calls to these functions in the current session + can be repeated by re-issuing setseed() with the same + argument. + Without any prior setseed() call in the same + session, the first call to any of these functions obtains a seed + from a platform-dependent source of random bits. + + + + shows the + available trigonometric functions. Each of these functions comes in + two variants, one that measures angles in radians and one that + measures angles in degrees. + + + + Trigonometric Functions + + + + + + Function + + + Description + + + Example(s) + + + + + + + + + acos + + acos ( double precision ) + double precision + + + Inverse cosine, result in radians + + + acos(1) + 0 + + + + + + + acosd + + acosd ( double precision ) + double precision + + + Inverse cosine, result in degrees + + + acosd(0.5) + 60 + + + + + + + asin + + asin ( double precision ) + double precision + + + Inverse sine, result in radians + + + asin(1) + 1.5707963267948966 + + + + + + + asind + + asind ( double precision ) + double precision + + + Inverse sine, result in degrees + + + asind(0.5) + 30 + + + + + + + atan + + atan ( double precision ) + double precision + + + Inverse tangent, result in radians + + + atan(1) + 0.7853981633974483 + + + + + + + atand + + atand ( double precision ) + double precision + + + Inverse tangent, result in degrees + + + atand(1) + 45 + + + + + + + atan2 + + atan2 ( y double precision, + x double precision ) + double precision + + + Inverse tangent of + y/x, + result in radians + + + atan2(1, 0) + 1.5707963267948966 + + + + + + + atan2d + + atan2d ( y double precision, + x double precision ) + double precision + + + Inverse tangent of + y/x, + result in degrees + + + atan2d(1, 0) + 90 + + + + + + + cos + + cos ( double precision ) + double precision + + + Cosine, argument in radians + + + cos(0) + 1 + + + + + + + cosd + + cosd ( double precision ) + double precision + + + Cosine, argument in degrees + + + cosd(60) + 0.5 + + + + + + + cot + + cot ( double precision ) + double precision + + + Cotangent, argument in radians + + + cot(0.5) + 1.830487721712452 + + + + + + + cotd + + cotd ( double precision ) + double precision + + + Cotangent, argument in degrees + + + cotd(45) + 1 + + + + + + + sin + + sin ( double precision ) + double precision + + + Sine, argument in radians + + + sin(1) + 0.8414709848078965 + + + + + + + sind + + sind ( double precision ) + double precision + + + Sine, argument in degrees + + + sind(30) + 0.5 + + + + + + + tan + + tan ( double precision ) + double precision + + + Tangent, argument in radians + + + tan(1) + 1.5574077246549023 + + + + + + + tand + + tand ( double precision ) + double precision + + + Tangent, argument in degrees + + + tand(45) + 1 + + + + +
+ + + + Another way to work with angles measured in degrees is to use the unit + transformation functions radians() + and degrees() shown earlier. + However, using the degree-based trigonometric functions is preferred, + as that way avoids round-off error for special cases such + as sind(30). + + + + + shows the + available hyperbolic functions. + + + + Hyperbolic Functions + + + + + + Function + + + Description + + + Example(s) + + + + + + + + + sinh + + sinh ( double precision ) + double precision + + + Hyperbolic sine + + + sinh(1) + 1.1752011936438014 + + + + + + + cosh + + cosh ( double precision ) + double precision + + + Hyperbolic cosine + + + cosh(0) + 1 + + + + + + + tanh + + tanh ( double precision ) + double precision + + + Hyperbolic tangent + + + tanh(1) + 0.7615941559557649 + + + + + + + asinh + + asinh ( double precision ) + double precision + + + Inverse hyperbolic sine + + + asinh(1) + 0.881373587019543 + + + + + + + acosh + + acosh ( double precision ) + double precision + + + Inverse hyperbolic cosine + + + acosh(1) + 0 + + + + + + + atanh + + atanh ( double precision ) + double precision + + + Inverse hyperbolic tangent + + + atanh(0.5) + 0.5493061443340548 + + + + +
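+ 
+ As an illustration of the round-off point above (the radian-based
+ result is platform dependent; the value shown is typical of IEEE
+ systems):
+
+SELECT sind(30);
+Result: 0.5
+SELECT sin(radians(30));
+Result: 0.49999999999999994
+ 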
+ +
diff --git a/doc/src/sgml/func/func-merge-support.sgml b/doc/src/sgml/func/func-merge-support.sgml
new file mode 100644
index 0000000000000..7f084271c13ae
--- /dev/null
+++ b/doc/src/sgml/func/func-merge-support.sgml
@@ -0,0 +1,78 @@
+
+ Merge Support Functions
+
+ 
+ MERGE
+ RETURNING
+ 
+
+ 
+ PostgreSQL includes one merge support function
+ that may be used in the RETURNING list of a
+ MERGE command
+ to identify the action taken for each
+ row; see .
+ 
+
+ 
+ Merge Support Functions
+
+ 
+ 
+ 
+ 
+ Function
+ 
+ 
+ Description
+ 
+ 
+
+ 
+ 
+ 
+ 
+ merge_action
+ 
+ merge_action ( )
+ text
+ 
+ 
+ Returns the merge action command executed for the current row. This
+ will be 'INSERT', 'UPDATE', or
+ 'DELETE'.
+ 
+ 
+ 
+
+ 
+ 
+ Example:
+
+MERGE INTO products p
+  USING stock s ON p.product_id = s.product_id
+  WHEN MATCHED AND s.quantity > 0 THEN
+    UPDATE SET in_stock = true, quantity = s.quantity
+  WHEN MATCHED THEN
+    UPDATE SET in_stock = false, quantity = 0
+  WHEN NOT MATCHED THEN
+    INSERT (product_id, in_stock, quantity)
+    VALUES (s.product_id, true, s.quantity)
+  RETURNING merge_action(), p.*;
+
+ merge_action | product_id | in_stock | quantity
+--------------+------------+----------+----------
+ UPDATE       | 1001       | t        | 50
+ UPDATE       | 1002       | f        | 0
+ INSERT       | 1003       | t        | 10
+ 
+
+ 
+ Note that this function can only be used in the RETURNING
+ list of a MERGE command. It is an error to use it in any
+ other part of a query.
+ 
diff --git a/doc/src/sgml/func/func-net.sgml b/doc/src/sgml/func/func-net.sgml new file mode 100644 index 0000000000000..1361a44c19767 --- /dev/null +++ b/doc/src/sgml/func/func-net.sgml @@ -0,0 +1,592 @@ + + Network Address Functions and Operators + + + The IP network address types, cidr and inet, + support the usual comparison operators shown in + + as well as the specialized operators and functions shown in + and + . + + + + Any cidr value can be cast to inet implicitly; + therefore, the operators and functions shown below as operating on + inet also work on cidr values. (Where there are + separate functions for inet and cidr, it is + because the behavior should be different for the two cases.) + Also, it is permitted to cast an inet value + to cidr. When this is done, any bits to the right of the + netmask are silently zeroed to create a valid cidr value. + + + + IP Address Operators + + + + + Operator + + + Description + + + Example(s) + + + + + + + + inet << inet + boolean + + + Is subnet strictly contained by subnet? + This operator, and the next four, test for subnet inclusion. They + consider only the network parts of the two addresses (ignoring any + bits to the right of the netmasks) and determine whether one network + is identical to or a subnet of the other. + + + inet '192.168.1.5' << inet '192.168.1/24' + t + + + inet '192.168.0.5' << inet '192.168.1/24' + f + + + inet '192.168.1/24' << inet '192.168.1/24' + f + + + + + + inet <<= inet + boolean + + + Is subnet contained by or equal to subnet? + + + inet '192.168.1/24' <<= inet '192.168.1/24' + t + + + + + + inet >> inet + boolean + + + Does subnet strictly contain subnet? + + + inet '192.168.1/24' >> inet '192.168.1.5' + t + + + + + + inet >>= inet + boolean + + + Does subnet contain or equal subnet? + + + inet '192.168.1/24' >>= inet '192.168.1/24' + t + + + + + + inet && inet + boolean + + + Does either subnet contain or equal the other? + + + inet '192.168.1/24' && inet '192.168.1.80/28' + t + + + inet '192.168.1/24' && inet '192.168.2.0/28' + f + + + + + + ~ inet + inet + + + Computes bitwise NOT. + + + ~ inet '192.168.1.6' + 63.87.254.249 + + + + + + inet & inet + inet + + + Computes bitwise AND. + + + inet '192.168.1.6' & inet '0.0.0.255' + 0.0.0.6 + + + + + + inet | inet + inet + + + Computes bitwise OR. + + + inet '192.168.1.6' | inet '0.0.0.255' + 192.168.1.255 + + + + + + inet + bigint + inet + + + Adds an offset to an address. + + + inet '192.168.1.6' + 25 + 192.168.1.31 + + + + + + bigint + inet + inet + + + Adds an offset to an address. + + + 200 + inet '::ffff:fff0:1' + ::ffff:255.240.0.201 + + + + + + inet - bigint + inet + + + Subtracts an offset from an address. + + + inet '192.168.1.43' - 36 + 192.168.1.7 + + + + + + inet - inet + bigint + + + Computes the difference of two addresses. + + + inet '192.168.1.43' - inet '192.168.1.19' + 24 + + + inet '::1' - inet '::ffff:1' + -4294901760 + + + + +
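+ 
+ For example, containment tests and address arithmetic compose in
+ ordinary expressions (the addresses are illustrative):
+
+SELECT inet '10.0.0.5' << inet '10.0.0.0/24';
+Result: t
+SELECT inet '10.0.0.250' + 10;
+Result: 10.0.1.4
+ 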
+ + + IP Address Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + abbrev + + abbrev ( inet ) + text + + + Creates an abbreviated display format as text. + (The result is the same as the inet output function + produces; it is abbreviated only in comparison to the + result of an explicit cast to text, which for historical + reasons will never suppress the netmask part.) + + + abbrev(inet '10.1.0.0/32') + 10.1.0.0 + + + + + + abbrev ( cidr ) + text + + + Creates an abbreviated display format as text. + (The abbreviation consists of dropping all-zero octets to the right + of the netmask; more examples are in + .) + + + abbrev(cidr '10.1.0.0/16') + 10.1/16 + + + + + + + broadcast + + broadcast ( inet ) + inet + + + Computes the broadcast address for the address's network. + + + broadcast(inet '192.168.1.5/24') + 192.168.1.255/24 + + + + + + + family + + family ( inet ) + integer + + + Returns the address's family: 4 for IPv4, + 6 for IPv6. + + + family(inet '::1') + 6 + + + + + + + host + + host ( inet ) + text + + + Returns the IP address as text, ignoring the netmask. + + + host(inet '192.168.1.0/24') + 192.168.1.0 + + + + + + + hostmask + + hostmask ( inet ) + inet + + + Computes the host mask for the address's network. + + + hostmask(inet '192.168.23.20/30') + 0.0.0.3 + + + + + + + inet_merge + + inet_merge ( inet, inet ) + cidr + + + Computes the smallest network that includes both of the given networks. + + + inet_merge(inet '192.168.1.5/24', inet '192.168.2.5/24') + 192.168.0.0/22 + + + + + + + inet_same_family + + inet_same_family ( inet, inet ) + boolean + + + Tests whether the addresses belong to the same IP family. + + + inet_same_family(inet '192.168.1.5/24', inet '::1') + f + + + + + + + masklen + + masklen ( inet ) + integer + + + Returns the netmask length in bits. + + + masklen(inet '192.168.1.5/24') + 24 + + + + + + + netmask + + netmask ( inet ) + inet + + + Computes the network mask for the address's network. + + + netmask(inet '192.168.1.5/24') + 255.255.255.0 + + + + + + + network + + network ( inet ) + cidr + + + Returns the network part of the address, zeroing out + whatever is to the right of the netmask. + (This is equivalent to casting the value to cidr.) + + + network(inet '192.168.1.5/24') + 192.168.1.0/24 + + + + + + + set_masklen + + set_masklen ( inet, integer ) + inet + + + Sets the netmask length for an inet value. + The address part does not change. + + + set_masklen(inet '192.168.1.5/24', 16) + 192.168.1.5/16 + + + + + + set_masklen ( cidr, integer ) + cidr + + + Sets the netmask length for a cidr value. + Address bits to the right of the new netmask are set to zero. + + + set_masklen(cidr '192.168.1.0/24', 16) + 192.168.0.0/16 + + + + + + + text + + text ( inet ) + text + + + Returns the unabbreviated IP address and netmask length as text. + (This has the same result as an explicit cast to text.) + + + text(inet '192.168.1.5') + 192.168.1.5/32 + + + + +
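+ 
+ For example, deriving the per-network values for a host address:
+
+SELECT network(inet '192.168.1.5/24');
+Result: 192.168.1.0/24
+SELECT broadcast(inet '192.168.1.5/24');
+Result: 192.168.1.255/24
+SELECT hostmask(inet '192.168.1.5/24');
+Result: 0.0.0.255
+ 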
+ + + + The abbrev, host, + and text functions are primarily intended to offer + alternative display formats for IP addresses. + + + + + The MAC address types, macaddr and macaddr8, + support the usual comparison operators shown in + + as well as the specialized functions shown in + . + In addition, they support the bitwise logical operators + ~, & and | + (NOT, AND and OR), just as shown above for IP addresses. + + + + MAC Address Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + trunc + + trunc ( macaddr ) + macaddr + + + Sets the last 3 bytes of the address to zero. The remaining prefix + can be associated with a particular manufacturer (using data not + included in PostgreSQL). + + + trunc(macaddr '12:34:56:78:90:ab') + 12:34:56:00:00:00 + + + + + + trunc ( macaddr8 ) + macaddr8 + + + Sets the last 5 bytes of the address to zero. The remaining prefix + can be associated with a particular manufacturer (using data not + included in PostgreSQL). + + + trunc(macaddr8 '12:34:56:78:90:ab:cd:ef') + 12:34:56:00:00:00:00:00 + + + + + + + macaddr8_set7bit + + macaddr8_set7bit ( macaddr8 ) + macaddr8 + + + Sets the 7th bit of the address to one, creating what is known as + modified EUI-64, for inclusion in an IPv6 address. + + + macaddr8_set7bit(macaddr8 '00:34:56:ab:cd:ef') + 02:34:56:ff:fe:ab:cd:ef + + + + +
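+ 
+ For example, rows can be grouped by vendor prefix
+ (devices is a hypothetical table with a
+ macaddr column mac):
+
+-- "devices" and "mac" are illustrative names, not built-in objects
+SELECT trunc(mac) AS vendor_prefix, count(*)
+FROM devices
+GROUP BY trunc(mac);
+ 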
+ +
diff --git a/doc/src/sgml/func/func-range.sgml b/doc/src/sgml/func/func-range.sgml new file mode 100644 index 0000000000000..2dc40348a57f4 --- /dev/null +++ b/doc/src/sgml/func/func-range.sgml @@ -0,0 +1,1053 @@ + + Range/Multirange Functions and Operators + + + See for an overview of range types. + + + + shows the specialized operators + available for range types. + shows the specialized operators + available for multirange types. + In addition to those, the usual comparison operators shown in + are available for range + and multirange types. The comparison operators order first by the range lower + bounds, and only if those are equal do they compare the upper bounds. The + multirange operators compare each range until one is unequal. This + does not usually result in a useful overall ordering, but the operators are + provided to allow unique indexes to be constructed on ranges. + + + + Range Operators + + + + + Operator + + + Description + + + Example(s) + + + + + + + + anyrange @> anyrange + boolean + + + Does the first range contain the second? + + + int4range(2,4) @> int4range(2,3) + t + + + + + + anyrange @> anyelement + boolean + + + Does the range contain the element? + + + '[2011-01-01,2011-03-01)'::tsrange @> '2011-01-10'::timestamp + t + + + + + + anyrange <@ anyrange + boolean + + + Is the first range contained by the second? + + + int4range(2,4) <@ int4range(1,7) + t + + + + + + anyelement <@ anyrange + boolean + + + Is the element contained in the range? + + + 42 <@ int4range(1,7) + f + + + + + + anyrange && anyrange + boolean + + + Do the ranges overlap, that is, have any elements in common? + + + int8range(3,7) && int8range(4,12) + t + + + + + + anyrange << anyrange + boolean + + + Is the first range strictly left of the second? + + + int8range(1,10) << int8range(100,110) + t + + + + + + anyrange >> anyrange + boolean + + + Is the first range strictly right of the second? + + + int8range(50,60) >> int8range(20,30) + t + + + + + + anyrange &< anyrange + boolean + + + Does the first range not extend to the right of the second? + + + int8range(1,20) &< int8range(18,20) + t + + + + + + anyrange &> anyrange + boolean + + + Does the first range not extend to the left of the second? + + + int8range(7,20) &> int8range(5,10) + t + + + + + + anyrange -|- anyrange + boolean + + + Are the ranges adjacent? + + + numrange(1.1,2.2) -|- numrange(2.2,3.3) + t + + + + + + anyrange + anyrange + anyrange + + + Computes the union of the ranges. The ranges must overlap or be + adjacent, so that the union is a single range (but + see range_merge()). + + + numrange(5,15) + numrange(10,20) + [5,20) + + + + + + anyrange * anyrange + anyrange + + + Computes the intersection of the ranges. + + + int8range(5,15) * int8range(10,20) + [10,15) + + + + + + anyrange - anyrange + anyrange + + + Computes the difference of the ranges. The second range must not be + contained in the first in such a way that the difference would not be + a single range. + + + int8range(5,15) - int8range(10,20) + [5,10) + + + + +
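+ 
+ For example, the overlap operator is a natural fit for scheduling
+ queries (reservation is a hypothetical table with a
+ tsrange column during):
+
+-- "reservation" and "during" are illustrative names
+SELECT * FROM reservation
+WHERE during && tsrange('2024-01-01 14:00', '2024-01-01 15:00');
+ 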
+ + + Multirange Operators + + + + + Operator + + + Description + + + Example(s) + + + + + + + + anymultirange @> anymultirange + boolean + + + Does the first multirange contain the second? + + + '{[2,4)}'::int4multirange @> '{[2,3)}'::int4multirange + t + + + + + + anymultirange @> anyrange + boolean + + + Does the multirange contain the range? + + + '{[2,4)}'::int4multirange @> int4range(2,3) + t + + + + + + anymultirange @> anyelement + boolean + + + Does the multirange contain the element? + + + '{[2011-01-01,2011-03-01)}'::tsmultirange @> '2011-01-10'::timestamp + t + + + + + + anyrange @> anymultirange + boolean + + + Does the range contain the multirange? + + + '[2,4)'::int4range @> '{[2,3)}'::int4multirange + t + + + + + + anymultirange <@ anymultirange + boolean + + + Is the first multirange contained by the second? + + + '{[2,4)}'::int4multirange <@ '{[1,7)}'::int4multirange + t + + + + + + anymultirange <@ anyrange + boolean + + + Is the multirange contained by the range? + + + '{[2,4)}'::int4multirange <@ int4range(1,7) + t + + + + + + anyrange <@ anymultirange + boolean + + + Is the range contained by the multirange? + + + int4range(2,4) <@ '{[1,7)}'::int4multirange + t + + + + + + anyelement <@ anymultirange + boolean + + + Is the element contained by the multirange? + + + 4 <@ '{[1,7)}'::int4multirange + t + + + + + + anymultirange && anymultirange + boolean + + + Do the multiranges overlap, that is, have any elements in common? + + + '{[3,7)}'::int8multirange && '{[4,12)}'::int8multirange + t + + + + + + anymultirange && anyrange + boolean + + + Does the multirange overlap the range? + + + '{[3,7)}'::int8multirange && int8range(4,12) + t + + + + + + anyrange && anymultirange + boolean + + + Does the range overlap the multirange? + + + int8range(3,7) && '{[4,12)}'::int8multirange + t + + + + + + anymultirange << anymultirange + boolean + + + Is the first multirange strictly left of the second? + + + '{[1,10)}'::int8multirange << '{[100,110)}'::int8multirange + t + + + + + + anymultirange << anyrange + boolean + + + Is the multirange strictly left of the range? + + + '{[1,10)}'::int8multirange << int8range(100,110) + t + + + + + + anyrange << anymultirange + boolean + + + Is the range strictly left of the multirange? + + + int8range(1,10) << '{[100,110)}'::int8multirange + t + + + + + + anymultirange >> anymultirange + boolean + + + Is the first multirange strictly right of the second? + + + '{[50,60)}'::int8multirange >> '{[20,30)}'::int8multirange + t + + + + + + anymultirange >> anyrange + boolean + + + Is the multirange strictly right of the range? + + + '{[50,60)}'::int8multirange >> int8range(20,30) + t + + + + + + anyrange >> anymultirange + boolean + + + Is the range strictly right of the multirange? + + + int8range(50,60) >> '{[20,30)}'::int8multirange + t + + + + + + anymultirange &< anymultirange + boolean + + + Does the first multirange not extend to the right of the second? + + + '{[1,20)}'::int8multirange &< '{[18,20)}'::int8multirange + t + + + + + + anymultirange &< anyrange + boolean + + + Does the multirange not extend to the right of the range? + + + '{[1,20)}'::int8multirange &< int8range(18,20) + t + + + + + + anyrange &< anymultirange + boolean + + + Does the range not extend to the right of the multirange? + + + int8range(1,20) &< '{[18,20)}'::int8multirange + t + + + + + + anymultirange &> anymultirange + boolean + + + Does the first multirange not extend to the left of the second? 
+ + + '{[7,20)}'::int8multirange &> '{[5,10)}'::int8multirange + t + + + + + + anymultirange &> anyrange + boolean + + + Does the multirange not extend to the left of the range? + + + '{[7,20)}'::int8multirange &> int8range(5,10) + t + + + + + + anyrange &> anymultirange + boolean + + + Does the range not extend to the left of the multirange? + + + int8range(7,20) &> '{[5,10)}'::int8multirange + t + + + + + + anymultirange -|- anymultirange + boolean + + + Are the multiranges adjacent? + + + '{[1.1,2.2)}'::nummultirange -|- '{[2.2,3.3)}'::nummultirange + t + + + + + + anymultirange -|- anyrange + boolean + + + Is the multirange adjacent to the range? + + + '{[1.1,2.2)}'::nummultirange -|- numrange(2.2,3.3) + t + + + + + + anyrange -|- anymultirange + boolean + + + Is the range adjacent to the multirange? + + + numrange(1.1,2.2) -|- '{[2.2,3.3)}'::nummultirange + t + + + + + + anymultirange + anymultirange + anymultirange + + + Computes the union of the multiranges. The multiranges need not overlap + or be adjacent. + + + '{[5,10)}'::nummultirange + '{[15,20)}'::nummultirange + {[5,10), [15,20)} + + + + + + anymultirange * anymultirange + anymultirange + + + Computes the intersection of the multiranges. + + + '{[5,15)}'::int8multirange * '{[10,20)}'::int8multirange + {[10,15)} + + + + + + anymultirange - anymultirange + anymultirange + + + Computes the difference of the multiranges. + + + '{[5,20)}'::int8multirange - '{[10,15)}'::int8multirange + {[5,10), [15,20)} + + + + +
+ + + The left-of/right-of/adjacent operators always return false when an empty + range or multirange is involved; that is, an empty range is not considered to + be either before or after any other range. + + + + Elsewhere empty ranges and multiranges are treated as the additive identity: + anything unioned with an empty value is itself. Anything minus an empty + value is itself. An empty multirange has exactly the same points as an empty + range. Every range contains the empty range. Every multirange contains as many + empty ranges as you like. + + + + The range union and difference operators will fail if the resulting range would + need to contain two disjoint sub-ranges, as such a range cannot be + represented. There are separate operators for union and difference that take + multirange parameters and return a multirange, and they do not fail even if + their arguments are disjoint. So if you need a union or difference operation + for ranges that may be disjoint, you can avoid errors by first casting your + ranges to multiranges. + + + + shows the functions + available for use with range types. + shows the functions + available for use with multirange types. + + + + Range Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + lower + + lower ( anyrange ) + anyelement + + + Extracts the lower bound of the range (NULL if the + range is empty or has no lower bound). + + + lower(numrange(1.1,2.2)) + 1.1 + + + + + + + upper + + upper ( anyrange ) + anyelement + + + Extracts the upper bound of the range (NULL if the + range is empty or has no upper bound). + + + upper(numrange(1.1,2.2)) + 2.2 + + + + + + + isempty + + isempty ( anyrange ) + boolean + + + Is the range empty? + + + isempty(numrange(1.1,2.2)) + f + + + + + + + lower_inc + + lower_inc ( anyrange ) + boolean + + + Is the range's lower bound inclusive? + + + lower_inc(numrange(1.1,2.2)) + t + + + + + + + upper_inc + + upper_inc ( anyrange ) + boolean + + + Is the range's upper bound inclusive? + + + upper_inc(numrange(1.1,2.2)) + f + + + + + + + lower_inf + + lower_inf ( anyrange ) + boolean + + + Does the range have no lower bound? (A lower bound of + -Infinity returns false.) + + + lower_inf('(,)'::daterange) + t + + + + + + + upper_inf + + upper_inf ( anyrange ) + boolean + + + Does the range have no upper bound? (An upper bound of + Infinity returns false.) + + + upper_inf('(,)'::daterange) + t + + + + + + + range_merge + + range_merge ( anyrange, anyrange ) + anyrange + + + Computes the smallest range that includes both of the given ranges. + + + range_merge('[1,2)'::int4range, '[3,4)'::int4range) + [1,4) + + + + +
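+ 
+ For example, inspecting the bounds of a range value (the default
+ bound convention for numrange is [)):
+
+SELECT lower(numrange(1.1, 2.2)), upper(numrange(1.1, 2.2));
+Result: 1.1, 2.2
+SELECT lower_inc(numrange(1.1, 2.2)), upper_inc(numrange(1.1, 2.2));
+Result: t, f
+ 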
+ + + Multirange Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + lower + + lower ( anymultirange ) + anyelement + + + Extracts the lower bound of the multirange (NULL if the + multirange is empty or has no lower bound). + + + lower('{[1.1,2.2)}'::nummultirange) + 1.1 + + + + + + + upper + + upper ( anymultirange ) + anyelement + + + Extracts the upper bound of the multirange (NULL if the + multirange is empty or has no upper bound). + + + upper('{[1.1,2.2)}'::nummultirange) + 2.2 + + + + + + + isempty + + isempty ( anymultirange ) + boolean + + + Is the multirange empty? + + + isempty('{[1.1,2.2)}'::nummultirange) + f + + + + + + + lower_inc + + lower_inc ( anymultirange ) + boolean + + + Is the multirange's lower bound inclusive? + + + lower_inc('{[1.1,2.2)}'::nummultirange) + t + + + + + + + upper_inc + + upper_inc ( anymultirange ) + boolean + + + Is the multirange's upper bound inclusive? + + + upper_inc('{[1.1,2.2)}'::nummultirange) + f + + + + + + + lower_inf + + lower_inf ( anymultirange ) + boolean + + + Does the multirange have no lower bound? (A lower bound of + -Infinity returns false.) + + + lower_inf('{(,)}'::datemultirange) + t + + + + + + + upper_inf + + upper_inf ( anymultirange ) + boolean + + + Does the multirange have no upper bound? (An upper bound of + Infinity returns false.) + + + upper_inf('{(,)}'::datemultirange) + t + + + + + + + range_merge + + range_merge ( anymultirange ) + anyrange + + + Computes the smallest range that includes the entire multirange. + + + range_merge('{[1,2), [3,4)}'::int4multirange) + [1,4) + + + + + + + multirange (function) + + multirange ( anyrange ) + anymultirange + + + Returns a multirange containing just the given range. + + + multirange('[1,2)'::int4range) + {[1,2)} + + + + + + + unnest + for multirange + + unnest ( anymultirange ) + setof anyrange + + + Expands a multirange into a set of ranges in ascending order. + + + unnest('{[1,2), [3,4)}'::int4multirange) + + + [1,2) + [3,4) + + + + + +
+ + + The lower_inc, upper_inc, + lower_inf, and upper_inf + functions all return false for an empty range or multirange. + +
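To make the earlier note about disjoint results concrete, here is a sketch contrasting the range and multirange union operators (the exact error wording may vary across versions):

SELECT int4range(1,3) + int4range(5,7);
ERROR:  result of range union would not be contiguous

SELECT int4range(1,3)::int4multirange + int4range(5,7)::int4multirange;
    ?column?
----------------
 {[1,3), [5,7)}
(1 row)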
diff --git a/doc/src/sgml/func/func-sequence.sgml b/doc/src/sgml/func/func-sequence.sgml new file mode 100644 index 0000000000000..e9f5b4e8e6b27 --- /dev/null +++ b/doc/src/sgml/func/func-sequence.sgml @@ -0,0 +1,195 @@ + + Sequence Manipulation Functions + + + sequence + + + + This section describes functions for operating on sequence + objects, also called sequence generators or just sequences. + Sequence objects are special single-row tables created with . + Sequence objects are commonly used to generate unique identifiers + for rows of a table. The sequence functions, listed in , provide simple, multiuser-safe + methods for obtaining successive sequence values from sequence + objects. + + + + Sequence Functions + + + + + Function + + + Description + + + + + + + + + nextval + + nextval ( regclass ) + bigint + + + Advances the sequence object to its next value and returns that value. + This is done atomically: even if multiple sessions + execute nextval concurrently, each will safely + receive a distinct sequence value. + If the sequence object has been created with default parameters, + successive nextval calls will return successive + values beginning with 1. Other behaviors can be obtained by using + appropriate parameters in the + command. + + + This function requires USAGE + or UPDATE privilege on the sequence. + + + + + + + setval + + setval ( regclass, bigint , boolean ) + bigint + + + Sets the sequence object's current value, and optionally + its is_called flag. The two-parameter + form sets the sequence's last_value field to the + specified value and sets its is_called field to + true, meaning that the next + nextval will advance the sequence before + returning a value. The value that will be reported + by currval is also set to the specified value. + In the three-parameter form, is_called can be set + to either true + or false. true has the same + effect as the two-parameter form. If it is set + to false, the next nextval + will return exactly the specified value, and sequence advancement + commences with the following nextval. + Furthermore, the value reported by currval is not + changed in this case. For example, + +SELECT setval('myseq', 42); Next nextval will return 43 +SELECT setval('myseq', 42, true); Same as above +SELECT setval('myseq', 42, false); Next nextval will return 42 + + The result returned by setval is just the value of its + second argument. + + + This function requires UPDATE privilege on the + sequence. + + + + + + + currval + + currval ( regclass ) + bigint + + + Returns the value most recently obtained + by nextval for this sequence in the current + session. (An error is reported if nextval has + never been called for this sequence in this session.) Because this is + returning a session-local value, it gives a predictable answer whether + or not other sessions have executed nextval since + the current session did. + + + This function requires USAGE + or SELECT privilege on the sequence. + + + + + + + lastval + + lastval () + bigint + + + Returns the value most recently returned by + nextval in the current session. This function is + identical to currval, except that instead + of taking the sequence name as an argument it refers to whichever + sequence nextval was most recently applied to + in the current session. It is an error to call + lastval if nextval + has not yet been called in the current session. + + + This function requires USAGE + or SELECT privilege on the last used sequence. + + + + +
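A brief end-to-end sketch of how these functions interact, assuming a freshly created sequence with the hypothetical name myseq:

CREATE SEQUENCE myseq;

SELECT nextval('myseq');            -- returns 1
SELECT nextval('myseq');            -- returns 2
SELECT currval('myseq');            -- returns 2 (session-local)
SELECT setval('myseq', 10, false);  -- is_called = false
SELECT nextval('myseq');            -- returns exactly 10
SELECT nextval('myseq');            -- returns 11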
+ + + + To avoid blocking concurrent transactions that obtain numbers from + the same sequence, the value obtained by nextval + is not reclaimed for re-use if the calling transaction later aborts. + This means that transaction aborts or database crashes can result in + gaps in the sequence of assigned values. That can happen without a + transaction abort, too. For example an INSERT with + an ON CONFLICT clause will compute the to-be-inserted + tuple, including doing any required nextval + calls, before detecting any conflict that would cause it to follow + the ON CONFLICT rule instead. + Thus, PostgreSQL sequence + objects cannot be used to obtain gapless + sequences. + + + + Likewise, sequence state changes made by setval + are immediately visible to other transactions, and are not undone if + the calling transaction rolls back. + + + + If the database cluster crashes before committing a transaction + containing a nextval + or setval call, the sequence state change might + not have made its way to persistent storage, so that it is uncertain + whether the sequence will have its original or updated state after the + cluster restarts. This is harmless for usage of the sequence within + the database, since other effects of uncommitted transactions will not + be visible either. However, if you wish to use a sequence value for + persistent outside-the-database purposes, make sure that the + nextval call has been committed before doing so. + + + + + The sequence to be operated on by a sequence function is specified by + a regclass argument, which is simply the OID of the sequence in the + pg_class system catalog. You do not have to look up the + OID by hand, however, since the regclass data type's input + converter will do the work for you. See + for details. + +
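The gap behavior described above is easy to demonstrate; a sketch, continuing with the hypothetical myseq sequence:

BEGIN;
SELECT nextval('myseq');   -- suppose this returns 12
ROLLBACK;
SELECT nextval('myseq');   -- returns 13; the value 12 is never reissued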
diff --git a/doc/src/sgml/func/func-srf.sgml b/doc/src/sgml/func/func-srf.sgml new file mode 100644 index 0000000000000..eafc961c9f909 --- /dev/null +++ b/doc/src/sgml/func/func-srf.sgml @@ -0,0 +1,306 @@ + + Set Returning Functions + + + set returning functions + functions + + + + This section describes functions that possibly return more than one row. + The most widely used functions in this class are series generating + functions, as detailed in and + . Other, more specialized + set-returning functions are described elsewhere in this manual. + See for ways to combine multiple + set-returning functions. + + + + Series Generating Functions + + + + + Function + + + Description + + + + + + + + + generate_series + + generate_series ( start integer, stop integer , step integer ) + setof integer + + + generate_series ( start bigint, stop bigint , step bigint ) + setof bigint + + + generate_series ( start numeric, stop numeric , step numeric ) + setof numeric + + + Generates a series of values from start + to stop, with a step size + of step. step + defaults to 1. + + + + + + generate_series ( start timestamp, stop timestamp, step interval ) + setof timestamp + + + generate_series ( start timestamp with time zone, stop timestamp with time zone, step interval , timezone text ) + setof timestamp with time zone + + + Generates a series of values from start + to stop, with a step size + of step. + In the timezone-aware form, times of day and daylight-savings + adjustments are computed according to the time zone named by + the timezone argument, or the current + setting if that is omitted. + + + + +
+ + + When step is positive, zero rows are returned if + start is greater than stop. + Conversely, when step is negative, zero rows are + returned if start is less than stop. + Zero rows are also returned if any input is NULL. + It is an error + for step to be zero. Some examples follow: + +SELECT * FROM generate_series(2,4); + generate_series +----------------- + 2 + 3 + 4 +(3 rows) + +SELECT * FROM generate_series(5,1,-2); + generate_series +----------------- + 5 + 3 + 1 +(3 rows) + +SELECT * FROM generate_series(4,3); + generate_series +----------------- +(0 rows) + +SELECT generate_series(1.1, 4, 1.3); + generate_series +----------------- + 1.1 + 2.4 + 3.7 +(3 rows) + +-- this example relies on the date-plus-integer operator: +SELECT current_date + s.a AS dates FROM generate_series(0,14,7) AS s(a); + dates +------------ + 2004-02-05 + 2004-02-12 + 2004-02-19 +(3 rows) + +SELECT * FROM generate_series('2008-03-01 00:00'::timestamp, + '2008-03-04 12:00', '10 hours'); + generate_series +--------------------- + 2008-03-01 00:00:00 + 2008-03-01 10:00:00 + 2008-03-01 20:00:00 + 2008-03-02 06:00:00 + 2008-03-02 16:00:00 + 2008-03-03 02:00:00 + 2008-03-03 12:00:00 + 2008-03-03 22:00:00 + 2008-03-04 08:00:00 +(9 rows) + +-- this example assumes that TimeZone is set to UTC; note the DST transition: +SELECT * FROM generate_series('2001-10-22 00:00 -04:00'::timestamptz, + '2001-11-01 00:00 -05:00'::timestamptz, + '1 day'::interval, 'America/New_York'); + generate_series +------------------------ + 2001-10-22 04:00:00+00 + 2001-10-23 04:00:00+00 + 2001-10-24 04:00:00+00 + 2001-10-25 04:00:00+00 + 2001-10-26 04:00:00+00 + 2001-10-27 04:00:00+00 + 2001-10-28 04:00:00+00 + 2001-10-29 05:00:00+00 + 2001-10-30 05:00:00+00 + 2001-10-31 05:00:00+00 + 2001-11-01 05:00:00+00 +(11 rows) + + + + + Subscript Generating Functions + + + + + Function + + + Description + + + + + + + + + generate_subscripts + + generate_subscripts ( array anyarray, dim integer ) + setof integer + + + Generates a series comprising the valid subscripts of + the dim'th dimension of the given array. + + + + + + generate_subscripts ( array anyarray, dim integer, reverse boolean ) + setof integer + + + Generates a series comprising the valid subscripts of + the dim'th dimension of the given array. + When reverse is true, returns the series in + reverse order. + + + + +
+ + + generate_subscripts is a convenience function that generates + the set of valid subscripts for the specified dimension of the given + array. + Zero rows are returned for arrays that do not have the requested dimension, + or if any input is NULL. + Some examples follow: + +-- basic usage: +SELECT generate_subscripts('{NULL,1,NULL,2}'::int[], 1) AS s; + s +--- + 1 + 2 + 3 + 4 +(4 rows) + +-- presenting an array, the subscript and the subscripted +-- value requires a subquery: +SELECT * FROM arrays; + a +-------------------- + {-1,-2} + {100,200,300} +(2 rows) + +SELECT a AS array, s AS subscript, a[s] AS value +FROM (SELECT generate_subscripts(a, 1) AS s, a FROM arrays) foo; + array | subscript | value +---------------+-----------+------- + {-1,-2} | 1 | -1 + {-1,-2} | 2 | -2 + {100,200,300} | 1 | 100 + {100,200,300} | 2 | 200 + {100,200,300} | 3 | 300 +(5 rows) + +-- unnest a 2D array: +CREATE OR REPLACE FUNCTION unnest2(anyarray) +RETURNS SETOF anyelement AS $$ +select $1[i][j] + from generate_subscripts($1,1) g1(i), + generate_subscripts($1,2) g2(j); +$$ LANGUAGE sql IMMUTABLE; +CREATE FUNCTION +SELECT * FROM unnest2(ARRAY[[1,2],[3,4]]); + unnest2 +--------- + 1 + 2 + 3 + 4 +(4 rows) + + + + + ordinality + + + + When a function in the FROM clause is suffixed + by WITH ORDINALITY, a bigint column is + appended to the function's output column(s), which starts from 1 and + increments by 1 for each row of the function's output. + This is most useful in the case of set returning + functions such as unnest(). + + +-- set returning function WITH ORDINALITY: +SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n); + ls | n +-----------------+---- + pg_serial | 1 + pg_twophase | 2 + postmaster.opts | 3 + pg_notify | 4 + postgresql.conf | 5 + pg_tblspc | 6 + logfile | 7 + base | 8 + postmaster.pid | 9 + pg_ident.conf | 10 + global | 11 + pg_xact | 12 + pg_snapshots | 13 + pg_multixact | 14 + PG_VERSION | 15 + pg_wal | 16 + pg_hba.conf | 17 + pg_stat_tmp | 18 + pg_subtrans | 19 +(19 rows) + + + +
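Because the text above singles out unnest() as the typical companion of WITH ORDINALITY, a sketch with a plain array literal may be easier to reproduce than the directory listing:

SELECT * FROM unnest(ARRAY['a','b','c']) WITH ORDINALITY AS t(elem, ord);
 elem | ord
------+-----
 a    |   1
 b    |   2
 c    |   3
(3 rows)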
diff --git a/doc/src/sgml/func/func-statistics.sgml b/doc/src/sgml/func/func-statistics.sgml new file mode 100644 index 0000000000000..22dee263cc2a0 --- /dev/null +++ b/doc/src/sgml/func/func-statistics.sgml @@ -0,0 +1,85 @@ + + Statistics Information Functions + + + function + statistics + + + + PostgreSQL provides a function to inspect complex + statistics defined using the CREATE STATISTICS command. + + + + Inspecting MCV Lists + + + pg_mcv_list_items + + + +pg_mcv_list_items ( pg_mcv_list ) setof record + + + + pg_mcv_list_items returns a set of records describing + all items stored in a multi-column MCV list. It + returns the following columns: + + + + + + Name + Type + Description + + + + + + index + integer + index of the item in the MCV list + + + values + text[] + values stored in the MCV item + + + nulls + boolean[] + flags identifying NULL values + + + frequency + double precision + frequency of this MCV item + + + base_frequency + double precision + base frequency of this MCV item + + + + + + + + The pg_mcv_list_items function can be used like this: + + +SELECT m.* FROM pg_statistic_ext join pg_statistic_ext_data on (oid = stxoid), + pg_mcv_list_items(stxdmcv) m WHERE stxname = 'stts'; + + + Values of the pg_mcv_list type can be obtained only from the + pg_statistic_ext_data.stxdmcv + column. + + + + diff --git a/doc/src/sgml/func/func-string.sgml b/doc/src/sgml/func/func-string.sgml new file mode 100644 index 0000000000000..01cc94c234e62 --- /dev/null +++ b/doc/src/sgml/func/func-string.sgml @@ -0,0 +1,1827 @@ + + String Functions and Operators + + + This section describes functions and operators for examining and + manipulating string values. Strings in this context include values + of the types character, character varying, + and text. Except where noted, these functions and operators + are declared to accept and return type text. They will + interchangeably accept character varying arguments. + Values of type character will be converted + to text before the function or operator is applied, resulting + in stripping any trailing spaces in the character value. + + + + SQL defines some string functions that use + key words, rather than commas, to separate + arguments. Details are in + . + PostgreSQL also provides versions of these functions + that use the regular function invocation syntax + (see ). + + + + + The string concatenation operator (||) will accept + non-string input, so long as at least one input is of string type, as shown + in . For other cases, inserting an + explicit coercion to text can be used to have non-string input + accepted. + + + + + <acronym>SQL</acronym> String Functions and Operators + + + + + Function/Operator + + + Description + + + Example(s) + + + + + + + + + character string + concatenation + + text || text + text + + + Concatenates the two strings. + + + 'Post' || 'greSQL' + PostgreSQL + + + + + + text || anynonarray + text + + + anynonarray || text + text + + + Converts the non-string input to text, then concatenates the two + strings. (The non-string input cannot be of an array type, because + that would create ambiguity with the array || + operators. If you want to concatenate an array's text equivalent, + cast it to text explicitly.) + + + 'Value: ' || 42 + Value: 42 + + + + + + + btrim + + btrim ( string text + , characters text ) + text + + + Removes the longest string containing only characters + in characters (a space by default) + from the start and end of string. 
+ + + btrim('xyxtrimyyx', 'xyz') + trim + + + + + + + normalized + + + Unicode normalization + + text IS NOT form NORMALIZED + boolean + + + Checks whether the string is in the specified Unicode normalization + form. The optional form key word specifies the + form: NFC (the default), NFD, + NFKC, or NFKD. This expression can + only be used when the server encoding is UTF8. Note + that checking for normalization using this expression is often faster + than normalizing possibly already normalized strings. + + + U&'\0061\0308bc' IS NFD NORMALIZED + t + + + + + + + bit_length + + bit_length ( text ) + integer + + + Returns number of bits in the string (8 + times the octet_length). + + + bit_length('jose') + 32 + + + + + + + char_length + + + character string + length + + + length + of a character string + character string, length + + char_length ( text ) + integer + + + + character_length + + character_length ( text ) + integer + + + Returns number of characters in the string. + + + char_length('josé') + 4 + + + + + + + lower + + lower ( text ) + text + + + Converts the string to all lower case, according to the rules of the + database's locale. + + + lower('TOM') + tom + + + + + + + lpad + + lpad ( string text, + length integer + , fill text ) + text + + + Extends the string to length + length by prepending the characters + fill (a space by default). If the + string is already longer than + length then it is truncated (on the right). + + + lpad('hi', 5, 'xy') + xyxhi + + + + + + + ltrim + + ltrim ( string text + , characters text ) + text + + + Removes the longest string containing only characters in + characters (a space by default) from the start of + string. + + + ltrim('zzzytest', 'xyz') + test + + + + + + + normalize + + + Unicode normalization + + normalize ( text + , form ) + text + + + Converts the string to the specified Unicode + normalization form. The optional form key word + specifies the form: NFC (the default), + NFD, NFKC, or + NFKD. This function can only be used when the + server encoding is UTF8. + + + normalize(U&'\0061\0308bc', NFC) + U&'\00E4bc' + + + + + + + octet_length + + octet_length ( text ) + integer + + + Returns number of bytes in the string. + + + octet_length('josé') + 5 (if server encoding is UTF8) + + + + + + + octet_length + + octet_length ( character ) + integer + + + Returns number of bytes in the string. Since this version of the + function accepts type character directly, it will not + strip trailing spaces. + + + octet_length('abc '::character(4)) + 4 + + + + + + + overlay + + overlay ( string text PLACING newsubstring text FROM start integer FOR count integer ) + text + + + Replaces the substring of string that starts at + the start'th character and extends + for count characters + with newsubstring. + If count is omitted, it defaults to the length + of newsubstring. + + + overlay('Txxxxas' placing 'hom' from 2 for 4) + Thomas + + + + + + + position + + position ( substring text IN string text ) + integer + + + Returns first starting index of the specified + substring within + string, or zero if it's not present. + + + position('om' in 'Thomas') + 3 + + + + + + + rpad + + rpad ( string text, + length integer + , fill text ) + text + + + Extends the string to length + length by appending the characters + fill (a space by default). If the + string is already longer than + length then it is truncated. 
+ + + rpad('hi', 5, 'xy') + hixyx + + + + + + + rtrim + + rtrim ( string text + , characters text ) + text + + + Removes the longest string containing only characters in + characters (a space by default) from the end of + string. + + + rtrim('testxxzx', 'xyz') + test + + + + + + + substring + + substring ( string text FROM start integer FOR count integer ) + text + + + Extracts the substring of string starting at + the start'th character if that is specified, + and stopping after count characters if that is + specified. Provide at least one of start + and count. + + + substring('Thomas' from 2 for 3) + hom + + + substring('Thomas' from 3) + omas + + + substring('Thomas' for 2) + Th + + + + + + substring ( string text FROM pattern text ) + text + + + Extracts the first substring matching POSIX regular expression; see + . + + + substring('Thomas' from '...$') + mas + + + + + + substring ( string text SIMILAR pattern text ESCAPE escape text ) + text + + + substring ( string text FROM pattern text FOR escape text ) + text + + + Extracts the first substring matching SQL regular expression; + see . The first form has + been specified since SQL:2003; the second form was only in SQL:1999 + and should be considered obsolete. + + + substring('Thomas' similar '%#"o_a#"_' escape '#') + oma + + + + + + + trim + + trim ( LEADING | TRAILING | BOTH + characters text FROM + string text ) + text + + + Removes the longest string containing only characters in + characters (a space by default) from the + start, end, or both ends (BOTH is the default) + of string. + + + trim(both 'xyz' from 'yxTomxx') + Tom + + + + + + trim ( LEADING | TRAILING | BOTH FROM + string text , + characters text ) + text + + + This is a non-standard syntax for trim(). + + + trim(both from 'yxTomxx', 'xyz') + Tom + + + + + + + unicode_assigned + + unicode_assigned ( text ) + boolean + + + Returns true if all characters in the string are + assigned Unicode codepoints; false otherwise. This + function can only be used when the server encoding is + UTF8. + + + + + + + upper + + upper ( text ) + text + + + Converts the string to all upper case, according to the rules of the + database's locale. + + + upper('tom') + TOM + + + + +
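As mentioned in this section's introduction, each SQL key-word form above has a regular-invocation counterpart. A short sketch of the correspondence (results shown as comments):

SELECT trim(both 'x' from 'xxdataxx');    -- data  (key word syntax)
SELECT btrim('xxdataxx', 'x');            -- data  (regular syntax)
SELECT substring('Thomas' from 2 for 3);  -- hom
SELECT substr('Thomas', 2, 3);            -- hom   (same result)
SELECT position('om' in 'Thomas');        -- 3
SELECT strpos('Thomas', 'om');            -- 3     (note reversed arguments)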
+ + + Additional string manipulation functions and operators are available + and are listed in . (Some of + these are used internally to implement + the SQL-standard string functions listed in + .) + There are also pattern-matching operators, which are described in + , and operators for full-text + search, which are described in . + + + + Other String Functions and Operators + + + + + Function/Operator + + + Description + + + Example(s) + + + + + + + + + character string + prefix test + + text ^@ text + boolean + + + Returns true if the first string starts with the second string + (equivalent to the starts_with() function). + + + 'alphabet' ^@ 'alph' + t + + + + + + + ascii + + ascii ( text ) + integer + + + Returns the numeric code of the first character of the argument. + In UTF8 encoding, returns the Unicode code point + of the character. In other multibyte encodings, the argument must + be an ASCII character. + + + ascii('x') + 120 + + + + + + + chr + + chr ( integer ) + text + + + Returns the character with the given code. In UTF8 + encoding the argument is treated as a Unicode code point. In other + multibyte encodings the argument must designate + an ASCII character. chr(0) is + disallowed because text data types cannot store that character. + + + chr(65) + A + + + + + + + concat + + concat ( val1 "any" + , val2 "any" , ... ) + text + + + Concatenates the text representations of all the arguments. + NULL arguments are ignored. + + + concat('abcde', 2, NULL, 22) + abcde222 + + + + + + + concat_ws + + concat_ws ( sep text, + val1 "any" + , val2 "any" , ... ) + text + + + Concatenates all but the first argument, with separators. The first + argument is used as the separator string, and should not be NULL. + Other NULL arguments are ignored. + + + concat_ws(',', 'abcde', 2, NULL, 22) + abcde,2,22 + + + + + + + format + + format ( formatstr text + , formatarg "any" , ... ) + text + + + Formats arguments according to a format string; + see . + This function is similar to the C function sprintf. + + + format('Hello %s, %1$s', 'World') + Hello World, World + + + + + + + initcap + + initcap ( text ) + text + + + Converts the first letter of each word to upper case (or title case + if the letter is a digraph and locale is ICU or + builtin PG_UNICODE_FAST) + and the rest to lower case. When using the libc or + builtin locale provider, words are sequences of + alphanumeric characters separated by non-alphanumeric characters; + when using the ICU locale provider, words are separated according to + u_strToTitle ICU function. + + + This function is primarily used for convenient + display, and the specific result should not be relied upon because of + the differences between locale providers and between different + ICU versions. If specific word boundary rules are desired, + it is recommended to write a custom function. + + + initcap('hi THOMAS') + Hi Thomas + + + + + + + casefold + + casefold ( text ) + text + + + Performs case folding of the input string according to the collation. + Case folding is similar to case conversion, but the purpose of case + folding is to facilitate case-insensitive matching of strings, + whereas the purpose of case conversion is to convert to a particular + cased form. This function can only be used when the server encoding + is UTF8. + + + Ordinarily, case folding simply converts to lowercase, but there may + be exceptions depending on the collation. For instance, some + characters have more than two lowercase variants, or fold to uppercase. 
+ + + Case folding may change the length of the string. For instance, in + the PG_UNICODE_FAST collation, ß + (U+00DF) folds to ss. + + + casefold can be used for Unicode Default Caseless + Matching. It does not always preserve the normalized form of the + input string (see ). + + + The libc provider doesn't support case folding, so + casefold is identical to . + + + + + + + left + + left ( string text, + n integer ) + text + + + Returns first n characters in the + string, or when n is negative, returns + all but last |n| characters. + + + left('abcde', 2) + ab + + + + + + + length + + length ( text ) + integer + + + Returns the number of characters in the string. + + + length('jose') + 4 + + + + + + + md5 + + md5 ( text ) + text + + + Computes the MD5 hash of + the argument, with the result written in hexadecimal. + + + md5('abc') + 900150983cd24fb0&zwsp;d6963f7d28e17f72 + + + + + + + parse_ident + + parse_ident ( qualified_identifier text + , strict_mode boolean DEFAULT true ) + text[] + + + Splits qualified_identifier into an array of + identifiers, removing any quoting of individual identifiers. By + default, extra characters after the last identifier are considered an + error; but if the second parameter is false, then such + extra characters are ignored. (This behavior is useful for parsing + names for objects like functions.) Note that this function does not + truncate over-length identifiers. If you want truncation you can cast + the result to name[]. + + + parse_ident('"SomeSchema".someTable') + {SomeSchema,sometable} + + + + + + + pg_client_encoding + + pg_client_encoding ( ) + name + + + Returns current client encoding name. + + + pg_client_encoding() + UTF8 + + + + + + + quote_ident + + quote_ident ( text ) + text + + + Returns the given string suitably quoted to be used as an identifier + in an SQL statement string. + Quotes are added only if necessary (i.e., if the string contains + non-identifier characters or would be case-folded). + Embedded quotes are properly doubled. + See also . + + + quote_ident('Foo bar') + "Foo bar" + + + + + + + quote_literal + + quote_literal ( text ) + text + + + Returns the given string suitably quoted to be used as a string literal + in an SQL statement string. + Embedded single-quotes and backslashes are properly doubled. + Note that quote_literal returns null on null + input; if the argument might be null, + quote_nullable is often more suitable. + See also . + + + quote_literal(E'O\'Reilly') + 'O''Reilly' + + + + + + quote_literal ( anyelement ) + text + + + Converts the given value to text and then quotes it as a literal. + Embedded single-quotes and backslashes are properly doubled. + + + quote_literal(42.5) + '42.5' + + + + + + + quote_nullable + + quote_nullable ( text ) + text + + + Returns the given string suitably quoted to be used as a string literal + in an SQL statement string; or, if the argument + is null, returns NULL. + Embedded single-quotes and backslashes are properly doubled. + See also . + + + quote_nullable(NULL) + NULL + + + + + + quote_nullable ( anyelement ) + text + + + Converts the given value to text and then quotes it as a literal; + or, if the argument is null, returns NULL. + Embedded single-quotes and backslashes are properly doubled. + + + quote_nullable(42.5) + '42.5' + + + + + + + regexp_count + + regexp_count ( string text, pattern text + , start integer + , flags text ) + integer + + + Returns the number of times the POSIX regular + expression pattern matches in + the string; see + . 
+ + + regexp_count('123456789012', '\d\d\d', 2) + 3 + + + + + + + regexp_instr + + regexp_instr ( string text, pattern text + , start integer + , N integer + , endoption integer + , flags text + , subexpr integer ) + integer + + + Returns the position within string where + the N'th match of the POSIX regular + expression pattern occurs, or zero if there is + no such match; see . + + + regexp_instr('ABCDEF', 'c(.)(..)', 1, 1, 0, 'i') + 3 + + + regexp_instr('ABCDEF', 'c(.)(..)', 1, 1, 0, 'i', 2) + 5 + + + + + + + regexp_like + + regexp_like ( string text, pattern text + , flags text ) + boolean + + + Checks whether a match of the POSIX regular + expression pattern occurs + within string; see + . + + + regexp_like('Hello World', 'world$', 'i') + t + + + + + + + regexp_match + + regexp_match ( string text, pattern text , flags text ) + text[] + + + Returns substrings within the first match of the POSIX regular + expression pattern to + the string; see + . + + + regexp_match('foobarbequebaz', '(bar)(beque)') + {bar,beque} + + + + + + + regexp_matches + + regexp_matches ( string text, pattern text , flags text ) + setof text[] + + + Returns substrings within the first match of the POSIX regular + expression pattern to + the string, or substrings within all + such matches if the g flag is used; + see . + + + regexp_matches('foobarbequebaz', 'ba.', 'g') + + + {bar} + {baz} + + + + + + + + regexp_replace + + regexp_replace ( string text, pattern text, replacement text + , flags text ) + text + + + Replaces the substring that is the first match to the POSIX + regular expression pattern, or all such + matches if the g flag is used; see + . + + + regexp_replace('Thomas', '.[mN]a.', 'M') + ThM + + + + + + regexp_replace ( string text, pattern text, replacement text, + start integer + , N integer + , flags text ) + text + + + Replaces the substring that is the N'th + match to the POSIX regular expression pattern, + or all such matches if N is zero, with the + search beginning at the start'th character + of string. If N is + omitted, it defaults to 1. See + . + + + regexp_replace('Thomas', '.', 'X', 3, 2) + ThoXas + + + regexp_replace(string=>'hello world', pattern=>'l', replacement=>'XX', start=>1, "N"=>2) + helXXo world + + + + + + + regexp_split_to_array + + regexp_split_to_array ( string text, pattern text , flags text ) + text[] + + + Splits string using a POSIX regular + expression as the delimiter, producing an array of results; see + . + + + regexp_split_to_array('hello world', '\s+') + {hello,world} + + + + + + + regexp_split_to_table + + regexp_split_to_table ( string text, pattern text , flags text ) + setof text + + + Splits string using a POSIX regular + expression as the delimiter, producing a set of results; see + . + + + regexp_split_to_table('hello world', '\s+') + + + hello + world + + + + + + + + regexp_substr + + regexp_substr ( string text, pattern text + , start integer + , N integer + , flags text + , subexpr integer ) + text + + + Returns the substring within string that + matches the N'th occurrence of the POSIX + regular expression pattern, + or NULL if there is no such match; see + . + + + regexp_substr('ABCDEF', 'c(.)(..)', 1, 1, 'i') + CDEF + + + regexp_substr('ABCDEF', 'c(.)(..)', 1, 1, 'i', 2) + EF + + + + + + + repeat + + repeat ( string text, number integer ) + text + + + Repeats string the specified + number of times. 
+ + + repeat('Pg', 4) + PgPgPgPg + + + + + + + replace + + replace ( string text, + from text, + to text ) + text + + + Replaces all occurrences in string of + substring from with + substring to. + + + replace('abcdefabcdef', 'cd', 'XX') + abXXefabXXef + + + + + + + reverse + + reverse ( text ) + text + + + Reverses the order of the characters in the string. + + + reverse('abcde') + edcba + + + + + + + right + + right ( string text, + n integer ) + text + + + Returns last n characters in the string, + or when n is negative, returns all but + first |n| characters. + + + right('abcde', 2) + de + + + + + + + split_part + + split_part ( string text, + delimiter text, + n integer ) + text + + + Splits string at occurrences + of delimiter and returns + the n'th field (counting from one), + or when n is negative, returns + the |n|'th-from-last field. + + + split_part('abc~@~def~@~ghi', '~@~', 2) + def + + + split_part('abc,def,ghi,jkl', ',', -2) + ghi + + + + + + + starts_with + + starts_with ( string text, prefix text ) + boolean + + + Returns true if string starts + with prefix. + + + starts_with('alphabet', 'alph') + t + + + + + + + string_to_array + + string_to_array ( string text, delimiter text , null_string text ) + text[] + + + Splits the string at occurrences + of delimiter and forms the resulting fields + into a text array. + If delimiter is NULL, + each character in the string will become a + separate element in the array. + If delimiter is an empty string, then + the string is treated as a single field. + If null_string is supplied and is + not NULL, fields matching that string are + replaced by NULL. + See also array_to_string. + + + string_to_array('xx~~yy~~zz', '~~', 'yy') + {xx,NULL,zz} + + + + + + + string_to_table + + string_to_table ( string text, delimiter text , null_string text ) + setof text + + + Splits the string at occurrences + of delimiter and returns the resulting fields + as a set of text rows. + If delimiter is NULL, + each character in the string will become a + separate row of the result. + If delimiter is an empty string, then + the string is treated as a single field. + If null_string is supplied and is + not NULL, fields matching that string are + replaced by NULL. + + + string_to_table('xx~^~yy~^~zz', '~^~', 'yy') + + + xx + NULL + zz + + + + + + + + strpos + + strpos ( string text, substring text ) + integer + + + Returns first starting index of the specified substring + within string, or zero if it's not present. + (Same as position(substring in + string), but note the reversed + argument order.) + + + strpos('high', 'ig') + 2 + + + + + + + substr + + substr ( string text, start integer , count integer ) + text + + + Extracts the substring of string starting at + the start'th character, + and extending for count characters if that is + specified. (Same + as substring(string + from start + for count).) + + + substr('alphabet', 3) + phabet + + + substr('alphabet', 3, 2) + ph + + + + + + + to_ascii + + to_ascii ( string text ) + text + + + to_ascii ( string text, + encoding name ) + text + + + to_ascii ( string text, + encoding integer ) + text + + + Converts string to ASCII + from another encoding, which may be identified by name or number. + If encoding is omitted the database encoding + is assumed (which in practice is the only useful case). + The conversion consists primarily of dropping accents. + Conversion is only supported + from LATIN1, LATIN2, + LATIN9, and WIN1250 encodings. + (See the module for another, more flexible + solution.) 
+ + + to_ascii('Karél') + Karel + + + + + + + to_bin + + to_bin ( integer ) + text + + + to_bin ( bigint ) + text + + + Converts the number to its equivalent two's complement binary + representation. + + + to_bin(2147483647) + 1111111111111111111111111111111 + + + to_bin(-1234) + 11111111111111111111101100101110 + + + + + + + to_hex + + to_hex ( integer ) + text + + + to_hex ( bigint ) + text + + + Converts the number to its equivalent two's complement hexadecimal + representation. + + + to_hex(2147483647) + 7fffffff + + + to_hex(-1234) + fffffb2e + + + + + + + to_oct + + to_oct ( integer ) + text + + + to_oct ( bigint ) + text + + + Converts the number to its equivalent two's complement octal + representation. + + + to_oct(2147483647) + 17777777777 + + + to_oct(-1234) + 37777775456 + + + + + + + translate + + translate ( string text, + from text, + to text ) + text + + + Replaces each character in string that + matches a character in the from set with the + corresponding character in the to + set. If from is longer than + to, occurrences of the extra characters in + from are deleted. + + + translate('12345', '143', 'ax') + a2x5 + + + + + + + unistr + + unistr ( text ) + text + + + Evaluate escaped Unicode characters in the argument. Unicode characters + can be specified as + \XXXX (4 hexadecimal + digits), \+XXXXXX (6 + hexadecimal digits), + \uXXXX (4 hexadecimal + digits), or \UXXXXXXXX + (8 hexadecimal digits). To specify a backslash, write two + backslashes. All other characters are taken literally. + + + + If the server encoding is not UTF-8, the Unicode code point identified + by one of these escape sequences is converted to the actual server + encoding; an error is reported if that's not possible. + + + + This function provides a (non-standard) alternative to string + constants with Unicode escapes (see ). + + + + unistr('d\0061t\+000061') + data + + + unistr('d\u0061t\U00000061') + data + + + + + +
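The delimiter corner cases of string_to_array (and, equivalently, string_to_table) deserve a quick sketch, since the three behaviors described in the table are easy to confuse:

SELECT string_to_array('abc', NULL);        -- {a,b,c}       (each character is a field)
SELECT string_to_array('abc', '');          -- {abc}         (whole string is one field)
SELECT string_to_array('a,b,,c', ',', '');  -- {a,b,NULL,c}  (empty fields become NULL)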
+ + + The concat, concat_ws and + format functions are variadic, so it is possible to + pass the values to be concatenated or formatted as an array marked with + the VARIADIC keyword (see ). The array's elements are + treated as if they were separate ordinary arguments to the function. + If the variadic array argument is NULL, concat + and concat_ws return NULL, but + format treats a NULL as a zero-element array. + + + + See also the aggregate function string_agg in + , and the functions for + converting between strings and the bytea type in + . + + + + <function>format</function> + + + format + + + + The function format produces output formatted according to + a format string, in a style similar to the C function + sprintf. + + + + +format(formatstr text , formatarg "any" , ... ) + + formatstr is a format string that specifies how the + result should be formatted. Text in the format string is copied + directly to the result, except where format specifiers are + used. Format specifiers act as placeholders in the string, defining how + subsequent function arguments should be formatted and inserted into the + result. Each formatarg argument is converted to text + according to the usual output rules for its data type, and then formatted + and inserted into the result string according to the format specifier(s). + + + + Format specifiers are introduced by a % character and have + the form + +%[position][flags][width]type + + where the component fields are: + + + + position (optional) + + + A string of the form n$ where + n is the index of the argument to print. + Index 1 means the first argument after + formatstr. If the position is + omitted, the default is to use the next argument in sequence. + + + + + + flags (optional) + + + Additional options controlling how the format specifier's output is + formatted. Currently the only supported flag is a minus sign + (-) which will cause the format specifier's output to be + left-justified. This has no effect unless the width + field is also specified. + + + + + + width (optional) + + + Specifies the minimum number of characters to use to + display the format specifier's output. The output is padded on the + left or right (depending on the - flag) with spaces as + needed to fill the width. A too-small width does not cause + truncation of the output, but is simply ignored. The width may be + specified using any of the following: a positive integer; an + asterisk (*) to use the next function argument as the + width; or a string of the form *n$ to + use the nth function argument as the width. + + + + If the width comes from a function argument, that argument is + consumed before the argument that is used for the format specifier's + value. If the width argument is negative, the result is left + aligned (as if the - flag had been specified) within a + field of length abs(width). + + + + + + type (required) + + + The type of format conversion to use to produce the format + specifier's output. The following types are supported: + + + + s formats the argument value as a simple + string. A null value is treated as an empty string. + + + + + I treats the argument value as an SQL + identifier, double-quoting it if necessary. + It is an error for the value to be null (equivalent to + quote_ident). + + + + + L quotes the argument value as an SQL literal. + A null value is displayed as the string NULL, without + quotes (equivalent to quote_nullable). 
+ + + + + + + + + + + In addition to the format specifiers described above, the special sequence + %% may be used to output a literal % character. + + + + Here are some examples of the basic format conversions: + + +SELECT format('Hello %s', 'World'); +Result: Hello World + +SELECT format('Testing %s, %s, %s, %%', 'one', 'two', 'three'); +Result: Testing one, two, three, % + +SELECT format('INSERT INTO %I VALUES(%L)', 'Foo bar', E'O\'Reilly'); +Result: INSERT INTO "Foo bar" VALUES('O''Reilly') + +SELECT format('INSERT INTO %I VALUES(%L)', 'locations', 'C:\Program Files'); +Result: INSERT INTO locations VALUES('C:\Program Files') + + + + + Here are examples using width fields + and the - flag: + + +SELECT format('|%10s|', 'foo'); +Result: | foo| + +SELECT format('|%-10s|', 'foo'); +Result: |foo | + +SELECT format('|%*s|', 10, 'foo'); +Result: | foo| + +SELECT format('|%*s|', -10, 'foo'); +Result: |foo | + +SELECT format('|%-*s|', 10, 'foo'); +Result: |foo | + +SELECT format('|%-*s|', -10, 'foo'); +Result: |foo | + + + + + These examples show use of position fields: + + +SELECT format('Testing %3$s, %2$s, %1$s', 'one', 'two', 'three'); +Result: Testing three, two, one + +SELECT format('|%*2$s|', 'foo', 10, 'bar'); +Result: | bar| + +SELECT format('|%1$*2$s|', 'foo', 10, 'bar'); +Result: | foo| + + + + + Unlike the standard C function sprintf, + PostgreSQL's format function allows format + specifiers with and without position fields to be mixed + in the same format string. A format specifier without a + position field always uses the next argument after the + last argument consumed. + In addition, the format function does not require all + function arguments to be used in the format string. + For example: + + +SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); +Result: Testing three, two, three + + + + + The %I and %L format specifiers are particularly + useful for safely constructing dynamic SQL statements. See + . + + + +
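Tying the last two points together, a sketch of %I/%L for safely building dynamic SQL and of passing a marked array to a variadic call (the table and author names are hypothetical):

SELECT format('SELECT * FROM %I WHERE author = %L', 'my table', E'O\'Reilly');
Result: SELECT * FROM "my table" WHERE author = 'O''Reilly'

SELECT format('%s, %s, %s', VARIADIC ARRAY['one', 'two', 'three']);
Result: one, two, three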
diff --git a/doc/src/sgml/func/func-subquery.sgml b/doc/src/sgml/func/func-subquery.sgml new file mode 100644 index 0000000000000..a9f2b12e48c66 --- /dev/null +++ b/doc/src/sgml/func/func-subquery.sgml @@ -0,0 +1,349 @@ + + Subquery Expressions + + + EXISTS + + + + IN + + + + NOT IN + + + + ANY + + + + ALL + + + + SOME + + + + subquery + + + + This section describes the SQL-compliant subquery + expressions available in PostgreSQL. + All of the expression forms documented in this section return + Boolean (true/false) results. + + + + <literal>EXISTS</literal> + + +EXISTS (subquery) + + + + The argument of EXISTS is an arbitrary SELECT statement, + or subquery. The + subquery is evaluated to determine whether it returns any rows. + If it returns at least one row, the result of EXISTS is + true; if the subquery returns no rows, the result of EXISTS + is false. + + + + The subquery can refer to variables from the surrounding query, + which will act as constants during any one evaluation of the subquery. + + + + The subquery will generally only be executed long enough to determine + whether at least one row is returned, not all the way to completion. + It is unwise to write a subquery that has side effects (such as + calling sequence functions); whether the side effects occur + might be unpredictable. + + + + Since the result depends only on whether any rows are returned, + and not on the contents of those rows, the output list of the + subquery is normally unimportant. A common coding convention is + to write all EXISTS tests in the form + EXISTS(SELECT 1 WHERE ...). There are exceptions to + this rule however, such as subqueries that use INTERSECT. + + + + This simple example is like an inner join on col2, but + it produces at most one output row for each tab1 row, + even if there are several matching tab2 rows: + +SELECT col1 +FROM tab1 +WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); + + + + + + <literal>IN</literal> + + +expression IN (subquery) + + + + The right-hand side is a parenthesized + subquery, which must return exactly one column. The left-hand expression + is evaluated and compared to each row of the subquery result. + The result of IN is true if any equal subquery row is found. + The result is false if no equal row is found (including the + case where the subquery returns no rows). + + + + Note that if the left-hand expression yields null, or if there are + no equal right-hand values and at least one right-hand row yields + null, the result of the IN construct will be null, not false. + This is in accordance with SQL's normal rules for Boolean combinations + of null values. + + + + As with EXISTS, it's unwise to assume that the subquery will + be evaluated completely. + + + +row_constructor IN (subquery) + + + + The left-hand side of this form of IN is a row constructor, + as described in . + The right-hand side is a parenthesized + subquery, which must return exactly as many columns as there are + expressions in the left-hand row. The left-hand expressions are + evaluated and compared row-wise to each row of the subquery result. + The result of IN is true if any equal subquery row is found. + The result is false if no equal row is found (including the + case where the subquery returns no rows). + + + + As usual, null values in the rows are combined per + the normal rules of SQL Boolean expressions. 
Two rows are considered + equal if all their corresponding members are non-null and equal; the rows + are unequal if any corresponding members are non-null and unequal; + otherwise the result of that row comparison is unknown (null). + If all the per-row results are either unequal or null, with at least one + null, then the result of IN is null. + + + + + <literal>NOT IN</literal> + + +expression NOT IN (subquery) + + + + The right-hand side is a parenthesized + subquery, which must return exactly one column. The left-hand expression + is evaluated and compared to each row of the subquery result. + The result of NOT IN is true if only unequal subquery rows + are found (including the case where the subquery returns no rows). + The result is false if any equal row is found. + + + + Note that if the left-hand expression yields null, or if there are + no equal right-hand values and at least one right-hand row yields + null, the result of the NOT IN construct will be null, not true. + This is in accordance with SQL's normal rules for Boolean combinations + of null values. + + + + As with EXISTS, it's unwise to assume that the subquery will + be evaluated completely. + + + +row_constructor NOT IN (subquery) + + + + The left-hand side of this form of NOT IN is a row constructor, + as described in . + The right-hand side is a parenthesized + subquery, which must return exactly as many columns as there are + expressions in the left-hand row. The left-hand expressions are + evaluated and compared row-wise to each row of the subquery result. + The result of NOT IN is true if only unequal subquery rows + are found (including the case where the subquery returns no rows). + The result is false if any equal row is found. + + + + As usual, null values in the rows are combined per + the normal rules of SQL Boolean expressions. Two rows are considered + equal if all their corresponding members are non-null and equal; the rows + are unequal if any corresponding members are non-null and unequal; + otherwise the result of that row comparison is unknown (null). + If all the per-row results are either unequal or null, with at least one + null, then the result of NOT IN is null. + + + + + <literal>ANY</literal>/<literal>SOME</literal> + + +expression operator ANY (subquery) +expression operator SOME (subquery) + + + + The right-hand side is a parenthesized + subquery, which must return exactly one column. The left-hand expression + is evaluated and compared to each row of the subquery result using the + given operator, which must yield a Boolean + result. + The result of ANY is true if any true result is obtained. + The result is false if no true result is found (including the + case where the subquery returns no rows). + + + + SOME is a synonym for ANY. + IN is equivalent to = ANY. + + + + Note that if there are no successes and at least one right-hand row yields + null for the operator's result, the result of the ANY construct + will be null, not false. + This is in accordance with SQL's normal rules for Boolean combinations + of null values. + + + + As with EXISTS, it's unwise to assume that the subquery will + be evaluated completely. + + + +row_constructor operator ANY (subquery) +row_constructor operator SOME (subquery) + + + + The left-hand side of this form of ANY is a row constructor, + as described in . + The right-hand side is a parenthesized + subquery, which must return exactly as many columns as there are + expressions in the left-hand row. 
The left-hand expressions are + evaluated and compared row-wise to each row of the subquery result, + using the given operator. + The result of ANY is true if the comparison + returns true for any subquery row. + The result is false if the comparison returns false for every + subquery row (including the case where the subquery returns no + rows). + The result is NULL if no comparison with a subquery row returns true, + and at least one comparison returns NULL. + + + + See for details about the meaning + of a row constructor comparison. + + + + + <literal>ALL</literal> + + +expression operator ALL (subquery) + + + + The right-hand side is a parenthesized + subquery, which must return exactly one column. The left-hand expression + is evaluated and compared to each row of the subquery result using the + given operator, which must yield a Boolean + result. + The result of ALL is true if all rows yield true + (including the case where the subquery returns no rows). + The result is false if any false result is found. + The result is NULL if no comparison with a subquery row returns false, + and at least one comparison returns NULL. + + + + NOT IN is equivalent to <> ALL. + + + + As with EXISTS, it's unwise to assume that the subquery will + be evaluated completely. + + + +row_constructor operator ALL (subquery) + + + + The left-hand side of this form of ALL is a row constructor, + as described in . + The right-hand side is a parenthesized + subquery, which must return exactly as many columns as there are + expressions in the left-hand row. The left-hand expressions are + evaluated and compared row-wise to each row of the subquery result, + using the given operator. + The result of ALL is true if the comparison + returns true for all subquery rows (including the + case where the subquery returns no rows). + The result is false if the comparison returns false for any + subquery row. + The result is NULL if no comparison with a subquery row returns false, + and at least one comparison returns NULL. + + + + See for details about the meaning + of a row constructor comparison. + + + + + Single-Row Comparison + + + comparison + subquery result row + + + +row_constructor operator (subquery) + + + + The left-hand side is a row constructor, + as described in . + The right-hand side is a parenthesized subquery, which must return exactly + as many columns as there are expressions in the left-hand row. Furthermore, + the subquery cannot return more than one row. (If it returns zero rows, + the result is taken to be null.) The left-hand side is evaluated and + compared row-wise to the single subquery result row. + + + + See for details about the meaning + of a row constructor comparison. + + + diff --git a/doc/src/sgml/func/func-textsearch.sgml b/doc/src/sgml/func/func-textsearch.sgml new file mode 100644 index 0000000000000..a06a58f14983a --- /dev/null +++ b/doc/src/sgml/func/func-textsearch.sgml @@ -0,0 +1,1046 @@ + + Text Search Functions and Operators + + + full text search + functions and operators + + + + text search + functions and operators + + + + , + and + + summarize the functions and operators that are provided + for full text searching. See for a detailed + explanation of PostgreSQL's text search + facility. + + + + Text Search Operators + + + + + Operator + + + Description + + + Example(s) + + + + + + + + tsvector @@ tsquery + boolean + + + tsquery @@ tsvector + boolean + + + Does tsvector match tsquery? + (The arguments can be given in either order.) 
+ + + to_tsvector('fat cats ate rats') @@ to_tsquery('cat & rat') + t + + + + + + text @@ tsquery + boolean + + + Does text string, after implicit invocation + of to_tsvector(), match tsquery? + + + 'fat cats ate rats' @@ to_tsquery('cat & rat') + t + + + + + + tsvector || tsvector + tsvector + + + Concatenates two tsvectors. If both inputs contain + lexeme positions, the second input's positions are adjusted + accordingly. + + + 'a:1 b:2'::tsvector || 'c:1 d:2 b:3'::tsvector + 'a':1 'b':2,5 'c':3 'd':4 + + + + + + tsquery && tsquery + tsquery + + + ANDs two tsquerys together, producing a query that + matches documents that match both input queries. + + + 'fat | rat'::tsquery && 'cat'::tsquery + ( 'fat' | 'rat' ) & 'cat' + + + + + + tsquery || tsquery + tsquery + + + ORs two tsquerys together, producing a query that + matches documents that match either input query. + + + 'fat | rat'::tsquery || 'cat'::tsquery + 'fat' | 'rat' | 'cat' + + + + + + !! tsquery + tsquery + + + Negates a tsquery, producing a query that matches + documents that do not match the input query. + + + !! 'cat'::tsquery + !'cat' + + + + + + tsquery <-> tsquery + tsquery + + + Constructs a phrase query, which matches if the two input queries + match at successive lexemes. + + + to_tsquery('fat') <-> to_tsquery('rat') + 'fat' <-> 'rat' + + + + + + tsquery @> tsquery + boolean + + + Does first tsquery contain the second? (This considers + only whether all the lexemes appearing in one query appear in the + other, ignoring the combining operators.) + + + 'cat'::tsquery @> 'cat & rat'::tsquery + f + + + + + + tsquery <@ tsquery + boolean + + + Is first tsquery contained in the second? (This + considers only whether all the lexemes appearing in one query appear + in the other, ignoring the combining operators.) + + + 'cat'::tsquery <@ 'cat & rat'::tsquery + t + + + 'cat'::tsquery <@ '!cat & rat'::tsquery + t + + + + +
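Combining the operators above, a small sketch (assuming the english configuration; the phrase operator requires the lexemes to appear at successive positions, in order):

SELECT to_tsvector('english', 'The Fat Rats') @@ 'fat <-> rat'::tsquery;   -- t
SELECT to_tsvector('english', 'The Fat Rats') @@ 'fat & !dog'::tsquery;    -- t
SELECT to_tsvector('english', 'The Fat Rats') @@ 'rat <-> fat'::tsquery;   -- f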
+ + + In addition to these specialized operators, the usual comparison + operators shown in are + available for types tsvector and tsquery. + These are not very + useful for text searching but allow, for example, unique indexes to be + built on columns of these types. + + + + Text Search Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + array_to_tsvector + + array_to_tsvector ( text[] ) + tsvector + + + Converts an array of text strings to a tsvector. + The given strings are used as lexemes as-is, without further + processing. Array elements must not be empty strings + or NULL. + + + array_to_tsvector('{fat,cat,rat}'::text[]) + 'cat' 'fat' 'rat' + + + + + + + get_current_ts_config + + get_current_ts_config ( ) + regconfig + + + Returns the OID of the current default text search configuration + (as set by ). + + + get_current_ts_config() + english + + + + + + + length + + length ( tsvector ) + integer + + + Returns the number of lexemes in the tsvector. + + + length('fat:2,4 cat:3 rat:5A'::tsvector) + 3 + + + + + + + numnode + + numnode ( tsquery ) + integer + + + Returns the number of lexemes plus operators in + the tsquery. + + + numnode('(fat & rat) | cat'::tsquery) + 5 + + + + + + + plainto_tsquery + + plainto_tsquery ( + config regconfig, + query text ) + tsquery + + + Converts text to a tsquery, normalizing words according to + the specified or default configuration. Any punctuation in the string + is ignored (it does not determine query operators). The resulting + query matches documents containing all non-stopwords in the text. + + + plainto_tsquery('english', 'The Fat Rats') + 'fat' & 'rat' + + + + + + + phraseto_tsquery + + phraseto_tsquery ( + config regconfig, + query text ) + tsquery + + + Converts text to a tsquery, normalizing words according to + the specified or default configuration. Any punctuation in the string + is ignored (it does not determine query operators). The resulting + query matches phrases containing all non-stopwords in the text. + + + phraseto_tsquery('english', 'The Fat Rats') + 'fat' <-> 'rat' + + + phraseto_tsquery('english', 'The Cat and Rats') + 'cat' <2> 'rat' + + + + + + + websearch_to_tsquery + + websearch_to_tsquery ( + config regconfig, + query text ) + tsquery + + + Converts text to a tsquery, normalizing words according + to the specified or default configuration. Quoted word sequences are + converted to phrase tests. The word or is understood + as producing an OR operator, and a dash produces a NOT operator; + other punctuation is ignored. + This approximates the behavior of some common web search tools. + + + websearch_to_tsquery('english', '"fat rat" or cat dog') + 'fat' <-> 'rat' | 'cat' & 'dog' + + + + + + + querytree + + querytree ( tsquery ) + text + + + Produces a representation of the indexable portion of + a tsquery. A result that is empty or + just T indicates a non-indexable query. + + + querytree('foo & ! bar'::tsquery) + 'foo' + + + + + + + setweight + + setweight ( vector tsvector, weight "char" ) + tsvector + + + Assigns the specified weight to each element + of the vector. + + + setweight('fat:2,4 cat:3 rat:5B'::tsvector, 'A') + 'cat':3A 'fat':2A,4A 'rat':5A + + + + + + + setweight + setweight for specific lexeme(s) + + setweight ( vector tsvector, weight "char", lexemes text[] ) + tsvector + + + Assigns the specified weight to elements + of the vector that are listed + in lexemes. + The strings in lexemes are taken as lexemes + as-is, without further processing. 
Strings that do not match any + lexeme in vector are ignored. + + + setweight('fat:2,4 cat:3 rat:5,6B'::tsvector, 'A', '{cat,rat}') + 'cat':3A 'fat':2,4 'rat':5A,6A + + + + + + + strip + + strip ( tsvector ) + tsvector + + + Removes positions and weights from the tsvector. + + + strip('fat:2,4 cat:3 rat:5A'::tsvector) + 'cat' 'fat' 'rat' + + + + + + + to_tsquery + + to_tsquery ( + config regconfig, + query text ) + tsquery + + + Converts text to a tsquery, normalizing words according to + the specified or default configuration. The words must be combined + by valid tsquery operators. + + + to_tsquery('english', 'The & Fat & Rats') + 'fat' & 'rat' + + + + + + + to_tsvector + + to_tsvector ( + config regconfig, + document text ) + tsvector + + + Converts text to a tsvector, normalizing words according + to the specified or default configuration. Position information is + included in the result. + + + to_tsvector('english', 'The Fat Rats') + 'fat':2 'rat':3 + + + + + + to_tsvector ( + config regconfig, + document json ) + tsvector + + + to_tsvector ( + config regconfig, + document jsonb ) + tsvector + + + Converts each string value in the JSON document to + a tsvector, normalizing words according to the specified + or default configuration. The results are then concatenated in + document order to produce the output. Position information is + generated as though one stopword exists between each pair of string + values. (Beware that document order of the fields of a + JSON object is implementation-dependent when the input + is jsonb; observe the difference in the examples.) + + + to_tsvector('english', '{"aa": "The Fat Rats", "b": "dog"}'::json) + 'dog':5 'fat':2 'rat':3 + + + to_tsvector('english', '{"aa": "The Fat Rats", "b": "dog"}'::jsonb) + 'dog':1 'fat':4 'rat':5 + + + + + + + json_to_tsvector + + json_to_tsvector ( + config regconfig, + document json, + filter jsonb ) + tsvector + + + + jsonb_to_tsvector + + jsonb_to_tsvector ( + config regconfig, + document jsonb, + filter jsonb ) + tsvector + + + Selects each item in the JSON document that is requested by + the filter and converts each one to + a tsvector, normalizing words according to the specified + or default configuration. The results are then concatenated in + document order to produce the output. Position information is + generated as though one stopword exists between each pair of selected + items. (Beware that document order of the fields of a + JSON object is implementation-dependent when the input + is jsonb.) + The filter must be a jsonb + array containing zero or more of these keywords: + "string" (to include all string values), + "numeric" (to include all numeric values), + "boolean" (to include all boolean values), + "key" (to include all keys), or + "all" (to include all the above). + As a special case, the filter can also be a + simple JSON value that is one of these keywords. + + + json_to_tsvector('english', '{"a": "The Fat Rats", "b": 123}'::json, '["string", "numeric"]') + '123':5 'fat':2 'rat':3 + + + json_to_tsvector('english', '{"cat": "The Fat Rats", "dog": 123}'::json, '"all"') + '123':9 'cat':1 'dog':7 'fat':4 'rat':5 + + + + + + + ts_delete + + ts_delete ( vector tsvector, lexeme text ) + tsvector + + + Removes any occurrence of the given lexeme + from the vector. + The lexeme string is treated as a lexeme as-is, + without further processing. 
+ + + ts_delete('fat:2,4 cat:3 rat:5A'::tsvector, 'fat') + 'cat':3 'rat':5A + + + + + + ts_delete ( vector tsvector, lexemes text[] ) + tsvector + + + Removes any occurrences of the lexemes + in lexemes + from the vector. + The strings in lexemes are taken as lexemes + as-is, without further processing. Strings that do not match any + lexeme in vector are ignored. + + + ts_delete('fat:2,4 cat:3 rat:5A'::tsvector, ARRAY['fat','rat']) + 'cat':3 + + + + + + + ts_filter + + ts_filter ( vector tsvector, weights "char"[] ) + tsvector + + + Selects only elements with the given weights + from the vector. + + + ts_filter('fat:2,4 cat:3b,7c rat:5A'::tsvector, '{a,b}') + 'cat':3B 'rat':5A + + + + + + + ts_headline + + ts_headline ( + config regconfig, + document text, + query tsquery + , options text ) + text + + + Displays, in an abbreviated form, the match(es) for + the query in + the document, which must be raw text not + a tsvector. Words in the document are normalized + according to the specified or default configuration before matching to + the query. Use of this function is discussed in + , which also describes the + available options. + + + ts_headline('The fat cat ate the rat.', 'cat') + The fat <b>cat</b> ate the rat. + + + + + + ts_headline ( + config regconfig, + document json, + query tsquery + , options text ) + text + + + ts_headline ( + config regconfig, + document jsonb, + query tsquery + , options text ) + text + + + Displays, in an abbreviated form, match(es) for + the query that occur in string values + within the JSON document. + See for more details. + + + ts_headline('{"cat":"raining cats and dogs"}'::jsonb, 'cat') + {"cat": "raining <b>cats</b> and dogs"} + + + + + + + ts_rank + + ts_rank ( + weights real[], + vector tsvector, + query tsquery + , normalization integer ) + real + + + Computes a score showing how well + the vector matches + the query. See + for details. + + + ts_rank(to_tsvector('raining cats and dogs'), 'cat') + 0.06079271 + + + + + + + ts_rank_cd + + ts_rank_cd ( + weights real[], + vector tsvector, + query tsquery + , normalization integer ) + real + + + Computes a score showing how well + the vector matches + the query, using a cover density + algorithm. See for details. + + + ts_rank_cd(to_tsvector('raining cats and dogs'), 'cat') + 0.1 + + + + + + + ts_rewrite + + ts_rewrite ( query tsquery, + target tsquery, + substitute tsquery ) + tsquery + + + Replaces occurrences of target + with substitute + within the query. + See for details. + + + ts_rewrite('a & b'::tsquery, 'a'::tsquery, 'foo|bar'::tsquery) + 'b' & ( 'foo' | 'bar' ) + + + + + + ts_rewrite ( query tsquery, + select text ) + tsquery + + + Replaces portions of the query according to + target(s) and substitute(s) obtained by executing + a SELECT command. + See for details. + + + SELECT ts_rewrite('a & b'::tsquery, 'SELECT t,s FROM aliases') + 'b' & ( 'foo' | 'bar' ) + + + + + + + tsquery_phrase + + tsquery_phrase ( query1 tsquery, query2 tsquery ) + tsquery + + + Constructs a phrase query that searches + for matches of query1 + and query2 at successive lexemes (same + as <-> operator). + + + tsquery_phrase(to_tsquery('fat'), to_tsquery('cat')) + 'fat' <-> 'cat' + + + + + + tsquery_phrase ( query1 tsquery, query2 tsquery, distance integer ) + tsquery + + + Constructs a phrase query that searches + for matches of query1 and + query2 that occur exactly + distance lexemes apart. 
+ + + tsquery_phrase(to_tsquery('fat'), to_tsquery('cat'), 10) + 'fat' <10> 'cat' + + + + + + + tsvector_to_array + + tsvector_to_array ( tsvector ) + text[] + + + Converts a tsvector to an array of lexemes. + + + tsvector_to_array('fat:2,4 cat:3 rat:5A'::tsvector) + {cat,fat,rat} + + + + + + + unnest + for tsvector + + unnest ( tsvector ) + setof record + ( lexeme text, + positions smallint[], + weights text ) + + + Expands a tsvector into a set of rows, one per lexeme. + + + select * from unnest('cat:3 fat:2,4 rat:5A'::tsvector) + + + lexeme | positions | weights +--------+-----------+--------- + cat | {3} | {D} + fat | {2,4} | {D,D} + rat | {5} | {A} + + + + + +
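+
+   As a worked example combining several of the functions above (a sketch
+   using only literal values, so it can be run as-is):
+
+SELECT ts_rank(
+         setweight(to_tsvector('english', 'The Fat Rats'), 'A') ||
+         setweight(to_tsvector('english', 'rats ate cats'), 'D'),
+         to_tsquery('english', 'rat'));
+
+   The first setweight call marks the "title" vector with weight A, the
+   concatenation operator merges it with the "body" vector, and ts_rank
+   then scores the combined vector against the query.
+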
+ + + + All the text search functions that accept an optional regconfig + argument will use the configuration specified by + + when that argument is omitted. + + + + + The functions in + + are listed separately because they are not usually used in everyday text + searching operations. They are primarily helpful for development and + debugging of new text search configurations. + + + + Text Search Debugging Functions + + + + + Function + + + Description + + + Example(s) + + + + + + + + + ts_debug + + ts_debug ( + config regconfig, + document text ) + setof record + ( alias text, + description text, + token text, + dictionaries regdictionary[], + dictionary regdictionary, + lexemes text[] ) + + + Extracts and normalizes tokens from + the document according to the specified or + default text search configuration, and returns information about how + each token was processed. + See for details. + + + ts_debug('english', 'The Brightest supernovaes') + (asciiword,"Word, all ASCII",The,{english_stem},english_stem,{}) ... + + + + + + + ts_lexize + + ts_lexize ( dict regdictionary, token text ) + text[] + + + Returns an array of replacement lexemes if the input token is known to + the dictionary, or an empty array if the token is known to the + dictionary but it is a stop word, or NULL if it is not a known word. + See for details. + + + ts_lexize('english_stem', 'stars') + {star} + + + + + + + ts_parse + + ts_parse ( parser_name text, + document text ) + setof record + ( tokid integer, + token text ) + + + Extracts tokens from the document using the + named parser. + See for details. + + + ts_parse('default', 'foo - bar') + (1,foo) ... + + + + + + ts_parse ( parser_oid oid, + document text ) + setof record + ( tokid integer, + token text ) + + + Extracts tokens from the document using a + parser specified by OID. + See for details. + + + ts_parse(3722, 'foo - bar') + (1,foo) ... + + + + + + + ts_token_type + + ts_token_type ( parser_name text ) + setof record + ( tokid integer, + alias text, + description text ) + + + Returns a table that describes each type of token the named parser can + recognize. + See for details. + + + ts_token_type('default') + (1,asciiword,"Word, all ASCII") ... + + + + + + ts_token_type ( parser_oid oid ) + setof record + ( tokid integer, + alias text, + description text ) + + + Returns a table that describes each type of token a parser specified + by OID can recognize. + See for details. + + + ts_token_type(3722) + (1,asciiword,"Word, all ASCII") ... + + + + + + + ts_stat + + ts_stat ( sqlquery text + , weights text ) + setof record + ( word text, + ndoc integer, + nentry integer ) + + + Executes the sqlquery, which must return a + single tsvector column, and returns statistics about each + distinct lexeme contained in the data. + See for details. + + + ts_stat('SELECT vector FROM apod') + (foo,10,15) ... + + + + +
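+
+   For instance, to list the most frequent lexemes in a document collection
+   while debugging a configuration (reusing the hypothetical apod table
+   from the ts_stat example above):
+
+SELECT word, ndoc, nentry
+FROM ts_stat('SELECT vector FROM apod')
+ORDER BY nentry DESC, ndoc DESC, word
+LIMIT 10;
+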
+ +
diff --git a/doc/src/sgml/func/func-trigger.sgml b/doc/src/sgml/func/func-trigger.sgml new file mode 100644 index 0000000000000..94b40adbdb84a --- /dev/null +++ b/doc/src/sgml/func/func-trigger.sgml @@ -0,0 +1,135 @@ + + Trigger Functions + + + While many uses of triggers involve user-written trigger functions, + PostgreSQL provides a few built-in trigger + functions that can be used directly in user-defined triggers. These + are summarized in . + (Additional built-in trigger functions exist, which implement foreign + key constraints and deferred index constraints. Those are not documented + here since users need not use them directly.) + + + + For more information about creating triggers, see + . + + + + Built-In Trigger Functions + + + + + Function + + + Description + + + Example Usage + + + + + + + + + suppress_redundant_updates_trigger + + suppress_redundant_updates_trigger ( ) + trigger + + + Suppresses do-nothing update operations. See below for details. + + + CREATE TRIGGER ... suppress_redundant_updates_trigger() + + + + + + + tsvector_update_trigger + + tsvector_update_trigger ( ) + trigger + + + Automatically updates a tsvector column from associated + plain-text document column(s). The text search configuration to use + is specified by name as a trigger argument. See + for details. + + + CREATE TRIGGER ... tsvector_update_trigger(tsvcol, 'pg_catalog.swedish', title, body) + + + + + + + tsvector_update_trigger_column + + tsvector_update_trigger_column ( ) + trigger + + + Automatically updates a tsvector column from associated + plain-text document column(s). The text search configuration to use + is taken from a regconfig column of the table. See + for details. + + + CREATE TRIGGER ... tsvector_update_trigger_column(tsvcol, tsconfigcol, title, body) + + + + +
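+
+   As a sketch of how tsvector_update_trigger is typically wired up
+   (table and column names are hypothetical):
+
+CREATE TABLE messages (title text, body text, tsv tsvector);
+CREATE TRIGGER tsvectorupdate BEFORE INSERT OR UPDATE
+ON messages FOR EACH ROW EXECUTE FUNCTION
+tsvector_update_trigger(tsv, 'pg_catalog.english', title, body);
+
+   With this trigger in place, tsv is maintained automatically whenever
+   title or body changes.
+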
+ + + The suppress_redundant_updates_trigger function, + when applied as a row-level BEFORE UPDATE trigger, + will prevent any update that does not actually change the data in the + row from taking place. This overrides the normal behavior which always + performs a physical row update + regardless of whether or not the data has changed. (This normal behavior + makes updates run faster, since no checking is required, and is also + useful in certain cases.) + + + + Ideally, you should avoid running updates that don't actually + change the data in the record. Redundant updates can cost considerable + unnecessary time, especially if there are lots of indexes to alter, + and space in dead rows that will eventually have to be vacuumed. + However, detecting such situations in client code is not + always easy, or even possible, and writing expressions to detect + them can be error-prone. An alternative is to use + suppress_redundant_updates_trigger, which will skip + updates that don't change the data. You should use this with care, + however. The trigger takes a small but non-trivial time for each record, + so if most of the records affected by updates do actually change, + use of this trigger will make updates run slower on average. + + + + The suppress_redundant_updates_trigger function can be + added to a table like this: + +CREATE TRIGGER z_min_update +BEFORE UPDATE ON tablename +FOR EACH ROW EXECUTE FUNCTION suppress_redundant_updates_trigger(); + + In most cases, you need to fire this trigger last for each row, so that + it does not override other triggers that might wish to alter the row. + Bearing in mind that triggers fire in name order, you would therefore + choose a trigger name that comes after the name of any other trigger + you might have on the table. (Hence the z prefix in the + example.) + +
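+
+   A minimal demonstration of the effect (hypothetical table; the command
+   tags shown in the comments are the expected output, since rows skipped
+   by the trigger are not counted as updated):
+
+CREATE TABLE t (id int PRIMARY KEY, val text);
+INSERT INTO t VALUES (1, 'a');
+CREATE TRIGGER z_min_update
+BEFORE UPDATE ON t
+FOR EACH ROW EXECUTE FUNCTION suppress_redundant_updates_trigger();
+UPDATE t SET val = 'a';   -- UPDATE 0: nothing actually changed
+UPDATE t SET val = 'b';   -- UPDATE 1: a real change proceeds
+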
diff --git a/doc/src/sgml/func/func-uuid.sgml b/doc/src/sgml/func/func-uuid.sgml new file mode 100644 index 0000000000000..65c5ddec6b7dc --- /dev/null +++ b/doc/src/sgml/func/func-uuid.sgml @@ -0,0 +1,188 @@ + + UUID Functions + + + UUID + generating + + + + gen_random_uuid + + + + uuidv4 + + + + uuidv7 + + + + uuid_extract_timestamp + + + + uuid_extract_version + + + + shows the PostgreSQL + functions that can be used to generate UUIDs. + + + + <acronym>UUID</acronym> Generation Functions + + + + + + Function + + + Description + + + Example(s) + + + + + + + + + + gen_random_uuid + uuid + + + uuidv4 + uuid + + + Generate a version 4 (random) UUID. + + + gen_random_uuid() + 5b30857f-0bfa-48b5-ac0b-5c64e28078d1 + + + uuidv4() + b42410ee-132f-42ee-9e4f-09a6485c95b8 + + + + + + + uuidv7 + ( shift interval ) + uuid + + + Generate a version 7 (time-ordered) UUID. The timestamp is computed using UNIX timestamp + with millisecond precision + sub-millisecond timestamp + random. The optional parameter + shift will shift the computed timestamp by the given interval. + + + uuidv7() + 019535d9-3df7-79fb-b466-fa907fa17f9e + + + + + +
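+
+   A common application (a sketch; table and column names are hypothetical)
+   is as a column default, where uuidv7 is attractive because its
+   time-ordered values keep index insertions localized:
+
+CREATE TABLE events (
+    id uuid DEFAULT uuidv7() PRIMARY KEY,
+    payload text
+);
+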
+ + + + The module provides additional functions that + implement other standard algorithms for generating UUIDs. + + + + + shows the PostgreSQL + functions that can be used to extract information from UUIDs. + + + + <acronym>UUID</acronym> Extraction Functions + + + + + + Function + + + Description + + + Example(s) + + + + + + + + + + uuid_extract_timestamp + ( uuid ) + timestamp with time zone + + + Extracts a timestamp with time zone from UUID + version 1 and 7. For other versions, this function returns null. Note that + the extracted timestamp is not necessarily exactly equal to the time the + UUID was generated; this depends on the implementation that generated the + UUID. + + + uuid_extract_timestamp('019535d9-3df7-79fb-b466-&zwsp;fa907fa17f9e'::uuid) + 2025-02-23 21:46:24.503-05 + + + + + + + uuid_extract_version + ( uuid ) + smallint + + + Extracts the version from a UUID of the variant described by + RFC 9562. For + other variants, this function returns null. For example, for a UUID + generated by gen_random_uuid, this function will + return 4. + + + uuid_extract_version('41db1265-8bc1-4ab3-992f-&zwsp;885799a4af1d'::uuid) + 4 + + + uuid_extract_version('019535d9-3df7-79fb-b466-&zwsp;fa907fa17f9e'::uuid) + 7 + + + + + +
+ + + PostgreSQL also provides the usual comparison + operators shown in for + UUIDs. + + + See for details on the data type + uuid in PostgreSQL. + +
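+
+   Putting generation and extraction together (a runnable sketch), the
+   timestamp embedded in a version 7 UUID can be recovered, including from
+   one generated with a shift:
+
+SELECT uuid_extract_version(u) AS version,
+       uuid_extract_timestamp(u) AS generated_at
+FROM uuidv7(interval '-1 hour') AS u;
+
+   Here version is 7 and generated_at lies about one hour in the past,
+   reflecting the shift argument.
+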
diff --git a/doc/src/sgml/func/func-window.sgml b/doc/src/sgml/func/func-window.sgml new file mode 100644 index 0000000000000..cce0165b9526e --- /dev/null +++ b/doc/src/sgml/func/func-window.sgml @@ -0,0 +1,284 @@ + + Window Functions + + + window function + built-in + + + + Window functions provide the ability to perform + calculations across sets of rows that are related to the current query + row. See for an introduction to this + feature, and for syntax + details. + + + + The built-in window functions are listed in + . Note that these functions + must be invoked using window function syntax, i.e., an + OVER clause is required. + + + + In addition to these functions, any built-in or user-defined + ordinary aggregate (i.e., not ordered-set or hypothetical-set aggregates) + can be used as a window function; see + for a list of the built-in aggregates. + Aggregate functions act as window functions only when an OVER + clause follows the call; otherwise they act as plain aggregates + and return a single row for the entire set. + + + + General-Purpose Window Functions + + + + + Function + + + Description + + + + + + + + + row_number + + row_number () + bigint + + + Returns the number of the current row within its partition, counting + from 1. + + + + + + + rank + + rank () + bigint + + + Returns the rank of the current row, with gaps; that is, + the row_number of the first row in its peer + group. + + + + + + + dense_rank + + dense_rank () + bigint + + + Returns the rank of the current row, without gaps; this function + effectively counts peer groups. + + + + + + + percent_rank + + percent_rank () + double precision + + + Returns the relative rank of the current row, that is + (rank - 1) / (total partition rows - 1). + The value thus ranges from 0 to 1 inclusive. + + + + + + + cume_dist + + cume_dist () + double precision + + + Returns the cumulative distribution, that is (number of partition rows + preceding or peers with current row) / (total partition rows). + The value thus ranges from 1/N to 1. + + + + + + + ntile + + ntile ( num_buckets integer ) + integer + + + Returns an integer ranging from 1 to the argument value, dividing the + partition as equally as possible. + + + + + + + lag + + lag ( value anycompatible + , offset integer + , default anycompatible ) + anycompatible + + + Returns value evaluated at + the row that is offset + rows before the current row within the partition; if there is no such + row, instead returns default + (which must be of a type compatible with + value). + Both offset and + default are evaluated + with respect to the current row. If omitted, + offset defaults to 1 and + default to NULL. + + + + + + + lead + + lead ( value anycompatible + , offset integer + , default anycompatible ) + anycompatible + + + Returns value evaluated at + the row that is offset + rows after the current row within the partition; if there is no such + row, instead returns default + (which must be of a type compatible with + value). + Both offset and + default are evaluated + with respect to the current row. If omitted, + offset defaults to 1 and + default to NULL. + + + + + + + first_value + + first_value ( value anyelement ) + anyelement + + + Returns value evaluated + at the row that is the first row of the window frame. + + + + + + + last_value + + last_value ( value anyelement ) + anyelement + + + Returns value evaluated + at the row that is the last row of the window frame. 
+ + + + + + + nth_value + + nth_value ( value anyelement, n integer ) + anyelement + + + Returns value evaluated + at the row that is the n'th + row of the window frame (counting from 1); + returns NULL if there is no such row. + + + + +
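+
+   A sketch showing several ranking functions side by side (empsalary is a
+   hypothetical table):
+
+SELECT depname, salary,
+       row_number() OVER w,
+       rank()       OVER w,
+       dense_rank() OVER w
+FROM empsalary
+WINDOW w AS (PARTITION BY depname ORDER BY salary DESC);
+
+   Within each department, row_number always increments, rank leaves gaps
+   after ties, and dense_rank does not.
+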
+ + + All of the functions listed in + depend on the sort ordering + specified by the ORDER BY clause of the associated window + definition. Rows that are not distinct when considering only the + ORDER BY columns are said to be peers. + The four ranking functions (including cume_dist) are + defined so that they give the same answer for all rows of a peer group. + + + + Note that first_value, last_value, and + nth_value consider only the rows within the window + frame, which by default contains the rows from the start of the + partition through the last peer of the current row. This is + likely to give unhelpful results for last_value and + sometimes also nth_value. You can redefine the frame by + adding a suitable frame specification (RANGE, + ROWS or GROUPS) to + the OVER clause. + See for more information + about frame specifications. + + + + When an aggregate function is used as a window function, it aggregates + over the rows within the current row's window frame. + An aggregate used with ORDER BY and the default window frame + definition produces a running sum type of behavior, which may or + may not be what's wanted. To obtain + aggregation over the whole partition, omit ORDER BY or use + ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING. + Other frame specifications can be used to obtain other effects. + + + + + The SQL standard defines a RESPECT NULLS or + IGNORE NULLS option for lead, lag, + first_value, last_value, and + nth_value. This is not implemented in + PostgreSQL: the behavior is always the + same as the standard's default, namely RESPECT NULLS. + Likewise, the standard's FROM FIRST or FROM LAST + option for nth_value is not implemented: only the + default FROM FIRST behavior is supported. (You can achieve + the result of FROM LAST by reversing the ORDER BY + ordering.) + + + +
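+
+   The running-sum behavior described above can be seen directly (again
+   using a hypothetical empsalary table):
+
+SELECT salary,
+       sum(salary) OVER (ORDER BY salary) AS running_total,
+       sum(salary) OVER () AS partition_total
+FROM empsalary;
+
+   The first aggregate uses the default frame and so accumulates through
+   the current row's peers; the second, lacking ORDER BY, aggregates over
+   the whole partition.
+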
diff --git a/doc/src/sgml/func/func-xml.sgml b/doc/src/sgml/func/func-xml.sgml new file mode 100644 index 0000000000000..21f34467a4f8a --- /dev/null +++ b/doc/src/sgml/func/func-xml.sgml @@ -0,0 +1,1283 @@ + + + XML Functions + + + XML Functions + + + + The functions and function-like expressions described in this + section operate on values of type xml. See for information about the xml + type. The function-like expressions xmlparse + and xmlserialize for converting to and from + type xml are documented there, not in this section. + + + + Use of most of these functions + requires PostgreSQL to have been built + with configure --with-libxml. + + + + Producing XML Content + + + A set of functions and function-like expressions is available for + producing XML content from SQL data. As such, they are + particularly suitable for formatting query results into XML + documents for processing in client applications. + + + + <literal>xmltext</literal> + + + xmltext + + + +xmltext ( text ) xml + + + + The function xmltext returns an XML value with a single + text node containing the input argument as its content. Predefined entities + like ampersand (), left and right angle brackets + (]]>), and quotation marks () + are escaped. + + + + Example: +'); + xmltext +------------------------- + < foo & bar > +]]> + + + + + <literal>xmlcomment</literal> + + + xmlcomment + + + +xmlcomment ( text ) xml + + + + The function xmlcomment creates an XML value + containing an XML comment with the specified text as content. + The text cannot contain -- or end with a + -, otherwise the resulting construct + would not be a valid XML comment. + If the argument is null, the result is null. + + + + Example: + +]]> + + + + + <literal>xmlconcat</literal> + + + xmlconcat + + + +xmlconcat ( xml , ... ) xml + + + + The function xmlconcat concatenates a list + of individual XML values to create a single value containing an + XML content fragment. Null values are omitted; the result is + only null if there are no nonnull arguments. + + + + Example: +', 'foo'); + + xmlconcat +---------------------- + foo +]]> + + + + XML declarations, if present, are combined as follows. If all + argument values have the same XML version declaration, that + version is used in the result, else no version is used. If all + argument values have the standalone declaration value + yes, then that value is used in the result. If + all argument values have a standalone declaration value and at + least one is no, then that is used in the result. + Else the result will have no standalone declaration. If the + result is determined to require a standalone declaration but no + version declaration, a version declaration with version 1.0 will + be used because XML requires an XML declaration to contain a + version declaration. Encoding declarations are ignored and + removed in all cases. + + + + Example: +', ''); + + xmlconcat +----------------------------------- + +]]> + + + + + <literal>xmlelement</literal> + + + xmlelement + + + +xmlelement ( NAME name , XMLATTRIBUTES ( attvalue AS attname , ... ) , content , ... ) xml + + + + The xmlelement expression produces an XML + element with the given name, attributes, and content. + The name + and attname items shown in the syntax are + simple identifiers, not values. The attvalue + and content items are expressions, which can + yield any PostgreSQL data type. The + argument(s) within XMLATTRIBUTES generate attributes + of the XML element; the content value(s) are + concatenated to form its content. 
+ + + + Examples: + + +SELECT xmlelement(name foo, xmlattributes('xyz' as bar)); + + xmlelement +------------------ + + +SELECT xmlelement(name foo, xmlattributes(current_date as bar), 'cont', 'ent'); + + xmlelement +------------------------------------- + content +]]> + + + + Element and attribute names that are not valid XML names are + escaped by replacing the offending characters by the sequence + _xHHHH_, where + HHHH is the character's Unicode + codepoint in hexadecimal notation. For example: + +]]> + + + + An explicit attribute name need not be specified if the attribute + value is a column reference, in which case the column's name will + be used as the attribute name by default. In other cases, the + attribute must be given an explicit name. So this example is + valid: + +CREATE TABLE test (a xml, b xml); +SELECT xmlelement(name test, xmlattributes(a, b)) FROM test; + + But these are not: + +SELECT xmlelement(name test, xmlattributes('constant'), a, b) FROM test; +SELECT xmlelement(name test, xmlattributes(func(a, b))) FROM test; + + + + + Element content, if specified, will be formatted according to + its data type. If the content is itself of type xml, + complex XML documents can be constructed. For example: + +]]> + + Content of other types will be formatted into valid XML character + data. This means in particular that the characters <, >, + and & will be converted to entities. Binary data (data type + bytea) will be represented in base64 or hex + encoding, depending on the setting of the configuration parameter + . The particular behavior for + individual data types is expected to evolve in order to align the + PostgreSQL mappings with those specified in SQL:2006 and later, + as discussed in . + + + + + <literal>xmlforest</literal> + + + xmlforest + + + +xmlforest ( content AS name , ... ) xml + + + + The xmlforest expression produces an XML + forest (sequence) of elements using the given names and content. + As for xmlelement, + each name must be a simple identifier, while + the content expressions can have any data + type. + + + + Examples: + +SELECT xmlforest('abc' AS foo, 123 AS bar); + + xmlforest +------------------------------ + <foo>abc</foo><bar>123</bar> + + +SELECT xmlforest(table_name, column_name) +FROM information_schema.columns +WHERE table_schema = 'pg_catalog'; + + xmlforest +------------------------------------&zwsp;----------------------------------- + <table_name>pg_authid</table_name>&zwsp;<column_name>rolname</column_name> + <table_name>pg_authid</table_name>&zwsp;<column_name>rolsuper</column_name> + ... + + + As seen in the second example, the element name can be omitted if + the content value is a column reference, in which case the column + name is used by default. Otherwise, a name must be specified. + + + + Element names that are not valid XML names are escaped as shown + for xmlelement above. Similarly, content + data is escaped to make valid XML content, unless it is already + of type xml. + + + + Note that XML forests are not valid XML documents if they consist + of more than one element, so it might be useful to wrap + xmlforest expressions in + xmlelement. + + + + + <literal>xmlpi</literal> + + + xmlpi + + + +xmlpi ( NAME name , content ) xml + + + + The xmlpi expression creates an XML + processing instruction. + As for xmlelement, + the name must be a simple identifier, while + the content expression can have any data type. + The content, if present, must not contain the + character sequence ?>. 
+ + + + Example: + +]]> + + + + + <literal>xmlroot</literal> + + + xmlroot + + + +xmlroot ( xml, VERSION {text|NO VALUE} , STANDALONE {YES|NO|NO VALUE} ) xml + + + + The xmlroot expression alters the properties + of the root node of an XML value. If a version is specified, + it replaces the value in the root node's version declaration; if a + standalone setting is specified, it replaces the value in the + root node's standalone declaration. + + + +abc'), + version '1.0', standalone yes); + + xmlroot +---------------------------------------- + + abc +]]> + + + + + <literal>xmlagg</literal> + + + xmlagg + + + +xmlagg ( xml ) xml + + + + The function xmlagg is, unlike the other + functions described here, an aggregate function. It concatenates the + input values to the aggregate function call, + much like xmlconcat does, except that concatenation + occurs across rows rather than across expressions in a single row. + See for additional information + about aggregate functions. + + + + Example: +abc'); +INSERT INTO test VALUES (2, ''); +SELECT xmlagg(x) FROM test; + xmlagg +---------------------- + abc +]]> + + + + To determine the order of the concatenation, an ORDER BY + clause may be added to the aggregate call as described in + . For example: + +abc +]]> + + + + The following non-standard approach used to be recommended + in previous versions, and may still be useful in specific + cases: + +abc +]]> + + + + + + XML Predicates + + + The expressions described in this section check properties + of xml values. + + + + <literal>IS DOCUMENT</literal> + + + IS DOCUMENT + + + +xml IS DOCUMENT boolean + + + + The expression IS DOCUMENT returns true if the + argument XML value is a proper XML document, false if it is not + (that is, it is a content fragment), or null if the argument is + null. See about the difference + between documents and content fragments. + + + + + <literal>IS NOT DOCUMENT</literal> + + + IS NOT DOCUMENT + + + +xml IS NOT DOCUMENT boolean + + + + The expression IS NOT DOCUMENT returns false if the + argument XML value is a proper XML document, true if it is not (that is, + it is a content fragment), or null if the argument is null. + + + + + <literal>XMLEXISTS</literal> + + + XMLEXISTS + + + +XMLEXISTS ( text PASSING BY {REF|VALUE} xml BY {REF|VALUE} ) boolean + + + + The function xmlexists evaluates an XPath 1.0 + expression (the first argument), with the passed XML value as its context + item. The function returns false if the result of that evaluation + yields an empty node-set, true if it yields any other value. The + function returns null if any argument is null. A nonnull value + passed as the context item must be an XML document, not a content + fragment or any non-XML value. + + + + Example: + TorontoOttawa'); + + xmlexists +------------ + t +(1 row) +]]> + + + + The BY REF and BY VALUE clauses + are accepted in PostgreSQL, but are ignored, + as discussed in . + + + + In the SQL standard, the xmlexists function + evaluates an expression in the XML Query language, + but PostgreSQL allows only an XPath 1.0 + expression, as discussed in + . + + + + + <literal>xml_is_well_formed</literal> + + + xml_is_well_formed + + + + xml_is_well_formed_document + + + + xml_is_well_formed_content + + + +xml_is_well_formed ( text ) boolean +xml_is_well_formed_document ( text ) boolean +xml_is_well_formed_content ( text ) boolean + + + + These functions check whether a text string represents + well-formed XML, returning a Boolean result. 
+ xml_is_well_formed_document checks for a well-formed + document, while xml_is_well_formed_content checks + for well-formed content. xml_is_well_formed does + the former if the configuration + parameter is set to DOCUMENT, or the latter if it is set to + CONTENT. This means that + xml_is_well_formed is useful for seeing whether + a simple cast to type xml will succeed, whereas the other two + functions are useful for seeing whether the corresponding variants of + XMLPARSE will succeed. + + + + Examples: + +'); + xml_is_well_formed +-------------------- + f +(1 row) + +SELECT xml_is_well_formed(''); + xml_is_well_formed +-------------------- + t +(1 row) + +SET xmloption TO CONTENT; +SELECT xml_is_well_formed('abc'); + xml_is_well_formed +-------------------- + t +(1 row) + +SELECT xml_is_well_formed_document('bar'); + xml_is_well_formed_document +----------------------------- + t +(1 row) + +SELECT xml_is_well_formed_document('bar'); + xml_is_well_formed_document +----------------------------- + f +(1 row) +]]> + + The last example shows that the checks include whether + namespaces are correctly matched. + + + + + + Processing XML + + + To process values of data type xml, PostgreSQL offers + the functions xpath and + xpath_exists, which evaluate XPath 1.0 + expressions, and the XMLTABLE + table function. + + + + <literal>xpath</literal> + + + XPath + + + +xpath ( xpath text, xml xml , nsarray text[] ) xml[] + + + + The function xpath evaluates the XPath 1.0 + expression xpath (given as text) + against the XML value + xml. It returns an array of XML values + corresponding to the node-set produced by the XPath expression. + If the XPath expression returns a scalar value rather than a node-set, + a single-element array is returned. + + + + The second argument must be a well formed XML document. In particular, + it must have a single root node element. + + + + The optional third argument of the function is an array of namespace + mappings. This array should be a two-dimensional text array with + the length of the second axis being equal to 2 (i.e., it should be an + array of arrays, each of which consists of exactly 2 elements). + The first element of each array entry is the namespace name (alias), the + second the namespace URI. It is not required that aliases provided in + this array be the same as those being used in the XML document itself (in + other words, both in the XML document and in the xpath + function context, aliases are local). + + + + Example: +test', + ARRAY[ARRAY['my', 'http://example.com']]); + + xpath +-------- + {test} +(1 row) +]]> + + + + To deal with default (anonymous) namespaces, do something like this: +test', + ARRAY[ARRAY['mydefns', 'http://example.com']]); + + xpath +-------- + {test} +(1 row) +]]> + + + + + <literal>xpath_exists</literal> + + + xpath_exists + + + +xpath_exists ( xpath text, xml xml , nsarray text[] ) boolean + + + + The function xpath_exists is a specialized form + of the xpath function. Instead of returning the + individual XML values that satisfy the XPath 1.0 expression, this function + returns a Boolean indicating whether the query was satisfied or not + (specifically, whether it produced any value other than an empty node-set). + This function is equivalent to the XMLEXISTS predicate, + except that it also offers support for a namespace mapping argument. 
+ + + + Example: +test', + ARRAY[ARRAY['my', 'http://example.com']]); + + xpath_exists +-------------- + t +(1 row) +]]> + + + + + <literal>xmltable</literal> + + + xmltable + + + + table function + XMLTABLE + + + +XMLTABLE ( + XMLNAMESPACES ( namespace_uri AS namespace_name , ... ), + row_expression PASSING BY {REF|VALUE} document_expression BY {REF|VALUE} + COLUMNS name { type PATH column_expression DEFAULT default_expression NOT NULL | NULL + | FOR ORDINALITY } + , ... +) setof record + + + + The xmltable expression produces a table based + on an XML value, an XPath filter to extract rows, and a + set of column definitions. + Although it syntactically resembles a function, it can only appear + as a table in a query's FROM clause. + + + + The optional XMLNAMESPACES clause gives a + comma-separated list of namespace definitions, where + each namespace_uri is a text + expression and each namespace_name is a simple + identifier. It specifies the XML namespaces used in the document and + their aliases. A default namespace specification is not currently + supported. + + + + The required row_expression argument is an + XPath 1.0 expression (given as text) that is evaluated, + passing the XML value document_expression as + its context item, to obtain a set of XML nodes. These nodes are what + xmltable transforms into output rows. No rows + will be produced if the document_expression + is null, nor if the row_expression produces + an empty node-set or any value other than a node-set. + + + + document_expression provides the context + item for the row_expression. It must be a + well-formed XML document; fragments/forests are not accepted. + The BY REF and BY VALUE clauses + are accepted but ignored, as discussed in + . + + + + In the SQL standard, the xmltable function + evaluates expressions in the XML Query language, + but PostgreSQL allows only XPath 1.0 + expressions, as discussed in + . + + + + The required COLUMNS clause specifies the + column(s) that will be produced in the output table. + See the syntax summary above for the format. + A name is required for each column, as is a data type + (unless FOR ORDINALITY is specified, in which case + type integer is implicit). The path, default and + nullability clauses are optional. + + + + A column marked FOR ORDINALITY will be populated + with row numbers, starting with 1, in the order of nodes retrieved from + the row_expression's result node-set. + At most one column may be marked FOR ORDINALITY. + + + + + XPath 1.0 does not specify an order for nodes in a node-set, so code + that relies on a particular order of the results will be + implementation-dependent. Details can be found in + . + + + + + The column_expression for a column is an + XPath 1.0 expression that is evaluated for each row, with the current + node from the row_expression result as its + context item, to find the value of the column. If + no column_expression is given, then the + column name is used as an implicit path. + + + + If a column's XPath expression returns a non-XML value (which is limited + to string, boolean, or double in XPath 1.0) and the column has a + PostgreSQL type other than xml, the column will be set + as if by assigning the value's string representation to the PostgreSQL + type. (If the value is a boolean, its string representation is taken + to be 1 or 0 if the output + column's type category is numeric, otherwise true or + false.) 
+ + + + If a column's XPath expression returns a non-empty set of XML nodes + and the column's PostgreSQL type is xml, the column will + be assigned the expression result exactly, if it is of document or + content form. + + + A result containing more than one element node at the top level, or + non-whitespace text outside of an element, is an example of content form. + An XPath result can be of neither form, for example if it returns an + attribute node selected from the element that contains it. Such a result + will be put into content form with each such disallowed node replaced by + its string value, as defined for the XPath 1.0 + string function. + + + + + + A non-XML result assigned to an xml output column produces + content, a single text node with the string value of the result. + An XML result assigned to a column of any other type may not have more than + one node, or an error is raised. If there is exactly one node, the column + will be set as if by assigning the node's string + value (as defined for the XPath 1.0 string function) + to the PostgreSQL type. + + + + The string value of an XML element is the concatenation, in document order, + of all text nodes contained in that element and its descendants. The string + value of an element with no descendant text nodes is an + empty string (not NULL). + Any xsi:nil attributes are ignored. + Note that the whitespace-only text() node between two non-text + elements is preserved, and that leading whitespace on a text() + node is not flattened. + The XPath 1.0 string function may be consulted for the + rules defining the string value of other XML node types and non-XML values. + + + + The conversion rules presented here are not exactly those of the SQL + standard, as discussed in . + + + + If the path expression returns an empty node-set + (typically, when it does not match) + for a given row, the column will be set to NULL, unless + a default_expression is specified; then the + value resulting from evaluating that expression is used. + + + + A default_expression, rather than being + evaluated immediately when xmltable is called, + is evaluated each time a default is needed for the column. + If the expression qualifies as stable or immutable, the repeat + evaluation may be skipped. + This means that you can usefully use volatile functions like + nextval in + default_expression. + + + + Columns may be marked NOT NULL. If the + column_expression for a NOT + NULL column does not match anything and there is + no DEFAULT or + the default_expression also evaluates to null, + an error is reported. 
+ + + + Examples: + + + AU + Australia + + + JP + Japan + Shinzo Abe + 145935 + + + SG + Singapore + 697 + + +$$ AS data; + +SELECT xmltable.* + FROM xmldata, + XMLTABLE('//ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + ordinality FOR ORDINALITY, + "COUNTRY_NAME" text, + country_id text PATH 'COUNTRY_ID', + size_sq_km float PATH 'SIZE[@unit = "sq_km"]', + size_other text PATH + 'concat(SIZE[@unit!="sq_km"], " ", SIZE[@unit!="sq_km"]/@unit)', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); + + id | ordinality | COUNTRY_NAME | country_id | size_sq_km | size_other | premier_name +----+------------+--------------+------------+------------+--------------+--------------- + 1 | 1 | Australia | AU | | | not specified + 5 | 2 | Japan | JP | | 145935 sq_mi | Shinzo Abe + 6 | 3 | Singapore | SG | 697 | | not specified +]]> + + The following example shows concatenation of multiple text() nodes, + usage of the column name as XPath filter, and the treatment of whitespace, + XML comments and processing instructions: + + + Hello2a2 bbbxxxCC + +$$ AS data; + +SELECT xmltable.* + FROM xmlelements, XMLTABLE('/root' PASSING data COLUMNS element text); + element +------------------------- + Hello2a2 bbbxxxCC +]]> + + + + The following example illustrates how + the XMLNAMESPACES clause can be used to specify + a list of namespaces + used in the XML document as well as in the XPath expressions: + + + + + +'::xml) +) +SELECT xmltable.* + FROM XMLTABLE(XMLNAMESPACES('http://example.com/myns' AS x, + 'http://example.com/b' AS "B"), + '/x:example/x:item' + PASSING (SELECT data FROM xmldata) + COLUMNS foo int PATH '@foo', + bar int PATH '@B:bar'); + foo | bar +-----+----- + 1 | 2 + 3 | 4 + 4 | 5 +(3 rows) +]]> + + + + + + Mapping Tables to XML + + + XML export + + + + The following functions map the contents of relational tables to + XML values. They can be thought of as XML export functionality: + +table_to_xml ( table regclass, nulls boolean, + tableforest boolean, targetns text ) xml +query_to_xml ( query text, nulls boolean, + tableforest boolean, targetns text ) xml +cursor_to_xml ( cursor refcursor, count integer, nulls boolean, + tableforest boolean, targetns text ) xml + + + + + table_to_xml maps the content of the named + table, passed as parameter table. The + regclass type accepts strings identifying tables using the + usual notation, including optional schema qualification and + double quotes (see for details). + query_to_xml executes the + query whose text is passed as parameter + query and maps the result set. + cursor_to_xml fetches the indicated number of + rows from the cursor specified by the parameter + cursor. This variant is recommended if + large tables have to be mapped, because the result value is built + up in memory by each function. + + + + If tableforest is false, then the resulting + XML document looks like this: + + + data + data + + + + ... + + + ... + +]]> + + If tableforest is true, the result is an + XML content fragment that looks like this: + + data + data + + + + ... + + +... +]]> + + If no table name is available, that is, when mapping a query or a + cursor, the string table is used in the first + format, row in the second format. + + + + The choice between these formats is up to the user. The first + format is a proper XML document, which will be important in many + applications. The second format tends to be more useful in the + cursor_to_xml function if the result values are to be + reassembled into one document later on. 
The functions for + producing XML content discussed above, in particular + xmlelement, can be used to alter the results + to taste. + + + + The data values are mapped in the same way as described for the + function xmlelement above. + + + + The parameter nulls determines whether null + values should be included in the output. If true, null values in + columns are represented as: + +]]> + where xsi is the XML namespace prefix for XML + Schema Instance. An appropriate namespace declaration will be + added to the result value. If false, columns containing null + values are simply omitted from the output. + + + + The parameter targetns specifies the + desired XML namespace of the result. If no particular namespace + is wanted, an empty string should be passed. + + + + The following functions return XML Schema documents describing the + mappings performed by the corresponding functions above: + +table_to_xmlschema ( table regclass, nulls boolean, + tableforest boolean, targetns text ) xml +query_to_xmlschema ( query text, nulls boolean, + tableforest boolean, targetns text ) xml +cursor_to_xmlschema ( cursor refcursor, nulls boolean, + tableforest boolean, targetns text ) xml + + It is essential that the same parameters are passed in order to + obtain matching XML data mappings and XML Schema documents. + + + + The following functions produce XML data mappings and the + corresponding XML Schema in one document (or forest), linked + together. They can be useful where self-contained and + self-describing results are wanted: + +table_to_xml_and_xmlschema ( table regclass, nulls boolean, + tableforest boolean, targetns text ) xml +query_to_xml_and_xmlschema ( query text, nulls boolean, + tableforest boolean, targetns text ) xml + + + + + In addition, the following functions are available to produce + analogous mappings of entire schemas or the entire current + database: + +schema_to_xml ( schema name, nulls boolean, + tableforest boolean, targetns text ) xml +schema_to_xmlschema ( schema name, nulls boolean, + tableforest boolean, targetns text ) xml +schema_to_xml_and_xmlschema ( schema name, nulls boolean, + tableforest boolean, targetns text ) xml + +database_to_xml ( nulls boolean, + tableforest boolean, targetns text ) xml +database_to_xmlschema ( nulls boolean, + tableforest boolean, targetns text ) xml +database_to_xml_and_xmlschema ( nulls boolean, + tableforest boolean, targetns text ) xml + + + These functions ignore tables that are not readable by the current user. + The database-wide functions additionally ignore schemas that the current + user does not have USAGE (lookup) privilege for. + + + + Note that these potentially produce a lot of data, which needs to + be built up in memory. When requesting content mappings of large + schemas or databases, it might be worthwhile to consider mapping the + tables separately instead, possibly even through a cursor. + + + + The result of a schema content mapping looks like this: + + + +table1-mapping + +table2-mapping + +... + +]]> + + where the format of a table mapping depends on the + tableforest parameter as explained above. + + + + The result of a database content mapping looks like this: + + + + + ... + + + + ... + + +... + +]]> + + where the schema mapping is as above. + + + + As an example of using the output produced by these functions, + shows an XSLT stylesheet that + converts the output of + table_to_xml_and_xmlschema to an HTML + document containing a tabular rendition of the table data. 
In a + similar manner, the results from these functions can be + converted into other XML-based formats. + + + + XSLT Stylesheet for Converting SQL/XML Output to HTML + + + + + + + + + + + + + <xsl:value-of select="name(current())"/> + + + + + + + + + + + + + + + + +
+ + +
+ +
+]]>
+
+
+
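+
+   As a quick illustration of the export functions discussed above
+   (mytable is a hypothetical table name), a single self-describing
+   document can be produced with:
+
+SELECT table_to_xml_and_xmlschema('mytable'::regclass, true, false, '');
+
+   Passing true for nulls includes null columns as xsi:nil elements,
+   false for tableforest yields one enclosing document, and the empty
+   string requests no target namespace.
+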
diff --git a/doc/src/sgml/func/func.sgml b/doc/src/sgml/func/func.sgml new file mode 100644 index 0000000000000..f351ef53f63d4 --- /dev/null +++ b/doc/src/sgml/func/func.sgml @@ -0,0 +1,84 @@ + + + + Functions and Operators + + + function + + + + operator + + + + PostgreSQL provides a large number of + functions and operators for the built-in data types. This chapter + describes most of them, although additional special-purpose functions + appear in relevant sections of the manual. Users can also + define their own functions and operators, as described in + . The + psql commands \df and + \do can be used to list all + available functions and operators, respectively. + + + + The notation used throughout this chapter to describe the argument and + result data types of a function or operator is like this: + +repeat ( text, integer ) text + + which says that the function repeat takes one text and + one integer argument and returns a result of type text. The right arrow + is also used to indicate the result of an example, thus: + +repeat('Pg', 4) PgPgPgPg + + + + + If you are concerned about portability then note that most of + the functions and operators described in this chapter, with the + exception of the most trivial arithmetic and comparison operators + and some explicitly marked functions, are not specified by the + SQL standard. Some of this extended functionality + is present in other SQL database management + systems, and in many cases this functionality is compatible and + consistent between the various implementations. + + + +&func-logical; +&func-comparison; +&func-math; +&func-string; +&func-binarystring; +&func-bitstring; +&func-matching; +&func-formatting; +&func-datetime; +&func-enum; +&func-geometry; +&func-net; +&func-textsearch; +&func-uuid; +&func-xml; +&func-json; +&func-sequence; +&func-conditional; +&func-array; +&func-range; +&func-aggregate; +&func-window; +&func-merge-support; +&func-subquery; +&func-comparisons; +&func-srf; +&func-info; +&func-admin; +&func-trigger; +&func-event-triggers; +&func-statistics; + + diff --git a/doc/src/sgml/glossary.sgml b/doc/src/sgml/glossary.sgml index b88cac598e901..8651f0cdb9198 100644 --- a/doc/src/sgml/glossary.sgml +++ b/doc/src/sgml/glossary.sgml @@ -1419,11 +1419,15 @@ Relation - The generic term for all objects in a - database - that have a name and a list of - attributes - defined in a specific order. + Mathematically, a relation is a set of + tuples; + this is the sense meant in the term "relational database". + + + + In PostgreSQL, "relation" is commonly used to + mean an SQL object + that has a name and a list of attributes defined in a specific order. Tables, sequences, views, @@ -1431,15 +1435,14 @@ materialized views, composite types, and indexes are all relations. + A relation in this sense is a container or a descriptor for a set of tuples. + - More generically, a relation is a set of tuples; for example, - the result of a query is also a relation. - - - In PostgreSQL, - Class is an archaic synonym for - relation. + Class is an alternative but archaic term. + The system catalog + pg_class + holds an entry for each PostgreSQL relation. diff --git a/doc/src/sgml/installation.sgml b/doc/src/sgml/installation.sgml index 8e5da767c48b2..a4ad80a678211 100644 --- a/doc/src/sgml/installation.sgml +++ b/doc/src/sgml/installation.sgml @@ -71,10 +71,9 @@ - You need an ISO/ANSI C compiler (at least - C99-compliant). 
Recent - versions of GCC are recommended, but - PostgreSQL is known to build using a wide variety + You need a C compiler that supports at least C11. Recent versions of + GCC are recommended, but + PostgreSQL is known to build using a variety of compilers from different vendors. diff --git a/doc/src/sgml/logical-replication.sgml b/doc/src/sgml/logical-replication.sgml index fcac55aefe665..9ccd5ec500601 100644 --- a/doc/src/sgml/logical-replication.sgml +++ b/doc/src/sgml/logical-replication.sgml @@ -542,8 +542,8 @@ it manually before the subscription can be activated. The steps to create the slot and activate the subscription are shown in the following examples. These examples specify the standard logical decoding output plugin - (pgoutput), which is what the built-in logical - replication uses. + (), + which is what the built-in logical replication uses.
First, create a publication for the examples to use. @@ -1804,11 +1804,27 @@ Publications:
+ + update_deleted + + + The tuple to be updated was concurrently deleted by another origin. The + update will simply be skipped in this scenario. Note that this conflict + can only be detected when + track_commit_timestamp + and retain_dead_tuples + are enabled. If a tuple cannot be found because the table has been + truncated, only an update_missing conflict will + arise. Likewise, if the tuple was deleted by the same origin, an + update_missing conflict will arise. + + + + update_missing - The tuple to be updated was not found. The update will simply be + The row to be updated was not found. The update will simply be skipped in this scenario. @@ -1829,7 +1845,7 @@ Publications: delete_missing - The tuple to be deleted was not found. The delete will simply be + The row to be deleted was not found. The delete will simply be skipped in this scenario. @@ -1863,8 +1879,8 @@ DETAIL: detailed_explanation. where detail_values is one of: Key (column_name , ...)=(column_value , ...) - existing local tuple (column_name , ...)=(column_value , ...) - remote tuple (column_name , ...)=(column_value , ...) + existing local row (column_name , ...)=(column_value , ...) + remote row (column_name , ...)=(column_value , ...) replica identity {(column_name , ...)=(column_value , ...) | full (column_name , ...)=(column_value , ...)}
ERROR: conflict detected on relation "public.test": conflict=insert_exists DETAIL: Key already exists in unique index "t_pkey", which was modified locally in transaction 740 at 2024-06-26 10:47:04.727375+08. -Key (c)=(1); existing local tuple (1, 'local'); remote tuple (1, 'remote'). +Key (c)=(1); existing local row (1, 'local'); remote row (1, 'remote'). CONTEXT: processing remote data for replication origin "pg_16395" during "INSERT" for replication target relation "public.test" in transaction 725 finished at 0/014C0378 The LSN of the transaction that contains the change violating the constraint and @@ -2157,8 +2173,8 @@ CONTEXT: processing remote data for replication origin "pg_16395" during "INSER implemented by walsender and apply processes. The walsender process starts logical decoding (described in ) of the WAL and loads the standard - logical decoding output plugin (pgoutput). The plugin - transforms the changes read + logical decoding output plugin (). + The plugin transforms the changes read from WAL to the logical replication protocol (see ) and filters the data according to the publication specification. The data is then continuously diff --git a/doc/src/sgml/logicaldecoding.sgml b/doc/src/sgml/logicaldecoding.sgml index 593f784b69dcb..b803a819cf1f2 100644 --- a/doc/src/sgml/logicaldecoding.sgml +++ b/doc/src/sgml/logicaldecoding.sgml @@ -290,7 +290,7 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU A logical slot will emit each change just once in normal operation. The current position of each slot is persisted only at checkpoint, so in - the case of a crash the slot may return to an earlier LSN, which will + the case of a crash the slot might return to an earlier LSN, which will then cause recent changes to be sent again when the server restarts. Logical decoding clients are responsible for avoiding ill effects from handling the same message more than once. Clients may wish to record @@ -409,7 +409,7 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU should be used with caution. Unlike automatic synchronization, it does not include cyclic retries, making it more prone to synchronization failures, particularly during initial sync scenarios where the required WAL files - or catalog rows for the slot may have already been removed or are at risk + or catalog rows for the slot might have already been removed or are at risk of being removed on the standby. In contrast, automatic synchronization via sync_replication_slots provides continuous slot updates, enabling seamless failover and supporting high availability. @@ -420,18 +420,18 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU When slot synchronization is configured as recommended, and the initial synchronization is performed either automatically or - manually via pg_sync_replication_slot, the standby can persist the - synchronized slot only if the following condition is met: The logical - replication slot on the primary must retain WALs and system catalog - rows that are still available on the standby. This ensures data + manually via pg_sync_replication_slots, the standby + can persist the synchronized slot only if the following condition is met: + The logical replication slot on the primary must retain WALs and system + catalog rows that are still available on the standby. This ensures data integrity and allows logical replication to continue smoothly after promotion. 
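As a hedged sketch of the manual path just described (the slot name is taken from the log example below; prerequisites such as a properly configured standby connection are assumed):

-- On the primary: create a logical slot with failover enabled (the
-- fifth argument), making it eligible for synchronization.
SELECT pg_create_logical_replication_slot('failover_slot', 'pgoutput',
                                          false, false, true);

-- On the standby: perform a one-off manual synchronization; automatic
-- synchronization would instead use sync_replication_slots = on.
SELECT pg_sync_replication_slots();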
If the required WALs or catalog rows have already been purged from the standby, the slot will not be persisted to avoid data loss. In such cases, the following log message may appear: - LOG: could not synchronize replication slot "failover_slot" - DETAIL: Synchronization could lead to data loss as the remote slot needs WAL at LSN 0/03003F28 and catalog xmin 754, but the standby has LSN 0/03003F28 and catalog xmin 756 +LOG: could not synchronize replication slot "failover_slot" +DETAIL: Synchronization could lead to data loss, because the remote slot needs WAL at LSN 0/03003F28 and catalog xmin 754, but the standby has LSN 0/03003F28 and catalog xmin 756. If the logical replication slot is actively used by a consumer, no manual intervention is needed; the slot will advance automatically, @@ -574,6 +574,170 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU Logical Decoding Output Plugins + + + PostgreSQL provides two logical decoding + output plugins, and + . You can also develop custom output plugins + (see for details). + + + + pgoutput — Standard Logical Decoding Output Plugin + + + pgoutput + + + + pgoutput is the standard logical decoding output + plugin provided by PostgreSQL. + It's used for the built-in + logical replication. + + + + Options + + + + proto_version (integer) + + + Specifies the protocol version. + Currently versions 1, 2, + 3, and 4 are supported. A valid + version is required. + + + Version 2 is supported on server version 14 + and above, and is required when streaming + is set to on to stream large in-progress + transactions. + + + Version 3 is supported on server version 15 + and above, and is required when two_phase + is enabled to stream two-phase commits. + + + Version 4 is supported on server version 16 + and above, and is required when streaming + is set to parallel to stream large in-progress + transactions to be applied in parallel. + + + + + + publication_names (string) + + + A comma-separated list of publication names to subscribe to. + The individual publication names are treated + as standard object names and can be quoted the same as needed. + At least one publication name is required. + + + + + + binary (boolean) + + + Enables binary transfer mode. Binary mode is faster + than the text mode but slightly less robust. + The default is off. + + + + + + messages (boolean) + + + Enables sending the messages that are written by + pg_logical_emit_message. + The default is off. + + + + + + streaming (enum) + + + Enables streaming of in-progress transactions. Valid values are + off (the default), on and + parallel. + + + When set to off, pgoutput + fully decodes a transaction before sending it as a whole. + This mode works with any protocol version. + + + When set to on, pgoutput + streams large in-progress transactions. + This requires protocol version 2 or higher. + + + When set to parallel, pgoutput + streams large in-progress transactions and also sends + extra information in some messages to support parallel processing. + This requires protocol version 4 or higher. + + + + + + two_phase (boolean) + + + Enables sending two-phase transactions. + Minimum protocol version 3 is required to turn it on. + The default is off. + + + + + + origin (enum) + + + Specifies whether to send changes by their origin. Possible values are + none to only send the changes that have no origin + associated, or any + to send the changes regardless of their origin. 
This can be used + to avoid loops (infinite replication of the same data) among + replication nodes. + The default is any. + + + + + + + + + Notes + + + pgoutput produces binary output, + so functions expecting textual data ( + pg_logical_slot_peek_changes and + pg_logical_slot_get_changes) + cannot be used with it. Use + pg_logical_slot_peek_binary_changes or + pg_logical_slot_get_binary_changes + instead. + + + + + + + Writing Logical Decoding Output Plugins An example output plugin can be found in the diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml index 823afe1b30b22..3f4a27a736e27 100644 --- a/doc/src/sgml/monitoring.sgml +++ b/doc/src/sgml/monitoring.sgml @@ -2223,6 +2223,17 @@ description | Waiting for a newly initialized WAL file to reach durable storage
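Tying the pgoutput options and the note above together, a minimal hedged sketch (the slot and publication names are illustrative, and the slot is assumed to have been created with pgoutput):

-- Options are passed as alternating name/value pairs; proto_version and
-- publication_names are required. Because pgoutput produces binary
-- output, the *_binary_changes functions must be used.
SELECT * FROM pg_logical_slot_peek_binary_changes(
    'regression_slot', NULL, NULL,
    'proto_version', '1',
    'publication_names', 'mypub');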
+ + + confl_update_deleted bigint + + + Number of times the tuple to be updated was concurrently deleted by + another source during the application of changes. See + for details about this conflict. + + + confl_update_missing bigint @@ -6780,6 +6791,16 @@ FROM pg_stat_get_backend_idset() AS backendid; advances when the phase is streaming database files. + + + + backup_type text + + + Backup type. Either full or + incremental. + + diff --git a/doc/src/sgml/pageinspect.sgml b/doc/src/sgml/pageinspect.sgml index 1292933366555..f5014787c783b 100644 --- a/doc/src/sgml/pageinspect.sgml +++ b/doc/src/sgml/pageinspect.sgml @@ -741,7 +741,7 @@ test=# SELECT first_tid, nbytes, tids[0:5] AS some_tids For example: test=# SELECT * FROM gist_page_opaque_info(get_raw_page('test_gist_idx', 2)); - lsn | nsn | rightlink | flags + lsn | nsn | rightlink | flags ------------+------------+-----------+-------- 0/0B5FE088 | 0/00000000 | 1 | {leaf} (1 row) diff --git a/doc/src/sgml/pgstatstatements.sgml b/doc/src/sgml/pgstatstatements.sgml index 7baa07dcdbf7f..d753de5836efb 100644 --- a/doc/src/sgml/pgstatstatements.sgml +++ b/doc/src/sgml/pgstatstatements.sgml @@ -554,6 +554,24 @@ + + + generic_plan_calls bigint + + + Number of times the statement has been executed using a generic plan + + + + + + custom_plan_calls bigint + + + Number of times the statement has been executed using a custom plan + + + stats_since timestamp with time zone diff --git a/doc/src/sgml/plpython.sgml b/doc/src/sgml/plpython.sgml index cb065bf5f88db..27c4467ba7da3 100644 --- a/doc/src/sgml/plpython.sgml +++ b/doc/src/sgml/plpython.sgml @@ -662,6 +662,14 @@ $$ LANGUAGE plpython3u; in PL/Python + + PL/Python can be used to define trigger + functions. + PostgreSQL requires that a function that is to + be called as a trigger must be declared as a function with no arguments and + a return type of trigger. + + When a function is used as a trigger, the dictionary TD contains trigger-related values: @@ -769,6 +777,74 @@ $$ LANGUAGE plpython3u; + + Event Trigger Functions + + + event trigger + in PL/Python + + + + PL/Python can be used to define event triggers + (see also ). + PostgreSQL requires that a function that is to + be called as an event trigger must be declared as a function with no + arguments and a return type of event_trigger. + + + + When a function is used as an event trigger, the dictionary + TD contains trigger-related values: + + + + TD["event"] + + + The event the trigger was fired for, as a string, for example + ddl_command_start. + + + + + + TD["tag"] + + + The command tag for which the trigger was fired, as a string, for + example DROP TABLE. + + + + + + + + shows an example of an + event trigger function in PL/Python. + + + + A <application>PL/Python</application> Event Trigger Function + + + This example trigger simply raises a NOTICE message + each time a supported command is executed. + + + +CREATE OR REPLACE FUNCTION pysnitch() RETURNS event_trigger +LANGUAGE plpython3u +AS $$ + plpy.notice("TD[event] => " + TD["event"] + " ; TD[tag] => " + TD["tag"]); +$$; + +CREATE EVENT TRIGGER pysnitch ON ddl_command_start EXECUTE FUNCTION pysnitch(); + + + + Database Access diff --git a/doc/src/sgml/protocol.sgml b/doc/src/sgml/protocol.sgml index b115884acb346..b5395604fb8b7 100644 --- a/doc/src/sgml/protocol.sgml +++ b/doc/src/sgml/protocol.sgml @@ -537,6 +537,11 @@ The frontend should not respond to this message, but should continue listening for a ReadyForQuery message. 
+ + The PostgreSQL server will always send this + message, but some third party backend implementations of the protocol + that don't support query cancellation are known not to. + @@ -2550,8 +2555,8 @@ psql "dbname=postgres replication=database" -c "IDENTIFY_SYSTEM;" - - XLogData (B) + + WALData (B) @@ -2599,11 +2604,11 @@ psql "dbname=postgres replication=database" -c "IDENTIFY_SYSTEM;" - A single WAL record is never split across two XLogData messages. + A single WAL record is never split across two WALData messages. When a WAL record crosses a WAL page boundary, and is therefore already split using continuation records, it can be split at the page boundary. In other words, the first main WAL record and its - continuation records can be sent in different XLogData messages. + continuation records can be sent in different WALData messages. @@ -2950,7 +2955,7 @@ psql "dbname=postgres replication=database" -c "IDENTIFY_SYSTEM;" The name of an option passed to the slot's logical decoding output - plugin. See for + plugin. See for options that are accepted by the standard (pgoutput) plugin. @@ -3518,134 +3523,15 @@ psql "dbname=postgres replication=database" -c "IDENTIFY_SYSTEM;" the physical streaming replication protocol. - - PostgreSQL logical decoding supports output - plugins. pgoutput is the standard one used for - the built-in logical replication. - - Logical Streaming Replication Parameters - Using the START_REPLICATION command, - pgoutput accepts the following options: - - - - - proto_version - - - - Protocol version. Currently versions 1, 2, - 3, and 4 are supported. A valid - version is required. - - - Version 2 is supported only for server version 14 - and above, and it allows streaming of large in-progress transactions. - - - Version 3 is supported only for server version 15 - and above, and it allows streaming of two-phase commits. - - - Version 4 is supported only for server version 16 - and above, and it allows streams of large in-progress transactions to - be applied in parallel. - - - - - - - publication_names - - - - Comma-separated list of publication names for which to subscribe - (receive changes). The individual publication names are treated - as standard objects names and can be quoted the same as needed. - At least one publication name is required. - - - - - - - binary - - - - Boolean option to use binary transfer mode. Binary mode is faster - than the text mode but slightly less robust. - The default is off. - - - - - - - messages - - - - Boolean option to enable sending the messages that are written - by pg_logical_emit_message. - The default is off. - - - - - - - streaming - - - - Option to enable streaming of in-progress transactions. Valid values are - off (the default), on and - parallel. The setting parallel - enables sending extra information with some messages to be used for - parallelization. Minimum protocol version 2 is required to turn it - on. Minimum protocol version 4 is required for the - parallel value. - - - - - - - two_phase - - - - Boolean option to enable two-phase transactions. Minimum protocol - version 3 is required to turn it on. - The default is off. - - - - - - - origin - - - - Option to send changes by their origin. Possible values are - none to only send the changes that have no origin - associated, or any - to send the changes regardless of their origin. This can be used - to avoid loops (infinite replication of the same data) among - replication nodes. - The default is any. 
- - - - - + The START_REPLICATION command can pass + options to the logical decoding output plugin associated + with the specified replication slot. + See for options + that are accepted by the standard (pgoutput) plugin. @@ -4250,7 +4136,7 @@ psql "dbname=postgres replication=database" -c "IDENTIFY_SYSTEM;" message, indicated by the length field. - The maximum key length is 256 bytes. The + The minimum and maximum key lengths are 4 and 256 bytes, respectively. The PostgreSQL server only sends keys up to 32 bytes, but the larger maximum size allows for future server versions, as well as connection poolers and other middleware, to use diff --git a/doc/src/sgml/ref/alter_subscription.sgml b/doc/src/sgml/ref/alter_subscription.sgml index d48cdc76bd34d..12f72ba31675c 100644 --- a/doc/src/sgml/ref/alter_subscription.sgml +++ b/doc/src/sgml/ref/alter_subscription.sgml @@ -236,8 +236,9 @@ ALTER SUBSCRIPTION name RENAME TO < run_as_owner, origin, failover, - two_phase, and - retain_dead_tuples. + two_phase, + retain_dead_tuples, and + max_retention_duration. Only a superuser can set password_required = false. diff --git a/doc/src/sgml/ref/alter_table.sgml b/doc/src/sgml/ref/alter_table.sgml index 1e4f26c13f650..8867da6c69307 100644 --- a/doc/src/sgml/ref/alter_table.sgml +++ b/doc/src/sgml/ref/alter_table.sgml @@ -210,6 +210,8 @@ WITH ( MODULUS numeric_literal, REM When this form is used, the column's statistics are removed, so running ANALYZE on the table afterwards is recommended. + For a virtual generated column, ANALYZE + is not necessary because such columns never have statistics. @@ -240,9 +242,10 @@ WITH ( MODULUS numeric_literal, REM provided none of the records in the table contain a NULL value for the column. Ordinarily this is checked during the ALTER TABLE by scanning the - entire table; however, if a valid CHECK constraint is - found which proves no NULL can exist, then the - table scan is skipped. + entire table, unless NOT VALID is specified; + however, if a valid CHECK constraint exists + (and is not dropped in the same command) which proves no + NULL can exist, then the table scan is skipped. If a column has an invalid not-null constraint, SET NOT NULL validates it. @@ -270,6 +273,15 @@ WITH ( MODULUS numeric_literal, REM in a stored generated column is rewritten and all the future changes will apply the new generation expression. + + + When this form is used on a stored generated column, its statistics + are removed, so running + ANALYZE + on the table afterwards is recommended. + For a virtual generated column, ANALYZE + is not necessary because such columns never have statistics. + diff --git a/doc/src/sgml/ref/create_subscription.sgml b/doc/src/sgml/ref/create_subscription.sgml index b8cd15f32806b..fc3144373110f 100644 --- a/doc/src/sgml/ref/create_subscription.sgml +++ b/doc/src/sgml/ref/create_subscription.sgml @@ -445,10 +445,11 @@ CREATE SUBSCRIPTION subscription_namefalse. - If set to true, a physical replication slot named - pg_conflict_detection will be - created on the subscriber to prevent the conflict information from - being removed. + If set to true, the detection of + is enabled, and a physical + replication slot named pg_conflict_detection + is created on the subscriber to prevent the information for detecting + conflicts from being removed. 
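A hedged sketch of enabling this option (connection string, subscription, and publication names are illustrative):

-- On the subscriber: request retention of conflict-detection information.
-- Detecting update_deleted conflicts additionally requires
-- track_commit_timestamp = on.
CREATE SUBSCRIPTION mysub
    CONNECTION 'host=publisher dbname=postgres'
    PUBLICATION mypub
    WITH (retain_dead_tuples = true);

-- The physical slot created to hold back removal of that information:
SELECT slot_name, slot_type FROM pg_replication_slots
    WHERE slot_name = 'pg_conflict_detection';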
@@ -520,6 +521,47 @@ CREATE SUBSCRIPTION subscription_name + + + max_retention_duration (integer) + + + Maximum duration in milliseconds for which this subscription's apply worker + is allowed to retain the information useful for conflict detection when + retain_dead_tuples is enabled. The default value + is 0, indicating that the information is retained + until it is no longer needed for detection purposes. + + + The information useful for conflict detection is no longer retained if + all apply workers associated with the subscriptions, where + retain_dead_tuples is enabled, confirm that the + retention duration has exceeded the + max_retention_duration set within the corresponding + subscription. The retention will not be automatically resumed unless a + new subscription is created with retain_dead_tuples = + true, or the user manually re-enables + retain_dead_tuples. + + + Note that overall retention will not stop if other subscriptions that + have a value greater than 0 for this parameter have not exceeded it, + or if they set this option to 0. + + + This option is effective only when + retain_dead_tuples is enabled and the apply + worker associated with the subscription is active. + + + + Note that setting a non-zero value for this option could lead to + information for conflict detection being removed prematurely, + potentially resulting in incorrect conflict detection. + + + + diff --git a/doc/src/sgml/ref/create_trigger.sgml b/doc/src/sgml/ref/create_trigger.sgml index ed6d206ae7143..0d8d463479bc1 100644 --- a/doc/src/sgml/ref/create_trigger.sgml +++ b/doc/src/sgml/ref/create_trigger.sgml @@ -197,9 +197,11 @@ CREATE [ OR REPLACE ] [ CONSTRAINT ] TRIGGER name of the rows inserted, deleted, or modified by the current SQL statement. This feature lets the trigger see a global view of what the statement did, not just one row at a time. This option is only allowed for - an AFTER trigger that is not a constraint trigger; also, if - the trigger is an UPDATE trigger, it must not specify - a column_name list. + an AFTER trigger on a plain table (not a foreign table). + The trigger should not be a constraint trigger. Also, if the trigger is + an UPDATE trigger, it must not specify + a column_name list when using + this option. OLD TABLE may only be specified once, and only for a trigger that can fire on UPDATE or DELETE; it creates a transition relation containing the before-images of all rows diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml index 2ae084b5fa6fc..fd4ecf01a0a00 100644 --- a/doc/src/sgml/ref/pg_dump.sgml +++ b/doc/src/sgml/ref/pg_dump.sgml @@ -96,6 +96,18 @@ PostgreSQL documentation light of the limitations listed below. + + + Restoring a dump causes the destination to execute arbitrary code of the + source superusers' choice. Partial dumps and partial restores do not limit + that. If the source superusers are not trusted, the dumped SQL statements + must be inspected before restoring. Non-plain-text dumps can be inspected + by using pg_restore's + option. Note that the client running the dump and restore need not trust + the source or destination superusers. + + + @@ -1252,6 +1264,29 @@ PostgreSQL documentation + + + + + Use the provided string as the psql + \restrict key in the dump output. This can only be + specified for plain-text dumps, i.e., when is + set to plain or the option + is omitted. If no restrict key is specified, + pg_dump will generate a random one as + needed. Keys may contain only alphanumeric characters. 
+ + + This option is primarily intended for testing purposes and other + scenarios that require repeatable output (e.g., comparing dump files). + It is not recommended for general use, as a malicious server with + advance knowledge of the key may be able to inject arbitrary code that + will be executed on the machine that runs + psql with the dump output. + + + + @@ -1354,6 +1389,15 @@ PostgreSQL documentation + + + + + Dump statistics. + + + + @@ -1440,33 +1484,6 @@ PostgreSQL documentation - - - - - Dump data. This is the default. - - - - - - - - - Dump schema (data definitions). This is the default. - - - - - - - - - Dump statistics. - - - - @@ -1682,7 +1699,7 @@ CREATE DATABASE foo WITH TEMPLATE template0; - If is specified, + If is specified, pg_dump will include most optimizer statistics in the resulting dump file. However, some statistics may not be included, such as those created explicitly with or diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml index 8ca68da5a5560..9f639f61db021 100644 --- a/doc/src/sgml/ref/pg_dumpall.sgml +++ b/doc/src/sgml/ref/pg_dumpall.sgml @@ -16,10 +16,7 @@ PostgreSQL documentation pg_dumpall - - - export a PostgreSQL database cluster as an SQL script or to other formats - + extract a PostgreSQL database cluster into a script file @@ -36,7 +33,7 @@ PostgreSQL documentation pg_dumpall is a utility for writing out (dumping) all PostgreSQL databases - of a cluster into an SQL script file or an archive. The output contains + of a cluster into one script file. The script file contains SQL commands that can be used as input to to restore the databases. It does this by calling for each database in the cluster. @@ -55,16 +52,11 @@ PostgreSQL documentation - Plain text SQL scripts will be written to the standard output. Use the + The SQL script will be written to the standard output. Use the / option or shell operators to redirect it into a file. - - Archives in other formats will be placed in a directory named using the - /, which is required in this case. - - pg_dumpall needs to connect several times to the PostgreSQL server (once per @@ -74,6 +66,16 @@ PostgreSQL documentation linkend="libpq-pgpass"/> for more information. + + + Restoring a dump causes the destination to execute arbitrary code of the + source superusers' choice. Partial dumps and partial restores do not limit + that. If the source superusers are not trusted, the dumped SQL statements + must be inspected before restoring. Note that the client running the dump + and restore need not trust the source or destination superusers. + + + @@ -129,85 +131,10 @@ PostgreSQL documentation Send output to the specified file. If this is omitted, the standard output is used. - Note: This option can only be omitted when is plain - - - - - - Specify the format of dump files. In plain format, all the dump data is - sent in a single text stream. This is the default. - - In all other modes, pg_dumpall first creates two files: - global.dat and map.dat, in the directory - specified by . - The first file contains global data, such as roles and tablespaces. The second - contains a mapping between database oids and names. These files are used by - pg_restore. Data for individual databases is placed in - databases subdirectory, named using the database's oid. - - - - d - directory - - - Output directory-format archives for each database, - suitable for input into pg_restore. The directory - will have database oid as its name. 
- - - - - - p - plain - - - Output a plain-text SQL script file (the default). - - - - - - c - custom - - - Output a custom-format archive for each database, - suitable for input into pg_restore. The archive - will be named dboid.dmp where dboid is the - oid of the database. - - - - - - t - tar - - - Output a tar-format archive for each database, - suitable for input into pg_restore. The archive - will be named dboid.tar where dboid is the - oid of the database. - - - - - - - Note: see for details - of how the various non plain text archives work. - - - - - @@ -674,6 +601,26 @@ exclude database PATTERN + + + + + Use the provided string as the psql + \restrict key in the dump output. If no restrict + key is specified, pg_dumpall will generate a + random one as needed. Keys may contain only alphanumeric characters. + + + This option is primarily intended for testing purposes and other + scenarios that require repeatable output (e.g., comparing dump files). + It is not recommended for general use, as a malicious server with + advance knowledge of the key may be able to inject arbitrary code that + will be executed on the machine that runs + psql with the dump output. + + + + @@ -688,6 +635,15 @@ exclude database PATTERN + + + + + Dump statistics. + + + + @@ -723,33 +679,6 @@ exclude database PATTERN - - - - - Dump data. This is the default. - - - - - - - - - Dump schema (data definitions). This is the default. - - - - - - - - - Dump statistics. - - - - @@ -961,7 +890,7 @@ exclude database PATTERN - If is specified, + If is specified, pg_dumpall will include most optimizer statistics in the resulting dump file. However, some statistics may not be included, such as those created explicitly with or diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml index b649bd3a5ae0f..a468a38361a13 100644 --- a/doc/src/sgml/ref/pg_restore.sgml +++ b/doc/src/sgml/ref/pg_restore.sgml @@ -18,9 +18,8 @@ PostgreSQL documentation pg_restore - restore PostgreSQL databases from archives - created by pg_dump or - pg_dumpall + restore a PostgreSQL database from an + archive file created by pg_dump @@ -39,14 +38,13 @@ PostgreSQL documentation pg_restore is a utility for restoring a - PostgreSQL database or cluster from an archive - created by or - in one of the non-plain-text + PostgreSQL database from an archive + created by in one of the non-plain-text formats. It will issue the commands necessary to reconstruct the - database or cluster to the state it was in at the time it was saved. The - archives also allow pg_restore to + database to the state it was in at the time it was saved. The + archive files also allow pg_restore to be selective about what is restored, or even to reorder the items - prior to being restored. The archive formats are designed to be + prior to being restored. The archive files are designed to be portable across architectures. @@ -54,17 +52,10 @@ PostgreSQL documentation pg_restore can operate in two modes. If a database name is specified, pg_restore connects to that database and restores archive contents directly into - the database. - When restoring from a dump made by pg_dumpall, - each database will be created and then the restoration will be run in that - database. - - Otherwise, when a database name is not specified, a script containing the SQL - commands necessary to rebuild the database or cluster is created and written + the database. 
Otherwise, a script containing the SQL + commands necessary to rebuild the database is created and written to a file or standard output. This script output is equivalent to - the plain text output format of pg_dump or - pg_dumpall. - + the plain text output format of pg_dump. Some of the options controlling the output are therefore analogous to pg_dump options. @@ -77,6 +68,18 @@ PostgreSQL documentation pg_restore will not be able to load the data using COPY statements. + + + + Restoring a dump causes the destination to execute arbitrary code of the + source superusers' choice. Partial dumps and partial restores do not limit + that. If the source superusers are not trusted, the dumped SQL statements + must be inspected before restoring. Non-plain-text dumps can be inspected + by using pg_restore's + option. Note that the client running the dump and restore need not trust + the source or destination superusers. + + @@ -149,8 +152,6 @@ PostgreSQL documentation commands that mention this database. Access privileges for the database itself are also restored, unless is specified. - is required when restoring multiple databases - from an archive created by pg_dumpall. @@ -246,19 +247,6 @@ PostgreSQL documentation - - - - - - Restore only global objects (roles and tablespaces), no databases. - - - This option is only relevant when restoring from an archive made using pg_dumpall. - - - - @@ -603,28 +591,6 @@ PostgreSQL documentation - - - - - Do not restore databases whose name matches - pattern. - Multiple patterns can be excluded by writing multiple - switches. The - pattern parameter is - interpreted as a pattern according to the same rules used by - psql's \d - commands (see ), - so multiple databases can also be excluded by writing wildcard - characters in the pattern. When using wildcards, be careful to - quote the pattern if needed to prevent shell wildcard expansion. - - - This option is only relevant when restoring from an archive made using pg_dumpall. - - - - @@ -842,6 +808,28 @@ PostgreSQL documentation + + + + + Use the provided string as the psql + \restrict key in the dump output. This can only be + specified for SQL script output, i.e., when the + option is used. If no restrict key is specified, + pg_restore will generate a random one as + needed. Keys may contain only alphanumeric characters. + + + This option is primarily intended for testing purposes and other + scenarios that require repeatable output (e.g., comparing dump files). + It is not recommended for general use, as a malicious server with + advance knowledge of the key may be able to inject arbitrary code that + will be executed on the machine that runs + psql with the dump output. + + + + @@ -861,6 +849,16 @@ PostgreSQL documentation + + + + + Output commands to restore statistics, if the archive contains them. + This is the default. + + + + @@ -919,36 +917,6 @@ PostgreSQL documentation - - - - - Output commands to restore data, if the archive contains them. - This is the default. - - - - - - - - - Output commands to restore schema (data definitions), if the archive - contains them. This is the default. - - - - - - - - - Output commands to restore statistics, if the archive contains them. - This is the default. 
- - - - diff --git a/doc/src/sgml/ref/pgupgrade.sgml b/doc/src/sgml/ref/pgupgrade.sgml index 5ddf3a8ae9257..356baa912991d 100644 --- a/doc/src/sgml/ref/pgupgrade.sgml +++ b/doc/src/sgml/ref/pgupgrade.sgml @@ -70,6 +70,14 @@ PostgreSQL documentation pg_upgrade supports upgrades from 9.2.X and later to the current major release of PostgreSQL, including snapshot and beta releases. + + + + Upgrading a cluster causes the destination to execute arbitrary code of the + source superusers' choice. Ensure that the source superusers are trusted + before upgrading. + + diff --git a/doc/src/sgml/ref/psql-ref.sgml b/doc/src/sgml/ref/psql-ref.sgml index 4f7b11175c671..1a339600bc48f 100644 --- a/doc/src/sgml/ref/psql-ref.sgml +++ b/doc/src/sgml/ref/psql-ref.sgml @@ -3551,6 +3551,24 @@ SELECT $1 \parse stmt1 + + \restrict restrict_key + + + Enter "restricted" mode with the provided key. In this mode, the only + allowed meta-command is \unrestrict, to exit + restricted mode. The key may contain only alphanumeric characters. + + + This command is primarily intended for use in plain-text dumps + generated by pg_dump, + pg_dumpall, and + pg_restore, but it may be useful elsewhere. + + + + + \s [ filename ] @@ -3802,6 +3820,24 @@ SELECT 1 \bind \sendpipeline + + \unrestrict restrict_key + + + Exit "restricted" mode (i.e., where all other meta-commands are + blocked), provided the specified key matches the one given to + \restrict when restricted mode was entered. + + + This command is primarily intended for use in plain-text dumps + generated by pg_dump, + pg_dumpall, and + pg_restore, but it may be useful elsewhere. + + + + + \unset name diff --git a/doc/src/sgml/ref/vacuumdb.sgml b/doc/src/sgml/ref/vacuumdb.sgml index b0680a61814cc..84c76d7350c83 100644 --- a/doc/src/sgml/ref/vacuumdb.sgml +++ b/doc/src/sgml/ref/vacuumdb.sgml @@ -282,14 +282,24 @@ PostgreSQL documentation Only analyze relations that are missing statistics for a column, index - expression, or extended statistics object. This option prevents - vacuumdb from deleting existing statistics - so that the query optimizer's choices do not become transiently worse. + expression, or extended statistics object. When used with + , this option prevents + vacuumdb from temporarily replacing existing + statistics with ones generated with lower statistics targets, thus + avoiding transiently worse query optimizer choices. This option can only be used in conjunction with or . + + Note that requires + SELECT privileges on + pg_statistic + and + pg_statistic_ext_data, + which are restricted to superusers by default. + @@ -395,6 +405,15 @@ PostgreSQL documentation Multiple tables can be vacuumed by writing multiple switches. + + If no tables are specified with the option, + vacuumdb will clean all regular tables + and materialized views in the connected database. + If or + is also specified, + it will analyze all regular tables, partitioned tables, + and materialized views (but not foreign tables). + If you specify columns, you probably have to escape the parentheses diff --git a/doc/src/sgml/regress.sgml b/doc/src/sgml/regress.sgml index bf4ffb3057636..8838fe7f0225f 100644 --- a/doc/src/sgml/regress.sgml +++ b/doc/src/sgml/regress.sgml @@ -285,75 +285,88 @@ make check-world PG_TEST_EXTRA='kerberos ldap ssl load_balance libpq_encryption' - sepgsql + libpq_encryption - Runs the test suite under contrib/sepgsql. This - requires an SELinux environment that is set up in a specific way; see - . + Runs the test src/interfaces/libpq/t/005_negotiate_encryption.pl. 
+ This opens TCP/IP listen sockets. If PG_TEST_EXTRA + also includes kerberos, additional tests that require + an MIT Kerberos installation are enabled. - ssl + load_balance - Runs the test suite under src/test/ssl. This opens TCP/IP listen sockets. + Runs the test src/interfaces/libpq/t/004_load_balance_dns.pl. + This requires editing the system hosts file and + opens TCP/IP listen sockets. - load_balance + oauth - Runs the test src/interfaces/libpq/t/004_load_balance_dns.pl. - This requires editing the system hosts file and - opens TCP/IP listen sockets. + Runs the test suite under src/test/modules/oauth_validator. + This opens TCP/IP listen sockets for a test server running HTTPS. - libpq_encryption + regress_dump_restore - Runs the test src/interfaces/libpq/t/005_negotiate_encryption.pl. - This opens TCP/IP listen sockets. If PG_TEST_EXTRA - also includes kerberos, additional tests that require - an MIT Kerberos installation are enabled. + Runs an additional test suite in + src/bin/pg_upgrade/t/002_pg_upgrade.pl which + cycles the regression database through pg_dump/ + pg_restore. Not enabled by default because it + is resource intensive. - wal_consistency_checking + sepgsql - Uses wal_consistency_checking=all while running - certain tests under src/test/recovery. Not - enabled by default because it is resource intensive. + Runs the test suite under contrib/sepgsql. This + requires an SELinux environment that is set up in a specific way; see + . - xid_wraparound + ssl - Runs the test suite under src/test/modules/xid_wraparound. - Not enabled by default because it is resource intensive. + Runs the test suite under src/test/ssl. This opens TCP/IP listen sockets. - oauth + wal_consistency_checking - Runs the test suite under src/test/modules/oauth_validator. - This opens TCP/IP listen sockets for a test server running HTTPS. + Uses wal_consistency_checking=all while running + certain tests under src/test/recovery. Not + enabled by default because it is resource intensive. + + + + + + xid_wraparound + + + Runs the test suite under src/test/modules/xid_wraparound. + Not enabled by default because it is resource intensive. diff --git a/doc/src/sgml/rules.sgml b/doc/src/sgml/rules.sgml index 8467d961fd0a0..282dcd722d495 100644 --- a/doc/src/sgml/rules.sgml +++ b/doc/src/sgml/rules.sgml @@ -60,6 +60,7 @@ SQL statement where the single parts that it is built from are stored separately. These query trees can be shown in the server log if you set the configuration parameters + debug_print_raw_parse, debug_print_parse, debug_print_rewritten, or debug_print_plan. The rule actions are also diff --git a/doc/src/sgml/sources.sgml b/doc/src/sgml/sources.sgml index fa68d4d024a93..261f19b3534b5 100644 --- a/doc/src/sgml/sources.sgml +++ b/doc/src/sgml/sources.sgml @@ -907,12 +907,12 @@ BETTER: unrecognized node type: 42 C Standard Code in PostgreSQL should only rely on language - features available in the C99 standard. That means a conforming - C99 compiler has to be able to compile postgres, at least aside + features available in the C11 standard. That means a conforming + C11 compiler has to be able to compile postgres, at least aside from a few platform dependent pieces. - A few features included in the C99 standard are, at this time, not + A few features included in the C11 standard are, at this time, not permitted to be used in core PostgreSQL code. 
This currently includes variable length arrays, intermingled declarations and code, // comments, universal @@ -924,13 +924,11 @@ BETTER: unrecognized node type: 42 features can be used, if a fallback is provided. - For example _Static_assert() and + For example typeof() and __builtin_constant_p are currently used, even though they are from newer revisions of the C standard and a GCC extension respectively. If not available - we respectively fall back to using a C99 compatible replacement that - performs the same checks, but emits rather cryptic messages and do not - use __builtin_constant_p. + we do not use them. diff --git a/doc/src/sgml/storage.sgml b/doc/src/sgml/storage.sgml index 61250799ec076..02ddfda834a2e 100644 --- a/doc/src/sgml/storage.sgml +++ b/doc/src/sgml/storage.sgml @@ -39,6 +39,8 @@ these required items, the cluster configuration files Contents of <varname>PGDATA</varname> + + @@ -743,6 +745,8 @@ There are five parts to each page. Overall Page Layout Page Layout + + diff --git a/doc/src/sgml/stylesheet-fo.xsl b/doc/src/sgml/stylesheet-fo.xsl index e7916a6a88347..aec6de7064a7b 100644 --- a/doc/src/sgml/stylesheet-fo.xsl +++ b/doc/src/sgml/stylesheet-fo.xsl @@ -14,24 +14,11 @@ 3 - - - - - - - - - - - 1.5em +0 +0 @@ -42,6 +29,8 @@ an "Unresolved ID reference found" warning during PDF builds. solid 1pt black + 0.25in + 0.25in 12pt 12pt 6pt @@ -415,5 +404,21 @@ an "Unresolved ID reference found" warning during PDF builds. + + + + + + + + + + + + diff --git a/doc/src/sgml/xfunc.sgml b/doc/src/sgml/xfunc.sgml index 2d81afce8cb9b..04bf919b34384 100644 --- a/doc/src/sgml/xfunc.sgml +++ b/doc/src/sgml/xfunc.sgml @@ -2051,8 +2051,7 @@ PG_MODULE_MAGIC_EXT( - By-value types can only be 1, 2, or 4 bytes in length - (also 8 bytes, if sizeof(Datum) is 8 on your machine). + By-value types can only be 1, 2, 4, or 8 bytes in length. You should be careful to define your types such that they will be the same size (in bytes) on all architectures. For example, the long type is dangerous because it is 4 bytes on some @@ -2165,7 +2164,7 @@ memcpy(destination->data, buffer, 40); it's considered good style to use the macro VARHDRSZ to refer to the size of the overhead for a variable-length type. Also, the length field must be set using the - SET_VARSIZE macro, not by simple assignment. + SET_VARSIZE function, not by simple assignment. @@ -3669,11 +3668,14 @@ LWLockRelease(AddinShmemInitLock); shmem_startup_hook provides a convenient place for the initialization code, but it is not strictly required that all such code - be placed in this hook. Each backend will execute the registered - shmem_startup_hook shortly after it attaches to shared - memory. Note that add-ins should still acquire + be placed in this hook. On Windows (and anywhere else where + EXEC_BACKEND is defined), each backend executes the + registered shmem_startup_hook shortly after it + attaches to shared memory, so add-ins should still acquire AddinShmemInitLock within this hook, as shown in the - example above. + example above. On other platforms, only the postmaster process executes + the shmem_startup_hook, and each backend automatically + inherits the pointers to shared memory. @@ -3760,7 +3762,7 @@ LWLockPadded *GetNamedLWLockTranche(const char *tranche_name) shmem_request_hook. 
To do so, first allocate a tranche_id by calling: -int LWLockNewTrancheId(void) +int LWLockNewTrancheId(const char *name) Next, initialize each LWLock, passing the new tranche_id as an argument: @@ -3778,17 +3780,8 @@ void LWLockInitialize(LWLock *lock, int tranche_id) - Finally, each backend using the tranche_id should - associate it with a tranche_name by calling: - -void LWLockRegisterTranche(int tranche_id, const char *tranche_name) - - - - - A complete usage example of LWLockNewTrancheId, - LWLockInitialize, and - LWLockRegisterTranche can be found in + A complete usage example of LWLockNewTrancheId and + LWLockInitialize can be found in contrib/pg_prewarm/autoprewarm.c in the PostgreSQL source tree. diff --git a/meson.build b/meson.build index 5365aaf95e64b..ab8101d67b26d 100644 --- a/meson.build +++ b/meson.build @@ -280,10 +280,6 @@ elif host_system == 'windows' # define before including for getting localtime_r() etc. on MinGW cppflags += '-D_POSIX_C_SOURCE' endif - if cc.get_id() == 'msvc' - # required for VA_ARGS_NARGS() in c.h; requires VS 2019 - cppflags += '/Zc:preprocessor' - endif export_file_format = 'win' export_file_suffix = 'def' @@ -550,6 +546,33 @@ dir_doc_extension = dir_doc / 'extension' # used, they need to be added to test_c_args as well. ############################################################### +# Do we need an option to enable C11? +c11_test = ''' +#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 201112L +# error "Compiler does not advertise C11 conformance" +#endif +''' + +if not cc.compiles(c11_test, name: 'C11') + c11_ok = false + if cc.get_id() == 'msvc' + c11_test_args = ['/std:c11'] + else + c11_test_args = ['-std=gnu11', '-std=c11'] + endif + foreach arg : c11_test_args + if cc.compiles(c11_test, name: 'C11 with @0@'.format(arg), args: [arg]) + c11_ok = true + cflags += arg + break + endif + endforeach + if not c11_ok + error('C compiler does not support C11') + endif +endif + + postgres_inc = [include_directories(postgres_inc_d)] test_lib_d = postgres_lib_d test_c_args = cppflags + cflags @@ -1704,49 +1727,6 @@ endif # Compiler tests ############################################################### -# Do we need -std=c99 to compile C99 code? We don't want to add -std=c99 -# unnecessarily, because we optionally rely on newer features. -c99_test = ''' -#include -#include -#include -#include - -struct named_init_test { - int a; - int b; -}; - -extern void structfunc(struct named_init_test); - -int main(int argc, char **argv) -{ - struct named_init_test nit = { - .a = 3, - .b = 5, - }; - - for (int loop_var = 0; loop_var < 3; loop_var++) - { - nit.a += nit.b; - } - - structfunc((struct named_init_test){1, 0}); - - return nit.a != 0; -} -''' - -if not cc.compiles(c99_test, name: 'c99', args: test_c_args) - if cc.compiles(c99_test, name: 'c99 with -std=c99', - args: test_c_args + ['-std=c99']) - test_c_args += '-std=c99' - cflags += '-std=c99' - else - error('C compiler does not support C99') - endif -endif - if host_machine.endian() == 'big' cdata.set('WORDS_BIGENDIAN', 1) endif @@ -1996,10 +1976,7 @@ if cc.links(''' cdata.set('HAVE__BUILTIN_OP_OVERFLOW', 1) endif - -# XXX: The configure.ac check for __cpuid() is broken, we don't copy that -# here. To prevent problems due to two detection methods working, stop -# checking after one. +# Check for __get_cpuid() and __cpuid(). 
if cc.links(''' #include int main(int arg, char **argv) @@ -3136,6 +3113,8 @@ gen_export_kwargs = { 'install': false, } +# command to create stamp files on all OSs +stamp_cmd = [python, '-c', 'import sys; open(sys.argv[1], "w")', '@OUTPUT0@'] ### @@ -3253,14 +3232,14 @@ subdir('src/port') frontend_common_code = declare_dependency( compile_args: ['-DFRONTEND'], include_directories: [postgres_inc], - sources: generated_headers, + sources: generated_headers_stamp, dependencies: [os_deps, zlib, zstd, lz4], ) backend_common_code = declare_dependency( compile_args: ['-DBUILDING_DLL'], include_directories: [postgres_inc], - sources: generated_headers, + sources: generated_headers_stamp, dependencies: [os_deps, zlib, zstd], ) @@ -3275,7 +3254,7 @@ shlib_code = declare_dependency( frontend_stlib_code = declare_dependency( include_directories: [postgres_inc], link_with: [common_static, pgport_static], - sources: generated_headers, + sources: generated_headers_stamp, dependencies: [os_deps, libintl], ) @@ -3283,7 +3262,7 @@ frontend_stlib_code = declare_dependency( frontend_shlib_code = declare_dependency( include_directories: [postgres_inc], link_with: [common_shlib, pgport_shlib], - sources: generated_headers, + sources: generated_headers_stamp, dependencies: [shlib_code, os_deps, libintl], ) @@ -3293,7 +3272,7 @@ frontend_shlib_code = declare_dependency( frontend_no_fe_utils_code = declare_dependency( include_directories: [postgres_inc], link_with: [common_static, pgport_static], - sources: generated_headers, + sources: generated_headers_stamp, dependencies: [os_deps, libintl], ) @@ -3308,6 +3287,7 @@ libpq_deps += [ ] libpq_oauth_deps += [ + thread_dep, libcurl, ] @@ -3320,7 +3300,7 @@ subdir('src/interfaces/libpq-oauth') frontend_code = declare_dependency( include_directories: [postgres_inc], link_with: [fe_utils, common_static, pgport_static], - sources: generated_headers, + sources: generated_headers_stamp, dependencies: [os_deps, libintl], ) @@ -3350,7 +3330,7 @@ backend_code = declare_dependency( include_directories: [postgres_inc], link_args: ldflags_be, link_with: [], - sources: generated_headers + generated_backend_headers, + sources: generated_backend_headers_stamp, dependencies: os_deps + backend_both_deps + backend_deps, ) @@ -3475,6 +3455,13 @@ installed_targets = [ ecpg_targets, ] +if oauth_flow_supported + installed_targets += [ + libpq_oauth_so, + libpq_oauth_st, + ] +endif + # all targets that require building code all_built = [ installed_targets, diff --git a/src/Makefile.global.in b/src/Makefile.global.in index 04952b533ded9..8b1b357beaa04 100644 --- a/src/Makefile.global.in +++ b/src/Makefile.global.in @@ -254,7 +254,7 @@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ PG_SYSROOT = @PG_SYSROOT@ -override CPPFLAGS := $(ICU_CFLAGS) $(LIBNUMA_CFLAGS) $(LIBURING_CFLAGS) $(CPPFLAGS) +override CPPFLAGS += $(ICU_CFLAGS) $(LIBNUMA_CFLAGS) $(LIBURING_CFLAGS) ifdef PGXS override CPPFLAGS := -I$(includedir_server) -I$(includedir_internal) $(CPPFLAGS) diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c index 4204088fa0d7d..7ff7467e462b0 100644 --- a/src/backend/access/brin/brin.c +++ b/src/backend/access/brin/brin.c @@ -1608,7 +1608,7 @@ brin_build_desc(Relation rel) opcInfoFn = index_getprocinfo(rel, keyno + 1, BRIN_PROCNUM_OPCINFO); opcinfo[keyno] = (BrinOpcInfo *) - DatumGetPointer(FunctionCall1(opcInfoFn, attr->atttypid)); + DatumGetPointer(FunctionCall1(opcInfoFn, ObjectIdGetDatum(attr->atttypid))); totalstored += opcinfo[keyno]->oi_nstored; } @@ -2262,7 +2262,7 @@ 
add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup, PointerGetDatum(bdesc), PointerGetDatum(bval), values[keyno], - nulls[keyno]); + BoolGetDatum(nulls[keyno])); /* if that returned true, we need to insert the updated tuple */ modified |= DatumGetBool(result); diff --git a/src/backend/access/brin/brin_bloom.c b/src/backend/access/brin/brin_bloom.c index 82b425ce37daa..7c3f7d454fc25 100644 --- a/src/backend/access/brin/brin_bloom.c +++ b/src/backend/access/brin/brin_bloom.c @@ -540,7 +540,7 @@ brin_bloom_add_value(PG_FUNCTION_ARGS) BrinDesc *bdesc = (BrinDesc *) PG_GETARG_POINTER(0); BrinValues *column = (BrinValues *) PG_GETARG_POINTER(1); Datum newval = PG_GETARG_DATUM(2); - bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_DATUM(3); + bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_BOOL(3); BloomOptions *opts = (BloomOptions *) PG_GET_OPCLASS_OPTIONS(); Oid colloid = PG_GET_COLLATION(); FmgrInfo *hashFn; diff --git a/src/backend/access/brin/brin_minmax.c b/src/backend/access/brin/brin_minmax.c index d21ab3a668cce..79c5a0aa18578 100644 --- a/src/backend/access/brin/brin_minmax.c +++ b/src/backend/access/brin/brin_minmax.c @@ -66,7 +66,7 @@ brin_minmax_add_value(PG_FUNCTION_ARGS) BrinDesc *bdesc = (BrinDesc *) PG_GETARG_POINTER(0); BrinValues *column = (BrinValues *) PG_GETARG_POINTER(1); Datum newval = PG_GETARG_DATUM(2); - bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_DATUM(3); + bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_BOOL(3); Oid colloid = PG_GET_COLLATION(); FmgrInfo *cmpFn; Datum compar; @@ -225,8 +225,8 @@ brin_minmax_union(PG_FUNCTION_ARGS) /* Adjust minimum, if B's min is less than A's min */ finfo = minmax_get_strategy_procinfo(bdesc, attno, attr->atttypid, BTLessStrategyNumber); - needsadj = FunctionCall2Coll(finfo, colloid, col_b->bv_values[0], - col_a->bv_values[0]); + needsadj = DatumGetBool(FunctionCall2Coll(finfo, colloid, col_b->bv_values[0], + col_a->bv_values[0])); if (needsadj) { if (!attr->attbyval) @@ -238,8 +238,8 @@ brin_minmax_union(PG_FUNCTION_ARGS) /* Adjust maximum, if B's max is greater than A's max */ finfo = minmax_get_strategy_procinfo(bdesc, attno, attr->atttypid, BTGreaterStrategyNumber); - needsadj = FunctionCall2Coll(finfo, colloid, col_b->bv_values[1], - col_a->bv_values[1]); + needsadj = DatumGetBool(FunctionCall2Coll(finfo, colloid, col_b->bv_values[1], + col_a->bv_values[1])); if (needsadj) { if (!attr->attbyval) diff --git a/src/backend/access/brin/brin_minmax_multi.c b/src/backend/access/brin/brin_minmax_multi.c index 0d1507a2a3624..c87f1b9cd7eb2 100644 --- a/src/backend/access/brin/brin_minmax_multi.c +++ b/src/backend/access/brin/brin_minmax_multi.c @@ -624,7 +624,7 @@ brin_range_serialize(Ranges *range) for (i = 0; i < nvalues; i++) { - len += VARSIZE_ANY(range->values[i]); + len += VARSIZE_ANY(DatumGetPointer(range->values[i])); } } else if (typlen == -2) /* cstring */ @@ -1992,8 +1992,8 @@ brin_minmax_multi_distance_tid(PG_FUNCTION_ARGS) double da1, da2; - ItemPointer pa1 = (ItemPointer) PG_GETARG_DATUM(0); - ItemPointer pa2 = (ItemPointer) PG_GETARG_DATUM(1); + ItemPointer pa1 = (ItemPointer) PG_GETARG_POINTER(0); + ItemPointer pa2 = (ItemPointer) PG_GETARG_POINTER(1); /* * We know the values are range boundaries, but the range may be collapsed @@ -2032,7 +2032,7 @@ brin_minmax_multi_distance_numeric(PG_FUNCTION_ARGS) d = DirectFunctionCall2(numeric_sub, a2, a1); /* a2 - a1 */ - PG_RETURN_FLOAT8(DirectFunctionCall1(numeric_float8, d)); + PG_RETURN_DATUM(DirectFunctionCall1(numeric_float8, d)); } /* @@ -2414,7 
+2414,7 @@ brin_minmax_multi_add_value(PG_FUNCTION_ARGS) BrinDesc *bdesc = (BrinDesc *) PG_GETARG_POINTER(0); BrinValues *column = (BrinValues *) PG_GETARG_POINTER(1); Datum newval = PG_GETARG_DATUM(2); - bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_DATUM(3); + bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_BOOL(3); MinMaxMultiOptions *opts = (MinMaxMultiOptions *) PG_GET_OPCLASS_OPTIONS(); Oid colloid = PG_GET_COLLATION(); bool modified = false; diff --git a/src/backend/access/brin/brin_xlog.c b/src/backend/access/brin/brin_xlog.c index 85db2f0fd5ace..55348140fad3b 100644 --- a/src/backend/access/brin/brin_xlog.c +++ b/src/backend/access/brin/brin_xlog.c @@ -31,7 +31,7 @@ brin_xlog_createidx(XLogReaderState *record) /* create the index' metapage */ buf = XLogInitBufferForRedo(record, 0); Assert(BufferIsValid(buf)); - page = (Page) BufferGetPage(buf); + page = BufferGetPage(buf); brin_metapage_init(page, xlrec->pagesPerRange, xlrec->version); PageSetLSN(page, lsn); MarkBufferDirty(buf); @@ -82,7 +82,7 @@ brin_xlog_insert_update(XLogReaderState *record, Assert(tuple->bt_blkno == xlrec->heapBlk); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); offnum = xlrec->offnum; if (PageGetMaxOffsetNumber(page) + 1 < offnum) elog(PANIC, "brin_xlog_insert_update: invalid max offset number"); @@ -104,7 +104,7 @@ brin_xlog_insert_update(XLogReaderState *record, ItemPointerData tid; ItemPointerSet(&tid, regpgno, xlrec->offnum); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); brinSetHeapBlockItemptr(buffer, xlrec->pagesPerRange, xlrec->heapBlk, tid); @@ -146,7 +146,7 @@ brin_xlog_update(XLogReaderState *record) Page page; OffsetNumber offnum; - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); offnum = xlrec->oldOffnum; @@ -185,7 +185,7 @@ brin_xlog_samepage_update(XLogReaderState *record) brintuple = (BrinTuple *) XLogRecGetBlockData(record, 0, &tuplen); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); offnum = xlrec->offnum; @@ -254,7 +254,7 @@ brin_xlog_revmap_extend(XLogReaderState *record) */ buf = XLogInitBufferForRedo(record, 1); - page = (Page) BufferGetPage(buf); + page = BufferGetPage(buf); brin_page_init(page, BRIN_PAGETYPE_REVMAP); PageSetLSN(page, lsn); diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c index 969d1028cae89..1173a6d81b5ed 100644 --- a/src/backend/access/common/heaptuple.c +++ b/src/backend/access/common/heaptuple.c @@ -105,7 +105,7 @@ missing_hash(const void *key, Size keysize) { const missing_cache_key *entry = (missing_cache_key *) key; - return hash_bytes((const unsigned char *) entry->value, entry->len); + return hash_bytes((const unsigned char *) DatumGetPointer(entry->value), entry->len); } static int @@ -189,7 +189,7 @@ getmissingattr(TupleDesc tupleDesc, if (att->attlen > 0) key.len = att->attlen; else - key.len = VARSIZE_ANY(attrmiss->am_value); + key.len = VARSIZE_ANY(DatumGetPointer(attrmiss->am_value)); key.value = attrmiss->am_value; entry = hash_search(missing_cache, &key, HASH_ENTER, &found); @@ -901,9 +901,9 @@ expand_tuple(HeapTuple *targetHeapTuple, att->attlen, attrmiss[attnum].am_value); - targetDataLen = att_addlength_pointer(targetDataLen, - att->attlen, - attrmiss[attnum].am_value); + targetDataLen = att_addlength_datum(targetDataLen, + att->attlen, + attrmiss[attnum].am_value); } else { diff --git a/src/backend/access/common/printsimple.c b/src/backend/access/common/printsimple.c index f346ab3e8125b..a09c8fcd3323e 100644 --- 
a/src/backend/access/common/printsimple.c +++ b/src/backend/access/common/printsimple.c @@ -123,7 +123,7 @@ printsimple(TupleTableSlot *slot, DestReceiver *self) case OIDOID: { - Oid num = ObjectIdGetDatum(value); + Oid num = DatumGetObjectId(value); char str[10]; /* 10 digits */ int len; diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c index 830a3d883aa2e..6d3045e233211 100644 --- a/src/backend/access/common/printtup.c +++ b/src/backend/access/common/printtup.c @@ -350,7 +350,7 @@ printtup(TupleTableSlot *slot, DestReceiver *self) */ if (thisState->typisvarlena) VALGRIND_CHECK_MEM_IS_DEFINED(DatumGetPointer(attr), - VARSIZE_ANY(attr)); + VARSIZE_ANY(DatumGetPointer(attr))); if (thisState->format == 0) { diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c index 50747c1639612..0af3fea68fa48 100644 --- a/src/backend/access/common/reloptions.c +++ b/src/backend/access/common/reloptions.c @@ -1164,7 +1164,7 @@ add_local_string_reloption(local_relopts *relopts, const char *name, * but we declare them as Datums to avoid including array.h in reloptions.h. */ Datum -transformRelOptions(Datum oldOptions, List *defList, const char *namspace, +transformRelOptions(Datum oldOptions, List *defList, const char *nameSpace, const char *const validnsps[], bool acceptOidsOff, bool isReset) { Datum result; @@ -1190,8 +1190,8 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace, for (i = 0; i < noldoptions; i++) { - char *text_str = VARDATA(oldoptions[i]); - int text_len = VARSIZE(oldoptions[i]) - VARHDRSZ; + char *text_str = VARDATA(DatumGetPointer(oldoptions[i])); + int text_len = VARSIZE(DatumGetPointer(oldoptions[i])) - VARHDRSZ; /* Search for a match in defList */ foreach(cell, defList) @@ -1200,14 +1200,14 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace, int kw_len; /* ignore if not in the same namespace */ - if (namspace == NULL) + if (nameSpace == NULL) { if (def->defnamespace != NULL) continue; } else if (def->defnamespace == NULL) continue; - else if (strcmp(def->defnamespace, namspace) != 0) + else if (strcmp(def->defnamespace, nameSpace) != 0) continue; kw_len = strlen(def->defname); @@ -1277,14 +1277,14 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace, } /* ignore if not in the same namespace */ - if (namspace == NULL) + if (nameSpace == NULL) { if (def->defnamespace != NULL) continue; } else if (def->defnamespace == NULL) continue; - else if (strcmp(def->defnamespace, namspace) != 0) + else if (strcmp(def->defnamespace, nameSpace) != 0) continue; /* @@ -1456,8 +1456,8 @@ parseRelOptionsInternal(Datum options, bool validate, for (i = 0; i < noptions; i++) { - char *text_str = VARDATA(optiondatums[i]); - int text_len = VARSIZE(optiondatums[i]) - VARHDRSZ; + char *text_str = VARDATA(DatumGetPointer(optiondatums[i])); + int text_len = VARSIZE(DatumGetPointer(optiondatums[i])) - VARHDRSZ; int j; /* Search for a match in reloptions */ diff --git a/src/backend/access/common/toast_internals.c b/src/backend/access/common/toast_internals.c index 7d8be8346ce52..a1d0eed8953ba 100644 --- a/src/backend/access/common/toast_internals.c +++ b/src/backend/access/common/toast_internals.c @@ -64,11 +64,11 @@ toast_compress_datum(Datum value, char cmethod) switch (cmethod) { case TOAST_PGLZ_COMPRESSION: - tmp = pglz_compress_datum((const struct varlena *) value); + tmp = pglz_compress_datum((const struct varlena *) DatumGetPointer(value)); cmid = 
TOAST_PGLZ_COMPRESSION_ID; break; case TOAST_LZ4_COMPRESSION: - tmp = lz4_compress_datum((const struct varlena *) value); + tmp = lz4_compress_datum((const struct varlena *) DatumGetPointer(value)); cmid = TOAST_LZ4_COMPRESSION_ID; break; default: @@ -144,7 +144,7 @@ toast_save_datum(Relation rel, Datum value, int num_indexes; int validIndex; - Assert(!VARATT_IS_EXTERNAL(value)); + Assert(!VARATT_IS_EXTERNAL(dval)); /* * Open the toast relation and its indexes. We can use the index to check diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c index 020d00cd01ce7..568edacb9bdae 100644 --- a/src/backend/access/common/tupdesc.c +++ b/src/backend/access/common/tupdesc.c @@ -815,10 +815,10 @@ hashRowType(TupleDesc desc) uint32 s; int i; - s = hash_combine(0, hash_uint32(desc->natts)); - s = hash_combine(s, hash_uint32(desc->tdtypeid)); + s = hash_combine(0, hash_bytes_uint32(desc->natts)); + s = hash_combine(s, hash_bytes_uint32(desc->tdtypeid)); for (i = 0; i < desc->natts; ++i) - s = hash_combine(s, hash_uint32(TupleDescAttr(desc, i)->atttypid)); + s = hash_combine(s, hash_bytes_uint32(TupleDescAttr(desc, i)->atttypid)); return s; } @@ -993,7 +993,7 @@ TupleDescInitBuiltinEntry(TupleDesc desc, case INT8OID: att->attlen = 8; - att->attbyval = FLOAT8PASSBYVAL; + att->attbyval = true; att->attalign = TYPALIGN_DOUBLE; att->attstorage = TYPSTORAGE_PLAIN; att->attcompression = InvalidCompressionMethod; diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c index f29ccd3c2d1ff..656299b1b528f 100644 --- a/src/backend/access/gin/ginget.c +++ b/src/backend/access/gin/ginget.c @@ -1327,6 +1327,8 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast, */ do { + CHECK_FOR_INTERRUPTS(); + ItemPointerSetMin(item); match = true; for (i = 0; i < so->nkeys && match; i++) @@ -1966,8 +1968,6 @@ gingetbitmap(IndexScanDesc scan, TIDBitmap *tbm) for (;;) { - CHECK_FOR_INTERRUPTS(); - if (!scanGetItem(scan, iptr, &iptr, &recheck)) break; diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c index a65acd8910493..e9d4b27427e59 100644 --- a/src/backend/access/gin/gininsert.c +++ b/src/backend/access/gin/gininsert.c @@ -2189,7 +2189,10 @@ typedef struct * we simply copy the whole Datum, so that we don't have to care about stuff * like endianess etc. We could make it a little bit smaller, but it's not * worth it - it's a tiny fraction of the data, and we need to MAXALIGN the - * start of the TID list anyway. So we wouldn't save anything. + * start of the TID list anyway. So we wouldn't save anything. (This would + * not be a good idea for the permanent in-index data, since we'd prefer + * that that not depend on sizeof(Datum). But this is just a transient + * representation to use while sorting the data.) * * The TID list is serialized as compressed - it's highly compressible, and * we already have ginCompressPostingList for this purpose. 
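A pattern that recurs throughout these hunks (brin_minmax_multi, heaptuple.c, printtup.c, reloptions.c, toast_internals.c, and _gin_build_tuple just below): varlena macros such as VARSIZE_ANY() take a pointer, so a Datum must first be unwrapped with DatumGetPointer() rather than cast or passed directly. A minimal sketch of the corrected idiom; the helper name is hypothetical and not part of the patch:

#include "postgres.h"
#include "varatt.h"

/* Hypothetical helper: total in-memory size of a non-TOASTed varlena
 * Datum.  The point is the order of operations: DatumGetPointer()
 * first, varlena macros second. */
static Size
varlena_datum_size(Datum d)
{
	struct varlena *vl = (struct varlena *) DatumGetPointer(d);

	return VARSIZE_ANY(vl);
}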
The list may be @@ -2233,7 +2236,7 @@ _gin_build_tuple(OffsetNumber attrnum, unsigned char category, else if (typlen > 0) keylen = typlen; else if (typlen == -1) - keylen = VARSIZE_ANY(key); + keylen = VARSIZE_ANY(DatumGetPointer(key)); else if (typlen == -2) keylen = strlen(DatumGetPointer(key)) + 1; else diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c index c2d1771bd77b5..26081693383c7 100644 --- a/src/backend/access/gin/ginscan.c +++ b/src/backend/access/gin/ginscan.c @@ -271,6 +271,7 @@ ginNewScanKey(IndexScanDesc scan) ScanKey scankey = scan->keyData; GinScanOpaque so = (GinScanOpaque) scan->opaque; int i; + int numExcludeOnly; bool hasNullQuery = false; bool attrHasNormalScan[INDEX_MAX_KEYS] = {false}; MemoryContext oldCtx; @@ -393,6 +394,7 @@ ginNewScanKey(IndexScanDesc scan) * excludeOnly scan key must receive a GIN_CAT_EMPTY_QUERY hidden entry * and be set to normal (excludeOnly = false). */ + numExcludeOnly = 0; for (i = 0; i < so->nkeys; i++) { GinScanKey key = &so->keys[i]; @@ -406,6 +408,47 @@ ginNewScanKey(IndexScanDesc scan) ginScanKeyAddHiddenEntry(so, key, GIN_CAT_EMPTY_QUERY); attrHasNormalScan[key->attnum - 1] = true; } + else + numExcludeOnly++; + } + + /* + * If we left any excludeOnly scan keys as-is, move them to the end of the + * scan key array: they must appear after normal key(s). + */ + if (numExcludeOnly > 0) + { + GinScanKey tmpkeys; + int iNormalKey; + int iExcludeOnly; + + /* We'd better have made at least one normal key */ + Assert(numExcludeOnly < so->nkeys); + /* Make a temporary array to hold the re-ordered scan keys */ + tmpkeys = (GinScanKey) palloc(so->nkeys * sizeof(GinScanKeyData)); + /* Re-order the keys ... */ + iNormalKey = 0; + iExcludeOnly = so->nkeys - numExcludeOnly; + for (i = 0; i < so->nkeys; i++) + { + GinScanKey key = &so->keys[i]; + + if (key->excludeOnly) + { + memcpy(tmpkeys + iExcludeOnly, key, sizeof(GinScanKeyData)); + iExcludeOnly++; + } + else + { + memcpy(tmpkeys + iNormalKey, key, sizeof(GinScanKeyData)); + iNormalKey++; + } + } + Assert(iNormalKey == so->nkeys - numExcludeOnly); + Assert(iExcludeOnly == so->nkeys); + /* ... 
and copy them back to so->keys[] */ + memcpy(so->keys, tmpkeys, so->nkeys * sizeof(GinScanKeyData)); + pfree(tmpkeys); } /* diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c index fbbe3a6dd7046..2d833d6d61875 100644 --- a/src/backend/access/gin/ginvacuum.c +++ b/src/backend/access/gin/ginvacuum.c @@ -753,7 +753,7 @@ ginvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno, RBM_NORMAL, info->strategy); LockBuffer(buffer, GIN_SHARE); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); if (GinPageIsRecyclable(page)) { diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c index 55a1ec09776ba..4478e92820494 100644 --- a/src/backend/access/gin/ginxlog.c +++ b/src/backend/access/gin/ginxlog.c @@ -30,7 +30,7 @@ ginRedoClearIncompleteSplit(XLogReaderState *record, uint8 block_id) if (XLogReadBufferForRedo(record, block_id, &buffer) == BLK_NEEDS_REDO) { - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); GinPageGetOpaque(page)->flags &= ~GIN_INCOMPLETE_SPLIT; PageSetLSN(page, lsn); @@ -50,7 +50,7 @@ ginRedoCreatePTree(XLogReaderState *record) Page page; buffer = XLogInitBufferForRedo(record, 0); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); GinInitBuffer(buffer, GIN_DATA | GIN_LEAF | GIN_COMPRESSED); diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 7b24380c97801..a96796d5c7da9 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -696,7 +696,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, gistcheckpage(state.r, stack->buffer); } - stack->page = (Page) BufferGetPage(stack->buffer); + stack->page = BufferGetPage(stack->buffer); stack->lsn = xlocked ? 
PageGetLSN(stack->page) : BufferGetLSNAtomic(stack->buffer); Assert(!RelationNeedsWAL(state.r) || !XLogRecPtrIsInvalid(stack->lsn)); @@ -783,7 +783,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, LockBuffer(stack->buffer, GIST_UNLOCK); LockBuffer(stack->buffer, GIST_EXCLUSIVE); xlocked = true; - stack->page = (Page) BufferGetPage(stack->buffer); + stack->page = BufferGetPage(stack->buffer); if (PageGetLSN(stack->page) != stack->lsn) { @@ -847,7 +847,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, LockBuffer(stack->buffer, GIST_UNLOCK); LockBuffer(stack->buffer, GIST_EXCLUSIVE); xlocked = true; - stack->page = (Page) BufferGetPage(stack->buffer); + stack->page = BufferGetPage(stack->buffer); stack->lsn = PageGetLSN(stack->page); if (stack->blkno == GIST_ROOT_BLKNO) @@ -938,7 +938,7 @@ gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum) buffer = ReadBuffer(r, top->blkno); LockBuffer(buffer, GIST_SHARE); gistcheckpage(r, buffer); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); if (GistPageIsLeaf(page)) { @@ -1033,7 +1033,7 @@ gistFindCorrectParent(Relation r, GISTInsertStack *child, bool is_build) GISTInsertStack *ptr; gistcheckpage(r, parent->buffer); - parent->page = (Page) BufferGetPage(parent->buffer); + parent->page = BufferGetPage(parent->buffer); maxoff = PageGetMaxOffsetNumber(parent->page); /* Check if the downlink is still where it was before */ @@ -1098,7 +1098,7 @@ gistFindCorrectParent(Relation r, GISTInsertStack *child, bool is_build) parent->buffer = ReadBuffer(r, parent->blkno); LockBuffer(parent->buffer, GIST_EXCLUSIVE); gistcheckpage(r, parent->buffer); - parent->page = (Page) BufferGetPage(parent->buffer); + parent->page = BufferGetPage(parent->buffer); } /* @@ -1121,7 +1121,7 @@ gistFindCorrectParent(Relation r, GISTInsertStack *child, bool is_build) while (ptr) { ptr->buffer = ReadBuffer(r, ptr->blkno); - ptr->page = (Page) BufferGetPage(ptr->buffer); + ptr->page = BufferGetPage(ptr->buffer); ptr = ptr->parent; } diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c index 9e707167d984b..9b2ec9815f17e 100644 --- a/src/backend/access/gist/gistbuild.c +++ b/src/backend/access/gist/gistbuild.c @@ -969,7 +969,7 @@ gistProcessItup(GISTBuildState *buildstate, IndexTuple itup, buffer = ReadBuffer(indexrel, blkno); LockBuffer(buffer, GIST_EXCLUSIVE); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); childoffnum = gistchoose(indexrel, page, itup, giststate); iid = PageGetItemId(page, childoffnum); idxtuple = (IndexTuple) PageGetItem(page, iid); @@ -1448,7 +1448,7 @@ gistGetMaxLevel(Relation index) * pro forma. */ LockBuffer(buffer, GIST_SHARE); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); if (GistPageIsLeaf(page)) { diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c index 392163cb22900..f2ec6cbe2e524 100644 --- a/src/backend/access/gist/gistproc.c +++ b/src/backend/access/gist/gistproc.c @@ -1707,8 +1707,8 @@ gist_bbox_zorder_cmp(Datum a, Datum b, SortSupport ssup) * Abbreviated version of Z-order comparison * * The abbreviated format is a Z-order value computed from the two 32-bit - * floats. If SIZEOF_DATUM == 8, the 64-bit Z-order value fits fully in the - * abbreviated Datum, otherwise use its most significant bits. + * floats. Now that sizeof(Datum) is always 8, the 64-bit Z-order value + * always fits fully in the abbreviated Datum. 
*/ static Datum gist_bbox_zorder_abbrev_convert(Datum original, SortSupport ssup) @@ -1718,11 +1718,7 @@ gist_bbox_zorder_abbrev_convert(Datum original, SortSupport ssup) z = point_zorder_internal(p->x, p->y); -#if SIZEOF_DATUM == 8 - return (Datum) z; -#else - return (Datum) (z >> 32); -#endif + return UInt64GetDatum(z); } /* diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c index dca236b6e5735..b925eda2b9b42 100644 --- a/src/backend/access/gist/gistvacuum.c +++ b/src/backend/access/gist/gistvacuum.c @@ -330,7 +330,7 @@ gistvacuumpage(GistVacState *vstate, Buffer buffer) * exclusive lock. */ LockBuffer(buffer, GIST_EXCLUSIVE); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); if (gistPageRecyclable(page)) { @@ -528,7 +528,7 @@ gistvacuum_delete_empty_pages(IndexVacuumInfo *info, GistVacState *vstate) RBM_NORMAL, info->strategy); LockBuffer(buffer, GIST_SHARE); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); if (PageIsNew(page) || GistPageIsDeleted(page) || GistPageIsLeaf(page)) { diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c index b354e4ba5d1b7..42fee1f0764fa 100644 --- a/src/backend/access/gist/gistxlog.c +++ b/src/backend/access/gist/gistxlog.c @@ -83,7 +83,7 @@ gistRedoPageUpdateRecord(XLogReaderState *record) data = begin = XLogRecGetBlockData(record, 0, &datalen); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); if (xldata->ntodelete == 1 && xldata->ntoinsert == 1) { @@ -201,7 +201,7 @@ gistRedoDeleteRecord(XLogReaderState *record) if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO) { - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); PageIndexMultiDelete(page, toDelete, xldata->ntodelete); @@ -280,7 +280,7 @@ gistRedoPageSplitRecord(XLogReaderState *record) } buffer = XLogInitBufferForRedo(record, i + 1); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); data = XLogRecGetBlockData(record, i + 1, &datalen); tuples = decodePageSplitRecord(data, datalen, &num); @@ -348,7 +348,7 @@ gistRedoPageDelete(XLogReaderState *record) if (XLogReadBufferForRedo(record, 0, &leafBuffer) == BLK_NEEDS_REDO) { - Page page = (Page) BufferGetPage(leafBuffer); + Page page = BufferGetPage(leafBuffer); GistPageSetDeleted(page, xldata->deleteXid); @@ -358,7 +358,7 @@ gistRedoPageDelete(XLogReaderState *record) if (XLogReadBufferForRedo(record, 1, &parentBuffer) == BLK_NEEDS_REDO) { - Page page = (Page) BufferGetPage(parentBuffer); + Page page = BufferGetPage(parentBuffer); PageIndexTupleDelete(page, xldata->downlinkOffset); diff --git a/src/backend/access/hash/hash_xlog.c b/src/backend/access/hash/hash_xlog.c index 8d97067fe5403..d963a0c370292 100644 --- a/src/backend/access/hash/hash_xlog.c +++ b/src/backend/access/hash/hash_xlog.c @@ -38,7 +38,7 @@ hash_xlog_init_meta_page(XLogReaderState *record) Assert(BufferIsValid(metabuf)); _hash_init_metabuffer(metabuf, xlrec->num_tuples, xlrec->procid, xlrec->ffactor, true); - page = (Page) BufferGetPage(metabuf); + page = BufferGetPage(metabuf); PageSetLSN(page, lsn); MarkBufferDirty(metabuf); @@ -235,7 +235,7 @@ hash_xlog_add_ovfl_page(XLogReaderState *record) if (XLogReadBufferForRedo(record, 2, &mapbuffer) == BLK_NEEDS_REDO) { - Page mappage = (Page) BufferGetPage(mapbuffer); + Page mappage = BufferGetPage(mapbuffer); uint32 *freep = NULL; uint32 *bitmap_page_bit; @@ -538,7 +538,7 @@ hash_xlog_move_page_contents(XLogReaderState *record) data = begin = 
XLogRecGetBlockData(record, 1, &datalen); - writepage = (Page) BufferGetPage(writebuf); + writepage = BufferGetPage(writebuf); if (xldata->ntups > 0) { @@ -584,7 +584,7 @@ hash_xlog_move_page_contents(XLogReaderState *record) ptr = XLogRecGetBlockData(record, 2, &len); - page = (Page) BufferGetPage(deletebuf); + page = BufferGetPage(deletebuf); if (len > 0) { @@ -670,7 +670,7 @@ hash_xlog_squeeze_page(XLogReaderState *record) data = begin = XLogRecGetBlockData(record, 1, &datalen); - writepage = (Page) BufferGetPage(writebuf); + writepage = BufferGetPage(writebuf); if (xldata->ntups > 0) { @@ -807,7 +807,7 @@ hash_xlog_squeeze_page(XLogReaderState *record) /* replay the record for bitmap page */ if (XLogReadBufferForRedo(record, 5, &mapbuf) == BLK_NEEDS_REDO) { - Page mappage = (Page) BufferGetPage(mapbuf); + Page mappage = BufferGetPage(mapbuf); uint32 *freep = NULL; char *data; uint32 *bitmap_page_bit; @@ -895,7 +895,7 @@ hash_xlog_delete(XLogReaderState *record) ptr = XLogRecGetBlockData(record, 1, &len); - page = (Page) BufferGetPage(deletebuf); + page = BufferGetPage(deletebuf); if (len > 0) { @@ -946,7 +946,7 @@ hash_xlog_split_cleanup(XLogReaderState *record) { HashPageOpaque bucket_opaque; - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); bucket_opaque = HashPageGetOpaque(page); bucket_opaque->hasho_flag &= ~LH_BUCKET_NEEDS_SPLIT_CLEANUP; @@ -1029,7 +1029,7 @@ hash_xlog_vacuum_one_page(XLogReaderState *record) if (action == BLK_NEEDS_REDO) { - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); PageIndexMultiDelete(page, toDelete, xldata->ntuples); diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 0dcd6ee817e04..4c5ae205a7a60 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -1143,6 +1143,17 @@ heap_beginscan(Relation relation, Snapshot snapshot, if (!(snapshot && IsMVCCSnapshot(snapshot))) scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE; + /* Check that a historic snapshot is not used for non-catalog tables */ + if (snapshot && + IsHistoricMVCCSnapshot(snapshot) && + !RelationIsAccessibleInLogicalDecoding(relation)) + { + ereport(ERROR, + (errcode(ERRCODE_INVALID_TRANSACTION_STATE), + errmsg("cannot query non-catalog table \"%s\" during logical decoding", + RelationGetRelationName(relation)))); + } + /* * For seqscan and sample scans in a serializable transaction, acquire a * predicate lock on the entire relation. 
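The new check in heap_beginscan() above has a twin in index_beginscan() (see the indexam.c hunk further down). A hypothetical factoring of the shared guard, shown only to make the rule explicit; the patch itself open-codes it in both places, and the header locations of the two predicates are assumed here:

#include "postgres.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"

/* Reject historic MVCC snapshots (logical decoding) for anything that
 * is not a catalog or catalog-like relation. */
static void
check_snapshot_usable_sketch(Relation rel, Snapshot snapshot)
{
	if (snapshot && IsHistoricMVCCSnapshot(snapshot) &&
		!RelationIsAccessibleInLogicalDecoding(rel))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
				 errmsg("cannot query non-catalog table \"%s\" during logical decoding",
						RelationGetRelationName(rel))));
}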
This is required not only to @@ -2636,9 +2647,6 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, */ if (all_frozen_set) { - Assert(PageIsAllVisible(page)); - Assert(visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer)); - /* * It's fine to use InvalidTransactionId here - this is only used * when HEAP_INSERT_FROZEN is specified, which intentionally @@ -6088,7 +6096,7 @@ heap_finish_speculative(Relation relation, ItemPointer tid) buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid)); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); offnum = ItemPointerGetOffsetNumber(tid); if (PageGetMaxOffsetNumber(page) >= offnum) diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c index cb4bc35c93ed4..bcbac844bb669 100644 --- a/src/backend/access/heap/heapam_handler.c +++ b/src/backend/access/heap/heapam_handler.c @@ -2280,7 +2280,7 @@ heapam_scan_sample_next_tuple(TableScanDesc scan, SampleScanState *scanstate, if (!pagemode) LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE); - page = (Page) BufferGetPage(hscan->rs_cbuf); + page = BufferGetPage(hscan->rs_cbuf); all_visible = PageIsAllVisible(page) && !scan->rs_snapshot->takenDuringRecovery; maxoffset = PageGetMaxOffsetNumber(page); diff --git a/src/backend/access/heap/heapam_xlog.c b/src/backend/access/heap/heapam_xlog.c index eb4bd3d6ae3a3..cf843277938de 100644 --- a/src/backend/access/heap/heapam_xlog.c +++ b/src/backend/access/heap/heapam_xlog.c @@ -78,7 +78,7 @@ heap_xlog_prune_freeze(XLogReaderState *record) &buffer); if (action == BLK_NEEDS_REDO) { - Page page = (Page) BufferGetPage(buffer); + Page page = BufferGetPage(buffer); OffsetNumber *redirected; OffsetNumber *nowdead; OffsetNumber *nowunused; @@ -295,7 +295,6 @@ heap_xlog_visible(XLogReaderState *record) LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK); reln = CreateFakeRelcacheEntry(rlocator); - visibilitymap_pin(reln, blkno, &vmbuffer); visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer, xlrec->snapshotConflictHorizon, vmbits); @@ -599,7 +598,7 @@ heap_xlog_multi_insert(XLogReaderState *record) tupdata = XLogRecGetBlockData(record, 0, &len); endptr = tupdata + len; - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); for (i = 0; i < xlrec->ntuples; i++) { @@ -801,7 +800,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update) else if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE) { nbuffer = XLogInitBufferForRedo(record, 0); - page = (Page) BufferGetPage(nbuffer); + page = BufferGetPage(nbuffer); PageInit(page, BufferGetPageSize(nbuffer), 0); newaction = BLK_NEEDS_REDO; } @@ -1027,7 +1026,7 @@ heap_xlog_lock(XLogReaderState *record) if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO) { - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); offnum = xlrec->offnum; if (PageGetMaxOffsetNumber(page) >= offnum) diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index a8025889be088..7ebd22f00a370 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -1563,7 +1563,7 @@ heap_page_prune_execute(Buffer buffer, bool lp_truncate_only, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused) { - Page page = (Page) BufferGetPage(buffer); + Page page = BufferGetPage(buffer); OffsetNumber *offnum; HeapTupleHeader htup PG_USED_FOR_ASSERTS_ONLY; diff --git a/src/backend/access/heap/vacuumlazy.c 
b/src/backend/access/heap/vacuumlazy.c index 14036c27e878a..981d9380a925c 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -140,7 +140,6 @@ #include "access/visibilitymap.h" #include "access/xloginsert.h" #include "catalog/storage.h" -#include "commands/dbcommands.h" #include "commands/progress.h" #include "commands/vacuum.h" #include "common/int.h" @@ -2122,8 +2121,11 @@ lazy_scan_prune(LVRelState *vacrel, else if (all_visible_according_to_vm && !PageIsAllVisible(page) && visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0) { - elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u", - vacrel->relname, blkno); + ereport(WARNING, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u", + vacrel->relname, blkno))); + visibilitymap_clear(vacrel->rel, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS); } @@ -2144,8 +2146,11 @@ lazy_scan_prune(LVRelState *vacrel, */ else if (presult.lpdead_items > 0 && PageIsAllVisible(page)) { - elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u", - vacrel->relname, blkno); + ereport(WARNING, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u", + vacrel->relname, blkno))); + PageClearAllVisible(page); MarkBufferDirty(buf); visibilitymap_clear(vacrel->rel, blkno, vmbuffer, diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c index 8f918e00af7ed..7306c16f05cd3 100644 --- a/src/backend/access/heap/visibilitymap.c +++ b/src/backend/access/heap/visibilitymap.c @@ -255,11 +255,12 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, uint8 status; #ifdef TRACE_VISIBILITYMAP - elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk); + elog(DEBUG1, "vm_set flags 0x%02X for %s %d", + flags, RelationGetRelationName(rel), heapBlk); #endif Assert(InRecovery || XLogRecPtrIsInvalid(recptr)); - Assert(InRecovery || PageIsAllVisible((Page) BufferGetPage(heapBuf))); + Assert(InRecovery || PageIsAllVisible(BufferGetPage(heapBuf))); Assert((flags & VISIBILITYMAP_VALID_BITS) == flags); /* Must never set all_frozen bit without also setting all_visible bit */ @@ -269,6 +270,8 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk) elog(ERROR, "wrong heap buffer passed to visibilitymap_set"); + Assert(!BufferIsValid(heapBuf) || BufferIsExclusiveLocked(heapBuf)); + /* Check that we have the right VM page pinned */ if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock) elog(ERROR, "wrong VM buffer passed to visibilitymap_set"); diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c index 219df1971da66..86d11f4ec791d 100644 --- a/src/backend/access/index/indexam.c +++ b/src/backend/access/index/indexam.c @@ -263,6 +263,16 @@ index_beginscan(Relation heapRelation, Assert(snapshot != InvalidSnapshot); + /* Check that a historic snapshot is not used for non-catalog tables */ + if (IsHistoricMVCCSnapshot(snapshot) && + !RelationIsAccessibleInLogicalDecoding(heapRelation)) + { + ereport(ERROR, + (errcode(ERRCODE_INVALID_TRANSACTION_STATE), + errmsg("cannot query non-catalog table \"%s\" during logical decoding", + RelationGetRelationName(heapRelation)))); + } + scan = 
index_beginscan_internal(indexRelation, nkeys, norderbys, snapshot, NULL, false); /* @@ -986,11 +996,6 @@ index_store_float8_orderby_distances(IndexScanDesc scan, Oid *orderByTypes, { if (orderByTypes[i] == FLOAT8OID) { -#ifndef USE_FLOAT8_BYVAL - /* must free any old value to avoid memory leakage */ - if (!scan->xs_orderbynulls[i]) - pfree(DatumGetPointer(scan->xs_orderbyvals[i])); -#endif if (distances && !distances[i].isnull) { scan->xs_orderbyvals[i] = Float8GetDatum(distances[i].value); diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c index 4da5a3c1d161d..188c27b4925f7 100644 --- a/src/backend/access/nbtree/nbtcompare.c +++ b/src/backend/access/nbtree/nbtcompare.c @@ -278,32 +278,12 @@ btint8cmp(PG_FUNCTION_ARGS) PG_RETURN_INT32(A_LESS_THAN_B); } -#if SIZEOF_DATUM < 8 -static int -btint8fastcmp(Datum x, Datum y, SortSupport ssup) -{ - int64 a = DatumGetInt64(x); - int64 b = DatumGetInt64(y); - - if (a > b) - return A_GREATER_THAN_B; - else if (a == b) - return 0; - else - return A_LESS_THAN_B; -} -#endif - Datum btint8sortsupport(PG_FUNCTION_ARGS) { SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0); -#if SIZEOF_DATUM >= 8 ssup->comparator = ssup_datum_signed_cmp; -#else - ssup->comparator = btint8fastcmp; -#endif PG_RETURN_VOID(); } @@ -555,7 +535,7 @@ btcharcmp(PG_FUNCTION_ARGS) static Datum char_decrement(Relation rel, Datum existing, bool *underflow) { - uint8 cexisting = UInt8GetDatum(existing); + uint8 cexisting = DatumGetUInt8(existing); if (cexisting == 0) { @@ -571,7 +551,7 @@ char_decrement(Relation rel, Datum existing, bool *underflow) static Datum char_increment(Relation rel, Datum existing, bool *overflow) { - uint8 cexisting = UInt8GetDatum(existing); + uint8 cexisting = DatumGetUInt8(existing); if (cexisting == UCHAR_MAX) { diff --git a/src/backend/access/nbtree/nbtdedup.c b/src/backend/access/nbtree/nbtdedup.c index 08884116aecbe..ab0b6946cb031 100644 --- a/src/backend/access/nbtree/nbtdedup.c +++ b/src/backend/access/nbtree/nbtdedup.c @@ -16,6 +16,7 @@ #include "access/nbtree.h" #include "access/nbtxlog.h" +#include "access/tableam.h" #include "access/xloginsert.h" #include "miscadmin.h" #include "utils/rel.h" diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index aa82cede30aa4..be60781fc98ec 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -17,6 +17,7 @@ #include "access/nbtree.h" #include "access/nbtxlog.h" +#include "access/tableam.h" #include "access/transam.h" #include "access/xloginsert.h" #include "common/int.h" diff --git a/src/backend/access/nbtree/nbtpreprocesskeys.c b/src/backend/access/nbtree/nbtpreprocesskeys.c index 21c519cd108ed..936b93f157a8b 100644 --- a/src/backend/access/nbtree/nbtpreprocesskeys.c +++ b/src/backend/access/nbtree/nbtpreprocesskeys.c @@ -16,11 +16,13 @@ #include "postgres.h" #include "access/nbtree.h" +#include "access/relscan.h" #include "common/int.h" #include "lib/qunique.h" #include "utils/array.h" #include "utils/lsyscache.h" #include "utils/memutils.h" +#include "utils/rel.h" typedef struct BTScanKeyPreproc { diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index 9d70e89c1f3ce..8828a7a8f8952 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -44,6 +44,7 @@ #include "access/parallel.h" #include "access/relscan.h" #include "access/table.h" +#include "access/tableam.h" #include "access/xact.h" #include 
"catalog/index.h" #include "commands/progress.h" diff --git a/src/backend/access/nbtree/nbtsplitloc.c b/src/backend/access/nbtree/nbtsplitloc.c index e6c9aaa0454dd..b88c396195a42 100644 --- a/src/backend/access/nbtree/nbtsplitloc.c +++ b/src/backend/access/nbtree/nbtsplitloc.c @@ -15,6 +15,7 @@ #include "postgres.h" #include "access/nbtree.h" +#include "access/tableam.h" #include "common/int.h" typedef enum diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index 9aed207995f52..edfea2acaff66 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -19,10 +19,13 @@ #include "access/nbtree.h" #include "access/reloptions.h" +#include "access/relscan.h" #include "commands/progress.h" #include "miscadmin.h" #include "utils/datum.h" #include "utils/lsyscache.h" +#include "utils/rel.h" + #define LOOK_AHEAD_REQUIRED_RECHECKS 3 #define LOOK_AHEAD_DEFAULT_DISTANCE 5 diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c index d31dd56732d2f..69ea668bb0d92 100644 --- a/src/backend/access/nbtree/nbtxlog.c +++ b/src/backend/access/nbtree/nbtxlog.c @@ -143,7 +143,7 @@ _bt_clear_incomplete_split(XLogReaderState *record, uint8 block_id) if (XLogReadBufferForRedo(record, block_id, &buf) == BLK_NEEDS_REDO) { - Page page = (Page) BufferGetPage(buf); + Page page = BufferGetPage(buf); BTPageOpaque pageop = BTPageGetOpaque(page); Assert(P_INCOMPLETE_SPLIT(pageop)); @@ -287,7 +287,7 @@ btree_xlog_split(bool newitemonleft, XLogReaderState *record) /* Reconstruct right (new) sibling page from scratch */ rbuf = XLogInitBufferForRedo(record, 1); datapos = XLogRecGetBlockData(record, 1, &datalen); - rpage = (Page) BufferGetPage(rbuf); + rpage = BufferGetPage(rbuf); _bt_pageinit(rpage, BufferGetPageSize(rbuf)); ropaque = BTPageGetOpaque(rpage); @@ -314,7 +314,7 @@ btree_xlog_split(bool newitemonleft, XLogReaderState *record) * checking possible. See also _bt_restore_page(), which does the * same for the right page. 
*/ - Page origpage = (Page) BufferGetPage(buf); + Page origpage = BufferGetPage(buf); BTPageOpaque oopaque = BTPageGetOpaque(origpage); OffsetNumber off; IndexTuple newitem = NULL, @@ -439,7 +439,7 @@ btree_xlog_split(bool newitemonleft, XLogReaderState *record) if (XLogReadBufferForRedo(record, 2, &sbuf) == BLK_NEEDS_REDO) { - Page spage = (Page) BufferGetPage(sbuf); + Page spage = BufferGetPage(sbuf); BTPageOpaque spageop = BTPageGetOpaque(spage); spageop->btpo_prev = rightpagenumber; @@ -470,7 +470,7 @@ btree_xlog_dedup(XLogReaderState *record) if (XLogReadBufferForRedo(record, 0, &buf) == BLK_NEEDS_REDO) { char *ptr = XLogRecGetBlockData(record, 0, NULL); - Page page = (Page) BufferGetPage(buf); + Page page = BufferGetPage(buf); BTPageOpaque opaque = BTPageGetOpaque(page); OffsetNumber offnum, minoff, @@ -614,7 +614,7 @@ btree_xlog_vacuum(XLogReaderState *record) { char *ptr = XLogRecGetBlockData(record, 0, NULL); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); if (xlrec->nupdated > 0) { @@ -680,7 +680,7 @@ btree_xlog_delete(XLogReaderState *record) { char *ptr = XLogRecGetBlockData(record, 0, NULL); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); if (xlrec->nupdated > 0) { @@ -740,7 +740,7 @@ btree_xlog_mark_page_halfdead(uint8 info, XLogReaderState *record) OffsetNumber nextoffset; BlockNumber rightsib; - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); pageop = BTPageGetOpaque(page); poffset = xlrec->poffset; @@ -769,7 +769,7 @@ btree_xlog_mark_page_halfdead(uint8 info, XLogReaderState *record) /* Rewrite the leaf page as a halfdead page */ buffer = XLogInitBufferForRedo(record, 0); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); _bt_pageinit(page, BufferGetPageSize(buffer)); pageop = BTPageGetOpaque(page); @@ -836,7 +836,7 @@ btree_xlog_unlink_page(uint8 info, XLogReaderState *record) { if (XLogReadBufferForRedo(record, 1, &leftbuf) == BLK_NEEDS_REDO) { - page = (Page) BufferGetPage(leftbuf); + page = BufferGetPage(leftbuf); pageop = BTPageGetOpaque(page); pageop->btpo_next = rightsib; @@ -849,7 +849,7 @@ btree_xlog_unlink_page(uint8 info, XLogReaderState *record) /* Rewrite target page as empty deleted page */ target = XLogInitBufferForRedo(record, 0); - page = (Page) BufferGetPage(target); + page = BufferGetPage(target); _bt_pageinit(page, BufferGetPageSize(target)); pageop = BTPageGetOpaque(page); @@ -868,7 +868,7 @@ btree_xlog_unlink_page(uint8 info, XLogReaderState *record) /* Fix left-link of right sibling */ if (XLogReadBufferForRedo(record, 2, &rightbuf) == BLK_NEEDS_REDO) { - page = (Page) BufferGetPage(rightbuf); + page = BufferGetPage(rightbuf); pageop = BTPageGetOpaque(page); pageop->btpo_prev = leftsib; @@ -907,7 +907,7 @@ btree_xlog_unlink_page(uint8 info, XLogReaderState *record) Assert(!isleaf); leafbuf = XLogInitBufferForRedo(record, 3); - page = (Page) BufferGetPage(leafbuf); + page = BufferGetPage(leafbuf); _bt_pageinit(page, BufferGetPageSize(leafbuf)); pageop = BTPageGetOpaque(page); @@ -949,7 +949,7 @@ btree_xlog_newroot(XLogReaderState *record) Size len; buffer = XLogInitBufferForRedo(record, 0); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); _bt_pageinit(page, BufferGetPageSize(buffer)); pageop = BTPageGetOpaque(page); diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c index 95fea74e296f8..9b86c016acb37 100644 --- a/src/backend/access/spgist/spgutils.c +++ b/src/backend/access/spgist/spgutils.c @@ -785,7 
+785,7 @@ SpGistGetInnerTypeSize(SpGistTypeDesc *att, Datum datum) else if (att->attlen > 0) size = att->attlen; else - size = VARSIZE_ANY(datum); + size = VARSIZE_ANY(DatumGetPointer(datum)); return MAXALIGN(size); } @@ -804,7 +804,7 @@ memcpyInnerDatum(void *target, SpGistTypeDesc *att, Datum datum) } else { - size = (att->attlen > 0) ? att->attlen : VARSIZE_ANY(datum); + size = (att->attlen > 0) ? att->attlen : VARSIZE_ANY(DatumGetPointer(datum)); memcpy(target, DatumGetPointer(datum), size); } } diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c index 2678f7ab7829a..8f8a1ad7796aa 100644 --- a/src/backend/access/spgist/spgvacuum.c +++ b/src/backend/access/spgist/spgvacuum.c @@ -626,7 +626,7 @@ spgvacuumpage(spgBulkDeleteState *bds, Buffer buffer) Page page; LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); if (PageIsNew(page)) { @@ -707,7 +707,7 @@ spgprocesspending(spgBulkDeleteState *bds) buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno, RBM_NORMAL, bds->info->strategy); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); if (PageIsNew(page) || SpGistPageIsDeleted(page)) { diff --git a/src/backend/access/spgist/spgxlog.c b/src/backend/access/spgist/spgxlog.c index b7986e6f7131e..d4620c915d0cc 100644 --- a/src/backend/access/spgist/spgxlog.c +++ b/src/backend/access/spgist/spgxlog.c @@ -576,7 +576,7 @@ spgRedoPickSplit(XLogReaderState *record) { /* just re-init the source page */ srcBuffer = XLogInitBufferForRedo(record, 0); - srcPage = (Page) BufferGetPage(srcBuffer); + srcPage = BufferGetPage(srcBuffer); SpGistInitBuffer(srcBuffer, SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0)); @@ -629,7 +629,7 @@ spgRedoPickSplit(XLogReaderState *record) { /* just re-init the dest page */ destBuffer = XLogInitBufferForRedo(record, 1); - destPage = (Page) BufferGetPage(destBuffer); + destPage = BufferGetPage(destBuffer); SpGistInitBuffer(destBuffer, SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0)); @@ -642,7 +642,7 @@ spgRedoPickSplit(XLogReaderState *record) * full-page-image case, but for safety let's hold it till later. 
*/ if (XLogReadBufferForRedo(record, 1, &destBuffer) == BLK_NEEDS_REDO) - destPage = (Page) BufferGetPage(destBuffer); + destPage = BufferGetPage(destBuffer); else destPage = NULL; /* don't do any page updates */ } diff --git a/src/backend/access/table/toast_helper.c b/src/backend/access/table/toast_helper.c index b60fab0a4d294..11f97d65367d5 100644 --- a/src/backend/access/table/toast_helper.c +++ b/src/backend/access/table/toast_helper.c @@ -330,7 +330,7 @@ toast_delete_external(Relation rel, const Datum *values, const bool *isnull, if (isnull[i]) continue; - else if (VARATT_IS_EXTERNAL_ONDISK(value)) + else if (VARATT_IS_EXTERNAL_ONDISK(DatumGetPointer(value))) toast_delete_datum(rel, value, is_speculative); } } diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index 3cb09c3d5987c..8bf59d369f848 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -70,16 +70,11 @@ #include "access/multixact.h" #include "access/slru.h" -#include "access/transam.h" #include "access/twophase.h" #include "access/twophase_rmgr.h" -#include "access/xact.h" #include "access/xlog.h" #include "access/xloginsert.h" #include "access/xlogutils.h" -#include "commands/dbcommands.h" -#include "funcapi.h" -#include "lib/ilist.h" #include "miscadmin.h" #include "pg_trace.h" #include "pgstat.h" @@ -87,9 +82,9 @@ #include "storage/pmsignal.h" #include "storage/proc.h" #include "storage/procarray.h" -#include "utils/fmgrprotos.h" #include "utils/guc_hooks.h" #include "utils/injection_point.h" +#include "utils/lsyscache.h" #include "utils/memutils.h" @@ -398,8 +393,6 @@ static int mXactCacheGetById(MultiXactId multi, MultiXactMember **members); static void mXactCachePut(MultiXactId multi, int nmembers, MultiXactMember *members); -static char *mxstatus_to_string(MultiXactStatus status); - /* management of SLRU infrastructure */ static bool MultiXactOffsetPagePrecedes(int64 page1, int64 page2); static bool MultiXactMemberPagePrecedes(int64 page1, int64 page2); @@ -1747,7 +1740,7 @@ mXactCachePut(MultiXactId multi, int nmembers, MultiXactMember *members) } } -static char * +char * mxstatus_to_string(MultiXactStatus status) { switch (status) @@ -2859,31 +2852,43 @@ find_multixact_start(MultiXactId multi, MultiXactOffset *result) } /* - * Determine how many multixacts, and how many multixact members, currently - * exist. Return false if unable to determine. + * GetMultiXactInfo + * + * Returns information about the current MultiXact state: + * multixacts: Number of MultiXacts (nextMultiXactId - oldestMultiXactId) + * members: Number of member entries (nextOffset - oldestOffset) + * oldestMultiXactId: Oldest MultiXact ID still in use + * oldestOffset: Oldest offset still in use + * + * Returns false if this cannot be determined because the oldest offset is unknown. 
*/ -static bool -ReadMultiXactCounts(uint32 *multixacts, MultiXactOffset *members) +bool +GetMultiXactInfo(uint32 *multixacts, MultiXactOffset *members, + MultiXactId *oldestMultiXactId, MultiXactOffset *oldestOffset) { MultiXactOffset nextOffset; - MultiXactOffset oldestOffset; - MultiXactId oldestMultiXactId; MultiXactId nextMultiXactId; bool oldestOffsetKnown; LWLockAcquire(MultiXactGenLock, LW_SHARED); nextOffset = MultiXactState->nextOffset; - oldestMultiXactId = MultiXactState->oldestMultiXactId; + *oldestMultiXactId = MultiXactState->oldestMultiXactId; nextMultiXactId = MultiXactState->nextMXact; - oldestOffset = MultiXactState->oldestOffset; + *oldestOffset = MultiXactState->oldestOffset; oldestOffsetKnown = MultiXactState->oldestOffsetKnown; LWLockRelease(MultiXactGenLock); if (!oldestOffsetKnown) + { + *members = 0; + *multixacts = 0; + *oldestMultiXactId = InvalidMultiXactId; + *oldestOffset = 0; return false; + } - *members = nextOffset - oldestOffset; - *multixacts = nextMultiXactId - oldestMultiXactId; + *members = nextOffset - *oldestOffset; + *multixacts = nextMultiXactId - *oldestMultiXactId; return true; } @@ -2922,9 +2927,11 @@ MultiXactMemberFreezeThreshold(void) uint32 victim_multixacts; double fraction; int result; + MultiXactId oldestMultiXactId; + MultiXactOffset oldestOffset; /* If we can't determine member space utilization, assume the worst. */ - if (!ReadMultiXactCounts(&multixacts, &members)) + if (!GetMultiXactInfo(&multixacts, &members, &oldestMultiXactId, &oldestOffset)) return 0; /* If member space utilization is low, no special action is required. */ @@ -3414,68 +3421,6 @@ multixact_redo(XLogReaderState *record) elog(PANIC, "multixact_redo: unknown op code %u", info); } -Datum -pg_get_multixact_members(PG_FUNCTION_ARGS) -{ - typedef struct - { - MultiXactMember *members; - int nmembers; - int iter; - } mxact; - MultiXactId mxid = PG_GETARG_TRANSACTIONID(0); - mxact *multi; - FuncCallContext *funccxt; - - if (mxid < FirstMultiXactId) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid MultiXactId: %u", mxid))); - - if (SRF_IS_FIRSTCALL()) - { - MemoryContext oldcxt; - TupleDesc tupdesc; - - funccxt = SRF_FIRSTCALL_INIT(); - oldcxt = MemoryContextSwitchTo(funccxt->multi_call_memory_ctx); - - multi = palloc(sizeof(mxact)); - /* no need to allow for old values here */ - multi->nmembers = GetMultiXactIdMembers(mxid, &multi->members, false, - false); - multi->iter = 0; - - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) - elog(ERROR, "return type must be a row type"); - funccxt->tuple_desc = tupdesc; - funccxt->attinmeta = TupleDescGetAttInMetadata(tupdesc); - funccxt->user_fctx = multi; - - MemoryContextSwitchTo(oldcxt); - } - - funccxt = SRF_PERCALL_SETUP(); - multi = (mxact *) funccxt->user_fctx; - - while (multi->iter < multi->nmembers) - { - HeapTuple tuple; - char *values[2]; - - values[0] = psprintf("%u", multi->members[multi->iter].xid); - values[1] = mxstatus_to_string(multi->members[multi->iter].status); - - tuple = BuildTupleFromCStrings(funccxt->attinmeta, values); - - multi->iter++; - pfree(values[0]); - SRF_RETURN_NEXT(funccxt, HeapTupleGetDatum(tuple)); - } - - SRF_RETURN_DONE(funccxt); -} - /* * Entrypoint for sync.c to sync offsets files. 
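A caller's-eye sketch of the reworked GetMultiXactInfo() above; the reporting function is hypothetical, and mirrors how MultiXactMemberFreezeThreshold() consumes the API (all four output parameters must be supplied even when only some are used):

#include "postgres.h"
#include "access/multixact.h"

static void
report_multixact_usage(void)
{
	uint32		multixacts;
	MultiXactOffset members;
	MultiXactId oldestMultiXactId;
	MultiXactOffset oldestOffset;

	/* Returns false (and zeroes all outputs) if the oldest offset is
	 * not yet known. */
	if (!GetMultiXactInfo(&multixacts, &members,
						  &oldestMultiXactId, &oldestOffset))
		return;

	elog(DEBUG1, "multixacts: %u, members: %u, oldest mxid: %u",
		 multixacts, members, oldestMultiXactId);
}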
*/ diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c index 10ec259f38295..5d3fcd62c9443 100644 --- a/src/backend/access/transam/slru.c +++ b/src/backend/access/transam/slru.c @@ -246,6 +246,7 @@ SimpleLruAutotuneBuffers(int divisor, int max) * buffer_tranche_id: tranche ID to use for the SLRU's per-buffer LWLocks. * bank_tranche_id: tranche ID to use for the bank LWLocks. * sync_handler: which set of functions to use to handle sync requests + * long_segment_names: use short or long segment names */ void SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns, @@ -400,15 +401,15 @@ SimpleLruZeroPage(SlruCtl ctl, int64 pageno) /* * Assume this page is now the latest active page. * - * Note that because both this routine and SlruSelectLRUPage run with - * ControlLock held, it is not possible for this to be zeroing a page that - * SlruSelectLRUPage is going to evict simultaneously. Therefore, there's - * no memory barrier here. + * Note that because both this routine and SlruSelectLRUPage run with a + * SLRU bank lock held, it is not possible for this to be zeroing a page + * that SlruSelectLRUPage is going to evict simultaneously. Therefore, + * there's no memory barrier here. */ pg_atomic_write_u64(&shared->latest_page_number, pageno); /* update the stats counter of zeroed pages */ - pgstat_count_slru_page_zeroed(shared->slru_stats_idx); + pgstat_count_slru_blocks_zeroed(shared->slru_stats_idx); return slotno; } @@ -437,7 +438,7 @@ SimpleLruZeroLSNs(SlruCtl ctl, int slotno) * This is a convenience wrapper for the common case of zeroing a page and * immediately flushing it to disk. * - * Control lock is acquired and released here. + * SLRU bank lock is acquired and released here. */ void SimpleLruZeroAndWritePage(SlruCtl ctl, int64 pageno) @@ -560,7 +561,7 @@ SimpleLruReadPage(SlruCtl ctl, int64 pageno, bool write_ok, SlruRecentlyUsed(shared, slotno); /* update the stats counter of pages found in the SLRU */ - pgstat_count_slru_page_hit(shared->slru_stats_idx); + pgstat_count_slru_blocks_hit(shared->slru_stats_idx); return slotno; } @@ -605,7 +606,7 @@ SimpleLruReadPage(SlruCtl ctl, int64 pageno, bool write_ok, SlruRecentlyUsed(shared, slotno); /* update the stats counter of pages not found in SLRU */ - pgstat_count_slru_page_read(shared->slru_stats_idx); + pgstat_count_slru_blocks_read(shared->slru_stats_idx); return slotno; } @@ -644,11 +645,11 @@ SimpleLruReadPage_ReadOnly(SlruCtl ctl, int64 pageno, TransactionId xid) shared->page_number[slotno] == pageno && shared->page_status[slotno] != SLRU_PAGE_READ_IN_PROGRESS) { - /* See comments for SlruRecentlyUsed macro */ + /* See comments for SlruRecentlyUsed() */ SlruRecentlyUsed(shared, slotno); /* update the stats counter of pages found in the SLRU */ - pgstat_count_slru_page_hit(shared->slru_stats_idx); + pgstat_count_slru_blocks_hit(shared->slru_stats_idx); return slotno; } @@ -778,7 +779,7 @@ SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int64 pageno) off_t endpos; /* update the stats counter of checked pages */ - pgstat_count_slru_page_exists(ctl->shared->slru_stats_idx); + pgstat_count_slru_blocks_exists(ctl->shared->slru_stats_idx); SlruFileName(ctl, path, segno); @@ -907,7 +908,7 @@ SlruPhysicalWritePage(SlruCtl ctl, int64 pageno, int slotno, SlruWriteAll fdata) int fd = -1; /* update the stats counter of written pages */ - pgstat_count_slru_page_written(shared->slru_stats_idx); + pgstat_count_slru_blocks_written(shared->slru_stats_idx); /* * Honor the write-WAL-before-data rule, if 
appropriate, so that we do not diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index 7918176fc588e..d8e2fce2c99b7 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -103,6 +103,7 @@ #include "storage/proc.h" #include "storage/procarray.h" #include "utils/builtins.h" +#include "utils/injection_point.h" #include "utils/memutils.h" #include "utils/timestamp.h" @@ -2332,12 +2333,17 @@ RecordTransactionCommitPrepared(TransactionId xid, replorigin = (replorigin_session_origin != InvalidRepOriginId && replorigin_session_origin != DoNotReplicateId); + /* Load the injection point before entering the critical section */ + INJECTION_POINT_LOAD("commit-after-delay-checkpoint"); + START_CRIT_SECTION(); /* See notes in RecordTransactionCommit */ Assert((MyProc->delayChkptFlags & DELAY_CHKPT_IN_COMMIT) == 0); MyProc->delayChkptFlags |= DELAY_CHKPT_IN_COMMIT; + INJECTION_POINT_CACHED("commit-after-delay-checkpoint", NULL); + /* * Ensures the DELAY_CHKPT_IN_COMMIT flag write is globally visible before * commit time is written. @@ -2809,3 +2815,58 @@ LookupGXactBySubid(Oid subid) return found; } + +/* + * TwoPhaseGetOldestXidInCommit + * Return the oldest transaction ID from prepared transactions that are + * currently in the commit critical section. + * + * This function only considers transactions in the currently connected + * database. If no matching transactions are found, it returns + * InvalidTransactionId. + */ +TransactionId +TwoPhaseGetOldestXidInCommit(void) +{ + TransactionId oldestRunningXid = InvalidTransactionId; + + LWLockAcquire(TwoPhaseStateLock, LW_SHARED); + + for (int i = 0; i < TwoPhaseState->numPrepXacts; i++) + { + GlobalTransaction gxact = TwoPhaseState->prepXacts[i]; + PGPROC *commitproc; + TransactionId xid; + + if (!gxact->valid) + continue; + + if (gxact->locking_backend == INVALID_PROC_NUMBER) + continue; + + /* + * Get the backend that is handling the transaction. It's safe to + * access this backend while holding TwoPhaseStateLock, as the backend + * can only be destroyed after either removing or unlocking the + * current global transaction, both of which require an exclusive + * TwoPhaseStateLock. + */ + commitproc = GetPGProcByNumber(gxact->locking_backend); + + if (MyDatabaseId != commitproc->databaseId) + continue; + + if ((commitproc->delayChkptFlags & DELAY_CHKPT_IN_COMMIT) == 0) + continue; + + xid = XidFromFullTransactionId(gxact->fxid); + + if (!TransactionIdIsValid(oldestRunningXid) || + TransactionIdPrecedes(xid, oldestRunningXid)) + oldestRunningXid = xid; + } + + LWLockRelease(TwoPhaseStateLock); + + return oldestRunningXid; +} diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c index fe895787cb72d..f8c4dada7c93a 100644 --- a/src/backend/access/transam/varsup.c +++ b/src/backend/access/transam/varsup.c @@ -19,11 +19,11 @@ #include "access/transam.h" #include "access/xact.h" #include "access/xlogutils.h" -#include "commands/dbcommands.h" #include "miscadmin.h" #include "postmaster/autovacuum.h" #include "storage/pmsignal.h" #include "storage/proc.h" +#include "utils/lsyscache.h" #include "utils/syscache.h" diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index b0891998b243f..0baf0ac6160af 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -303,6 +303,11 @@ static bool doPageWrites; * so it's a plain spinlock. 
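How TwoPhaseGetOldestXidInCommit() above is consumed lies outside this excerpt; the sketch below is therefore an assumption, showing one plausible shape of a caller that folds the result into a previously computed XID horizon:

#include "postgres.h"
#include "access/transam.h"
#include "access/twophase.h"

static TransactionId
clamp_horizon_sketch(TransactionId horizon)
{
	TransactionId xid = TwoPhaseGetOldestXidInCommit();

	/* InvalidTransactionId means no prepared transaction in this
	 * database is currently inside the commit critical section. */
	if (TransactionIdIsValid(xid) &&
		(!TransactionIdIsValid(horizon) ||
		 TransactionIdPrecedes(xid, horizon)))
		horizon = xid;

	return horizon;
}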
The other locks are held longer (potentially * over I/O operations), so we use LWLocks for them. These locks are: * + * WALBufMappingLock: must be held to replace a page in the WAL buffer cache. + * It is only held while initializing and changing the mapping. If the + * contents of the buffer being replaced haven't been written yet, the mapping + * lock is released while the write is done, and reacquired afterwards. + * * WALWriteLock: must be held to write WAL buffers to disk (XLogWrite or * XLogFlush). * @@ -468,37 +473,21 @@ typedef struct XLogCtlData pg_atomic_uint64 logFlushResult; /* last byte + 1 flushed */ /* - * First initialized page in the cache (first byte position). - */ - XLogRecPtr InitializedFrom; - - /* - * Latest reserved for initialization page in the cache (last byte - * position + 1). + * Latest initialized page in the cache (last byte position + 1). * - * To change the identity of a buffer, you need to advance - * InitializeReserved first. To change the identity of a buffer that's + * To change the identity of a buffer (and InitializedUpTo), you need to + * hold WALBufMappingLock. To change the identity of a buffer that's * still dirty, the old page needs to be written out first, and for that * you need WALWriteLock, and you need to ensure that there are no * in-progress insertions to the page by calling * WaitXLogInsertionsToFinish(). */ - pg_atomic_uint64 InitializeReserved; - - /* - * Latest initialized page in the cache (last byte position + 1). - * - * InitializedUpTo is updated after the buffer initialization. After - * update, waiters got notification using InitializedUpToCondVar. - */ - pg_atomic_uint64 InitializedUpTo; - ConditionVariable InitializedUpToCondVar; + XLogRecPtr InitializedUpTo; /* * These values do not change after startup, although the pointed-to pages - * and xlblocks values certainly do. xlblocks values are changed - * lock-free according to the check for the xlog write position and are - * accompanied by changes of InitializeReserved and InitializedUpTo. + * and xlblocks values certainly do. xlblocks values are protected by + * WALBufMappingLock. */ char *pages; /* buffers for unwritten XLOG pages */ pg_atomic_uint64 *xlblocks; /* 1st byte ptr-s + XLOG_BLCKSZ */ @@ -703,7 +692,7 @@ static void InitControlFile(uint64 sysidentifier, uint32 data_checksum_version); static void WriteControlFile(void); static void ReadControlFile(void); static void UpdateControlFile(void); -static char *str_time(pg_time_t tnow); +static char *str_time(pg_time_t tnow, char *buf, size_t bufsize); static int get_sync_bit(int method); @@ -821,9 +810,9 @@ XLogInsertRecord(XLogRecData *rdata, * fullPageWrites from changing until the insertion is finished. * * Step 2 can usually be done completely in parallel. If the required WAL - * page is not initialized yet, you have to go through AdvanceXLInsertBuffer, - * which will ensure it is initialized. But the WAL writer tries to do that - * ahead of insertions to avoid that from happening in the critical path. + * page is not initialized yet, you have to grab WALBufMappingLock to + * initialize it, but the WAL writer tries to do that ahead of insertions + * to avoid that from happening in the critical path. 
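The rule stated in the new comment above, and enforced in the AdvanceXLInsertBuffer() hunk below, is: never wait for WAL insertions or perform the write while still holding WALBufMappingLock. A reduced sketch of that retry loop, with the dirty-check and write-out abstracted behind hypothetical helpers:

#include "postgres.h"
#include "storage/lwlock.h"

extern bool victim_buffer_is_dirty(void);	/* hypothetical */
extern void write_out_victim_buffer(void);	/* hypothetical */

static void
recycle_wal_buffer_sketch(void)
{
	LWLockAcquire(WALBufMappingLock, LW_EXCLUSIVE);
	while (victim_buffer_is_dirty())
	{
		/* Release first, so the insertions we must wait for can
		 * finish; holding the mapping lock across the wait risks
		 * deadlock. */
		LWLockRelease(WALBufMappingLock);
		write_out_victim_buffer();
		/* Reacquire and recheck: another backend may have done the
		 * work in the meantime. */
		LWLockAcquire(WALBufMappingLock, LW_EXCLUSIVE);
	}
	/* ... replace the page mapping, advance InitializedUpTo ... */
	LWLockRelease(WALBufMappingLock);
}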
* *---------- */ @@ -2005,79 +1994,32 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic) XLogRecPtr NewPageEndPtr = InvalidXLogRecPtr; XLogRecPtr NewPageBeginPtr; XLogPageHeader NewPage; - XLogRecPtr ReservedPtr; int npages pg_attribute_unused() = 0; - /* - * We must run the loop below inside the critical section as we expect - * XLogCtl->InitializedUpTo to eventually keep up. The most of callers - * already run inside the critical section. Except for WAL writer, which - * passed 'opportunistic == true', and therefore we don't perform - * operations that could error out. - * - * Start an explicit critical section anyway though. - */ - Assert(CritSectionCount > 0 || opportunistic); - START_CRIT_SECTION(); + LWLockAcquire(WALBufMappingLock, LW_EXCLUSIVE); - /*-- - * Loop till we get all the pages in WAL buffer before 'upto' reserved for - * initialization. Multiple process can initialize different buffers with - * this loop in parallel as following. - * - * 1. Reserve page for initialization using XLogCtl->InitializeReserved. - * 2. Initialize the reserved page. - * 3. Attempt to advance XLogCtl->InitializedUpTo, + /* + * Now that we have the lock, check if someone initialized the page + * already. */ - ReservedPtr = pg_atomic_read_u64(&XLogCtl->InitializeReserved); - while (upto >= ReservedPtr || opportunistic) + while (upto >= XLogCtl->InitializedUpTo || opportunistic) { - Assert(ReservedPtr % XLOG_BLCKSZ == 0); + nextidx = XLogRecPtrToBufIdx(XLogCtl->InitializedUpTo); /* - * Get ending-offset of the buffer page we need to replace. - * - * We don't lookup into xlblocks, but rather calculate position we - * must wait to be written. If it was written, xlblocks will have this - * position (or uninitialized) + * Get ending-offset of the buffer page we need to replace (this may + * be zero if the buffer hasn't been used yet). Fall through if it's + * already written out. */ - if (ReservedPtr + XLOG_BLCKSZ > XLogCtl->InitializedFrom + XLOG_BLCKSZ * XLOGbuffers) - OldPageRqstPtr = ReservedPtr + XLOG_BLCKSZ - (XLogRecPtr) XLOG_BLCKSZ * XLOGbuffers; - else - OldPageRqstPtr = InvalidXLogRecPtr; - - if (LogwrtResult.Write < OldPageRqstPtr && opportunistic) + OldPageRqstPtr = pg_atomic_read_u64(&XLogCtl->xlblocks[nextidx]); + if (LogwrtResult.Write < OldPageRqstPtr) { /* - * If we just want to pre-initialize as much as we can without - * flushing, give up now. + * Nope, got work to do. If we just want to pre-initialize as much + * as we can without flushing, give up now. */ - upto = ReservedPtr - 1; - break; - } - - /* - * Attempt to reserve the page for initialization. Failure means that - * this page got reserved by another process. - */ - if (!pg_atomic_compare_exchange_u64(&XLogCtl->InitializeReserved, - &ReservedPtr, - ReservedPtr + XLOG_BLCKSZ)) - continue; - - /* - * Wait till page gets correctly initialized up to OldPageRqstPtr. - */ - nextidx = XLogRecPtrToBufIdx(ReservedPtr); - while (pg_atomic_read_u64(&XLogCtl->InitializedUpTo) < OldPageRqstPtr) - ConditionVariableSleep(&XLogCtl->InitializedUpToCondVar, WAIT_EVENT_WAL_BUFFER_INIT); - ConditionVariableCancelSleep(); - Assert(pg_atomic_read_u64(&XLogCtl->xlblocks[nextidx]) == OldPageRqstPtr); - - /* Fall through if it's already written out. */ - if (LogwrtResult.Write < OldPageRqstPtr) - { - /* Nope, got work to do. 
*/ + if (opportunistic) + break; /* Advance shared memory write request position */ SpinLockAcquire(&XLogCtl->info_lck); @@ -2092,6 +2034,14 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic) RefreshXLogWriteResult(LogwrtResult); if (LogwrtResult.Write < OldPageRqstPtr) { + /* + * Must acquire write lock. Release WALBufMappingLock first, + * to make sure that all insertions that we need to wait for + * can finish (up to this same position). Otherwise we risk + * deadlock. + */ + LWLockRelease(WALBufMappingLock); + WaitXLogInsertionsToFinish(OldPageRqstPtr); LWLockAcquire(WALWriteLock, LW_EXCLUSIVE); @@ -2119,6 +2069,9 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic) */ pgstat_report_fixed = true; } + /* Re-acquire WALBufMappingLock and retry */ + LWLockAcquire(WALBufMappingLock, LW_EXCLUSIVE); + continue; } } @@ -2126,9 +2079,11 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic) * Now the next buffer slot is free and we can set it up to be the * next output page. */ - NewPageBeginPtr = ReservedPtr; + NewPageBeginPtr = XLogCtl->InitializedUpTo; NewPageEndPtr = NewPageBeginPtr + XLOG_BLCKSZ; + Assert(XLogRecPtrToBufIdx(NewPageBeginPtr) == nextidx); + NewPage = (XLogPageHeader) (XLogCtl->pages + nextidx * (Size) XLOG_BLCKSZ); /* @@ -2192,100 +2147,12 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic) */ pg_write_barrier(); - /*----- - * Update the value of XLogCtl->xlblocks[nextidx] and try to advance - * XLogCtl->InitializedUpTo in a lock-less manner. - * - * First, let's provide a formal proof of the algorithm. Let it be 'n' - * process with the following variables in shared memory: - * f - an array of 'n' boolean flags, - * v - atomic integer variable. - * - * Also, let - * i - a number of a process, - * j - local integer variable, - * CAS(var, oldval, newval) - compare-and-swap atomic operation - * returning true on success, - * write_barrier()/read_barrier() - memory barriers. - * - * The pseudocode for each process is the following. - * - * j := i - * f[i] := true - * write_barrier() - * while CAS(v, j, j + 1): - * j := j + 1 - * read_barrier() - * if not f[j]: - * break - * - * Let's prove that v eventually reaches the value of n. - * 1. Prove by contradiction. Assume v doesn't reach n and stucks - * on k, where k < n. - * 2. Process k attempts CAS(v, k, k + 1). 1). If, as we assumed, v - * gets stuck at k, then this CAS operation must fail. Therefore, - * v < k when process k attempts CAS(v, k, k + 1). - * 3. If, as we assumed, v gets stuck at k, then the value k of v - * must be achieved by some process m, where m < k. The process - * m must observe f[k] == false. Otherwise, it will later attempt - * CAS(v, k, k + 1) with success. - * 4. Therefore, corresponding read_barrier() (while j == k) on - * process m reached before write_barrier() of process k. But then - * process k attempts CAS(v, k, k + 1) after process m successfully - * incremented v to k, and that CAS operation must succeed. - * That leads to a contradiction. So, there is no such k (k < n) - * where v gets stuck. Q.E.D. - * - * To apply this proof to the code below, we assume - * XLogCtl->InitializedUpTo will play the role of v with XLOG_BLCKSZ - * granularity. We also assume setting XLogCtl->xlblocks[nextidx] to - * NewPageEndPtr to play the role of setting f[i] to true. 
Also, note - * that processes can't concurrently map different xlog locations to - * the same nextidx because we previously requested that - * XLogCtl->InitializedUpTo >= OldPageRqstPtr. So, a xlog buffer can - * be taken for initialization only once the previous initialization - * takes effect on XLogCtl->InitializedUpTo. - */ - pg_atomic_write_u64(&XLogCtl->xlblocks[nextidx], NewPageEndPtr); - - pg_write_barrier(); - - while (pg_atomic_compare_exchange_u64(&XLogCtl->InitializedUpTo, &NewPageBeginPtr, NewPageEndPtr)) - { - NewPageBeginPtr = NewPageEndPtr; - NewPageEndPtr = NewPageBeginPtr + XLOG_BLCKSZ; - nextidx = XLogRecPtrToBufIdx(NewPageBeginPtr); - - pg_read_barrier(); - - if (pg_atomic_read_u64(&XLogCtl->xlblocks[nextidx]) != NewPageEndPtr) - { - /* - * Page at nextidx wasn't initialized yet, so we can't move - * InitializedUpto further. It will be moved by backend which - * will initialize nextidx. - */ - ConditionVariableBroadcast(&XLogCtl->InitializedUpToCondVar); - break; - } - } + XLogCtl->InitializedUpTo = NewPageEndPtr; npages++; } - - END_CRIT_SECTION(); - - /* - * All the pages in WAL buffer before 'upto' were reserved for - * initialization. However, some pages might be reserved by concurrent - * processes. Wait till they finish initialization. - */ - while (upto >= pg_atomic_read_u64(&XLogCtl->InitializedUpTo)) - ConditionVariableSleep(&XLogCtl->InitializedUpToCondVar, WAIT_EVENT_WAL_BUFFER_INIT); - ConditionVariableCancelSleep(); - - pg_read_barrier(); + LWLockRelease(WALBufMappingLock); #ifdef WAL_DEBUG if (XLOG_DEBUG && npages > 0) @@ -4390,7 +4257,7 @@ WriteControlFile(void) ControlFile->toast_max_chunk_size = TOAST_MAX_CHUNK_SIZE; ControlFile->loblksize = LOBLKSIZE; - ControlFile->float8ByVal = FLOAT8PASSBYVAL; + ControlFile->float8ByVal = true; /* vestigial */ /* * Initialize the default 'char' signedness. 
@@ -4651,23 +4518,7 @@ ReadControlFile(void) "LOBLKSIZE", (int) LOBLKSIZE), errhint("It looks like you need to recompile or initdb."))); -#ifdef USE_FLOAT8_BYVAL - if (ControlFile->float8ByVal != true) - ereport(FATAL, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized without USE_FLOAT8_BYVAL" - " but the server was compiled with USE_FLOAT8_BYVAL."), - errhint("It looks like you need to recompile or initdb."))); -#else - if (ControlFile->float8ByVal != false) - ereport(FATAL, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with USE_FLOAT8_BYVAL" - " but the server was compiled without USE_FLOAT8_BYVAL."), - errhint("It looks like you need to recompile or initdb."))); -#endif + Assert(ControlFile->float8ByVal); /* vestigial, not worth an error msg */ wal_segment_size = ControlFile->xlog_seg_size; @@ -5194,10 +5045,6 @@ XLOGShmemInit(void) pg_atomic_init_u64(&XLogCtl->logWriteResult, InvalidXLogRecPtr); pg_atomic_init_u64(&XLogCtl->logFlushResult, InvalidXLogRecPtr); pg_atomic_init_u64(&XLogCtl->unloggedLSN, InvalidXLogRecPtr); - - pg_atomic_init_u64(&XLogCtl->InitializeReserved, InvalidXLogRecPtr); - pg_atomic_init_u64(&XLogCtl->InitializedUpTo, InvalidXLogRecPtr); - ConditionVariableInit(&XLogCtl->InitializedUpToCondVar); } /* @@ -5371,11 +5218,9 @@ BootStrapXLOG(uint32 data_checksum_version) } static char * -str_time(pg_time_t tnow) +str_time(pg_time_t tnow, char *buf, size_t bufsize) { - char *buf = palloc(128); - - pg_strftime(buf, 128, + pg_strftime(buf, bufsize, "%Y-%m-%d %H:%M:%S %Z", pg_localtime(&tnow, log_timezone)); @@ -5618,6 +5463,7 @@ StartupXLOG(void) XLogRecPtr missingContrecPtr; TransactionId oldestActiveXID; bool promoted = false; + char timebuf[128]; /* * We should have an aux process resource owner to use, and we should not @@ -5646,25 +5492,29 @@ StartupXLOG(void) */ ereport(IsPostmasterEnvironment ? 
LOG : NOTICE, (errmsg("database system was shut down at %s", - str_time(ControlFile->time)))); + str_time(ControlFile->time, + timebuf, sizeof(timebuf))))); break; case DB_SHUTDOWNED_IN_RECOVERY: ereport(LOG, (errmsg("database system was shut down in recovery at %s", - str_time(ControlFile->time)))); + str_time(ControlFile->time, + timebuf, sizeof(timebuf))))); break; case DB_SHUTDOWNING: ereport(LOG, (errmsg("database system shutdown was interrupted; last known up at %s", - str_time(ControlFile->time)))); + str_time(ControlFile->time, + timebuf, sizeof(timebuf))))); break; case DB_IN_CRASH_RECOVERY: ereport(LOG, (errmsg("database system was interrupted while in recovery at %s", - str_time(ControlFile->time)), + str_time(ControlFile->time, + timebuf, sizeof(timebuf))), errhint("This probably means that some data is corrupted and" " you will have to use the last backup for recovery."))); break; @@ -5672,7 +5522,8 @@ StartupXLOG(void) case DB_IN_ARCHIVE_RECOVERY: ereport(LOG, (errmsg("database system was interrupted while in recovery at log time %s", - str_time(ControlFile->checkPointCopy.time)), + str_time(ControlFile->checkPointCopy.time, + timebuf, sizeof(timebuf))), errhint("If this has occurred more than once some data might be corrupted" " and you might need to choose an earlier recovery target."))); break; @@ -5680,7 +5531,8 @@ StartupXLOG(void) case DB_IN_PRODUCTION: ereport(LOG, (errmsg("database system was interrupted; last known up at %s", - str_time(ControlFile->time)))); + str_time(ControlFile->time, + timebuf, sizeof(timebuf))))); break; default: @@ -6216,8 +6068,7 @@ StartupXLOG(void) memset(page + len, 0, XLOG_BLCKSZ - len); pg_atomic_write_u64(&XLogCtl->xlblocks[firstIdx], endOfRecoveryInfo->lastPageBeginPtr + XLOG_BLCKSZ); - pg_atomic_write_u64(&XLogCtl->InitializedUpTo, endOfRecoveryInfo->lastPageBeginPtr + XLOG_BLCKSZ); - XLogCtl->InitializedFrom = endOfRecoveryInfo->lastPageBeginPtr; + XLogCtl->InitializedUpTo = endOfRecoveryInfo->lastPageBeginPtr + XLOG_BLCKSZ; } else { @@ -6226,10 +6077,8 @@ StartupXLOG(void) * let the first attempt to insert a log record to initialize the next * buffer. */ - pg_atomic_write_u64(&XLogCtl->InitializedUpTo, EndOfLog); - XLogCtl->InitializedFrom = EndOfLog; + XLogCtl->InitializedUpTo = EndOfLog; } - pg_atomic_write_u64(&XLogCtl->InitializeReserved, pg_atomic_read_u64(&XLogCtl->InitializedUpTo)); /* * Update local and shared status. This is OK to do without any locks @@ -6325,6 +6174,12 @@ StartupXLOG(void) */ CompleteCommitTsInitialization(); + /* Clean up EndOfWalRecoveryInfo data to appease Valgrind leak checking */ + if (endOfRecoveryInfo->lastPage) + pfree(endOfRecoveryInfo->lastPage); + pfree(endOfRecoveryInfo->recoveryStopReason); + pfree(endOfRecoveryInfo); + /* * All done with end-of-recovery actions. * @@ -8530,6 +8385,14 @@ xlog_redo(XLogReaderState *record) checkPoint.ThisTimeLineID, replayTLI))); RecoveryRestartPoint(&checkPoint, record); + + /* + * After replaying a checkpoint record, free all smgr objects. + * Otherwise we would never do so for dropped relations, as the + * startup does not process shared invalidation messages or call + * AtEOXact_SMgr(). + */ + smgrdestroyall(); } else if (info == XLOG_CHECKPOINT_ONLINE) { @@ -8583,6 +8446,14 @@ xlog_redo(XLogReaderState *record) checkPoint.ThisTimeLineID, replayTLI))); RecoveryRestartPoint(&checkPoint, record); + + /* + * After replaying a checkpoint record, free all smgr objects. 
+ * Otherwise we would never do so for dropped relations, as the + * startup does not process shared invalidation messages or call + * AtEOXact_SMgr(). + */ + smgrdestroyall(); } else if (info == XLOG_OVERWRITE_CONTRECORD) { @@ -9000,7 +8871,7 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces, * work correctly, it is critical that sessionBackupState is only updated * after this block is over. */ - PG_ENSURE_ERROR_CLEANUP(do_pg_abort_backup, DatumGetBool(true)); + PG_ENSURE_ERROR_CLEANUP(do_pg_abort_backup, BoolGetDatum(true)); { bool gotUniqueStartpoint = false; DIR *tblspcdir; @@ -9239,7 +9110,7 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces, state->starttime = (pg_time_t) time(NULL); } - PG_END_ENSURE_ERROR_CLEANUP(do_pg_abort_backup, DatumGetBool(true)); + PG_END_ENSURE_ERROR_CLEANUP(do_pg_abort_backup, BoolGetDatum(true)); state->started_in_recovery = backup_started_in_recovery; @@ -9579,7 +9450,7 @@ register_persistent_abort_backup_handler(void) if (already_done) return; - before_shmem_exit(do_pg_abort_backup, DatumGetBool(false)); + before_shmem_exit(do_pg_abort_backup, BoolGetDatum(false)); already_done = true; } diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c index e8f3ba00caae7..346319338a0ee 100644 --- a/src/backend/access/transam/xlogrecovery.c +++ b/src/backend/access/transam/xlogrecovery.c @@ -1626,6 +1626,7 @@ ShutdownWalRecovery(void) close(readFile); readFile = -1; } + pfree(xlogreader->private_data); XLogReaderFree(xlogreader); XLogPrefetcherFree(xlogprefetcher); @@ -4833,10 +4834,10 @@ check_recovery_target_lsn(char **newval, void **extra, GucSource source) { XLogRecPtr lsn; XLogRecPtr *myextra; - bool have_error = false; + ErrorSaveContext escontext = {T_ErrorSaveContext}; - lsn = pg_lsn_in_internal(*newval, &have_error); - if (have_error) + lsn = pg_lsn_in_safe(*newval, (Node *) &escontext); + if (escontext.error_occurred) return false; myextra = (XLogRecPtr *) guc_malloc(LOG, sizeof(XLogRecPtr)); diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c index 27ea52fdfee66..38176d9688e4c 100644 --- a/src/backend/access/transam/xlogutils.c +++ b/src/backend/access/transam/xlogutils.c @@ -523,7 +523,7 @@ XLogReadBufferExtended(RelFileLocator rlocator, ForkNumber forknum, if (mode == RBM_NORMAL) { /* check that page has been initialized */ - Page page = (Page) BufferGetPage(buffer); + Page page = BufferGetPage(buffer); /* * We assume that PageIsNew is safe without a lock. During recovery, diff --git a/src/backend/backup/basebackup.c b/src/backend/backup/basebackup.c index f0f88838dc21a..bb7d90aa5d963 100644 --- a/src/backend/backup/basebackup.c +++ b/src/backend/backup/basebackup.c @@ -1048,7 +1048,7 @@ SendBaseBackup(BaseBackupCmd *cmd, IncrementalBackupInfo *ib) sink = bbsink_zstd_new(sink, &opt.compression_specification); /* Set up progress reporting. 
*/ - sink = bbsink_progress_new(sink, opt.progress); + sink = bbsink_progress_new(sink, opt.progress, opt.incremental); /* * Perform the base backup, but make sure we clean up the bbsink even if diff --git a/src/backend/backup/basebackup_copy.c b/src/backend/backup/basebackup_copy.c index 18b0b5a52d3f8..eb45d3bcb663b 100644 --- a/src/backend/backup/basebackup_copy.c +++ b/src/backend/backup/basebackup_copy.c @@ -143,7 +143,7 @@ bbsink_copystream_begin_backup(bbsink *sink) buf = palloc(mysink->base.bbs_buffer_length + MAXIMUM_ALIGNOF); mysink->msgbuffer = buf + (MAXIMUM_ALIGNOF - 1); mysink->base.bbs_buffer = buf + MAXIMUM_ALIGNOF; - mysink->msgbuffer[0] = 'd'; /* archive or manifest data */ + mysink->msgbuffer[0] = PqMsg_CopyData; /* archive or manifest data */ /* Tell client the backup start location. */ SendXlogRecPtrResult(state->startptr, state->starttli); @@ -170,7 +170,7 @@ bbsink_copystream_begin_archive(bbsink *sink, const char *archive_name) ti = list_nth(state->tablespaces, state->tablespace_num); pq_beginmessage(&buf, PqMsg_CopyData); - pq_sendbyte(&buf, 'n'); /* New archive */ + pq_sendbyte(&buf, PqBackupMsg_NewArchive); pq_sendstring(&buf, archive_name); pq_sendstring(&buf, ti->path == NULL ? "" : ti->path); pq_endmessage(&buf); @@ -191,7 +191,7 @@ bbsink_copystream_archive_contents(bbsink *sink, size_t len) if (mysink->send_to_client) { /* Add one because we're also sending a leading type byte. */ - pq_putmessage('d', mysink->msgbuffer, len + 1); + pq_putmessage(PqMsg_CopyData, mysink->msgbuffer, len + 1); } /* Consider whether to send a progress report to the client. */ @@ -221,7 +221,7 @@ bbsink_copystream_archive_contents(bbsink *sink, size_t len) mysink->last_progress_report_time = now; pq_beginmessage(&buf, PqMsg_CopyData); - pq_sendbyte(&buf, 'p'); /* Progress report */ + pq_sendbyte(&buf, PqBackupMsg_ProgressReport); pq_sendint64(&buf, state->bytes_done); pq_endmessage(&buf); pq_flush_if_writable(); @@ -247,7 +247,7 @@ bbsink_copystream_end_archive(bbsink *sink) mysink->bytes_done_at_last_time_check = state->bytes_done; mysink->last_progress_report_time = GetCurrentTimestamp(); pq_beginmessage(&buf, PqMsg_CopyData); - pq_sendbyte(&buf, 'p'); /* Progress report */ + pq_sendbyte(&buf, PqBackupMsg_ProgressReport); pq_sendint64(&buf, state->bytes_done); pq_endmessage(&buf); pq_flush_if_writable(); @@ -262,7 +262,7 @@ bbsink_copystream_begin_manifest(bbsink *sink) StringInfoData buf; pq_beginmessage(&buf, PqMsg_CopyData); - pq_sendbyte(&buf, 'm'); /* Manifest */ + pq_sendbyte(&buf, PqBackupMsg_Manifest); pq_endmessage(&buf); } @@ -277,7 +277,7 @@ bbsink_copystream_manifest_contents(bbsink *sink, size_t len) if (mysink->send_to_client) { /* Add one because we're also sending a leading type byte. */ - pq_putmessage('d', mysink->msgbuffer, len + 1); + pq_putmessage(PqMsg_CopyData, mysink->msgbuffer, len + 1); } } diff --git a/src/backend/backup/basebackup_progress.c b/src/backend/backup/basebackup_progress.c index 1d22b541f89af..dac205936229b 100644 --- a/src/backend/backup/basebackup_progress.c +++ b/src/backend/backup/basebackup_progress.c @@ -56,7 +56,7 @@ static const bbsink_ops bbsink_progress_ops = { * forwards data to a successor sink. 
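 *
 * Illustrative call, mirroring the updated caller in basebackup.c shown
 * above (a sketch, not an additional hunk):
 *
 *	sink = bbsink_progress_new(sink, opt.progress, opt.incremental);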
*/ bbsink * -bbsink_progress_new(bbsink *next, bool estimate_backup_size) +bbsink_progress_new(bbsink *next, bool estimate_backup_size, bool incremental) { bbsink *sink; @@ -69,10 +69,15 @@ bbsink_progress_new(bbsink *next, bool estimate_backup_size) /* * Report that a base backup is in progress, and set the total size of the * backup to -1, which will get translated to NULL. If we're estimating - * the backup size, we'll insert the real estimate when we have it. + * the backup size, we'll insert the real estimate when we have it. Also, + * the backup type is set. */ pgstat_progress_start_command(PROGRESS_COMMAND_BASEBACKUP, InvalidOid); pgstat_progress_update_param(PROGRESS_BASEBACKUP_BACKUP_TOTAL, -1); + pgstat_progress_update_param(PROGRESS_BASEBACKUP_BACKUP_TYPE, + incremental + ? PROGRESS_BASEBACKUP_BACKUP_TYPE_INCREMENTAL + : PROGRESS_BASEBACKUP_BACKUP_TYPE_FULL); return sink; } diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c index 9ca8a88dc9104..cd139bd65a668 100644 --- a/src/backend/catalog/aclchk.c +++ b/src/backend/catalog/aclchk.c @@ -64,7 +64,6 @@ #include "catalog/pg_proc.h" #include "catalog/pg_tablespace.h" #include "catalog/pg_type.h" -#include "commands/dbcommands.h" #include "commands/defrem.h" #include "commands/event_trigger.h" #include "commands/extension.h" @@ -659,6 +658,20 @@ ExecGrantStmt_oids(InternalGrant *istmt) * objectNamesToOids * * Turn a list of object names of a given type into an Oid list. + * + * XXX This function intentionally takes only an AccessShareLock. In the face + * of concurrent DDL, we might easily latch onto an old version of an object, + * causing the GRANT or REVOKE statement to fail. But it does prevent the + * object from disappearing altogether. To do better, we would need to use a + * self-exclusive lock, perhaps ShareUpdateExclusiveLock, here and before + * *every* CatalogTupleUpdate() of a row that GRANT/REVOKE can affect. + * Besides that additional work, this could have operational costs. For + * example, it would make GRANT ALL TABLES IN SCHEMA terminate every + * autovacuum running in the schema and consume a shared lock table entry per + * table in the schema. The user-visible benefit of that additional work is + * just changing "ERROR: tuple concurrently updated" to blocking. That's not + * nothing, but it might not outweigh autovacuum termination and lock table + * consumption spikes. */ static List * objectNamesToOids(ObjectType objtype, List *objnames, bool is_grant) diff --git a/src/backend/catalog/genbki.pl b/src/backend/catalog/genbki.pl index df3231fcd41c2..6c02aee726754 100644 --- a/src/backend/catalog/genbki.pl +++ b/src/backend/catalog/genbki.pl @@ -1054,8 +1054,7 @@ sub morph_row_for_schemapg } # Expand booleans from 'f'/'t' to 'false'/'true'. - # Some values might be other macros (eg FLOAT8PASSBYVAL), - # don't change. + # Some values might be other macros, if so don't change. 
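	# For example, a catalog entry of 't' is emitted as 'true' and 'f' as
	# 'false', while a value that is itself a C macro name is passed
	# through unchanged.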
elsif ($atttype eq 'bool') { $row->{$attname} = 'true' if $row->{$attname} eq 't'; diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c index d97d632a7ef55..8bd4d6c3d4346 100644 --- a/src/backend/catalog/namespace.c +++ b/src/backend/catalog/namespace.c @@ -41,7 +41,6 @@ #include "catalog/pg_ts_parser.h" #include "catalog/pg_ts_template.h" #include "catalog/pg_type.h" -#include "commands/dbcommands.h" #include "common/hashfn_unstable.h" #include "funcapi.h" #include "mb/pg_wchar.h" diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c index b63fd57dc04bb..91f3018fd0a8c 100644 --- a/src/backend/catalog/objectaddress.c +++ b/src/backend/catalog/objectaddress.c @@ -62,7 +62,6 @@ #include "catalog/pg_ts_template.h" #include "catalog/pg_type.h" #include "catalog/pg_user_mapping.h" -#include "commands/dbcommands.h" #include "commands/defrem.h" #include "commands/event_trigger.h" #include "commands/extension.h" @@ -4283,8 +4282,8 @@ pg_identify_object(PG_FUNCTION_ARGS) nspAttnum = get_object_attnum_namespace(address.classId); if (nspAttnum != InvalidAttrNumber) { - schema_oid = heap_getattr(objtup, nspAttnum, - RelationGetDescr(catalog), &isnull); + schema_oid = DatumGetObjectId(heap_getattr(objtup, nspAttnum, + RelationGetDescr(catalog), &isnull)); if (isnull) elog(ERROR, "invalid null namespace in object %u/%u/%d", address.classId, address.objectId, address.objectSubId); diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c index a05f8a87c1f83..c62e8acd41375 100644 --- a/src/backend/catalog/pg_aggregate.c +++ b/src/backend/catalog/pg_aggregate.c @@ -654,7 +654,7 @@ AggregateCreate(const char *aggName, for (i = 0; i < Natts_pg_aggregate; i++) { nulls[i] = false; - values[i] = (Datum) NULL; + values[i] = (Datum) 0; replaces[i] = true; } values[Anum_pg_aggregate_aggfnoid - 1] = ObjectIdGetDatum(procOid); diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c index 2d5ac1ea8138b..6002fd0002fc9 100644 --- a/src/backend/catalog/pg_constraint.c +++ b/src/backend/catalog/pg_constraint.c @@ -179,7 +179,7 @@ CreateConstraintEntry(const char *constraintName, for (i = 0; i < Natts_pg_constraint; i++) { nulls[i] = false; - values[i] = (Datum) NULL; + values[i] = (Datum) 0; } conOid = GetNewOidWithIndex(conDesc, ConstraintOidIndexId, diff --git a/src/backend/catalog/pg_conversion.c b/src/backend/catalog/pg_conversion.c index 04cc375caea8c..090f680d1908f 100644 --- a/src/backend/catalog/pg_conversion.c +++ b/src/backend/catalog/pg_conversion.c @@ -87,7 +87,7 @@ ConversionCreate(const char *conname, Oid connamespace, for (i = 0; i < Natts_pg_conversion; i++) { nulls[i] = false; - values[i] = (Datum) NULL; + values[i] = (Datum) 0; } /* form a tuple */ diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c index a1634e58eecdd..da9c2a46cfaa1 100644 --- a/src/backend/catalog/pg_enum.c +++ b/src/backend/catalog/pg_enum.c @@ -110,12 +110,6 @@ EnumValuesCreate(Oid enumTypeOid, List *vals) num_elems = list_length(vals); - /* - * We do not bother to check the list of values for duplicates --- if you - * have any, you'll get a less-than-friendly unique-index violation. It is - * probably not worth trying harder. 
- */ - pg_enum = table_open(EnumRelationId, RowExclusiveLock); /* @@ -164,6 +158,7 @@ EnumValuesCreate(Oid enumTypeOid, List *vals) { char *lab = strVal(lfirst(lc)); Name enumlabel = palloc0(NAMEDATALEN); + ListCell *lc2; /* * labels are stored in a name field, for easier syscache lookup, so @@ -176,6 +171,24 @@ EnumValuesCreate(Oid enumTypeOid, List *vals) errdetail("Labels must be %d bytes or less.", NAMEDATALEN - 1))); + /* + * Check for duplicate labels. The unique index on pg_enum would catch + * that anyway, but we prefer a friendlier error message. + */ + foreach(lc2, vals) + { + /* Only need to compare lc to earlier entries */ + if (lc2 == lc) + break; + + if (strcmp(lab, strVal(lfirst(lc2))) == 0) + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("enum label \"%s\" used more than once", + lab))); + } + + /* OK, construct a tuple for this label */ ExecClearTuple(slot[slotCount]); memset(slot[slotCount]->tts_isnull, false, diff --git a/src/backend/catalog/pg_namespace.c b/src/backend/catalog/pg_namespace.c index 6f5634a4de69b..616bcc7852113 100644 --- a/src/backend/catalog/pg_namespace.c +++ b/src/backend/catalog/pg_namespace.c @@ -76,7 +76,7 @@ NamespaceCreate(const char *nspName, Oid ownerId, bool isTemp) for (i = 0; i < Natts_pg_namespace; i++) { nulls[i] = false; - values[i] = (Datum) NULL; + values[i] = (Datum) 0; } nspoid = GetNewOidWithIndex(nspdesc, NamespaceOidIndexId, diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c index bfcfa643464ac..44d2ccb6788e9 100644 --- a/src/backend/catalog/pg_operator.c +++ b/src/backend/catalog/pg_operator.c @@ -225,7 +225,7 @@ OperatorShellMake(const char *operatorName, for (i = 0; i < Natts_pg_operator; ++i) { nulls[i] = false; - values[i] = (Datum) NULL; /* redundant, but safe */ + values[i] = (Datum) 0; /* redundant, but safe */ } /* @@ -453,7 +453,7 @@ OperatorCreate(const char *operatorName, for (i = 0; i < Natts_pg_operator; ++i) { - values[i] = (Datum) NULL; + values[i] = (Datum) 0; replaces[i] = true; nulls[i] = false; } diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c index 5fdcf24d5f8de..75b17fed15e5b 100644 --- a/src/backend/catalog/pg_proc.c +++ b/src/backend/catalog/pg_proc.c @@ -1212,6 +1212,6 @@ oid_array_to_list(Datum datum) deconstruct_array_builtin(array, OIDOID, &values, NULL, &nelems); for (i = 0; i < nelems; i++) - result = lappend_oid(result, values[i]); + result = lappend_oid(result, DatumGetObjectId(values[i])); return result; } diff --git a/src/backend/catalog/pg_publication.c b/src/backend/catalog/pg_publication.c index d6f94db5d999b..b911efcf9cb6a 100644 --- a/src/backend/catalog/pg_publication.c +++ b/src/backend/catalog/pg_publication.c @@ -1001,7 +1001,7 @@ GetSchemaPublicationRelations(Oid schemaid, PublicationPartOpt pub_partopt) ScanKeyInit(&key[0], Anum_pg_class_relnamespace, BTEqualStrategyNumber, F_OIDEQ, - schemaid); + ObjectIdGetDatum(schemaid)); /* get all the relations present in the specified schema */ scan = table_beginscan_catalog(classRel, 1, key); diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c index 536191284e803..16e3e5c7457db 100644 --- a/src/backend/catalog/pg_shdepend.c +++ b/src/backend/catalog/pg_shdepend.c @@ -47,7 +47,6 @@ #include "catalog/pg_type.h" #include "catalog/pg_user_mapping.h" #include "commands/alter.h" -#include "commands/dbcommands.h" #include "commands/defrem.h" #include "commands/event_trigger.h" #include "commands/policy.h" @@ -61,6 +60,7 @@ #include "storage/lmgr.h" 
#include "utils/acl.h" #include "utils/fmgroids.h" +#include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/syscache.h" @@ -956,12 +956,12 @@ copyTemplateDependencies(Oid templateDbId, Oid newDbId) shdep = (Form_pg_shdepend) GETSTRUCT(tup); slot[slot_stored_count]->tts_values[Anum_pg_shdepend_dbid - 1] = ObjectIdGetDatum(newDbId); - slot[slot_stored_count]->tts_values[Anum_pg_shdepend_classid - 1] = shdep->classid; - slot[slot_stored_count]->tts_values[Anum_pg_shdepend_objid - 1] = shdep->objid; - slot[slot_stored_count]->tts_values[Anum_pg_shdepend_objsubid - 1] = shdep->objsubid; - slot[slot_stored_count]->tts_values[Anum_pg_shdepend_refclassid - 1] = shdep->refclassid; - slot[slot_stored_count]->tts_values[Anum_pg_shdepend_refobjid - 1] = shdep->refobjid; - slot[slot_stored_count]->tts_values[Anum_pg_shdepend_deptype - 1] = shdep->deptype; + slot[slot_stored_count]->tts_values[Anum_pg_shdepend_classid - 1] = ObjectIdGetDatum(shdep->classid); + slot[slot_stored_count]->tts_values[Anum_pg_shdepend_objid - 1] = ObjectIdGetDatum(shdep->objid); + slot[slot_stored_count]->tts_values[Anum_pg_shdepend_objsubid - 1] = Int32GetDatum(shdep->objsubid); + slot[slot_stored_count]->tts_values[Anum_pg_shdepend_refclassid - 1] = ObjectIdGetDatum(shdep->refclassid); + slot[slot_stored_count]->tts_values[Anum_pg_shdepend_refobjid - 1] = ObjectIdGetDatum(shdep->refobjid); + slot[slot_stored_count]->tts_values[Anum_pg_shdepend_deptype - 1] = CharGetDatum(shdep->deptype); ExecStoreVirtualTuple(slot[slot_stored_count]); slot_stored_count++; diff --git a/src/backend/catalog/pg_subscription.c b/src/backend/catalog/pg_subscription.c index 63c2992d19f75..b885890de3738 100644 --- a/src/backend/catalog/pg_subscription.c +++ b/src/backend/catalog/pg_subscription.c @@ -104,6 +104,8 @@ GetSubscription(Oid subid, bool missing_ok) sub->runasowner = subform->subrunasowner; sub->failover = subform->subfailover; sub->retaindeadtuples = subform->subretaindeadtuples; + sub->maxretention = subform->submaxretention; + sub->retentionactive = subform->subretentionactive; /* Get conninfo */ datum = SysCacheGetAttrNotNull(SUBSCRIPTIONOID, @@ -320,7 +322,7 @@ AddSubscriptionRelState(Oid subid, Oid relid, char state, */ void UpdateSubscriptionRelState(Oid subid, Oid relid, char state, - XLogRecPtr sublsn) + XLogRecPtr sublsn, bool already_locked) { Relation rel; HeapTuple tup; @@ -328,9 +330,24 @@ UpdateSubscriptionRelState(Oid subid, Oid relid, char state, Datum values[Natts_pg_subscription_rel]; bool replaces[Natts_pg_subscription_rel]; - LockSharedObject(SubscriptionRelationId, subid, 0, AccessShareLock); + if (already_locked) + { +#ifdef USE_ASSERT_CHECKING + LOCKTAG tag; - rel = table_open(SubscriptionRelRelationId, RowExclusiveLock); + Assert(CheckRelationOidLockedByMe(SubscriptionRelRelationId, + RowExclusiveLock, true)); + SET_LOCKTAG_OBJECT(tag, InvalidOid, SubscriptionRelationId, subid, 0); + Assert(LockHeldByMe(&tag, AccessShareLock, true)); +#endif + + rel = table_open(SubscriptionRelRelationId, NoLock); + } + else + { + LockSharedObject(SubscriptionRelationId, subid, 0, AccessShareLock); + rel = table_open(SubscriptionRelRelationId, RowExclusiveLock); + } /* Try finding existing mapping. */ tup = SearchSysCacheCopy2(SUBSCRIPTIONRELMAP, @@ -583,3 +600,42 @@ GetSubscriptionRelations(Oid subid, bool not_ready) return res; } + +/* + * Update the dead tuple retention status for the given subscription. 
+ */ +void +UpdateDeadTupleRetentionStatus(Oid subid, bool active) +{ + Relation rel; + bool nulls[Natts_pg_subscription]; + bool replaces[Natts_pg_subscription]; + Datum values[Natts_pg_subscription]; + HeapTuple tup; + + /* Look up the subscription in the catalog */ + rel = table_open(SubscriptionRelationId, RowExclusiveLock); + tup = SearchSysCacheCopy1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid)); + + if (!HeapTupleIsValid(tup)) + elog(ERROR, "cache lookup failed for subscription %u", subid); + + LockSharedObject(SubscriptionRelationId, subid, 0, AccessShareLock); + + /* Form a new tuple. */ + memset(values, 0, sizeof(values)); + memset(nulls, false, sizeof(nulls)); + memset(replaces, false, sizeof(replaces)); + + /* Set the new retention status. */ + values[Anum_pg_subscription_subretentionactive - 1] = BoolGetDatum(active); + replaces[Anum_pg_subscription_subretentionactive - 1] = true; + + /* Update the catalog */ + tup = heap_modify_tuple(tup, RelationGetDescr(rel), values, nulls, + replaces); + CatalogTupleUpdate(rel, &tup->t_self, tup); + heap_freetuple(tup); + + table_close(rel, NoLock); +} diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c index b36f81afb9d3f..3cd9b69edc575 100644 --- a/src/backend/catalog/pg_type.c +++ b/src/backend/catalog/pg_type.c @@ -80,7 +80,7 @@ TypeShellMake(const char *typeName, Oid typeNamespace, Oid ownerId) for (i = 0; i < Natts_pg_type; ++i) { nulls[i] = false; - values[i] = (Datum) NULL; /* redundant, but safe */ + values[i] = (Datum) 0; /* redundant, but safe */ } /* @@ -285,8 +285,7 @@ TypeCreate(Oid newTypeOid, errmsg("alignment \"%c\" is invalid for passed-by-value type of size %d", alignment, internalSize))); } -#if SIZEOF_DATUM == 8 - else if (internalSize == (int16) sizeof(Datum)) + else if (internalSize == (int16) sizeof(int64)) { if (alignment != TYPALIGN_DOUBLE) ereport(ERROR, @@ -294,7 +293,6 @@ TypeCreate(Oid newTypeOid, errmsg("alignment \"%c\" is invalid for passed-by-value type of size %d", alignment, internalSize))); } -#endif else ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c index 227df90f89c97..c58e9418ac313 100644 --- a/src/backend/catalog/storage.c +++ b/src/backend/catalog/storage.c @@ -546,7 +546,7 @@ RelationCopyStorage(SMgrRelation src, SMgrRelation dst, ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("invalid page in block %u of relation %s", + errmsg("invalid page in block %u of relation \"%s\"", blkno, relpath.str))); } @@ -586,7 +586,7 @@ RelFileLocatorSkippingWAL(RelFileLocator rlocator) Size EstimatePendingSyncsSpace(void) { - long entries; + int64 entries; entries = pendingSyncHash ?
hash_get_num_entries(pendingSyncHash) : 0; return mul_size(1 + entries, sizeof(RelFileLocator)); diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql index f6eca09ee153a..c77fa0234bb7d 100644 --- a/src/backend/catalog/system_views.sql +++ b/src/backend/catalog/system_views.sql @@ -1327,7 +1327,10 @@ CREATE VIEW pg_stat_progress_basebackup AS CASE S.param2 WHEN -1 THEN NULL ELSE S.param2 END AS backup_total, S.param3 AS backup_streamed, S.param4 AS tablespaces_total, - S.param5 AS tablespaces_streamed + S.param5 AS tablespaces_streamed, + CASE S.param6 WHEN 1 THEN 'full' + WHEN 2 THEN 'incremental' + END AS backup_type FROM pg_stat_get_progress_info('BASEBACKUP') AS S; @@ -1386,8 +1389,8 @@ REVOKE ALL ON pg_subscription FROM public; GRANT SELECT (oid, subdbid, subskiplsn, subname, subowner, subenabled, subbinary, substream, subtwophasestate, subdisableonerr, subpasswordrequired, subrunasowner, subfailover, - subretaindeadtuples, subslotname, subsynccommit, - subpublications, suborigin) + subretaindeadtuples, submaxretention, subretentionactive, + subslotname, subsynccommit, subpublications, suborigin) ON pg_subscription TO public; CREATE VIEW pg_stat_subscription_stats AS @@ -1399,6 +1402,7 @@ CREATE VIEW pg_stat_subscription_stats AS ss.confl_insert_exists, ss.confl_update_origin_differs, ss.confl_update_exists, + ss.confl_update_deleted, ss.confl_update_missing, ss.confl_delete_origin_differs, ss.confl_delete_missing, diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index c801c869c1cfc..cb75e11fced62 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -220,7 +220,7 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name) Assert(!isnull); ownerId = DatumGetObjectId(datum); - if (!has_privs_of_role(GetUserId(), DatumGetObjectId(ownerId))) + if (!has_privs_of_role(GetUserId(), ownerId)) aclcheck_error(ACLCHECK_NOT_OWNER, get_object_type(classId, objectId), old_name); diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index 7111d5d5334f2..8ea2913d90632 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -29,7 +29,6 @@ #include "catalog/index.h" #include "catalog/indexing.h" #include "catalog/pg_inherits.h" -#include "commands/dbcommands.h" #include "commands/progress.h" #include "commands/tablecmds.h" #include "commands/vacuum.h" @@ -690,8 +689,8 @@ do_analyze_rel(Relation onerel, const VacuumParams params, * only do it for inherited stats. (We're never called for not-inherited * stats on partitioned tables anyway.) * - * Reset the changes_since_analyze counter only if we analyzed all - * columns; otherwise, there is still work for auto-analyze to do. + * Reset the mod_since_analyze counter only if we analyzed all columns; + * otherwise, there is still work for auto-analyze to do. 
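 *
 * A sketch of the intended behavior, assuming the reset flag passed to
 * pgstat_report_analyze() below is computed as "no explicit column list
 * was given" (va_cols == NIL): a plain ANALYZE of the relation resets
 * the counter, while ANALYZE of a single column leaves it set, so
 * auto-analyze still has a reason to run.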
*/ if (!inh) pgstat_report_analyze(onerel, totalrows, totaldeadrows, diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c index f67a8b95d29de..5c783cc61f1d7 100644 --- a/src/backend/commands/comment.c +++ b/src/backend/commands/comment.c @@ -20,10 +20,10 @@ #include "access/table.h" #include "catalog/indexing.h" #include "catalog/objectaddress.h" +#include "catalog/pg_database.h" #include "catalog/pg_description.h" #include "catalog/pg_shdescription.h" #include "commands/comment.h" -#include "commands/dbcommands.h" #include "miscadmin.h" #include "utils/builtins.h" #include "utils/fmgroids.h" diff --git a/src/backend/commands/copyfrom.c b/src/backend/commands/copyfrom.c index fbbbc09a97b17..12781963b4f95 100644 --- a/src/backend/commands/copyfrom.c +++ b/src/backend/commands/copyfrom.c @@ -919,7 +919,7 @@ CopyFrom(CopyFromState cstate) ExecInitResultRelation(estate, resultRelInfo, 1); /* Verify the named relation is a valid target for INSERT */ - CheckValidResultRel(resultRelInfo, CMD_INSERT, NIL); + CheckValidResultRel(resultRelInfo, CMD_INSERT, ONCONFLICT_NONE, NIL); ExecOpenIndices(resultRelInfo, false); diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index 502a45163c8ae..2793fd837715d 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -64,6 +64,7 @@ #include "utils/acl.h" #include "utils/builtins.h" #include "utils/fmgroids.h" +#include "utils/lsyscache.h" #include "utils/pg_locale.h" #include "utils/relmapper.h" #include "utils/snapmgr.h" @@ -1052,7 +1053,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt) dbctype = src_ctype; if (dblocprovider == '\0') dblocprovider = src_locprovider; - if (dblocale == NULL) + if (dblocale == NULL && dblocprovider == src_locprovider) dblocale = src_locale; if (dbicurules == NULL) dbicurules = src_icurules; @@ -3204,30 +3205,6 @@ get_database_oid(const char *dbname, bool missing_ok) } -/* - * get_database_name - given a database OID, look up the name - * - * Returns a palloc'd string, or NULL if no such database. - */ -char * -get_database_name(Oid dbid) -{ - HeapTuple dbtuple; - char *result; - - dbtuple = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(dbid)); - if (HeapTupleIsValid(dbtuple)) - { - result = pstrdup(NameStr(((Form_pg_database) GETSTRUCT(dbtuple))->datname)); - ReleaseSysCache(dbtuple); - } - else - result = NULL; - - return result; -} - - /* * While dropping a database the pg_database row is marked invalid, but the * catalog contents still exist. 
Connections to such a database are not diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c index edc2c988e2934..631fb0525f1e7 100644 --- a/src/backend/commands/event_trigger.c +++ b/src/backend/commands/event_trigger.c @@ -2021,8 +2021,8 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS) elog(ERROR, "cache lookup failed for object %u/%u", addr.classId, addr.objectId); schema_oid = - heap_getattr(objtup, nspAttnum, - RelationGetDescr(catalog), &isnull); + DatumGetObjectId(heap_getattr(objtup, nspAttnum, + RelationGetDescr(catalog), &isnull)); if (isnull) elog(ERROR, "invalid null namespace in object %u/%u/%d", diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 7e2792ead715b..8345bc0264b23 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -3582,6 +3582,7 @@ static void show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es) { Plan *plan = ((PlanState *) mstate)->plan; + Memoize *mplan = (Memoize *) plan; ListCell *lc; List *context; StringInfoData keystr; @@ -3602,7 +3603,7 @@ show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es) plan, ancestors); - foreach(lc, ((Memoize *) plan)->param_exprs) + foreach(lc, mplan->param_exprs) { Node *expr = (Node *) lfirst(lc); @@ -3618,6 +3619,24 @@ show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es) pfree(keystr.data); + if (es->costs) + { + if (es->format == EXPLAIN_FORMAT_TEXT) + { + ExplainIndentText(es); + appendStringInfo(es->str, "Estimates: capacity=%u distinct keys=%.0f lookups=%.0f hit percent=%.2f%%\n", + mplan->est_entries, mplan->est_unique_keys, + mplan->est_calls, mplan->est_hit_ratio * 100.0); + } + else + { + ExplainPropertyUInteger("Estimated Capacity", NULL, mplan->est_entries, es); + ExplainPropertyFloat("Estimated Distinct Lookup Keys", NULL, mplan->est_unique_keys, 0, es); + ExplainPropertyFloat("Estimated Lookups", NULL, mplan->est_calls, 0, es); + ExplainPropertyFloat("Estimated Hit Percent", NULL, mplan->est_hit_ratio * 100.0, 2, es); + } + } + if (!es->analyze) return; diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c index fcd5fcd8915e3..77f8461f42eee 100644 --- a/src/backend/commands/foreigncmds.c +++ b/src/backend/commands/foreigncmds.c @@ -1588,7 +1588,7 @@ ImportForeignSchema(ImportForeignSchemaStmt *stmt) pstmt->utilityStmt = (Node *) cstmt; pstmt->stmt_location = rs->stmt_location; pstmt->stmt_len = rs->stmt_len; - pstmt->cached_plan_type = PLAN_CACHE_NONE; + pstmt->planOrigin = PLAN_STMT_INTERNAL; /* Execute statement */ ProcessUtility(pstmt, cmd, false, diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index 6f753ab6d7a0d..ca2bde62e82ff 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -38,7 +38,6 @@ #include "catalog/pg_tablespace.h" #include "catalog/pg_type.h" #include "commands/comment.h" -#include "commands/dbcommands.h" #include "commands/defrem.h" #include "commands/event_trigger.h" #include "commands/progress.h" diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c index 1bf7eaae5b362..3de5687461c85 100644 --- a/src/backend/commands/publicationcmds.c +++ b/src/backend/commands/publicationcmds.c @@ -29,7 +29,6 @@ #include "catalog/pg_publication.h" #include "catalog/pg_publication_namespace.h" #include "catalog/pg_publication_rel.h" -#include "commands/dbcommands.h" #include "commands/defrem.h" #include 
"commands/event_trigger.h" #include "commands/publicationcmds.h" @@ -2113,20 +2112,20 @@ AlterPublicationOwner_oid(Oid pubid, Oid newOwnerId) static char defGetGeneratedColsOption(DefElem *def) { - char *sval; + char *sval = ""; /* - * If no parameter value given, assume "stored" is meant. + * A parameter value is required. */ - if (!def->arg) - return PUBLISH_GENCOLS_STORED; - - sval = defGetString(def); + if (def->arg) + { + sval = defGetString(def); - if (pg_strcasecmp(sval, "none") == 0) - return PUBLISH_GENCOLS_NONE; - if (pg_strcasecmp(sval, "stored") == 0) - return PUBLISH_GENCOLS_STORED; + if (pg_strcasecmp(sval, "none") == 0) + return PUBLISH_GENCOLS_NONE; + if (pg_strcasecmp(sval, "stored") == 0) + return PUBLISH_GENCOLS_STORED; + } ereport(ERROR, errcode(ERRCODE_SYNTAX_ERROR), diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c index c00f1a11384f1..3cc1472103a7a 100644 --- a/src/backend/commands/schemacmds.c +++ b/src/backend/commands/schemacmds.c @@ -25,7 +25,6 @@ #include "catalog/pg_authid.h" #include "catalog/pg_database.h" #include "catalog/pg_namespace.h" -#include "commands/dbcommands.h" #include "commands/event_trigger.h" #include "commands/schemacmds.h" #include "miscadmin.h" @@ -34,6 +33,7 @@ #include "tcop/utility.h" #include "utils/acl.h" #include "utils/builtins.h" +#include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/syscache.h" @@ -215,7 +215,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString, wrapper->utilityStmt = stmt; wrapper->stmt_location = stmt_location; wrapper->stmt_len = stmt_len; - wrapper->cached_plan_type = PLAN_CACHE_NONE; + wrapper->planOrigin = PLAN_STMT_INTERNAL; /* do this step */ ProcessUtility(wrapper, diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c index 451ae6f7f6940..636d3c3ec737b 100644 --- a/src/backend/commands/sequence.c +++ b/src/backend/commands/sequence.c @@ -106,7 +106,9 @@ static Form_pg_sequence_data read_seq_tuple(Relation rel, static void init_params(ParseState *pstate, List *options, bool for_identity, bool isInit, Form_pg_sequence seqform, - Form_pg_sequence_data seqdataform, + int64 *last_value, + bool *reset_state, + bool *is_called, bool *need_seq_rewrite, List **owned_by); static void do_setval(Oid relid, int64 next, bool iscalled); @@ -121,7 +123,9 @@ ObjectAddress DefineSequence(ParseState *pstate, CreateSeqStmt *seq) { FormData_pg_sequence seqform; - FormData_pg_sequence_data seqdataform; + int64 last_value; + bool reset_state; + bool is_called; bool need_seq_rewrite; List *owned_by; CreateStmt *stmt = makeNode(CreateStmt); @@ -164,7 +168,7 @@ DefineSequence(ParseState *pstate, CreateSeqStmt *seq) /* Check and set all option values */ init_params(pstate, seq->options, seq->for_identity, true, - &seqform, &seqdataform, + &seqform, &last_value, &reset_state, &is_called, &need_seq_rewrite, &owned_by); /* @@ -179,7 +183,7 @@ DefineSequence(ParseState *pstate, CreateSeqStmt *seq) { case SEQ_COL_LASTVAL: coldef = makeColumnDef("last_value", INT8OID, -1, InvalidOid); - value[i - 1] = Int64GetDatumFast(seqdataform.last_value); + value[i - 1] = Int64GetDatumFast(last_value); break; case SEQ_COL_LOG: coldef = makeColumnDef("log_cnt", INT8OID, -1, InvalidOid); @@ -448,6 +452,9 @@ AlterSequence(ParseState *pstate, AlterSeqStmt *stmt) ObjectAddress address; Relation rel; HeapTuple seqtuple; + bool reset_state = false; + bool is_called; + int64 last_value; HeapTuple newdatatuple; /* Open and lock sequence, and check for ownership along 
the way. */ @@ -481,12 +488,14 @@ AlterSequence(ParseState *pstate, AlterSeqStmt *stmt) /* copy the existing sequence data tuple, so it can be modified locally */ newdatatuple = heap_copytuple(&datatuple); newdataform = (Form_pg_sequence_data) GETSTRUCT(newdatatuple); + last_value = newdataform->last_value; + is_called = newdataform->is_called; UnlockReleaseBuffer(buf); /* Check and set new values */ init_params(pstate, stmt->options, stmt->for_identity, false, - seqform, newdataform, + seqform, &last_value, &reset_state, &is_called, &need_seq_rewrite, &owned_by); /* If needed, rewrite the sequence relation itself */ @@ -513,6 +522,10 @@ AlterSequence(ParseState *pstate, AlterSeqStmt *stmt) /* * Insert the modified tuple into the new storage file. */ + newdataform->last_value = last_value; + newdataform->is_called = is_called; + if (reset_state) + newdataform->log_cnt = 0; fill_seq_with_data(seqrel, newdatatuple); } @@ -1236,17 +1249,19 @@ read_seq_tuple(Relation rel, Buffer *buf, HeapTuple seqdatatuple) /* * init_params: process the options list of CREATE or ALTER SEQUENCE, and * store the values into appropriate fields of seqform, for changes that go - * into the pg_sequence catalog, and fields of seqdataform for changes to the - * sequence relation itself. Set *need_seq_rewrite to true if we changed any - * parameters that require rewriting the sequence's relation (interesting for - * ALTER SEQUENCE). Also set *owned_by to any OWNED BY option, or to NIL if - * there is none. + * into the pg_sequence catalog, and fields for changes to the sequence + * relation itself (*is_called, *last_value and *reset_state). Set + * *need_seq_rewrite to true if we changed any parameters that require + * rewriting the sequence's relation (interesting for ALTER SEQUENCE). Also + * set *owned_by to any OWNED BY option, or to NIL if there is none. Set + * *reset_state to true if the internal state of the sequence needs to be + * reset, affecting future nextval() calls, for example with WAL logging. * * If isInit is true, fill any unspecified options with default values; * otherwise, do not change existing options that aren't explicitly overridden. * * Note: we force a sequence rewrite whenever we change parameters that affect - * generation of future sequence values, even if the seqdataform per se is not + * generation of future sequence values, even if the metadata per se is not * changed. This allows ALTER SEQUENCE to behave transactionally. Currently, * the only option that doesn't cause that is OWNED BY. It's *necessary* for * ALTER SEQUENCE OWNED BY to not rewrite the sequence, because that would @@ -1257,7 +1272,9 @@ static void init_params(ParseState *pstate, List *options, bool for_identity, bool isInit, Form_pg_sequence seqform, - Form_pg_sequence_data seqdataform, + int64 *last_value, + bool *reset_state, + bool *is_called, bool *need_seq_rewrite, List **owned_by) { @@ -1363,11 +1380,11 @@ init_params(ParseState *pstate, List *options, bool for_identity, } /* - * We must reset log_cnt when isInit or when changing any parameters that - * would affect future nextval allocations. + * We must reset the state of the sequence when isInit or when changing + * any parameters that would affect future nextval allocations. 
*/ if (isInit) - seqdataform->log_cnt = 0; + *reset_state = true; /* AS type */ if (as_type != NULL) @@ -1416,7 +1433,7 @@ init_params(ParseState *pstate, List *options, bool for_identity, ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("INCREMENT must not be zero"))); - seqdataform->log_cnt = 0; + *reset_state = true; } else if (isInit) { @@ -1428,7 +1445,7 @@ init_params(ParseState *pstate, List *options, bool for_identity, { seqform->seqcycle = boolVal(is_cycled->arg); Assert(BoolIsValid(seqform->seqcycle)); - seqdataform->log_cnt = 0; + *reset_state = true; } else if (isInit) { @@ -1439,7 +1456,7 @@ init_params(ParseState *pstate, List *options, bool for_identity, if (max_value != NULL && max_value->arg) { seqform->seqmax = defGetInt64(max_value); - seqdataform->log_cnt = 0; + *reset_state = true; } else if (isInit || max_value != NULL || reset_max_value) { @@ -1455,7 +1472,7 @@ init_params(ParseState *pstate, List *options, bool for_identity, } else seqform->seqmax = -1; /* descending seq */ - seqdataform->log_cnt = 0; + *reset_state = true; } /* Validate maximum value. No need to check INT8 as seqmax is an int64 */ @@ -1471,7 +1488,7 @@ init_params(ParseState *pstate, List *options, bool for_identity, if (min_value != NULL && min_value->arg) { seqform->seqmin = defGetInt64(min_value); - seqdataform->log_cnt = 0; + *reset_state = true; } else if (isInit || min_value != NULL || reset_min_value) { @@ -1487,7 +1504,7 @@ init_params(ParseState *pstate, List *options, bool for_identity, } else seqform->seqmin = 1; /* ascending seq */ - seqdataform->log_cnt = 0; + *reset_state = true; } /* Validate minimum value. No need to check INT8 as seqmin is an int64 */ @@ -1538,30 +1555,30 @@ init_params(ParseState *pstate, List *options, bool for_identity, if (restart_value != NULL) { if (restart_value->arg != NULL) - seqdataform->last_value = defGetInt64(restart_value); + *last_value = defGetInt64(restart_value); else - seqdataform->last_value = seqform->seqstart; - seqdataform->is_called = false; - seqdataform->log_cnt = 0; + *last_value = seqform->seqstart; + *is_called = false; + *reset_state = true; } else if (isInit) { - seqdataform->last_value = seqform->seqstart; - seqdataform->is_called = false; + *last_value = seqform->seqstart; + *is_called = false; } /* crosscheck RESTART (or current value, if changing MIN/MAX) */ - if (seqdataform->last_value < seqform->seqmin) + if (*last_value < seqform->seqmin) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("RESTART value (%" PRId64 ") cannot be less than MINVALUE (%" PRId64 ")", - seqdataform->last_value, + *last_value, seqform->seqmin))); - if (seqdataform->last_value > seqform->seqmax) + if (*last_value > seqform->seqmax) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("RESTART value (%" PRId64 ") cannot be greater than MAXVALUE (%" PRId64 ")", - seqdataform->last_value, + *last_value, seqform->seqmax))); /* CACHE */ @@ -1573,7 +1590,7 @@ init_params(ParseState *pstate, List *options, bool for_identity, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("CACHE (%" PRId64 ") must be greater than zero", seqform->seqcache))); - seqdataform->log_cnt = 0; + *reset_state = true; } else if (isInit) { @@ -1903,7 +1920,7 @@ seq_redo(XLogReaderState *record) elog(PANIC, "seq_redo: unknown op code %u", info); buffer = XLogInitBufferForRedo(record, 0); - page = (Page) BufferGetPage(buffer); + page = BufferGetPage(buffer); /* * We always reinit the page. 
However, since this WAL record type is also diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c index cd6c3684482f9..750d262fccade 100644 --- a/src/backend/commands/subscriptioncmds.c +++ b/src/backend/commands/subscriptioncmds.c @@ -30,7 +30,6 @@ #include "catalog/pg_subscription.h" #include "catalog/pg_subscription_rel.h" #include "catalog/pg_type.h" -#include "commands/dbcommands.h" #include "commands/defrem.h" #include "commands/event_trigger.h" #include "commands/subscriptioncmds.h" @@ -73,8 +72,9 @@ #define SUBOPT_RUN_AS_OWNER 0x00001000 #define SUBOPT_FAILOVER 0x00002000 #define SUBOPT_RETAIN_DEAD_TUPLES 0x00004000 -#define SUBOPT_LSN 0x00008000 -#define SUBOPT_ORIGIN 0x00010000 +#define SUBOPT_MAX_RETENTION_DURATION 0x00008000 +#define SUBOPT_LSN 0x00010000 +#define SUBOPT_ORIGIN 0x00020000 /* check if the 'val' has 'bits' set */ #define IsSet(val, bits) (((val) & (bits)) == (bits)) @@ -101,6 +101,7 @@ typedef struct SubOpts bool runasowner; bool failover; bool retaindeadtuples; + int32 maxretention; char *origin; XLogRecPtr lsn; } SubOpts; @@ -169,6 +170,8 @@ parse_subscription_options(ParseState *pstate, List *stmt_options, opts->failover = false; if (IsSet(supported_opts, SUBOPT_RETAIN_DEAD_TUPLES)) opts->retaindeadtuples = false; + if (IsSet(supported_opts, SUBOPT_MAX_RETENTION_DURATION)) + opts->maxretention = 0; if (IsSet(supported_opts, SUBOPT_ORIGIN)) opts->origin = pstrdup(LOGICALREP_ORIGIN_ANY); @@ -323,6 +326,15 @@ parse_subscription_options(ParseState *pstate, List *stmt_options, opts->specified_opts |= SUBOPT_RETAIN_DEAD_TUPLES; opts->retaindeadtuples = defGetBoolean(defel); } + else if (IsSet(supported_opts, SUBOPT_MAX_RETENTION_DURATION) && + strcmp(defel->defname, "max_retention_duration") == 0) + { + if (IsSet(opts->specified_opts, SUBOPT_MAX_RETENTION_DURATION)) + errorConflictingDefElem(defel, pstate); + + opts->specified_opts |= SUBOPT_MAX_RETENTION_DURATION; + opts->maxretention = defGetInt32(defel); + } else if (IsSet(supported_opts, SUBOPT_ORIGIN) && strcmp(defel->defname, "origin") == 0) { @@ -580,7 +592,8 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt, SUBOPT_STREAMING | SUBOPT_TWOPHASE_COMMIT | SUBOPT_DISABLE_ON_ERR | SUBOPT_PASSWORD_REQUIRED | SUBOPT_RUN_AS_OWNER | SUBOPT_FAILOVER | - SUBOPT_RETAIN_DEAD_TUPLES | SUBOPT_ORIGIN); + SUBOPT_RETAIN_DEAD_TUPLES | + SUBOPT_MAX_RETENTION_DURATION | SUBOPT_ORIGIN); parse_subscription_options(pstate, stmt->options, supported_opts, &opts); /* @@ -638,7 +651,7 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt, /* Check if name is used */ subid = GetSysCacheOid2(SUBSCRIPTIONNAME, Anum_pg_subscription_oid, - MyDatabaseId, CStringGetDatum(stmt->subname)); + ObjectIdGetDatum(MyDatabaseId), CStringGetDatum(stmt->subname)); if (OidIsValid(subid)) { ereport(ERROR, @@ -647,9 +660,13 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt, stmt->subname))); } - /* Ensure that we can enable retain_dead_tuples */ - if (opts.retaindeadtuples) - CheckSubDeadTupleRetention(true, !opts.enabled, WARNING); + /* + * Ensure that system configuration parameters are set appropriately to + * support retain_dead_tuples and max_retention_duration.
+ */ + CheckSubDeadTupleRetention(true, !opts.enabled, WARNING, + opts.retaindeadtuples, opts.retaindeadtuples, + (opts.maxretention > 0)); if (!IsSet(opts.specified_opts, SUBOPT_SLOT_NAME) && opts.slot_name == NULL) @@ -693,6 +710,10 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt, values[Anum_pg_subscription_subfailover - 1] = BoolGetDatum(opts.failover); values[Anum_pg_subscription_subretaindeadtuples - 1] = BoolGetDatum(opts.retaindeadtuples); + values[Anum_pg_subscription_submaxretention - 1] = + Int32GetDatum(opts.maxretention); + values[Anum_pg_subscription_subretentionactive - 1] = + BoolGetDatum(opts.retaindeadtuples); values[Anum_pg_subscription_subconninfo - 1] = CStringGetTextDatum(conninfo); if (opts.slot_name) @@ -833,7 +854,17 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt, pgstat_create_subscription(subid); - if (opts.enabled) + /* + * Notify the launcher to start the apply worker if the subscription is + * enabled, or to create the conflict detection slot if retain_dead_tuples + * is enabled. + * + * Creating the conflict detection slot is essential even when the + * subscription is not enabled. This ensures that dead tuples are + * retained, which is necessary for accurately identifying the type of + * conflict during replication. + */ + if (opts.enabled || opts.retaindeadtuples) ApplyLauncherWakeupAtCommit(); ObjectAddressSet(myself, SubscriptionRelationId, subid); @@ -1176,6 +1207,8 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt, bool update_two_phase = false; bool check_pub_rdt = false; bool retain_dead_tuples; + int max_retention; + bool retention_active; char *origin; Subscription *sub; Form_pg_subscription form; @@ -1185,7 +1218,7 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt, rel = table_open(SubscriptionRelationId, RowExclusiveLock); /* Fetch the existing tuple. */ - tup = SearchSysCacheCopy2(SUBSCRIPTIONNAME, MyDatabaseId, + tup = SearchSysCacheCopy2(SUBSCRIPTIONNAME, ObjectIdGetDatum(MyDatabaseId), CStringGetDatum(stmt->subname)); if (!HeapTupleIsValid(tup)) @@ -1206,6 +1239,8 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt, retain_dead_tuples = sub->retaindeadtuples; origin = sub->origin; + max_retention = sub->maxretention; + retention_active = sub->retentionactive; /* * Don't allow non-superuser modification of a subscription with @@ -1235,7 +1270,9 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt, SUBOPT_DISABLE_ON_ERR | SUBOPT_PASSWORD_REQUIRED | SUBOPT_RUN_AS_OWNER | SUBOPT_FAILOVER | - SUBOPT_RETAIN_DEAD_TUPLES | SUBOPT_ORIGIN); + SUBOPT_RETAIN_DEAD_TUPLES | + SUBOPT_MAX_RETENTION_DURATION | + SUBOPT_ORIGIN); parse_subscription_options(pstate, stmt->options, supported_opts, &opts); @@ -1401,6 +1438,29 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt, BoolGetDatum(opts.retaindeadtuples); replaces[Anum_pg_subscription_subretaindeadtuples - 1] = true; + /* + * Update the retention status only if there's a change in + * the retain_dead_tuples option value. + * + * Automatically marking retention as active when + * retain_dead_tuples is enabled may not always be ideal, + * especially if retention was previously stopped and the + * user toggles retain_dead_tuples without adjusting the + * publisher workload. However, this behavior provides a + * convenient way for users to manually refresh the + * retention status.
Since retention will be stopped again + * unless the publisher workload is reduced, this approach + * is acceptable for now. + */ + if (opts.retaindeadtuples != sub->retaindeadtuples) + { + values[Anum_pg_subscription_subretentionactive - 1] = + BoolGetDatum(opts.retaindeadtuples); + replaces[Anum_pg_subscription_subretentionactive - 1] = true; + + retention_active = opts.retaindeadtuples; + } + CheckAlterSubOption(sub, "retain_dead_tuples", false, isTopLevel); /* @@ -1417,13 +1477,6 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt, errmsg("cannot alter retain_dead_tuples when logical replication worker is still running"), errhint("Try again after some time."))); - /* - * Remind the user that enabling subscription will prevent - * the accumulation of dead tuples. - */ - if (opts.retaindeadtuples) - CheckSubDeadTupleRetention(true, !sub->enabled, NOTICE); - /* * Notify the launcher to manage the replication slot for * conflict detection. This ensures that replication slot @@ -1436,6 +1489,27 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt, retain_dead_tuples = opts.retaindeadtuples; } + if (IsSet(opts.specified_opts, SUBOPT_MAX_RETENTION_DURATION)) + { + values[Anum_pg_subscription_submaxretention - 1] = + Int32GetDatum(opts.maxretention); + replaces[Anum_pg_subscription_submaxretention - 1] = true; + + max_retention = opts.maxretention; + } + + /* + * Ensure that system configuration paramters are set + * appropriately to support retain_dead_tuples and + * max_retention_duration. + */ + if (IsSet(opts.specified_opts, SUBOPT_RETAIN_DEAD_TUPLES) || + IsSet(opts.specified_opts, SUBOPT_MAX_RETENTION_DURATION)) + CheckSubDeadTupleRetention(true, !sub->enabled, NOTICE, + retain_dead_tuples, + retention_active, + (max_retention > 0)); + if (IsSet(opts.specified_opts, SUBOPT_ORIGIN)) { values[Anum_pg_subscription_suborigin - 1] = @@ -1473,9 +1547,9 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt, * subscription in case it was disabled after creation. See * comments atop CheckSubDeadTupleRetention() for details. */ - if (sub->retaindeadtuples) - CheckSubDeadTupleRetention(opts.enabled, !opts.enabled, - WARNING); + CheckSubDeadTupleRetention(opts.enabled, !opts.enabled, + WARNING, sub->retaindeadtuples, + sub->retentionactive, false); values[Anum_pg_subscription_subenabled - 1] = BoolGetDatum(opts.enabled); @@ -1803,12 +1877,14 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel) bool must_use_password; /* - * Lock pg_subscription with AccessExclusiveLock to ensure that the - * launcher doesn't restart new worker during dropping the subscription + * The launcher may concurrently start a new worker for this subscription. + * During initialization, the worker checks for subscription validity and + * exits if the subscription has already been dropped. See + * InitializeLogRepWorker. 
@@ -1803,12 +1877,14 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
 	bool		must_use_password;

 	/*
-	 * Lock pg_subscription with AccessExclusiveLock to ensure that the
-	 * launcher doesn't restart new worker during dropping the subscription
+	 * The launcher may concurrently start a new worker for this subscription.
+	 * During initialization, the worker checks for subscription validity and
+	 * exits if the subscription has already been dropped. See
+	 * InitializeLogRepWorker.
 	 */
-	rel = table_open(SubscriptionRelationId, AccessExclusiveLock);
+	rel = table_open(SubscriptionRelationId, RowExclusiveLock);

-	tup = SearchSysCache2(SUBSCRIPTIONNAME, MyDatabaseId,
+	tup = SearchSysCache2(SUBSCRIPTIONNAME, ObjectIdGetDatum(MyDatabaseId),
 						  CStringGetDatum(stmt->subname));

 	if (!HeapTupleIsValid(tup))
@@ -2193,7 +2269,7 @@ AlterSubscriptionOwner(const char *name, Oid newOwnerId)

 	rel = table_open(SubscriptionRelationId, RowExclusiveLock);

-	tup = SearchSysCacheCopy2(SUBSCRIPTIONNAME, MyDatabaseId,
+	tup = SearchSysCacheCopy2(SUBSCRIPTIONNAME, ObjectIdGetDatum(MyDatabaseId),
 							  CStringGetDatum(name));

 	if (!HeapTupleIsValid(tup))
@@ -2466,38 +2542,54 @@ check_pub_dead_tuple_retention(WalReceiverConn *wrconn)
 * this setting can be adjusted after subscription creation. Without it, the
 * apply worker will simply skip conflict detection.
 *
- * Issue a WARNING or NOTICE if the subscription is disabled. Do not raise an
- * ERROR since users can only modify retain_dead_tuples for disabled
- * subscriptions. And as long as the subscription is enabled promptly, it will
- * not pose issues.
+ * Issue a WARNING or NOTICE if the subscription is disabled and retention is
+ * active. Do not raise an ERROR since users can only modify
+ * retain_dead_tuples for disabled subscriptions. And as long as the
+ * subscription is enabled promptly, it will not pose issues.
+ *
+ * Issue a NOTICE to inform users that max_retention_duration is
+ * ineffective when retain_dead_tuples is disabled for a subscription. An ERROR
+ * is not issued because setting max_retention_duration causes no harm,
+ * even when it is ineffective.
 */
 void
 CheckSubDeadTupleRetention(bool check_guc, bool sub_disabled,
-						   int elevel_for_sub_disabled)
+						   int elevel_for_sub_disabled,
+						   bool retain_dead_tuples, bool retention_active,
+						   bool max_retention_set)
 {
 	Assert(elevel_for_sub_disabled == NOTICE ||
 		   elevel_for_sub_disabled == WARNING);

-	if (check_guc && wal_level < WAL_LEVEL_REPLICA)
-		ereport(ERROR,
-				errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-				errmsg("\"wal_level\" is insufficient to create the replication slot required by retain_dead_tuples"),
-				errhint("\"wal_level\" must be set to \"replica\" or \"logical\" at server start."));
-
-	if (check_guc && !track_commit_timestamp)
-		ereport(WARNING,
-				errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-				errmsg("commit timestamp and origin data required for detecting conflicts won't be retained"),
-				errhint("Consider setting \"%s\" to true.",
-						"track_commit_timestamp"));
-
-	if (sub_disabled)
-		ereport(elevel_for_sub_disabled,
+	if (retain_dead_tuples)
+	{
+		if (check_guc && wal_level < WAL_LEVEL_REPLICA)
+			ereport(ERROR,
+					errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+					errmsg("\"wal_level\" is insufficient to create the replication slot required by retain_dead_tuples"),
+					errhint("\"wal_level\" must be set to \"replica\" or \"logical\" at server start."));
+
+		if (check_guc && !track_commit_timestamp)
+			ereport(WARNING,
+					errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+					errmsg("commit timestamp and origin data required for detecting conflicts won't be retained"),
+					errhint("Consider setting \"%s\" to true.",
+							"track_commit_timestamp"));
+
+		if (sub_disabled && retention_active)
+			ereport(elevel_for_sub_disabled,
+					errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+					errmsg("deleted rows to detect conflicts would not be removed until the subscription is enabled"),
+					(elevel_for_sub_disabled > NOTICE)
+					? errhint("Consider setting %s to false.",
+							  "retain_dead_tuples") : 0);
+	}
+	else if (max_retention_set)
+	{
+		ereport(NOTICE,
 				errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-				errmsg("deleted rows to detect conflicts would not be removed until the subscription is enabled"),
-				(elevel_for_sub_disabled > NOTICE)
-				? errhint("Consider setting %s to false.",
-						  "retain_dead_tuples") : 0);
+				errmsg("max_retention_duration is ineffective when retain_dead_tuples is disabled"));
+	}
 }

 /*
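The next several files share one mechanical theme: catalog tuple values[] arrays hold Datum, so plain C values are now wrapped in the matching conversion macro even where the bare assignment happens to compile through integer promotion. A condensed, standalone illustration (not code from the patch):

    Datum	values[3];

    values[0] = 42;							/* compiles, but hides the type */

    values[0] = Int16GetDatum((int16) 42);		/* explicit int16 -> Datum */
    values[1] = BoolGetDatum(true);				/* explicit bool -> Datum */
    values[2] = ObjectIdGetDatum(InvalidOid);	/* explicit Oid -> Datum */

The wrapped forms document the catalog column's declared type at the assignment site, which is what the hunks below are converging on.
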
errhint("Consider setting %s to false.", + "retain_dead_tuples") : 0); + } + else if (max_retention_set) + { + ereport(NOTICE, errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("deleted rows to detect conflicts would not be removed until the subscription is enabled"), - (elevel_for_sub_disabled > NOTICE) - ? errhint("Consider setting %s to false.", - "retain_dead_tuples") : 0); + errmsg("max_retention_duration is ineffective when retain_dead_tuples is disabled")); + } } /* diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index cb811520c2959..3be2e051d32fb 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -42,6 +42,7 @@ #include "catalog/pg_foreign_table.h" #include "catalog/pg_inherits.h" #include "catalog/pg_largeobject.h" +#include "catalog/pg_largeobject_metadata.h" #include "catalog/pg_namespace.h" #include "catalog/pg_opclass.h" #include "catalog/pg_policy.h" @@ -2389,12 +2390,15 @@ truncate_check_rel(Oid relid, Form_pg_class reltuple) /* * Most system catalogs can't be truncated at all, or at least not unless * allow_system_table_mods=on. As an exception, however, we allow - * pg_largeobject to be truncated as part of pg_upgrade, because we need - * to change its relfilenode to match the old cluster, and allowing a - * TRUNCATE command to be executed is the easiest way of doing that. + * pg_largeobject and pg_largeobject_metadata to be truncated as part of + * pg_upgrade, because we need to change its relfilenode to match the old + * cluster, and allowing a TRUNCATE command to be executed is the easiest + * way of doing that. */ if (!allowSystemTableMods && IsSystemClass(relid, reltuple) - && (!IsBinaryUpgrade || relid != LargeObjectRelationId)) + && (!IsBinaryUpgrade || + (relid != LargeObjectRelationId && + relid != LargeObjectMetadataRelationId))) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied: \"%s\" is a system catalog", @@ -8985,7 +8989,7 @@ ATExecSetStatistics(Relation rel, const char *colName, int16 colNum, Node *newVa memset(repl_null, false, sizeof(repl_null)); memset(repl_repl, false, sizeof(repl_repl)); if (!newtarget_default) - repl_val[Anum_pg_attribute_attstattarget - 1] = newtarget; + repl_val[Anum_pg_attribute_attstattarget - 1] = Int16GetDatum(newtarget); else repl_null[Anum_pg_attribute_attstattarget - 1] = true; repl_repl[Anum_pg_attribute_attstattarget - 1] = true; @@ -21750,7 +21754,8 @@ refuseDupeIndexAttach(Relation parentIdx, Relation partIdx, Relation partitionTb errmsg("cannot attach index \"%s\" as a partition of index \"%s\"", RelationGetRelationName(partIdx), RelationGetRelationName(parentIdx)), - errdetail("Another index is already attached for partition \"%s\".", + errdetail("Another index \"%s\" is already attached for partition \"%s\".", + get_rel_name(existingIdx), RelationGetRelationName(partitionTbl)))); } diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 7dc121f73f17e..579ac8d76ae73 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -30,7 +30,6 @@ #include "catalog/pg_proc.h" #include "catalog/pg_trigger.h" #include "catalog/pg_type.h" -#include "commands/dbcommands.h" #include "commands/trigger.h" #include "executor/executor.h" #include "miscadmin.h" @@ -872,7 +871,7 @@ CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString, CStringGetDatum(trigname)); values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid); values[Anum_pg_trigger_tgtype - 1] = 
Int16GetDatum(tgtype); - values[Anum_pg_trigger_tgenabled - 1] = trigger_fires_when; + values[Anum_pg_trigger_tgenabled - 1] = CharGetDatum(trigger_fires_when); values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal); values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid); values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid); @@ -2285,6 +2284,8 @@ FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc) { Trigger *trigger = &trigdesc->triggers[i]; + if (!TRIGGER_FOR_ROW(trigger->tgtype)) + continue; if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL) return trigger->tgname; } @@ -2545,6 +2546,15 @@ ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo, { TriggerDesc *trigdesc = relinfo->ri_TrigDesc; + if (relinfo->ri_FdwRoutine && transition_capture && + transition_capture->tcs_insert_new_table) + { + Assert(relinfo->ri_RootResultRelInfo); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot collect transition tuples from child foreign tables"))); + } + if ((trigdesc && trigdesc->trig_insert_after_row) || (transition_capture && transition_capture->tcs_insert_new_table)) AfterTriggerSaveEvent(estate, relinfo, NULL, NULL, @@ -2797,6 +2807,15 @@ ExecARDeleteTriggers(EState *estate, { TriggerDesc *trigdesc = relinfo->ri_TrigDesc; + if (relinfo->ri_FdwRoutine && transition_capture && + transition_capture->tcs_delete_old_table) + { + Assert(relinfo->ri_RootResultRelInfo); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot collect transition tuples from child foreign tables"))); + } + if ((trigdesc && trigdesc->trig_delete_after_row) || (transition_capture && transition_capture->tcs_delete_old_table)) { @@ -3134,6 +3153,16 @@ ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo, { TriggerDesc *trigdesc = relinfo->ri_TrigDesc; + if (relinfo->ri_FdwRoutine && transition_capture && + (transition_capture->tcs_update_old_table || + transition_capture->tcs_update_new_table)) + { + Assert(relinfo->ri_RootResultRelInfo); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot collect transition tuples from child foreign tables"))); + } + if ((trigdesc && trigdesc->trig_update_after_row) || (transition_capture && (transition_capture->tcs_update_old_table || diff --git a/src/backend/commands/tsearchcmds.c b/src/backend/commands/tsearchcmds.c index ab16d42ad56ba..dc7df736fb826 100644 --- a/src/backend/commands/tsearchcmds.c +++ b/src/backend/commands/tsearchcmds.c @@ -1058,10 +1058,10 @@ DefineTSConfiguration(List *names, List *parameters, ObjectAddress *copied) memset(slot[slot_stored_count]->tts_isnull, false, slot[slot_stored_count]->tts_tupleDescriptor->natts * sizeof(bool)); - slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapcfg - 1] = cfgOid; - slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_maptokentype - 1] = cfgmap->maptokentype; - slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapseqno - 1] = cfgmap->mapseqno; - slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapdict - 1] = cfgmap->mapdict; + slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapcfg - 1] = ObjectIdGetDatum(cfgOid); + slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_maptokentype - 1] = Int32GetDatum(cfgmap->maptokentype); + slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapseqno - 1] = Int32GetDatum(cfgmap->mapseqno); + slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapdict - 1] = 
ObjectIdGetDatum(cfgmap->mapdict);

 			ExecStoreVirtualTuple(slot[slot_stored_count]);
 			slot_stored_count++;
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 26d985193aea4..c6de04819f174 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -126,7 +126,7 @@ static Oid	findTypeSubscriptingFunction(List *procname, Oid typeOid);
 static Oid	findRangeSubOpclass(List *opcname, Oid subtype);
 static Oid	findRangeCanonicalFunction(List *procname, Oid typeOid);
 static Oid	findRangeSubtypeDiffFunction(List *procname, Oid subtype);
-static void validateDomainCheckConstraint(Oid domainoid, const char *ccbin);
+static void validateDomainCheckConstraint(Oid domainoid, const char *ccbin, LOCKMODE lockmode);
 static void validateDomainNotNullConstraint(Oid domainoid);
 static List *get_rels_with_domain(Oid domainOid, LOCKMODE lockmode);
 static void checkEnumOwner(HeapTuple tup);
@@ -2986,7 +2986,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint,
 	 * to.
 	 */
 	if (!constr->skip_validation)
-		validateDomainCheckConstraint(domainoid, ccbin);
+		validateDomainCheckConstraint(domainoid, ccbin, ShareLock);

 	/*
 	 * We must send out an sinval message for the domain, to ensure that
@@ -3098,7 +3098,12 @@ AlterDomainValidateConstraint(List *names, const char *constrName)
 	val = SysCacheGetAttrNotNull(CONSTROID, tuple, Anum_pg_constraint_conbin);
 	conbin = TextDatumGetCString(val);

-	validateDomainCheckConstraint(domainoid, conbin);
+	/*
+	 * Locking related relations with ShareUpdateExclusiveLock is OK because
+	 * not-yet-valid constraints are still enforced against concurrent inserts
+	 * or updates.
+	 */
+	validateDomainCheckConstraint(domainoid, conbin, ShareUpdateExclusiveLock);

 	/*
 	 * Now update the catalog, while we have the door open.
@@ -3191,9 +3196,16 @@ validateDomainNotNullConstraint(Oid domainoid)
 /*
 * Verify that all columns currently using the domain satisfy the given check
 * constraint expression.
+ *
+ * It is used to validate existing constraints and to add newly created check
+ * constraints to a domain.
+ *
+ * The lockmode is used for relations using the domain. It should be
+ * ShareLock when adding a new constraint to a domain. It can be
+ * ShareUpdateExclusiveLock when validating an existing constraint.
*/ static void -validateDomainCheckConstraint(Oid domainoid, const char *ccbin) +validateDomainCheckConstraint(Oid domainoid, const char *ccbin, LOCKMODE lockmode) { Expr *expr = (Expr *) stringToNode(ccbin); List *rels; @@ -3210,9 +3222,7 @@ validateDomainCheckConstraint(Oid domainoid, const char *ccbin) exprstate = ExecPrepareExpr(expr, estate); /* Fetch relation list with attributes based on this domain */ - /* ShareLock is sufficient to prevent concurrent data changes */ - - rels = get_rels_with_domain(domainoid, ShareLock); + rels = get_rels_with_domain(domainoid, lockmode); foreach(rt, rels) { diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index 0d638e29d0066..1e3d4ab0e20e7 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -1924,7 +1924,7 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid, */ if ((popt->specified & GRANT_ROLE_SPECIFIED_INHERIT) != 0) new_record[Anum_pg_auth_members_inherit_option - 1] = - popt->inherit; + BoolGetDatum(popt->inherit); else { HeapTuple mrtup; @@ -1935,14 +1935,14 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid, elog(ERROR, "cache lookup failed for role %u", memberid); mrform = (Form_pg_authid) GETSTRUCT(mrtup); new_record[Anum_pg_auth_members_inherit_option - 1] = - mrform->rolinherit; + BoolGetDatum(mrform->rolinherit); ReleaseSysCache(mrtup); } /* get an OID for the new row and insert it */ objectId = GetNewOidWithIndex(pg_authmem_rel, AuthMemOidIndexId, Anum_pg_auth_members_oid); - new_record[Anum_pg_auth_members_oid - 1] = objectId; + new_record[Anum_pg_auth_members_oid - 1] = ObjectIdGetDatum(objectId); tuple = heap_form_tuple(pg_authmem_dsc, new_record, new_record_nulls); CatalogTupleInsert(pg_authmem_rel, tuple); diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c index 1a37737d4a235..0e1a74976f7d3 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -2815,7 +2815,7 @@ ExecJustHashVarImpl(ExprState *state, TupleTableSlot *slot, bool *isnull) *isnull = false; if (!fcinfo->args[0].isnull) - return DatumGetUInt32(hashop->d.hashdatum.fn_addr(fcinfo)); + return hashop->d.hashdatum.fn_addr(fcinfo); else return (Datum) 0; } @@ -2849,7 +2849,7 @@ ExecJustHashVarVirtImpl(ExprState *state, TupleTableSlot *slot, bool *isnull) *isnull = false; if (!fcinfo->args[0].isnull) - return DatumGetUInt32(hashop->d.hashdatum.fn_addr(fcinfo)); + return hashop->d.hashdatum.fn_addr(fcinfo); else return (Datum) 0; } @@ -2892,7 +2892,7 @@ ExecJustHashOuterVarStrict(ExprState *state, ExprContext *econtext, if (!fcinfo->args[0].isnull) { *isnull = false; - return DatumGetUInt32(hashop->d.hashdatum.fn_addr(fcinfo)); + return hashop->d.hashdatum.fn_addr(fcinfo); } else { @@ -4393,7 +4393,7 @@ ExecEvalHashedScalarArrayOp(ExprState *state, ExprEvalStep *op, ExprContext *eco * is the equality function and we need not-equals. */ if (!inclause) - result = !result; + result = BoolGetDatum(!DatumGetBool(result)); } } diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c index b540074935386..75087204f0c69 100644 --- a/src/backend/executor/execGrouping.c +++ b/src/backend/executor/execGrouping.c @@ -156,6 +156,12 @@ execTuplesHashPrepare(int numCols, * * Note that the keyColIdx, hashfunctions, and collations arrays must be * allocated in storage that will live as long as the hashtable does. 
+ * + * LookupTupleHashEntry, FindTupleHashEntry, and related functions may leak + * memory in the tempcxt. It is caller's responsibility to reset that context + * reasonably often, typically once per tuple. (We do it that way, rather + * than managing an extra context within the hashtable, because in many cases + * the caller can specify a tempcxt that it needs to reset per-tuple anyway.) */ TupleHashTable BuildTupleHashTable(PlanState *parent, diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index 0391798dd2c33..ff12e2e136438 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -84,7 +84,6 @@ static void ExecutePlan(QueryDesc *queryDesc, uint64 numberTuples, ScanDirection direction, DestReceiver *dest); -static bool ExecCheckOneRelPerms(RTEPermissionInfo *perminfo); static bool ExecCheckPermissionsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols, AclMode requiredPerms); @@ -643,7 +642,7 @@ ExecCheckPermissions(List *rangeTable, List *rteperminfos, * ExecCheckOneRelPerms * Check access permissions for a single relation. */ -static bool +bool ExecCheckOneRelPerms(RTEPermissionInfo *perminfo) { AclMode requiredPerms; @@ -1037,6 +1036,9 @@ InitPlan(QueryDesc *queryDesc, int eflags) * Generally the parser and/or planner should have noticed any such mistake * already, but let's make sure. * + * For INSERT ON CONFLICT, the result relation is required to support the + * onConflictAction, regardless of whether a conflict actually occurs. + * * For MERGE, mergeActions is the list of actions that may be performed. The * result relation is required to support every action, regardless of whether * or not they are all executed. @@ -1046,7 +1048,7 @@ InitPlan(QueryDesc *queryDesc, int eflags) */ void CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation, - List *mergeActions) + OnConflictAction onConflictAction, List *mergeActions) { Relation resultRel = resultRelInfo->ri_RelationDesc; FdwRoutine *fdwroutine; @@ -1059,7 +1061,23 @@ CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation, { case RELKIND_RELATION: case RELKIND_PARTITIONED_TABLE: - CheckCmdReplicaIdentity(resultRel, operation); + + /* + * For MERGE, check that the target relation supports each action. + * For other operations, just check the operation itself. + */ + if (operation == CMD_MERGE) + foreach_node(MergeAction, action, mergeActions) + CheckCmdReplicaIdentity(resultRel, action->commandType); + else + CheckCmdReplicaIdentity(resultRel, operation); + + /* + * For INSERT ON CONFLICT DO UPDATE, additionally check that the + * target relation supports UPDATE. 
+			 */
+			if (onConflictAction == ONCONFLICT_UPDATE)
+				CheckCmdReplicaIdentity(resultRel, CMD_UPDATE);
 			break;
 		case RELKIND_SEQUENCE:
 			ereport(ERROR,
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index fc76f22fb8238..f098a5557cf07 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -189,7 +189,7 @@ ExecSerializePlan(Plan *plan, EState *estate)
 	pstmt->permInfos = estate->es_rteperminfos;
 	pstmt->resultRelations = NIL;
 	pstmt->appendRelations = NIL;
-	pstmt->cached_plan_type = PLAN_CACHE_NONE;
+	pstmt->planOrigin = PLAN_STMT_INTERNAL;

 	/*
 	 * Transfer only parallel-safe subplans, leaving a NULL "hole" in the list
diff --git a/src/backend/executor/execPartition.c b/src/backend/executor/execPartition.c
index 514eae1037dc3..1f2da072632e3 100644
--- a/src/backend/executor/execPartition.c
+++ b/src/backend/executor/execPartition.c
@@ -360,8 +360,12 @@ ExecFindPartition(ModifyTableState *mtstate,
 											true, false);
 				if (rri)
 				{
+					ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
+
 					/* Verify this ResultRelInfo allows INSERTs */
-					CheckValidResultRel(rri, CMD_INSERT, NIL);
+					CheckValidResultRel(rri, CMD_INSERT,
+										node ? node->onConflictAction : ONCONFLICT_NONE,
+										NIL);

 					/*
 					 * Initialize information needed to insert this and
@@ -527,7 +531,8 @@ ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate,
 	 * partition-key becomes a DELETE+INSERT operation, so this check is still
 	 * required when the operation is CMD_UPDATE.
 	 */
-	CheckValidResultRel(leaf_part_rri, CMD_INSERT, NIL);
+	CheckValidResultRel(leaf_part_rri, CMD_INSERT,
+						node ? node->onConflictAction : ONCONFLICT_NONE, NIL);

 	/*
 	 * Open partition indices. The user may have asked to check for conflicts
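Both partition-routing call sites above derive CheckValidResultRel()'s new third argument the same way; a condensed sketch of the contract, with a hypothetical wrapper name (verify_insert_target is not part of the patch):

    /* Hypothetical wrapper, for illustration only. */
    static void
    verify_insert_target(ResultRelInfo *rri, ModifyTable *node)
    {
    	/* Without a ModifyTable plan there can be no ON CONFLICT clause. */
    	OnConflictAction action = node ? node->onConflictAction : ONCONFLICT_NONE;

    	/*
    	 * CheckValidResultRel() now also validates replica identity for
    	 * CMD_UPDATE whenever action is ONCONFLICT_UPDATE, regardless of
    	 * whether a conflict actually occurs at run time.
    	 */
    	CheckValidResultRel(rri, CMD_INSERT, action, NIL);
    }
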
diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c
index f262e7a66f771..b409d4ecbf525 100644
--- a/src/backend/executor/execReplication.c
+++ b/src/backend/executor/execReplication.c
@@ -14,12 +14,14 @@

 #include "postgres.h"

+#include "access/commit_ts.h"
 #include "access/genam.h"
 #include "access/gist.h"
+#include "access/heapam.h"
 #include "access/relscan.h"
 #include "access/tableam.h"
 #include "access/transam.h"
 #include "access/xact.h"
 #include "catalog/pg_am_d.h"
 #include "commands/trigger.h"
 #include "executor/executor.h"
@@ -36,7 +38,7 @@

 static bool tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2,
-						 TypeCacheEntry **eq);
+						 TypeCacheEntry **eq, Bitmapset *columns);

 /*
 * Setup a ScanKey for a search in the relation 'rel' for a tuple 'key' that
@@ -221,7 +223,7 @@ RelationFindReplTupleByIndex(Relation rel, Oid idxoid,
 			if (eq == NULL)
 				eq = palloc0(sizeof(*eq) * outslot->tts_tupleDescriptor->natts);

-			if (!tuples_equal(outslot, searchslot, eq))
+			if (!tuples_equal(outslot, searchslot, eq, NULL))
 				continue;
 		}

@@ -277,10 +279,13 @@ RelationFindReplTupleByIndex(Relation rel, Oid idxoid,

 /*
 * Compare the tuples in the slots by checking if they have equal values.
+ *
+ * If 'columns' is not null, only the columns specified within it will be
+ * considered for the equality check, ignoring all other columns.
 */
 static bool
 tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2,
-			 TypeCacheEntry **eq)
+			 TypeCacheEntry **eq, Bitmapset *columns)
 {
 	int			attrnum;

@@ -305,6 +310,14 @@ tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2,
 		if (att->attisdropped || att->attgenerated)
 			continue;

+		/*
+		 * Ignore columns that are not listed for checking.
+		 */
+		if (columns &&
+			!bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
+						   columns))
+			continue;
+
 		/*
 		 * If one value is NULL and other is not, then they are certainly not
 		 * equal
@@ -380,7 +393,7 @@ RelationFindReplTupleSeq(Relation rel, LockTupleMode lockmode,
 	/* Try to find the tuple */
 	while (table_scan_getnextslot(scan, ForwardScanDirection, scanslot))
 	{
-		if (!tuples_equal(scanslot, searchslot, eq))
+		if (!tuples_equal(scanslot, searchslot, eq, NULL))
 			continue;

 		found = true;
@@ -455,6 +468,236 @@ BuildConflictIndexInfo(ResultRelInfo *resultRelInfo, Oid conflictindex)
 	}
 }

+/*
+ * If the tuple is recently dead and was deleted by a transaction with a newer
+ * commit timestamp than previously recorded, update the associated transaction
+ * ID, commit time, and origin. This helps ensure that conflict detection uses
+ * the most recent and relevant deletion metadata.
+ */
+static void
+update_most_recent_deletion_info(TupleTableSlot *scanslot,
+								 TransactionId oldestxmin,
+								 TransactionId *delete_xid,
+								 TimestampTz *delete_time,
+								 RepOriginId *delete_origin)
+{
+	BufferHeapTupleTableSlot *hslot;
+	HeapTuple	tuple;
+	Buffer		buf;
+	bool		recently_dead = false;
+	TransactionId xmax;
+	TimestampTz localts;
+	RepOriginId localorigin;
+
+	hslot = (BufferHeapTupleTableSlot *) scanslot;
+
+	tuple = ExecFetchSlotHeapTuple(scanslot, false, NULL);
+	buf = hslot->buffer;
+
+	LockBuffer(buf, BUFFER_LOCK_SHARE);
+
+	/*
+	 * We do not consider HEAPTUPLE_DEAD status because it indicates either
+	 * tuples whose inserting transaction was aborted (meaning there is no
+	 * commit timestamp or origin), or tuples deleted by a transaction older
+	 * than oldestxmin, making it safe to ignore them during conflict
+	 * detection (See comments atop worker.c for details).
+	 */
+	if (HeapTupleSatisfiesVacuum(tuple, oldestxmin, buf) == HEAPTUPLE_RECENTLY_DEAD)
+		recently_dead = true;
+
+	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+
+	if (!recently_dead)
+		return;
+
+	xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
+	if (!TransactionIdIsValid(xmax))
+		return;
+
+	/* Select the dead tuple with the most recent commit timestamp */
+	if (TransactionIdGetCommitTsData(xmax, &localts, &localorigin) &&
+		TimestampDifferenceExceeds(*delete_time, localts, 0))
+	{
+		*delete_xid = xmax;
+		*delete_time = localts;
+		*delete_origin = localorigin;
+	}
+}
+
+/*
+ * Searches the relation 'rel' for the most recently deleted tuple that matches
+ * the values in 'searchslot' and is not yet removable by VACUUM. The function
+ * returns the transaction ID, origin, and commit timestamp of the transaction
+ * that deleted this tuple.
+ *
+ * 'oldestxmin' acts as a cutoff transaction ID. Tuples deleted by transactions
+ * with IDs >= 'oldestxmin' are considered recently dead and are eligible for
+ * conflict detection.
+ *
+ * Instead of stopping at the first match, we scan all matching dead tuples to
+ * identify the most recent deletion. This is crucial because only the latest
+ * deletion is relevant for resolving conflicts.
+ *
+ * For example, consider a scenario on the subscriber where a row is deleted,
+ * re-inserted, and then deleted again only on the subscriber:
+ *
+ * - (pk, 1) - deleted at 9:00,
+ * - (pk, 1) - deleted at 9:02,
+ *
+ * Now, a remote update arrives: (pk, 1) -> (pk, 2), timestamped at 9:01.
+ *
+ * If we mistakenly return the older deletion (9:00), the system may wrongly
+ * apply the remote update using a last-update-wins strategy. Instead, we must
+ * recognize the more recent deletion at 9:02 and skip the update.
See + * comments atop worker.c for details. Note, as of now, conflict resolution + * is not implemented. Consequently, the system may incorrectly report the + * older tuple as the conflicted one, leading to misleading results. + * + * The commit timestamp of the deleting transaction is used to determine which + * tuple was deleted most recently. + */ +bool +RelationFindDeletedTupleInfoSeq(Relation rel, TupleTableSlot *searchslot, + TransactionId oldestxmin, + TransactionId *delete_xid, + RepOriginId *delete_origin, + TimestampTz *delete_time) +{ + TupleTableSlot *scanslot; + TableScanDesc scan; + TypeCacheEntry **eq; + Bitmapset *indexbitmap; + TupleDesc desc PG_USED_FOR_ASSERTS_ONLY = RelationGetDescr(rel); + + Assert(equalTupleDescs(desc, searchslot->tts_tupleDescriptor)); + + *delete_xid = InvalidTransactionId; + *delete_origin = InvalidRepOriginId; + *delete_time = 0; + + /* + * If the relation has a replica identity key or a primary key that is + * unusable for locating deleted tuples (see + * IsIndexUsableForFindingDeletedTuple), a full table scan becomes + * necessary. In such cases, comparing the entire tuple is not required, + * since the remote tuple might not include all column values. Instead, + * the indexed columns alone are sufficient to identify the target tuple + * (see logicalrep_rel_mark_updatable). + */ + indexbitmap = RelationGetIndexAttrBitmap(rel, + INDEX_ATTR_BITMAP_IDENTITY_KEY); + + /* fallback to PK if no replica identity */ + if (!indexbitmap) + indexbitmap = RelationGetIndexAttrBitmap(rel, + INDEX_ATTR_BITMAP_PRIMARY_KEY); + + eq = palloc0(sizeof(*eq) * searchslot->tts_tupleDescriptor->natts); + + /* + * Start a heap scan using SnapshotAny to identify dead tuples that are + * not visible under a standard MVCC snapshot. Tuples from transactions + * not yet committed or those just committed prior to the scan are + * excluded in update_most_recent_deletion_info(). + */ + scan = table_beginscan(rel, SnapshotAny, 0, NULL); + scanslot = table_slot_create(rel, NULL); + + table_rescan(scan, NULL); + + /* Try to find the tuple */ + while (table_scan_getnextslot(scan, ForwardScanDirection, scanslot)) + { + if (!tuples_equal(scanslot, searchslot, eq, indexbitmap)) + continue; + + update_most_recent_deletion_info(scanslot, oldestxmin, delete_xid, + delete_time, delete_origin); + } + + table_endscan(scan); + ExecDropSingleTupleTableSlot(scanslot); + + return *delete_time != 0; +} + +/* + * Similar to RelationFindDeletedTupleInfoSeq() but using index scan to locate + * the deleted tuple. + */ +bool +RelationFindDeletedTupleInfoByIndex(Relation rel, Oid idxoid, + TupleTableSlot *searchslot, + TransactionId oldestxmin, + TransactionId *delete_xid, + RepOriginId *delete_origin, + TimestampTz *delete_time) +{ + Relation idxrel; + ScanKeyData skey[INDEX_MAX_KEYS]; + int skey_attoff; + IndexScanDesc scan; + TupleTableSlot *scanslot; + TypeCacheEntry **eq = NULL; + bool isIdxSafeToSkipDuplicates; + TupleDesc desc PG_USED_FOR_ASSERTS_ONLY = RelationGetDescr(rel); + + Assert(equalTupleDescs(desc, searchslot->tts_tupleDescriptor)); + Assert(OidIsValid(idxoid)); + + *delete_xid = InvalidTransactionId; + *delete_time = 0; + *delete_origin = InvalidRepOriginId; + + isIdxSafeToSkipDuplicates = (GetRelationIdentityOrPK(rel) == idxoid); + + scanslot = table_slot_create(rel, NULL); + + idxrel = index_open(idxoid, RowExclusiveLock); + + /* Build scan key. 
*/ + skey_attoff = build_replindex_scan_key(skey, rel, idxrel, searchslot); + + /* + * Start an index scan using SnapshotAny to identify dead tuples that are + * not visible under a standard MVCC snapshot. Tuples from transactions + * not yet committed or those just committed prior to the scan are + * excluded in update_most_recent_deletion_info(). + */ + scan = index_beginscan(rel, idxrel, SnapshotAny, NULL, skey_attoff, 0); + + index_rescan(scan, skey, skey_attoff, NULL, 0); + + /* Try to find the tuple */ + while (index_getnext_slot(scan, ForwardScanDirection, scanslot)) + { + /* + * Avoid expensive equality check if the index is primary key or + * replica identity index. + */ + if (!isIdxSafeToSkipDuplicates) + { + if (eq == NULL) + eq = palloc0(sizeof(*eq) * scanslot->tts_tupleDescriptor->natts); + + if (!tuples_equal(scanslot, searchslot, eq, NULL)) + continue; + } + + update_most_recent_deletion_info(scanslot, oldestxmin, delete_xid, + delete_time, delete_origin); + } + + index_endscan(scan); + + index_close(idxrel, NoLock); + + ExecDropSingleTupleTableSlot(scanslot); + + return *delete_time != 0; +} + /* * Find the tuple that violates the passed unique index (conflictindex). * @@ -609,10 +852,10 @@ ExecSimpleRelationInsert(ResultRelInfo *resultRelInfo, conflictindexes, false); /* - * Checks the conflict indexes to fetch the conflicting local tuple - * and reports the conflict. We perform this check here, instead of + * Checks the conflict indexes to fetch the conflicting local row and + * reports the conflict. We perform this check here, instead of * performing an additional index scan before the actual insertion and - * reporting the conflict if any conflicting tuples are found. This is + * reporting the conflict if any conflicting rows are found. This is * to avoid the overhead of executing the extra scan for each INSERT * operation, even when no conflict arises, which could introduce * significant overhead to replication, particularly in cases where diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index 359aafea681b9..630d708d2a3f0 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -143,6 +143,7 @@ typedef struct SQLFunctionCache { SQLFunctionHashEntry *func; /* associated SQLFunctionHashEntry */ + bool active; /* are we executing this cache entry? */ bool lazyEvalOK; /* true if lazyEval is safe */ bool shutdown_reg; /* true if registered shutdown callback */ bool lazyEval; /* true if using lazyEval for result query */ @@ -556,6 +557,28 @@ init_sql_fcache(FunctionCallInfo fcinfo, bool lazyEvalOK) finfo->fn_extra = fcache; } + /* + * If the SQLFunctionCache is marked as active, we must have errored out + * of a prior execution. Reset state. (It might seem that we could also + * reach this during recursive invocation of a SQL function, but we won't + * because that case won't involve re-use of the same FmgrInfo.) + */ + if (fcache->active) + { + /* + * In general, this stanza should clear all the same fields that + * ShutdownSQLFunction would. Note we must clear fcache->cplan + * without doing ReleaseCachedPlan, because error cleanup from the + * prior execution would have taken care of releasing that plan. + * Likewise, if tstore is still set then it is pointing at garbage. + */ + fcache->cplan = NULL; + fcache->eslist = NULL; + fcache->tstore = NULL; + fcache->shutdown_reg = false; + fcache->active = false; + } + /* * If we are resuming execution of a set-returning function, just keep * using the same cache. 
We do not ask funccache.c to re-validate the @@ -1597,6 +1620,9 @@ fmgr_sql(PG_FUNCTION_ARGS) */ fcache = init_sql_fcache(fcinfo, lazyEvalOK); + /* Mark fcache as active */ + fcache->active = true; + /* Remember info that we might need later to construct tuplestore */ fcache->tscontext = tscontext; fcache->randomAccess = randomAccess; @@ -1853,6 +1879,9 @@ fmgr_sql(PG_FUNCTION_ARGS) if (es == NULL) fcache->eslist = NULL; + /* Mark fcache as inactive */ + fcache->active = false; + error_context_stack = sqlerrcontext.previous; return result; @@ -2454,7 +2483,7 @@ check_sql_stmt_retval(List *queryTreeList, rte = makeNode(RangeTblEntry); rte->rtekind = RTE_SUBQUERY; rte->subquery = parse; - rte->eref = rte->alias = makeAlias("*SELECT*", colnames); + rte->eref = makeAlias("unnamed_subquery", colnames); rte->lateral = false; rte->inh = false; rte->inFromCl = true; diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 377e016d73225..a4f3d30f307cc 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -267,7 +267,6 @@ #include "utils/acl.h" #include "utils/builtins.h" #include "utils/datum.h" -#include "utils/dynahash.h" #include "utils/expandeddatum.h" #include "utils/injection_point.h" #include "utils/logtape.h" @@ -2115,7 +2114,7 @@ hash_choose_num_partitions(double input_groups, double hashentrysize, npartitions = (int) dpartitions; /* ceil(log2(npartitions)) */ - partition_bits = my_log2(npartitions); + partition_bits = pg_ceil_log2_32(npartitions); /* make sure that we don't exhaust the hash bits */ if (partition_bits + used_bits >= 32) diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index 8d2201ab67fa5..a3415db4e20f5 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -36,7 +36,6 @@ #include "executor/nodeHashjoin.h" #include "miscadmin.h" #include "port/pg_bitutils.h" -#include "utils/dynahash.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/syscache.h" @@ -340,7 +339,7 @@ MultiExecParallelHash(HashState *node) */ hashtable->curbatch = -1; hashtable->nbuckets = pstate->nbuckets; - hashtable->log2_nbuckets = my_log2(hashtable->nbuckets); + hashtable->log2_nbuckets = pg_ceil_log2_32(hashtable->nbuckets); hashtable->totalTuples = pstate->total_tuples; /* @@ -480,7 +479,7 @@ ExecHashTableCreate(HashState *state) &nbuckets, &nbatch, &num_skew_mcvs); /* nbuckets must be a power of 2 */ - log2_nbuckets = my_log2(nbuckets); + log2_nbuckets = pg_ceil_log2_32(nbuckets); Assert(nbuckets == (1 << log2_nbuckets)); /* @@ -3499,7 +3498,7 @@ ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno) dsa_get_address(hashtable->area, hashtable->batches[batchno].shared->buckets); hashtable->nbuckets = hashtable->parallel_state->nbuckets; - hashtable->log2_nbuckets = my_log2(hashtable->nbuckets); + hashtable->log2_nbuckets = pg_ceil_log2_32(hashtable->nbuckets); hashtable->current_chunk = NULL; hashtable->current_chunk_shared = InvalidDsaPointer; hashtable->batches[batchno].at_least_one_chunk = false; diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index 7c6c2c1f6e42a..4c5647ac38a1c 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -3402,7 +3402,7 @@ ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo, * the tuple moved, and setting our current * resultRelInfo to that. 
*/ - if (ItemPointerIndicatesMovedPartitions(&context->tmfd.ctid)) + if (ItemPointerIndicatesMovedPartitions(tupleid)) ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("tuple to be merged was already moved to another partition due to concurrent update"))); @@ -3450,12 +3450,13 @@ ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo, if (ItemPointerIsValid(&lockedtid)) UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid, InplaceUpdateTupleLock); - LockTuple(resultRelInfo->ri_RelationDesc, &context->tmfd.ctid, + LockTuple(resultRelInfo->ri_RelationDesc, tupleid, InplaceUpdateTupleLock); - lockedtid = context->tmfd.ctid; + lockedtid = *tupleid; } + if (!table_tuple_fetch_row_version(resultRelationDesc, - &context->tmfd.ctid, + tupleid, SnapshotAny, resultRelInfo->ri_oldTupleSlot)) elog(ERROR, "failed to fetch the target tuple"); @@ -4811,7 +4812,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) /* * Verify result relation is a valid target for the current operation */ - CheckValidResultRel(resultRelInfo, operation, mergeActions); + CheckValidResultRel(resultRelInfo, operation, node->onConflictAction, + mergeActions); resultRelInfo++; i++; diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c index ed35c58c2c346..94047d29430d6 100644 --- a/src/backend/executor/nodeSeqscan.c +++ b/src/backend/executor/nodeSeqscan.c @@ -131,8 +131,12 @@ ExecSeqScanWithQual(PlanState *pstate) { SeqScanState *node = castNode(SeqScanState, pstate); + /* + * Use pg_assume() for != NULL tests to make the compiler realize no + * runtime check for the field is needed in ExecScanExtended(). + */ Assert(pstate->state->es_epq_active == NULL); - Assert(pstate->qual != NULL); + pg_assume(pstate->qual != NULL); Assert(pstate->ps_ProjInfo == NULL); return ExecScanExtended(&node->ss, @@ -153,7 +157,7 @@ ExecSeqScanWithProject(PlanState *pstate) Assert(pstate->state->es_epq_active == NULL); Assert(pstate->qual == NULL); - Assert(pstate->ps_ProjInfo != NULL); + pg_assume(pstate->ps_ProjInfo != NULL); return ExecScanExtended(&node->ss, (ExecScanAccessMtd) SeqNext, @@ -173,8 +177,8 @@ ExecSeqScanWithQualProject(PlanState *pstate) SeqScanState *node = castNode(SeqScanState, pstate); Assert(pstate->state->es_epq_active == NULL); - Assert(pstate->qual != NULL); - Assert(pstate->ps_ProjInfo != NULL); + pg_assume(pstate->qual != NULL); + pg_assume(pstate->ps_ProjInfo != NULL); return ExecScanExtended(&node->ss, (ExecScanAccessMtd) SeqNext, diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c index f7f6fc2da0b95..53fb56f7388e8 100644 --- a/src/backend/executor/nodeSubplan.c +++ b/src/backend/executor/nodeSubplan.c @@ -102,6 +102,7 @@ ExecHashSubPlan(SubPlanState *node, ExprContext *econtext, bool *isNull) { + bool result = false; SubPlan *subplan = node->subplan; PlanState *planstate = node->planstate; TupleTableSlot *slot; @@ -132,14 +133,6 @@ ExecHashSubPlan(SubPlanState *node, node->projLeft->pi_exprContext = econtext; slot = ExecProject(node->projLeft); - /* - * Note: because we are typically called in a per-tuple context, we have - * to explicitly clear the projected tuple before returning. Otherwise, - * we'll have a double-free situation: the per-tuple context will probably - * be reset before we're called again, and then the tuple slot will think - * it still needs to free the tuple. - */ - /* * If the LHS is all non-null, probe for an exact match in the main hash * table. 
If we find one, the result is TRUE. Otherwise, scan the @@ -161,19 +154,10 @@ ExecHashSubPlan(SubPlanState *node, slot, node->cur_eq_comp, node->lhs_hash_expr) != NULL) - { - ExecClearTuple(slot); - return BoolGetDatum(true); - } - if (node->havenullrows && - findPartialMatch(node->hashnulls, slot, node->cur_eq_funcs)) - { - ExecClearTuple(slot); + result = true; + else if (node->havenullrows && + findPartialMatch(node->hashnulls, slot, node->cur_eq_funcs)) *isNull = true; - return BoolGetDatum(false); - } - ExecClearTuple(slot); - return BoolGetDatum(false); } /* @@ -186,34 +170,31 @@ ExecHashSubPlan(SubPlanState *node, * aren't provably unequal to the LHS; if so, the result is UNKNOWN. * Otherwise, the result is FALSE. */ - if (node->hashnulls == NULL) - { - ExecClearTuple(slot); - return BoolGetDatum(false); - } - if (slotAllNulls(slot)) - { - ExecClearTuple(slot); + else if (node->hashnulls == NULL) + /* just return FALSE */ ; + else if (slotAllNulls(slot)) *isNull = true; - return BoolGetDatum(false); - } /* Scan partly-null table first, since more likely to get a match */ - if (node->havenullrows && - findPartialMatch(node->hashnulls, slot, node->cur_eq_funcs)) - { - ExecClearTuple(slot); + else if (node->havenullrows && + findPartialMatch(node->hashnulls, slot, node->cur_eq_funcs)) *isNull = true; - return BoolGetDatum(false); - } - if (node->havehashrows && - findPartialMatch(node->hashtable, slot, node->cur_eq_funcs)) - { - ExecClearTuple(slot); + else if (node->havehashrows && + findPartialMatch(node->hashtable, slot, node->cur_eq_funcs)) *isNull = true; - return BoolGetDatum(false); - } + + /* + * Note: because we are typically called in a per-tuple context, we have + * to explicitly clear the projected tuple before returning. Otherwise, + * we'll have a double-free situation: the per-tuple context will probably + * be reset before we're called again, and then the tuple slot will think + * it still needs to free the tuple. + */ ExecClearTuple(slot); - return BoolGetDatum(false); + + /* Also must reset the innerecontext after each hashtable lookup. */ + ResetExprContext(node->innerecontext); + + return BoolGetDatum(result); } /* @@ -548,7 +529,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext) 0, node->planstate->state->es_query_cxt, node->hashtablecxt, - node->hashtempcxt, + innerecontext->ecxt_per_tuple_memory, false); if (!subplan->unknownEqFalse) @@ -577,7 +558,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext) 0, node->planstate->state->es_query_cxt, node->hashtablecxt, - node->hashtempcxt, + innerecontext->ecxt_per_tuple_memory, false); } else @@ -639,7 +620,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext) /* * Reset innerecontext after each inner tuple to free any memory used - * during ExecProject. + * during ExecProject and hashtable lookup. 
*/ ResetExprContext(innerecontext); } @@ -858,7 +839,6 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent) sstate->hashtable = NULL; sstate->hashnulls = NULL; sstate->hashtablecxt = NULL; - sstate->hashtempcxt = NULL; sstate->innerecontext = NULL; sstate->keyColIdx = NULL; sstate->tab_eq_funcoids = NULL; @@ -914,11 +894,6 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent) AllocSetContextCreate(CurrentMemoryContext, "Subplan HashTable Context", ALLOCSET_DEFAULT_SIZES); - /* and a small one for the hash tables to use as temp storage */ - sstate->hashtempcxt = - AllocSetContextCreate(CurrentMemoryContext, - "Subplan HashTable Temp Context", - ALLOCSET_SMALL_SIZES); /* and a short-lived exprcontext for function evaluation */ sstate->innerecontext = CreateExprContext(estate); diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index ecb2e4ccaa1ca..50fcd0237768e 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -1258,7 +1258,7 @@ SPI_getbinval(HeapTuple tuple, TupleDesc tupdesc, int fnumber, bool *isnull) { SPI_result = SPI_ERROR_NOATTRIBUTE; *isnull = true; - return (Datum) NULL; + return (Datum) 0; } return heap_getattr(tuple, fnumber, tupdesc, isnull); diff --git a/src/backend/jit/llvm/Makefile b/src/backend/jit/llvm/Makefile index e8c12060b93df..68677ba42e189 100644 --- a/src/backend/jit/llvm/Makefile +++ b/src/backend/jit/llvm/Makefile @@ -31,7 +31,7 @@ endif # All files in this directory use LLVM. CFLAGS += $(LLVM_CFLAGS) CXXFLAGS += $(LLVM_CXXFLAGS) -override CPPFLAGS := $(LLVM_CPPFLAGS) $(CPPFLAGS) +override CPPFLAGS += $(LLVM_CPPFLAGS) SHLIB_LINK += $(LLVM_LIBS) # Because this module includes C++ files, we need to use a C++ diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c index e5171467de18d..25f739a6a17d4 100644 --- a/src/backend/libpq/pqcomm.c +++ b/src/backend/libpq/pqcomm.c @@ -858,7 +858,6 @@ RemoveSocketFiles(void) (void) unlink(sock_path); } /* Since we're about to exit, no need to reclaim storage */ - sock_paths = NIL; } diff --git a/src/backend/libpq/pqmq.c b/src/backend/libpq/pqmq.c index f1a08bc32ca17..5f39949a36773 100644 --- a/src/backend/libpq/pqmq.c +++ b/src/backend/libpq/pqmq.c @@ -23,7 +23,7 @@ #include "tcop/tcopprot.h" #include "utils/builtins.h" -static shm_mq_handle *pq_mq_handle; +static shm_mq_handle *pq_mq_handle = NULL; static bool pq_mq_busy = false; static pid_t pq_mq_parallel_leader_pid = 0; static ProcNumber pq_mq_parallel_leader_proc_number = INVALID_PROC_NUMBER; @@ -66,7 +66,11 @@ pq_redirect_to_shm_mq(dsm_segment *seg, shm_mq_handle *mqh) static void pq_cleanup_redirect_to_shm_mq(dsm_segment *seg, Datum arg) { - pq_mq_handle = NULL; + if (pq_mq_handle != NULL) + { + pfree(pq_mq_handle); + pq_mq_handle = NULL; + } whereToSendOutput = DestNone; } @@ -131,8 +135,11 @@ mq_putmessage(char msgtype, const char *s, size_t len) if (pq_mq_busy) { if (pq_mq_handle != NULL) + { shm_mq_detach(pq_mq_handle); - pq_mq_handle = NULL; + pfree(pq_mq_handle); + pq_mq_handle = NULL; + } return EOF; } @@ -152,8 +159,6 @@ mq_putmessage(char msgtype, const char *s, size_t len) iov[1].data = s; iov[1].len = len; - Assert(pq_mq_handle != NULL); - for (;;) { /* @@ -161,6 +166,7 @@ mq_putmessage(char msgtype, const char *s, size_t len) * that the shared memory value is updated before we send the parallel * message signal right after this. 
*/ + Assert(pq_mq_handle != NULL); result = shm_mq_sendv(pq_mq_handle, iov, 2, true, true); if (pq_mq_parallel_leader_pid != 0) diff --git a/src/backend/meson.build b/src/backend/meson.build index 2b0db21480470..b831a541652bc 100644 --- a/src/backend/meson.build +++ b/src/backend/meson.build @@ -169,7 +169,7 @@ backend_mod_code = declare_dependency( compile_args: pg_mod_c_args, include_directories: postgres_inc, link_args: pg_mod_link_args, - sources: generated_headers + generated_backend_headers, + sources: generated_backend_headers_stamp, dependencies: backend_mod_deps, ) diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c index bf512cf806ff7..b4ecf0b039017 100644 --- a/src/backend/nodes/bitmapset.c +++ b/src/backend/nodes/bitmapset.c @@ -1343,7 +1343,7 @@ bms_next_member(const Bitmapset *a, int prevbit) * * Returns largest member less than "prevbit", or -2 if there is none. * "prevbit" must NOT be more than one above the highest possible bit that can - * be set at the Bitmapset at its current size. + * be set in the Bitmapset at its current size. * * To ease finding the highest set bit for the initial loop, the special * prevbit value of -1 can be passed to have the function find the highest @@ -1379,6 +1379,10 @@ bms_prev_member(const Bitmapset *a, int prevbit) if (a == NULL || prevbit == 0) return -2; + /* Validate callers didn't give us something out of range */ + Assert(prevbit <= a->nwords * BITS_PER_BITMAPWORD); + Assert(prevbit >= -1); + /* transform -1 to the highest possible bit we could have set */ if (prevbit == -1) prevbit = a->nwords * BITS_PER_BITMAPWORD - 1; diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c index 48b5d13b9b62c..2f933e95cb95e 100644 --- a/src/backend/nodes/readfuncs.c +++ b/src/backend/nodes/readfuncs.c @@ -630,7 +630,7 @@ readDatum(bool typbyval) } } else if (length <= 0) - res = (Datum) NULL; + res = (Datum) 0; else { s = (char *) palloc(length); diff --git a/src/backend/optimizer/README b/src/backend/optimizer/README index 9c724ccfabf83..843368096fd0d 100644 --- a/src/backend/optimizer/README +++ b/src/backend/optimizer/README @@ -640,7 +640,6 @@ RelOptInfo - a relation or joined relations GroupResultPath - childless Result plan node (used for degenerate grouping) MaterialPath - a Material plan node MemoizePath - a Memoize plan node for caching tuples from sub-paths - UniquePath - remove duplicate rows (either by hashing or sorting) GatherPath - collect the results of parallel workers GatherMergePath - collect parallel results, preserving their common sort order ProjectionPath - a Result plan node with child (used for projection) @@ -648,7 +647,7 @@ RelOptInfo - a relation or joined relations SortPath - a Sort plan node applied to some sub-path IncrementalSortPath - an IncrementalSort plan node applied to some sub-path GroupPath - a Group plan node applied to some sub-path - UpperUniquePath - a Unique plan node applied to some sub-path + UniquePath - a Unique plan node applied to some sub-path AggPath - an Agg plan node applied to some sub-path GroupingSetsPath - an Agg plan node used to implement GROUPING SETS MinMaxAggPath - a Result plan node with subplans performing MIN/MAX diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index 1f04a2c182ca9..94077e6a006d5 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -2572,13 +2572,13 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath, Cost input_startup_cost = 
mpath->subpath->startup_cost; Cost input_total_cost = mpath->subpath->total_cost; double tuples = mpath->subpath->rows; - double calls = mpath->calls; + Cardinality est_calls = mpath->est_calls; int width = mpath->subpath->pathtarget->width; double hash_mem_bytes; double est_entry_bytes; - double est_cache_entries; - double ndistinct; + Cardinality est_cache_entries; + Cardinality ndistinct; double evict_ratio; double hit_ratio; Cost startup_cost; @@ -2604,7 +2604,7 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath, est_cache_entries = floor(hash_mem_bytes / est_entry_bytes); /* estimate on the distinct number of parameter values */ - ndistinct = estimate_num_groups(root, mpath->param_exprs, calls, NULL, + ndistinct = estimate_num_groups(root, mpath->param_exprs, est_calls, NULL, &estinfo); /* @@ -2616,7 +2616,10 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath, * certainly mean a MemoizePath will never survive add_path(). */ if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0) - ndistinct = calls; + ndistinct = est_calls; + + /* Remember the ndistinct estimate for EXPLAIN */ + mpath->est_unique_keys = ndistinct; /* * Since we've already estimated the maximum number of entries we can @@ -2644,9 +2647,12 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath, * must look at how many scans are estimated in total for this node and * how many of those scans we expect to get a cache hit. */ - hit_ratio = ((calls - ndistinct) / calls) * + hit_ratio = ((est_calls - ndistinct) / est_calls) * (est_cache_entries / Max(ndistinct, est_cache_entries)); + /* Remember the hit ratio estimate for EXPLAIN */ + mpath->est_hit_ratio = hit_ratio; + Assert(hit_ratio >= 0 && hit_ratio <= 1.0); /* @@ -3960,10 +3966,12 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path, * when we should not. Can we do better without expensive selectivity * computations? * - * The whole issue is moot if we are working from a unique-ified outer - * input, or if we know we don't need to mark/restore at all. + * The whole issue is moot if we know we don't need to mark/restore at + * all, or if we are working from a unique-ified outer input. */ - if (IsA(outer_path, UniquePath) || path->skip_mark_restore) + if (path->skip_mark_restore || + RELATION_WAS_MADE_UNIQUE(outer_path->parent, extra->sjinfo, + path->jpath.jointype)) rescannedtuples = 0; else { @@ -4358,7 +4366,8 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path, * because we avoid contaminating the cache with a value that's wrong for * non-unique-ified paths. */ - if (IsA(inner_path, UniquePath)) + if (RELATION_WAS_MADE_UNIQUE(inner_path->parent, extra->sjinfo, + path->jpath.jointype)) { innerbucketsize = 1.0 / virtualbuckets; innermcvfreq = 0.0; @@ -4561,10 +4570,24 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan) { QualCost sp_cost; - /* Figure any cost for evaluating the testexpr */ + /* + * Figure any cost for evaluating the testexpr. + * + * Usually, SubPlan nodes are built very early, before we have constructed + * any RelOptInfos for the parent query level, which means the parent root + * does not yet contain enough information to safely consult statistics. + * Therefore, we pass root as NULL here. cost_qual_eval() is already + * well-equipped to handle a NULL root. + * + * One exception is SubPlan nodes built for the initplans of MIN/MAX + * aggregates from indexes (cf. SS_make_initplan_from_plan). In this + * case, having a NULL root is safe because testexpr will be NULL. 
+ * Besides, an initplan will by definition not consult anything from the + * parent plan. + */ cost_qual_eval(&sp_cost, make_ands_implicit((Expr *) subplan->testexpr), - root); + NULL); if (subplan->useHashTable) { diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c index 601354ea3e056..4f5c98f0091d1 100644 --- a/src/backend/optimizer/path/indxpath.c +++ b/src/backend/optimizer/path/indxpath.c @@ -4142,47 +4142,26 @@ ec_member_matches_indexcol(PlannerInfo *root, RelOptInfo *rel, * a set of equality conditions, because the conditions constrain all * columns of some unique index. * - * The conditions can be represented in either or both of two ways: - * 1. A list of RestrictInfo nodes, where the caller has already determined - * that each condition is a mergejoinable equality with an expression in - * this relation on one side, and an expression not involving this relation - * on the other. The transient outer_is_left flag is used to identify which - * side we should look at: left side if outer_is_left is false, right side - * if it is true. - * 2. A list of expressions in this relation, and a corresponding list of - * equality operators. The caller must have already checked that the operators - * represent equality. (Note: the operators could be cross-type; the - * expressions should correspond to their RHS inputs.) + * The conditions are provided as a list of RestrictInfo nodes, where the + * caller has already determined that each condition is a mergejoinable + * equality with an expression in this relation on one side, and an + * expression not involving this relation on the other. The transient + * outer_is_left flag is used to identify which side we should look at: + * left side if outer_is_left is false, right side if it is true. * * The caller need only supply equality conditions arising from joins; * this routine automatically adds in any usable baserestrictinfo clauses. * (Note that the passed-in restrictlist will be destructively modified!) + * + * If extra_clauses isn't NULL, return baserestrictinfo clauses which were used + * to derive uniqueness. */ bool relation_has_unique_index_for(PlannerInfo *root, RelOptInfo *rel, - List *restrictlist, - List *exprlist, List *oprlist) -{ - return relation_has_unique_index_ext(root, rel, restrictlist, - exprlist, oprlist, NULL); -} - -/* - * relation_has_unique_index_ext - * Same as relation_has_unique_index_for(), but supports extra_clauses - * parameter. If extra_clauses isn't NULL, return baserestrictinfo clauses - * which were used to derive uniqueness. - */ -bool -relation_has_unique_index_ext(PlannerInfo *root, RelOptInfo *rel, - List *restrictlist, - List *exprlist, List *oprlist, - List **extra_clauses) + List *restrictlist, List **extra_clauses) { ListCell *ic; - Assert(list_length(exprlist) == list_length(oprlist)); - /* Short-circuit if no indexes... */ if (rel->indexlist == NIL) return false; @@ -4225,7 +4204,7 @@ relation_has_unique_index_ext(PlannerInfo *root, RelOptInfo *rel, } /* Short-circuit the easy case */ - if (restrictlist == NIL && exprlist == NIL) + if (restrictlist == NIL) return false; /* Examine each index of the relation ... */ @@ -4247,14 +4226,12 @@ relation_has_unique_index_ext(PlannerInfo *root, RelOptInfo *rel, continue; /* - * Try to find each index column in the lists of conditions. This is + * Try to find each index column in the list of conditions. This is * O(N^2) or worse, but we expect all the lists to be short. 
*/ for (c = 0; c < ind->nkeycolumns; c++) { - bool matched = false; ListCell *lc; - ListCell *lc2; foreach(lc, restrictlist) { @@ -4284,8 +4261,6 @@ relation_has_unique_index_ext(PlannerInfo *root, RelOptInfo *rel, if (match_index_to_operand(rexpr, c, ind)) { - matched = true; /* column is unique */ - if (bms_membership(rinfo->clause_relids) == BMS_SINGLETON) { MemoryContext oldMemCtx = @@ -4303,43 +4278,11 @@ relation_has_unique_index_ext(PlannerInfo *root, RelOptInfo *rel, MemoryContextSwitchTo(oldMemCtx); } - break; + break; /* found a match; column is unique */ } } - if (matched) - continue; - - forboth(lc, exprlist, lc2, oprlist) - { - Node *expr = (Node *) lfirst(lc); - Oid opr = lfirst_oid(lc2); - - /* See if the expression matches the index key */ - if (!match_index_to_operand(expr, c, ind)) - continue; - - /* - * The equality operator must be a member of the index - * opfamily, else it is not asserting the right kind of - * equality behavior for this index. We assume the caller - * determined it is an equality operator, so we don't need to - * check any more tightly than this. - */ - if (!op_in_opfamily(opr, ind->opfamily[c])) - continue; - - /* - * XXX at some point we may need to check collations here too. - * For the moment we assume all collations reduce to the same - * notion of equality. - */ - - matched = true; /* column is unique */ - break; - } - - if (!matched) + if (lc == NULL) break; /* no match; this index doesn't help us */ } diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c index ebedc5574ca9c..3b9407eb2eb79 100644 --- a/src/backend/optimizer/path/joinpath.c +++ b/src/backend/optimizer/path/joinpath.c @@ -112,12 +112,12 @@ static void generate_mergejoin_paths(PlannerInfo *root, * "flipped around" if we are considering joining the rels in the opposite * direction from what's indicated in sjinfo. * - * Also, this routine and others in this module accept the special JoinTypes - * JOIN_UNIQUE_OUTER and JOIN_UNIQUE_INNER to indicate that we should - * unique-ify the outer or inner relation and then apply a regular inner - * join. These values are not allowed to propagate outside this module, - * however. Path cost estimation code may need to recognize that it's - * dealing with such a case --- the combination of nominal jointype INNER + * Also, this routine accepts the special JoinTypes JOIN_UNIQUE_OUTER and + * JOIN_UNIQUE_INNER to indicate that the outer or inner relation has been + * unique-ified and a regular inner join should then be applied. These values + * are not allowed to propagate outside this routine, however. Path cost + * estimation code, as well as match_unsorted_outer, may need to recognize that + * it's dealing with such a case --- the combination of nominal jointype INNER * with sjinfo->jointype == JOIN_SEMI indicates that. */ void @@ -129,6 +129,7 @@ add_paths_to_joinrel(PlannerInfo *root, SpecialJoinInfo *sjinfo, List *restrictlist) { + JoinType save_jointype = jointype; JoinPathExtraData extra; bool mergejoin_allowed = true; ListCell *lc; @@ -165,10 +166,10 @@ add_paths_to_joinrel(PlannerInfo *root, * reduce_unique_semijoins would've simplified it), so there's no point in * calling innerrel_is_unique. However, if the LHS covers all of the * semijoin's min_lefthand, then it's appropriate to set inner_unique - * because the path produced by create_unique_path will be unique relative - * to the LHS. (If we have an LHS that's only part of the min_lefthand, - * that is *not* true.) 
For JOIN_UNIQUE_OUTER, pass JOIN_INNER to avoid - * letting that value escape this module. + * because the unique relation produced by create_unique_paths will be + * unique relative to the LHS. (If we have an LHS that's only part of the + * min_lefthand, that is *not* true.) For JOIN_UNIQUE_OUTER, pass + * JOIN_INNER to avoid letting that value escape this module. */ switch (jointype) { @@ -199,6 +200,13 @@ add_paths_to_joinrel(PlannerInfo *root, break; } + /* + * If the outer or inner relation has been unique-ified, handle as a plain + * inner join. + */ + if (jointype == JOIN_UNIQUE_OUTER || jointype == JOIN_UNIQUE_INNER) + jointype = JOIN_INNER; + /* * Find potential mergejoin clauses. We can skip this if we are not * interested in doing a mergejoin. However, mergejoin may be our only @@ -329,7 +337,7 @@ add_paths_to_joinrel(PlannerInfo *root, joinrel->fdwroutine->GetForeignJoinPaths) joinrel->fdwroutine->GetForeignJoinPaths(root, joinrel, outerrel, innerrel, - jointype, &extra); + save_jointype, &extra); /* * 6. Finally, give extensions a chance to manipulate the path list. They @@ -339,7 +347,7 @@ add_paths_to_joinrel(PlannerInfo *root, */ if (set_join_pathlist_hook) set_join_pathlist_hook(root, joinrel, outerrel, innerrel, - jointype, &extra); + save_jointype, &extra); } /* @@ -1364,7 +1372,6 @@ sort_inner_and_outer(PlannerInfo *root, JoinType jointype, JoinPathExtraData *extra) { - JoinType save_jointype = jointype; Path *outer_path; Path *inner_path; Path *cheapest_partial_outer = NULL; @@ -1402,38 +1409,16 @@ sort_inner_and_outer(PlannerInfo *root, PATH_PARAM_BY_REL(inner_path, outerrel)) return; - /* - * If unique-ification is requested, do it and then handle as a plain - * inner join. - */ - if (jointype == JOIN_UNIQUE_OUTER) - { - outer_path = (Path *) create_unique_path(root, outerrel, - outer_path, extra->sjinfo); - Assert(outer_path); - jointype = JOIN_INNER; - } - else if (jointype == JOIN_UNIQUE_INNER) - { - inner_path = (Path *) create_unique_path(root, innerrel, - inner_path, extra->sjinfo); - Assert(inner_path); - jointype = JOIN_INNER; - } - /* * If the joinrel is parallel-safe, we may be able to consider a partial - * merge join. However, we can't handle JOIN_UNIQUE_OUTER, because the - * outer path will be partial, and therefore we won't be able to properly - * guarantee uniqueness. Similarly, we can't handle JOIN_FULL, JOIN_RIGHT - * and JOIN_RIGHT_ANTI, because they can produce false null extended rows. + * merge join. However, we can't handle JOIN_FULL, JOIN_RIGHT and + * JOIN_RIGHT_ANTI, because they can produce false null extended rows. * Also, the resulting path must not be parameterized. 
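*
* (With a partial outer path, each worker sees only some outer rows, so
* an inner row can look unmatched to one worker while being matched in
* another, leading to spurious null-extended rows.)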
*/ if (joinrel->consider_parallel && - save_jointype != JOIN_UNIQUE_OUTER && - save_jointype != JOIN_FULL && - save_jointype != JOIN_RIGHT && - save_jointype != JOIN_RIGHT_ANTI && + jointype != JOIN_FULL && + jointype != JOIN_RIGHT && + jointype != JOIN_RIGHT_ANTI && outerrel->partial_pathlist != NIL && bms_is_empty(joinrel->lateral_relids)) { @@ -1441,7 +1426,7 @@ sort_inner_and_outer(PlannerInfo *root, if (inner_path->parallel_safe) cheapest_safe_inner = inner_path; - else if (save_jointype != JOIN_UNIQUE_INNER) + else cheapest_safe_inner = get_cheapest_parallel_safe_total_inner(innerrel->pathlist); } @@ -1580,13 +1565,9 @@ generate_mergejoin_paths(PlannerInfo *root, List *trialsortkeys; Path *cheapest_startup_inner; Path *cheapest_total_inner; - JoinType save_jointype = jointype; int num_sortkeys; int sortkeycnt; - if (jointype == JOIN_UNIQUE_OUTER || jointype == JOIN_UNIQUE_INNER) - jointype = JOIN_INNER; - /* Look for useful mergeclauses (if any) */ mergeclauses = find_mergeclauses_for_outer_pathkeys(root, @@ -1636,10 +1617,6 @@ generate_mergejoin_paths(PlannerInfo *root, extra, is_partial); - /* Can't do anything else if inner path needs to be unique'd */ - if (save_jointype == JOIN_UNIQUE_INNER) - return; - /* * Look for presorted inner paths that satisfy the innersortkey list --- * or any truncation thereof, if we are allowed to build a mergejoin using @@ -1819,7 +1796,6 @@ match_unsorted_outer(PlannerInfo *root, JoinType jointype, JoinPathExtraData *extra) { - JoinType save_jointype = jointype; bool nestjoinOK; bool useallclauses; Path *inner_cheapest_total = innerrel->cheapest_total_path; @@ -1855,12 +1831,6 @@ match_unsorted_outer(PlannerInfo *root, nestjoinOK = false; useallclauses = true; break; - case JOIN_UNIQUE_OUTER: - case JOIN_UNIQUE_INNER: - jointype = JOIN_INNER; - nestjoinOK = true; - useallclauses = false; - break; default: elog(ERROR, "unrecognized join type: %d", (int) jointype); @@ -1873,24 +1843,20 @@ match_unsorted_outer(PlannerInfo *root, * If inner_cheapest_total is parameterized by the outer rel, ignore it; * we will consider it below as a member of cheapest_parameterized_paths, * but the other possibilities considered in this routine aren't usable. + * + * Furthermore, if the inner side is a unique-ified relation, we cannot + * generate any valid paths here, because the inner rel's dependency on + * the outer rel makes unique-ification meaningless. */ if (PATH_PARAM_BY_REL(inner_cheapest_total, outerrel)) + { inner_cheapest_total = NULL; - /* - * If we need to unique-ify the inner path, we will consider only the - * cheapest-total inner. - */ - if (save_jointype == JOIN_UNIQUE_INNER) - { - /* No way to do this with an inner path parameterized by outer rel */ - if (inner_cheapest_total == NULL) + if (RELATION_WAS_MADE_UNIQUE(innerrel, extra->sjinfo, jointype)) return; - inner_cheapest_total = (Path *) - create_unique_path(root, innerrel, inner_cheapest_total, extra->sjinfo); - Assert(inner_cheapest_total); } - else if (nestjoinOK) + + if (nestjoinOK) { /* * Consider materializing the cheapest inner path, unless @@ -1914,20 +1880,6 @@ match_unsorted_outer(PlannerInfo *root, if (PATH_PARAM_BY_REL(outerpath, innerrel)) continue; - /* - * If we need to unique-ify the outer path, it's pointless to consider - * any but the cheapest outer. (XXX we don't consider parameterized - * outers, nor inners, for unique-ified cases. Should we?) 
- */ - if (save_jointype == JOIN_UNIQUE_OUTER) - { - if (outerpath != outerrel->cheapest_total_path) - continue; - outerpath = (Path *) create_unique_path(root, outerrel, - outerpath, extra->sjinfo); - Assert(outerpath); - } - /* * The result will have this sort order (even if it is implemented as * a nestloop, and even if some of the mergeclauses are implemented by @@ -1936,21 +1888,7 @@ match_unsorted_outer(PlannerInfo *root, merge_pathkeys = build_join_pathkeys(root, joinrel, jointype, outerpath->pathkeys); - if (save_jointype == JOIN_UNIQUE_INNER) - { - /* - * Consider nestloop join, but only with the unique-ified cheapest - * inner path - */ - try_nestloop_path(root, - joinrel, - outerpath, - inner_cheapest_total, - merge_pathkeys, - jointype, - extra); - } - else if (nestjoinOK) + if (nestjoinOK) { /* * Consider nestloop joins using this outer path and various @@ -2001,17 +1939,13 @@ match_unsorted_outer(PlannerInfo *root, extra); } - /* Can't do anything else if outer path needs to be unique'd */ - if (save_jointype == JOIN_UNIQUE_OUTER) - continue; - /* Can't do anything else if inner rel is parameterized by outer */ if (inner_cheapest_total == NULL) continue; /* Generate merge join paths */ generate_mergejoin_paths(root, joinrel, innerrel, outerpath, - save_jointype, extra, useallclauses, + jointype, extra, useallclauses, inner_cheapest_total, merge_pathkeys, false); } @@ -2019,41 +1953,35 @@ match_unsorted_outer(PlannerInfo *root, /* * Consider partial nestloop and mergejoin plan if outerrel has any * partial path and the joinrel is parallel-safe. However, we can't - * handle JOIN_UNIQUE_OUTER, because the outer path will be partial, and - * therefore we won't be able to properly guarantee uniqueness. Nor can - * we handle joins needing lateral rels, since partial paths must not be - * parameterized. Similarly, we can't handle JOIN_FULL, JOIN_RIGHT and + * handle joins needing lateral rels, since partial paths must not be + * parameterized. Similarly, we can't handle JOIN_FULL, JOIN_RIGHT and * JOIN_RIGHT_ANTI, because they can produce false null extended rows. */ if (joinrel->consider_parallel && - save_jointype != JOIN_UNIQUE_OUTER && - save_jointype != JOIN_FULL && - save_jointype != JOIN_RIGHT && - save_jointype != JOIN_RIGHT_ANTI && + jointype != JOIN_FULL && + jointype != JOIN_RIGHT && + jointype != JOIN_RIGHT_ANTI && outerrel->partial_pathlist != NIL && bms_is_empty(joinrel->lateral_relids)) { if (nestjoinOK) consider_parallel_nestloop(root, joinrel, outerrel, innerrel, - save_jointype, extra); + jointype, extra); /* * If inner_cheapest_total is NULL or non parallel-safe then find the - * cheapest total parallel safe path. If doing JOIN_UNIQUE_INNER, we - * can't use any alternative inner path. + * cheapest total parallel safe path. 
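+ * (That is, scan innerrel->pathlist for the cheapest unparameterized
+ * path that is marked parallel_safe.)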
*/ if (inner_cheapest_total == NULL || !inner_cheapest_total->parallel_safe) { - if (save_jointype == JOIN_UNIQUE_INNER) - return; - - inner_cheapest_total = get_cheapest_parallel_safe_total_inner(innerrel->pathlist); + inner_cheapest_total = + get_cheapest_parallel_safe_total_inner(innerrel->pathlist); } if (inner_cheapest_total) consider_parallel_mergejoin(root, joinrel, outerrel, innerrel, - save_jointype, extra, + jointype, extra, inner_cheapest_total); } } @@ -2118,24 +2046,17 @@ consider_parallel_nestloop(PlannerInfo *root, JoinType jointype, JoinPathExtraData *extra) { - JoinType save_jointype = jointype; Path *inner_cheapest_total = innerrel->cheapest_total_path; Path *matpath = NULL; ListCell *lc1; - if (jointype == JOIN_UNIQUE_INNER) - jointype = JOIN_INNER; - /* - * Consider materializing the cheapest inner path, unless: 1) we're doing - * JOIN_UNIQUE_INNER, because in this case we have to unique-ify the - * cheapest inner path, 2) enable_material is off, 3) the cheapest inner - * path is not parallel-safe, 4) the cheapest inner path is parameterized - * by the outer rel, or 5) the cheapest inner path materializes its output - * anyway. + * Consider materializing the cheapest inner path, unless: 1) + * enable_material is off, 2) the cheapest inner path is not + * parallel-safe, 3) the cheapest inner path is parameterized by the outer + * rel, or 4) the cheapest inner path materializes its output anyway. */ - if (save_jointype != JOIN_UNIQUE_INNER && - enable_material && inner_cheapest_total->parallel_safe && + if (enable_material && inner_cheapest_total->parallel_safe && !PATH_PARAM_BY_REL(inner_cheapest_total, outerrel) && !ExecMaterializesOutput(inner_cheapest_total->pathtype)) { @@ -2169,23 +2090,6 @@ consider_parallel_nestloop(PlannerInfo *root, if (!innerpath->parallel_safe) continue; - /* - * If we're doing JOIN_UNIQUE_INNER, we can only use the inner's - * cheapest_total_path, and we have to unique-ify it. (We might - * be able to relax this to allow other safe, unparameterized - * inner paths, but right now create_unique_path is not on board - * with that.) 
- */ - if (save_jointype == JOIN_UNIQUE_INNER) - { - if (innerpath != innerrel->cheapest_total_path) - continue; - innerpath = (Path *) create_unique_path(root, innerrel, - innerpath, - extra->sjinfo); - Assert(innerpath); - } - try_partial_nestloop_path(root, joinrel, outerpath, innerpath, pathkeys, jointype, extra); @@ -2227,7 +2131,6 @@ hash_inner_and_outer(PlannerInfo *root, JoinType jointype, JoinPathExtraData *extra) { - JoinType save_jointype = jointype; bool isouterjoin = IS_OUTER_JOIN(jointype); List *hashclauses; ListCell *l; @@ -2290,6 +2193,8 @@ hash_inner_and_outer(PlannerInfo *root, Path *cheapest_startup_outer = outerrel->cheapest_startup_path; Path *cheapest_total_outer = outerrel->cheapest_total_path; Path *cheapest_total_inner = innerrel->cheapest_total_path; + ListCell *lc1; + ListCell *lc2; /* * If either cheapest-total path is parameterized by the other rel, we @@ -2301,114 +2206,64 @@ hash_inner_and_outer(PlannerInfo *root, PATH_PARAM_BY_REL(cheapest_total_inner, outerrel)) return; - /* Unique-ify if need be; we ignore parameterized possibilities */ - if (jointype == JOIN_UNIQUE_OUTER) - { - cheapest_total_outer = (Path *) - create_unique_path(root, outerrel, - cheapest_total_outer, extra->sjinfo); - Assert(cheapest_total_outer); - jointype = JOIN_INNER; - try_hashjoin_path(root, - joinrel, - cheapest_total_outer, - cheapest_total_inner, - hashclauses, - jointype, - extra); - /* no possibility of cheap startup here */ - } - else if (jointype == JOIN_UNIQUE_INNER) - { - cheapest_total_inner = (Path *) - create_unique_path(root, innerrel, - cheapest_total_inner, extra->sjinfo); - Assert(cheapest_total_inner); - jointype = JOIN_INNER; + /* + * Consider the cheapest startup outer together with the cheapest + * total inner, and then consider pairings of cheapest-total paths + * including parameterized ones. There is no use in generating + * parameterized paths on the basis of possibly cheap startup cost, so + * this is sufficient. + */ + if (cheapest_startup_outer != NULL) try_hashjoin_path(root, joinrel, - cheapest_total_outer, + cheapest_startup_outer, cheapest_total_inner, hashclauses, jointype, extra); - if (cheapest_startup_outer != NULL && - cheapest_startup_outer != cheapest_total_outer) - try_hashjoin_path(root, - joinrel, - cheapest_startup_outer, - cheapest_total_inner, - hashclauses, - jointype, - extra); - } - else + + foreach(lc1, outerrel->cheapest_parameterized_paths) { + Path *outerpath = (Path *) lfirst(lc1); + /* - * For other jointypes, we consider the cheapest startup outer - * together with the cheapest total inner, and then consider - * pairings of cheapest-total paths including parameterized ones. - * There is no use in generating parameterized paths on the basis - * of possibly cheap startup cost, so this is sufficient. + * We cannot use an outer path that is parameterized by the inner + * rel. */ - ListCell *lc1; - ListCell *lc2; - - if (cheapest_startup_outer != NULL) - try_hashjoin_path(root, - joinrel, - cheapest_startup_outer, - cheapest_total_inner, - hashclauses, - jointype, - extra); + if (PATH_PARAM_BY_REL(outerpath, innerrel)) + continue; - foreach(lc1, outerrel->cheapest_parameterized_paths) + foreach(lc2, innerrel->cheapest_parameterized_paths) { - Path *outerpath = (Path *) lfirst(lc1); + Path *innerpath = (Path *) lfirst(lc2); /* - * We cannot use an outer path that is parameterized by the - * inner rel. + * We cannot use an inner path that is parameterized by the + * outer rel, either. 
*/ - if (PATH_PARAM_BY_REL(outerpath, innerrel)) + if (PATH_PARAM_BY_REL(innerpath, outerrel)) continue; - foreach(lc2, innerrel->cheapest_parameterized_paths) - { - Path *innerpath = (Path *) lfirst(lc2); - - /* - * We cannot use an inner path that is parameterized by - * the outer rel, either. - */ - if (PATH_PARAM_BY_REL(innerpath, outerrel)) - continue; + if (outerpath == cheapest_startup_outer && + innerpath == cheapest_total_inner) + continue; /* already tried it */ - if (outerpath == cheapest_startup_outer && - innerpath == cheapest_total_inner) - continue; /* already tried it */ - - try_hashjoin_path(root, - joinrel, - outerpath, - innerpath, - hashclauses, - jointype, - extra); - } + try_hashjoin_path(root, + joinrel, + outerpath, + innerpath, + hashclauses, + jointype, + extra); } } /* * If the joinrel is parallel-safe, we may be able to consider a - * partial hash join. However, we can't handle JOIN_UNIQUE_OUTER, - * because the outer path will be partial, and therefore we won't be - * able to properly guarantee uniqueness. Also, the resulting path - * must not be parameterized. + * partial hash join. However, the resulting path must not be + * parameterized. */ if (joinrel->consider_parallel && - save_jointype != JOIN_UNIQUE_OUTER && outerrel->partial_pathlist != NIL && bms_is_empty(joinrel->lateral_relids)) { @@ -2421,11 +2276,9 @@ hash_inner_and_outer(PlannerInfo *root, /* * Can we use a partial inner plan too, so that we can build a - * shared hash table in parallel? We can't handle - * JOIN_UNIQUE_INNER because we can't guarantee uniqueness. + * shared hash table in parallel? */ if (innerrel->partial_pathlist != NIL && - save_jointype != JOIN_UNIQUE_INNER && enable_parallel_hash) { cheapest_partial_inner = @@ -2441,19 +2294,18 @@ hash_inner_and_outer(PlannerInfo *root, * Normally, given that the joinrel is parallel-safe, the cheapest * total inner path will also be parallel-safe, but if not, we'll * have to search for the cheapest safe, unparameterized inner - * path. If doing JOIN_UNIQUE_INNER, we can't use any alternative - * inner path. If full, right, right-semi or right-anti join, we - * can't use parallelism (building the hash table in each backend) + * path. If full, right, right-semi or right-anti join, we can't + * use parallelism (building the hash table in each backend) * because no one process has all the match bits. 
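* (Each backend would build its own copy of the hash table and record
* match flags only for its own probes, so no single process could tell
* which inner rows remained unmatched overall.)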
*/ - if (save_jointype == JOIN_FULL || - save_jointype == JOIN_RIGHT || - save_jointype == JOIN_RIGHT_SEMI || - save_jointype == JOIN_RIGHT_ANTI) + if (jointype == JOIN_FULL || + jointype == JOIN_RIGHT || + jointype == JOIN_RIGHT_SEMI || + jointype == JOIN_RIGHT_ANTI) cheapest_safe_inner = NULL; else if (cheapest_total_inner->parallel_safe) cheapest_safe_inner = cheapest_total_inner; - else if (save_jointype != JOIN_UNIQUE_INNER) + else cheapest_safe_inner = get_cheapest_parallel_safe_total_inner(innerrel->pathlist); diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c index aad41b940091d..535248aa52516 100644 --- a/src/backend/optimizer/path/joinrels.c +++ b/src/backend/optimizer/path/joinrels.c @@ -19,6 +19,7 @@ #include "optimizer/joininfo.h" #include "optimizer/pathnode.h" #include "optimizer/paths.h" +#include "optimizer/planner.h" #include "partitioning/partbounds.h" #include "utils/memutils.h" @@ -444,8 +445,7 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, } else if (sjinfo->jointype == JOIN_SEMI && bms_equal(sjinfo->syn_righthand, rel2->relids) && - create_unique_path(root, rel2, rel2->cheapest_total_path, - sjinfo) != NULL) + create_unique_paths(root, rel2, sjinfo) != NULL) { /*---------- * For a semijoin, we can join the RHS to anything else by @@ -477,8 +477,7 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, } else if (sjinfo->jointype == JOIN_SEMI && bms_equal(sjinfo->syn_righthand, rel1->relids) && - create_unique_path(root, rel1, rel1->cheapest_total_path, - sjinfo) != NULL) + create_unique_paths(root, rel1, sjinfo) != NULL) { /* Reversed semijoin case */ if (match_sjinfo) @@ -886,6 +885,8 @@ populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, RelOptInfo *joinrel, SpecialJoinInfo *sjinfo, List *restrictlist) { + RelOptInfo *unique_rel2; + /* * Consider paths using each rel as both outer and inner. Depending on * the join type, a provably empty outer or inner rel might mean the join @@ -991,14 +992,13 @@ populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1, /* * If we know how to unique-ify the RHS and one input rel is * exactly the RHS (not a superset) we can consider unique-ifying - * it and then doing a regular join. (The create_unique_path + * it and then doing a regular join. (The create_unique_paths * check here is probably redundant with what join_is_legal did, * but if so the check is cheap because it's cached. So test * anyway to be sure.) 
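*
* For example, in
*		SELECT * FROM a WHERE a.x IN (SELECT b.y FROM b);
* the semijoin's RHS is {b}: we can build a relation that delivers the
* distinct values of b.y and join it to "a" with a plain inner join.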
*/ if (bms_equal(sjinfo->syn_righthand, rel2->relids) && - create_unique_path(root, rel2, rel2->cheapest_total_path, - sjinfo) != NULL) + (unique_rel2 = create_unique_paths(root, rel2, sjinfo)) != NULL) { if (is_dummy_rel(rel1) || is_dummy_rel(rel2) || restriction_is_constant_false(restrictlist, joinrel, false)) @@ -1006,10 +1006,10 @@ populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1, mark_dummy_rel(joinrel); break; } - add_paths_to_joinrel(root, joinrel, rel1, rel2, + add_paths_to_joinrel(root, joinrel, rel1, unique_rel2, JOIN_UNIQUE_INNER, sjinfo, restrictlist); - add_paths_to_joinrel(root, joinrel, rel2, rel1, + add_paths_to_joinrel(root, joinrel, unique_rel2, rel1, JOIN_UNIQUE_OUTER, sjinfo, restrictlist); } diff --git a/src/backend/optimizer/plan/analyzejoins.c b/src/backend/optimizer/plan/analyzejoins.c index 4d55c2ea59162..2a3dea88a94fb 100644 --- a/src/backend/optimizer/plan/analyzejoins.c +++ b/src/backend/optimizer/plan/analyzejoins.c @@ -631,6 +631,7 @@ remove_leftjoinrel_from_query(PlannerInfo *root, int relid, * remove_join_clause_from_rels will touch it.) */ root->simple_rel_array[relid] = NULL; + root->simple_rte_array[relid] = NULL; /* And nuke the RelOptInfo, just in case there's another access path */ pfree(rel); @@ -990,11 +991,10 @@ rel_is_distinct_for(PlannerInfo *root, RelOptInfo *rel, List *clause_list, { /* * Examine the indexes to see if we have a matching unique index. - * relation_has_unique_index_ext automatically adds any usable + * relation_has_unique_index_for automatically adds any usable * restriction clauses for the rel, so we needn't do that here. */ - if (relation_has_unique_index_ext(root, rel, clause_list, NIL, NIL, - extra_clauses)) + if (relation_has_unique_index_for(root, rel, clause_list, extra_clauses)) return true; } else if (rel->rtekind == RTE_SUBQUERY) @@ -1979,10 +1979,12 @@ remove_self_join_rel(PlannerInfo *root, PlanRowMark *kmark, PlanRowMark *rmark, * remove_join_clause_from_rels will touch it.) */ root->simple_rel_array[toRemove->relid] = NULL; + root->simple_rte_array[toRemove->relid] = NULL; /* And nuke the RelOptInfo, just in case there's another access path. */ pfree(toRemove); + /* * Now repeat construction of attr_needed bits coming from all other * sources. @@ -2142,21 +2144,21 @@ remove_self_joins_one_group(PlannerInfo *root, Relids relids) while ((r = bms_next_member(relids, r)) > 0) { - RelOptInfo *inner = root->simple_rel_array[r]; + RelOptInfo *rrel = root->simple_rel_array[r]; k = r; while ((k = bms_next_member(relids, k)) > 0) { Relids joinrelids = NULL; - RelOptInfo *outer = root->simple_rel_array[k]; + RelOptInfo *krel = root->simple_rel_array[k]; List *restrictlist; List *selfjoinquals; List *otherjoinquals; ListCell *lc; bool jinfo_check = true; - PlanRowMark *omark = NULL; - PlanRowMark *imark = NULL; + PlanRowMark *kmark = NULL; + PlanRowMark *rmark = NULL; List *uclauses = NIL; /* A sanity check: the relations have the same Oid. 
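* (Self-join removal groups base rels by the Oid of the table they
* reference, so a mismatch here would indicate a bug in how the group
* was built.)
*/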
*/ @@ -2194,21 +2196,21 @@ remove_self_joins_one_group(PlannerInfo *root, Relids relids) { PlanRowMark *rowMark = (PlanRowMark *) lfirst(lc); - if (rowMark->rti == k) + if (rowMark->rti == r) { - Assert(imark == NULL); - imark = rowMark; + Assert(rmark == NULL); + rmark = rowMark; } - else if (rowMark->rti == r) + else if (rowMark->rti == k) { - Assert(omark == NULL); - omark = rowMark; + Assert(kmark == NULL); + kmark = rowMark; } - if (omark && imark) + if (kmark && rmark) break; } - if (omark && imark && omark->markType != imark->markType) + if (kmark && rmark && kmark->markType != rmark->markType) continue; /* @@ -2229,8 +2231,8 @@ remove_self_joins_one_group(PlannerInfo *root, Relids relids) * build_joinrel_restrictlist() routine. */ restrictlist = generate_join_implied_equalities(root, joinrelids, - inner->relids, - outer, NULL); + rrel->relids, + krel, NULL); if (restrictlist == NIL) continue; @@ -2240,7 +2242,7 @@ remove_self_joins_one_group(PlannerInfo *root, Relids relids) * otherjoinquals. */ split_selfjoin_quals(root, restrictlist, &selfjoinquals, - &otherjoinquals, inner->relid, outer->relid); + &otherjoinquals, rrel->relid, krel->relid); Assert(list_length(restrictlist) == (list_length(selfjoinquals) + list_length(otherjoinquals))); @@ -2251,17 +2253,17 @@ remove_self_joins_one_group(PlannerInfo *root, Relids relids) * degenerate case works only if both sides have the same clause. * So doesn't matter which side to add. */ - selfjoinquals = list_concat(selfjoinquals, outer->baserestrictinfo); + selfjoinquals = list_concat(selfjoinquals, krel->baserestrictinfo); /* - * Determine if the inner table can duplicate outer rows. We must - * bypass the unique rel cache here since we're possibly using a - * subset of join quals. We can use 'force_cache' == true when all - * join quals are self-join quals. Otherwise, we could end up - * putting false negatives in the cache. + * Determine if the rrel can duplicate outer rows. We must bypass + * the unique rel cache here since we're possibly using a subset + * of join quals. We can use 'force_cache' == true when all join + * quals are self-join quals. Otherwise, we could end up putting + * false negatives in the cache. */ - if (!innerrel_is_unique_ext(root, joinrelids, inner->relids, - outer, JOIN_INNER, selfjoinquals, + if (!innerrel_is_unique_ext(root, joinrelids, rrel->relids, + krel, JOIN_INNER, selfjoinquals, list_length(otherjoinquals) == 0, &uclauses)) continue; @@ -2277,14 +2279,14 @@ remove_self_joins_one_group(PlannerInfo *root, Relids relids) * expressions, or we won't match the same row on each side of the * join. */ - if (!match_unique_clauses(root, inner, uclauses, outer->relid)) + if (!match_unique_clauses(root, rrel, uclauses, krel->relid)) continue; /* - * We can remove either relation, so remove the inner one in order - * to simplify this loop. + * Remove rrel ReloptInfo from the planner structures and the + * corresponding row mark. 
*/ - remove_self_join_rel(root, omark, imark, outer, inner, restrictlist); + remove_self_join_rel(root, kmark, rmark, krel, rrel, restrictlist); result = bms_add_member(result, r); diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index 8a9f1d7a943a8..6791cbeb416ed 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -95,8 +95,6 @@ static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path int flags); static Memoize *create_memoize_plan(PlannerInfo *root, MemoizePath *best_path, int flags); -static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path, - int flags); static Gather *create_gather_plan(PlannerInfo *root, GatherPath *best_path); static Plan *create_projection_plan(PlannerInfo *root, ProjectionPath *best_path, @@ -106,8 +104,7 @@ static Sort *create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags) static IncrementalSort *create_incrementalsort_plan(PlannerInfo *root, IncrementalSortPath *best_path, int flags); static Group *create_group_plan(PlannerInfo *root, GroupPath *best_path); -static Unique *create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path, - int flags); +static Unique *create_unique_plan(PlannerInfo *root, UniquePath *best_path, int flags); static Agg *create_agg_plan(PlannerInfo *root, AggPath *best_path); static Plan *create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path); static Result *create_minmaxagg_plan(PlannerInfo *root, MinMaxAggPath *best_path); @@ -284,7 +281,10 @@ static Material *make_material(Plan *lefttree); static Memoize *make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations, List *param_exprs, bool singlerow, bool binary_mode, - uint32 est_entries, Bitmapset *keyparamids); + uint32 est_entries, Bitmapset *keyparamids, + Cardinality est_calls, + Cardinality est_unique_keys, + double est_hit_ratio); static WindowAgg *make_windowagg(List *tlist, WindowClause *wc, int partNumCols, AttrNumber *partColIdx, Oid *partOperators, Oid *partCollations, int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, Oid *ordCollations, @@ -293,9 +293,9 @@ static WindowAgg *make_windowagg(List *tlist, WindowClause *wc, static Group *make_group(List *tlist, List *qual, int numGroupCols, AttrNumber *grpColIdx, Oid *grpOperators, Oid *grpCollations, Plan *lefttree); -static Unique *make_unique_from_sortclauses(Plan *lefttree, List *distinctList); static Unique *make_unique_from_pathkeys(Plan *lefttree, - List *pathkeys, int numCols); + List *pathkeys, int numCols, + Relids relids); static Gather *make_gather(List *qptlist, List *qpqual, int nworkers, int rescan_param, bool single_copy, Plan *subplan); static SetOp *make_setop(SetOpCmd cmd, SetOpStrategy strategy, @@ -467,19 +467,9 @@ create_plan_recurse(PlannerInfo *root, Path *best_path, int flags) flags); break; case T_Unique: - if (IsA(best_path, UpperUniquePath)) - { - plan = (Plan *) create_upper_unique_plan(root, - (UpperUniquePath *) best_path, - flags); - } - else - { - Assert(IsA(best_path, UniquePath)); - plan = create_unique_plan(root, - (UniquePath *) best_path, - flags); - } + plan = (Plan *) create_unique_plan(root, + (UniquePath *) best_path, + flags); break; case T_Gather: plan = (Plan *) create_gather_plan(root, @@ -1753,214 +1743,14 @@ create_memoize_plan(PlannerInfo *root, MemoizePath *best_path, int flags) plan = make_memoize(subplan, operators, collations, param_exprs, best_path->singlerow, best_path->binary_mode, - 
best_path->est_entries, keyparamids); + best_path->est_entries, keyparamids, best_path->est_calls, + best_path->est_unique_keys, best_path->est_hit_ratio); copy_generic_path_info(&plan->plan, (Path *) best_path); return plan; } -/* - * create_unique_plan - * Create a Unique plan for 'best_path' and (recursively) plans - * for its subpaths. - * - * Returns a Plan node. - */ -static Plan * -create_unique_plan(PlannerInfo *root, UniquePath *best_path, int flags) -{ - Plan *plan; - Plan *subplan; - List *in_operators; - List *uniq_exprs; - List *newtlist; - int nextresno; - bool newitems; - int numGroupCols; - AttrNumber *groupColIdx; - Oid *groupCollations; - int groupColPos; - ListCell *l; - - /* Unique doesn't project, so tlist requirements pass through */ - subplan = create_plan_recurse(root, best_path->subpath, flags); - - /* Done if we don't need to do any actual unique-ifying */ - if (best_path->umethod == UNIQUE_PATH_NOOP) - return subplan; - - /* - * As constructed, the subplan has a "flat" tlist containing just the Vars - * needed here and at upper levels. The values we are supposed to - * unique-ify may be expressions in these variables. We have to add any - * such expressions to the subplan's tlist. - * - * The subplan may have a "physical" tlist if it is a simple scan plan. If - * we're going to sort, this should be reduced to the regular tlist, so - * that we don't sort more data than we need to. For hashing, the tlist - * should be left as-is if we don't need to add any expressions; but if we - * do have to add expressions, then a projection step will be needed at - * runtime anyway, so we may as well remove unneeded items. Therefore - * newtlist starts from build_path_tlist() not just a copy of the - * subplan's tlist; and we don't install it into the subplan unless we are - * sorting or stuff has to be added. - */ - in_operators = best_path->in_operators; - uniq_exprs = best_path->uniq_exprs; - - /* initialize modified subplan tlist as just the "required" vars */ - newtlist = build_path_tlist(root, &best_path->path); - nextresno = list_length(newtlist) + 1; - newitems = false; - - foreach(l, uniq_exprs) - { - Expr *uniqexpr = lfirst(l); - TargetEntry *tle; - - tle = tlist_member(uniqexpr, newtlist); - if (!tle) - { - tle = makeTargetEntry((Expr *) uniqexpr, - nextresno, - NULL, - false); - newtlist = lappend(newtlist, tle); - nextresno++; - newitems = true; - } - } - - /* Use change_plan_targetlist in case we need to insert a Result node */ - if (newitems || best_path->umethod == UNIQUE_PATH_SORT) - subplan = change_plan_targetlist(subplan, newtlist, - best_path->path.parallel_safe); - - /* - * Build control information showing which subplan output columns are to - * be examined by the grouping step. Unfortunately we can't merge this - * with the previous loop, since we didn't then know which version of the - * subplan tlist we'd end up using. 
- */ - newtlist = subplan->targetlist; - numGroupCols = list_length(uniq_exprs); - groupColIdx = (AttrNumber *) palloc(numGroupCols * sizeof(AttrNumber)); - groupCollations = (Oid *) palloc(numGroupCols * sizeof(Oid)); - - groupColPos = 0; - foreach(l, uniq_exprs) - { - Expr *uniqexpr = lfirst(l); - TargetEntry *tle; - - tle = tlist_member(uniqexpr, newtlist); - if (!tle) /* shouldn't happen */ - elog(ERROR, "failed to find unique expression in subplan tlist"); - groupColIdx[groupColPos] = tle->resno; - groupCollations[groupColPos] = exprCollation((Node *) tle->expr); - groupColPos++; - } - - if (best_path->umethod == UNIQUE_PATH_HASH) - { - Oid *groupOperators; - - /* - * Get the hashable equality operators for the Agg node to use. - * Normally these are the same as the IN clause operators, but if - * those are cross-type operators then the equality operators are the - * ones for the IN clause operators' RHS datatype. - */ - groupOperators = (Oid *) palloc(numGroupCols * sizeof(Oid)); - groupColPos = 0; - foreach(l, in_operators) - { - Oid in_oper = lfirst_oid(l); - Oid eq_oper; - - if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper)) - elog(ERROR, "could not find compatible hash operator for operator %u", - in_oper); - groupOperators[groupColPos++] = eq_oper; - } - - /* - * Since the Agg node is going to project anyway, we can give it the - * minimum output tlist, without any stuff we might have added to the - * subplan tlist. - */ - plan = (Plan *) make_agg(build_path_tlist(root, &best_path->path), - NIL, - AGG_HASHED, - AGGSPLIT_SIMPLE, - numGroupCols, - groupColIdx, - groupOperators, - groupCollations, - NIL, - NIL, - best_path->path.rows, - 0, - subplan); - } - else - { - List *sortList = NIL; - Sort *sort; - - /* Create an ORDER BY list to sort the input compatibly */ - groupColPos = 0; - foreach(l, in_operators) - { - Oid in_oper = lfirst_oid(l); - Oid sortop; - Oid eqop; - TargetEntry *tle; - SortGroupClause *sortcl; - - sortop = get_ordering_op_for_equality_op(in_oper, false); - if (!OidIsValid(sortop)) /* shouldn't happen */ - elog(ERROR, "could not find ordering operator for equality operator %u", - in_oper); - - /* - * The Unique node will need equality operators. Normally these - * are the same as the IN clause operators, but if those are - * cross-type operators then the equality operators are the ones - * for the IN clause operators' RHS datatype. 
- */ - eqop = get_equality_op_for_ordering_op(sortop, NULL); - if (!OidIsValid(eqop)) /* shouldn't happen */ - elog(ERROR, "could not find equality operator for ordering operator %u", - sortop); - - tle = get_tle_by_resno(subplan->targetlist, - groupColIdx[groupColPos]); - Assert(tle != NULL); - - sortcl = makeNode(SortGroupClause); - sortcl->tleSortGroupRef = assignSortGroupRef(tle, - subplan->targetlist); - sortcl->eqop = eqop; - sortcl->sortop = sortop; - sortcl->reverse_sort = false; - sortcl->nulls_first = false; - sortcl->hashable = false; /* no need to make this accurate */ - sortList = lappend(sortList, sortcl); - groupColPos++; - } - sort = make_sort_from_sortclauses(sortList, subplan); - label_sort_with_costsize(root, sort, -1.0); - plan = (Plan *) make_unique_from_sortclauses((Plan *) sort, sortList); - } - - /* Copy cost data from Path to Plan */ - copy_generic_path_info(plan, &best_path->path); - - return plan; -} - /* * create_gather_plan * @@ -2318,13 +2108,13 @@ create_group_plan(PlannerInfo *root, GroupPath *best_path) } /* - * create_upper_unique_plan + * create_unique_plan * * Create a Unique plan for 'best_path' and (recursively) plans * for its subpaths. */ static Unique * -create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path, int flags) +create_unique_plan(PlannerInfo *root, UniquePath *best_path, int flags) { Unique *plan; Plan *subplan; @@ -2336,9 +2126,17 @@ create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path, int flag subplan = create_plan_recurse(root, best_path->subpath, flags | CP_LABEL_TLIST); + /* + * make_unique_from_pathkeys calls find_ec_member_matching_expr, which + * will ignore any child EC members that don't belong to the given relids. + * Thus, if this unique path is based on a child relation, we must pass + * its relids. + */ plan = make_unique_from_pathkeys(subplan, best_path->path.pathkeys, - best_path->numkeys); + best_path->numkeys, + IS_OTHER_REL(best_path->path.parent) ? + best_path->path.parent->relids : NULL); copy_generic_path_info(&plan->plan, (Path *) best_path); @@ -6749,7 +6547,9 @@ materialize_finished_plan(Plan *subplan) static Memoize * make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations, List *param_exprs, bool singlerow, bool binary_mode, - uint32 est_entries, Bitmapset *keyparamids) + uint32 est_entries, Bitmapset *keyparamids, + Cardinality est_calls, Cardinality est_unique_keys, + double est_hit_ratio) { Memoize *node = makeNode(Memoize); Plan *plan = &node->plan; @@ -6767,6 +6567,9 @@ make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations, node->binary_mode = binary_mode; node->est_entries = est_entries; node->keyparamids = keyparamids; + node->est_calls = est_calls; + node->est_unique_keys = est_unique_keys; + node->est_hit_ratio = est_hit_ratio; return node; } @@ -6871,61 +6674,14 @@ make_group(List *tlist, } /* - * distinctList is a list of SortGroupClauses, identifying the targetlist items - * that should be considered by the Unique filter. The input path must - * already be sorted accordingly. 
- */ -static Unique * -make_unique_from_sortclauses(Plan *lefttree, List *distinctList) -{ - Unique *node = makeNode(Unique); - Plan *plan = &node->plan; - int numCols = list_length(distinctList); - int keyno = 0; - AttrNumber *uniqColIdx; - Oid *uniqOperators; - Oid *uniqCollations; - ListCell *slitem; - - plan->targetlist = lefttree->targetlist; - plan->qual = NIL; - plan->lefttree = lefttree; - plan->righttree = NULL; - - /* - * convert SortGroupClause list into arrays of attr indexes and equality - * operators, as wanted by executor - */ - Assert(numCols > 0); - uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols); - uniqOperators = (Oid *) palloc(sizeof(Oid) * numCols); - uniqCollations = (Oid *) palloc(sizeof(Oid) * numCols); - - foreach(slitem, distinctList) - { - SortGroupClause *sortcl = (SortGroupClause *) lfirst(slitem); - TargetEntry *tle = get_sortgroupclause_tle(sortcl, plan->targetlist); - - uniqColIdx[keyno] = tle->resno; - uniqOperators[keyno] = sortcl->eqop; - uniqCollations[keyno] = exprCollation((Node *) tle->expr); - Assert(OidIsValid(uniqOperators[keyno])); - keyno++; - } - - node->numCols = numCols; - node->uniqColIdx = uniqColIdx; - node->uniqOperators = uniqOperators; - node->uniqCollations = uniqCollations; - - return node; -} - -/* - * as above, but use pathkeys to identify the sort columns and semantics + * pathkeys is a list of PathKeys, identifying the sort columns and semantics. + * The input plan must already be sorted accordingly. + * + * relids identifies the child relation being unique-ified, if any. */ static Unique * -make_unique_from_pathkeys(Plan *lefttree, List *pathkeys, int numCols) +make_unique_from_pathkeys(Plan *lefttree, List *pathkeys, int numCols, + Relids relids) { Unique *node = makeNode(Unique); Plan *plan = &node->plan; @@ -6988,7 +6744,7 @@ make_unique_from_pathkeys(Plan *lefttree, List *pathkeys, int numCols) foreach(j, plan->targetlist) { tle = (TargetEntry *) lfirst(j); - em = find_ec_member_matching_expr(ec, tle->expr, NULL); + em = find_ec_member_matching_expr(ec, tle->expr, relids); if (em) { /* found expr already in tlist */ @@ -7224,6 +6980,8 @@ make_modifytable(PlannerInfo *root, Plan *subplan, ModifyTable *node = makeNode(ModifyTable); bool returning_old_or_new = false; bool returning_old_or_new_valid = false; + bool transition_tables = false; + bool transition_tables_valid = false; List *fdw_private_list; Bitmapset *direct_modify_plans; ListCell *lc; @@ -7370,8 +7128,8 @@ make_modifytable(PlannerInfo *root, Plan *subplan, * callback functions needed for that and (2) there are no local * structures that need to be run for each modified row: row-level * triggers on the foreign table, stored generated columns, WITH CHECK - * OPTIONs from parent views, or Vars returning OLD/NEW in the - * RETURNING list. + * OPTIONs from parent views, Vars returning OLD/NEW in the RETURNING + * list, or transition tables on the named relation. 
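+ * (Transition tables must be populated with the full set of old
+ * and/or new rows, which requires each modified row to pass through
+ * the local executor rather than being modified directly on the
+ * remote server.)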
*/ direct_modify = false; if (fdwroutine != NULL && @@ -7383,7 +7141,10 @@ make_modifytable(PlannerInfo *root, Plan *subplan, !has_row_triggers(root, rti, operation) && !has_stored_generated_columns(root, rti)) { - /* returning_old_or_new is the same for all result relations */ + /* + * returning_old_or_new and transition_tables are the same for all + * result relations, respectively + */ if (!returning_old_or_new_valid) { returning_old_or_new = @@ -7392,7 +7153,18 @@ make_modifytable(PlannerInfo *root, Plan *subplan, returning_old_or_new_valid = true; } if (!returning_old_or_new) - direct_modify = fdwroutine->PlanDirectModify(root, node, rti, i); + { + if (!transition_tables_valid) + { + transition_tables = has_transition_tables(root, + nominalRelation, + operation); + transition_tables_valid = true; + } + if (!transition_tables) + direct_modify = fdwroutine->PlanDirectModify(root, node, + rti, i); + } } if (direct_modify) direct_modify_plans = bms_add_member(direct_modify_plans, i); diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c index 64605be31781f..2ef0bb7f66365 100644 --- a/src/backend/optimizer/plan/planagg.c +++ b/src/backend/optimizer/plan/planagg.c @@ -410,7 +410,7 @@ build_minmax_path(PlannerInfo *root, MinMaxAggInfo *mminfo, parse->limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid, sizeof(int64), Int64GetDatum(1), false, - FLOAT8PASSBYVAL); + true); /* * Generate the best paths for this query, telling query_planner that we diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index a77b2147e9592..41bd8353430df 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -58,6 +58,7 @@ #include "parser/parsetree.h" #include "partitioning/partdesc.h" #include "rewrite/rewriteManip.h" +#include "utils/acl.h" #include "utils/backend_status.h" #include "utils/lsyscache.h" #include "utils/rel.h" @@ -267,6 +268,12 @@ static bool group_by_has_partkey(RelOptInfo *input_rel, static int common_prefix_cmp(const void *a, const void *b); static List *generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist); +static void create_final_unique_paths(PlannerInfo *root, RelOptInfo *input_rel, + List *sortPathkeys, List *groupClause, + SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel); +static void create_partial_unique_paths(PlannerInfo *root, RelOptInfo *input_rel, + List *sortPathkeys, List *groupClause, + SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel); /***************************************************************************** @@ -558,6 +565,7 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions, result->commandType = parse->commandType; result->queryId = parse->queryId; + result->planOrigin = PLAN_STMT_STANDARD; result->hasReturning = (parse->returningList != NIL); result->hasModifyingCTE = parse->hasModifyingCTE; result->canSetTag = parse->canSetTag; @@ -582,7 +590,6 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions, result->utilityStmt = parse->utilityStmt; result->stmt_location = parse->stmt_location; result->stmt_len = parse->stmt_len; - result->cached_plan_type = PLAN_CACHE_NONE; result->jitFlags = PGJIT_NONE; if (jit_enabled && jit_above_cost >= 0 && @@ -837,6 +844,38 @@ subquery_planner(PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root, bms_make_singleton(parse->resultRelation); } + /* + * This would be a convenient time to check access permissions for all + * relations mentioned in the query, 
since it would be better to fail now, + * before doing any detailed planning. However, for historical reasons, + * we leave this to be done at executor startup. + * + * Note, however, that we do need to check access permissions for any view + * relations mentioned in the query, in order to prevent information being + * leaked by selectivity estimation functions, which only check view owner + * permissions on underlying tables (see all_rows_selectable() and its + * callers). This is a little ugly, because it means that access + * permissions for views will be checked twice, which is another reason + * why it would be better to do all the ACL checks here. + */ + foreach(l, parse->rtable) + { + RangeTblEntry *rte = lfirst_node(RangeTblEntry, l); + + if (rte->perminfoindex != 0 && + rte->relkind == RELKIND_VIEW) + { + RTEPermissionInfo *perminfo; + bool result; + + perminfo = getRTEPermissionInfo(parse->rteperminfos, rte); + result = ExecCheckOneRelPerms(perminfo); + if (!result) + aclcheck_error(ACLCHECK_NO_PRIV, OBJECT_VIEW, + get_rel_name(perminfo->relid)); + } + } + /* * Preprocess RowMark information. We need to do this after subquery * pullup, so that all base relations are present. @@ -4882,7 +4921,7 @@ create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid, sizeof(int64), Int64GetDatum(1), false, - FLOAT8PASSBYVAL); + true); /* * Apply a LimitPath onto the partial path to restrict the @@ -4906,10 +4945,10 @@ create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, else { add_partial_path(partial_distinct_rel, (Path *) - create_upper_unique_path(root, partial_distinct_rel, - sorted_path, - list_length(root->distinct_pathkeys), - numDistinctRows)); + create_unique_path(root, partial_distinct_rel, + sorted_path, + list_length(root->distinct_pathkeys), + numDistinctRows)); } } } @@ -5085,7 +5124,7 @@ create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid, sizeof(int64), Int64GetDatum(1), false, - FLOAT8PASSBYVAL); + true); /* * If the query already has a LIMIT clause, then we could @@ -5100,10 +5139,10 @@ create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, else { add_path(distinct_rel, (Path *) - create_upper_unique_path(root, distinct_rel, - sorted_path, - list_length(root->distinct_pathkeys), - numDistinctRows)); + create_unique_path(root, distinct_rel, + sorted_path, + list_length(root->distinct_pathkeys), + numDistinctRows)); } } } @@ -8237,3 +8276,560 @@ generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist) return grouplist; } + +/* + * create_unique_paths + * Build a new RelOptInfo containing Paths that represent elimination of + * distinct rows from the input data. Distinct-ness is defined according to + * the needs of the semijoin represented by sjinfo. If it is not possible + * to identify how to make the data unique, NULL is returned. + * + * If used at all, this is likely to be called repeatedly on the same rel, + * so we cache the result. 
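+ *
+ * For example, for the semijoin arising from
+ *		SELECT * FROM t1 WHERE (t1.a, t1.b) IN (SELECT t2.x, t2.y FROM t2);
+ * this builds paths that deliver the distinct (t2.x, t2.y) rows of t2,
+ * via sort-and-unique and/or hashed aggregation, so that the semijoin
+ * can later be implemented as a plain inner join against that relation.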
+ */ +RelOptInfo * +create_unique_paths(PlannerInfo *root, RelOptInfo *rel, SpecialJoinInfo *sjinfo) +{ + RelOptInfo *unique_rel; + List *sortPathkeys = NIL; + List *groupClause = NIL; + MemoryContext oldcontext; + + /* Caller made a mistake if SpecialJoinInfo is the wrong one */ + Assert(sjinfo->jointype == JOIN_SEMI); + Assert(bms_equal(rel->relids, sjinfo->syn_righthand)); + + /* If result already cached, return it */ + if (rel->unique_rel) + return rel->unique_rel; + + /* If it's not possible to unique-ify, return NULL */ + if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash)) + return NULL; + + /* + * Punt if this is a child relation and we failed to build a unique-ified + * relation for its parent. This can happen if all the RHS columns were + * found to be equated to constants when unique-ifying the parent table, + * leaving no columns to unique-ify. + */ + if (IS_OTHER_REL(rel) && rel->top_parent->unique_rel == NULL) + return NULL; + + /* + * When called during GEQO join planning, we are in a short-lived memory + * context. We must make sure that the unique rel and any subsidiary data + * structures created for a baserel survive the GEQO cycle, else the + * baserel is trashed for future GEQO cycles. On the other hand, when we + * are creating those for a joinrel during GEQO, we don't want them to + * clutter the main planning context. Upshot is that the best solution is + * to explicitly allocate memory in the same context the given RelOptInfo + * is in. + */ + oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel)); + + unique_rel = makeNode(RelOptInfo); + memcpy(unique_rel, rel, sizeof(RelOptInfo)); + + /* + * clear path info + */ + unique_rel->pathlist = NIL; + unique_rel->ppilist = NIL; + unique_rel->partial_pathlist = NIL; + unique_rel->cheapest_startup_path = NULL; + unique_rel->cheapest_total_path = NULL; + unique_rel->cheapest_parameterized_paths = NIL; + + /* + * Build the target list for the unique rel. We also build the pathkeys + * that represent the ordering requirements for the sort-based + * implementation, and the list of SortGroupClause nodes that represent + * the columns to be grouped on for the hash-based implementation. + * + * For a child rel, we can construct these fields from those of its + * parent. + */ + if (IS_OTHER_REL(rel)) + { + PathTarget *child_unique_target; + PathTarget *parent_unique_target; + + parent_unique_target = rel->top_parent->unique_rel->reltarget; + + child_unique_target = copy_pathtarget(parent_unique_target); + + /* Translate the target expressions */ + child_unique_target->exprs = (List *) + adjust_appendrel_attrs_multilevel(root, + (Node *) parent_unique_target->exprs, + rel, + rel->top_parent); + + unique_rel->reltarget = child_unique_target; + + sortPathkeys = rel->top_parent->unique_pathkeys; + groupClause = rel->top_parent->unique_groupclause; + } + else + { + List *newtlist; + int nextresno; + List *sortList = NIL; + ListCell *lc1; + ListCell *lc2; + + /* + * The values we are supposed to unique-ify may be expressions in the + * variables of the input rel's targetlist. We have to add any such + * expressions to the unique rel's targetlist. + * + * To complicate matters, some of the values to be unique-ified may be + * known redundant by the EquivalenceClass machinery (e.g., because + * they have been equated to constants). There is no need to compare + * such values during unique-ification, and indeed we had better not + * try because the Vars involved may not have propagated as high as + * the semijoin's level. 
We use make_pathkeys_for_sortclauses to + * detect such cases, which is a tad inefficient but it doesn't seem + * worth building specialized infrastructure for this. + */ + newtlist = make_tlist_from_pathtarget(rel->reltarget); + nextresno = list_length(newtlist) + 1; + + forboth(lc1, sjinfo->semi_rhs_exprs, lc2, sjinfo->semi_operators) + { + Expr *uniqexpr = lfirst(lc1); + Oid in_oper = lfirst_oid(lc2); + Oid sortop; + TargetEntry *tle; + bool made_tle = false; + + tle = tlist_member(uniqexpr, newtlist); + if (!tle) + { + tle = makeTargetEntry((Expr *) uniqexpr, + nextresno, + NULL, + false); + newtlist = lappend(newtlist, tle); + nextresno++; + made_tle = true; + } + + /* + * Try to build an ORDER BY list to sort the input compatibly. We + * do this for each sortable clause even when the clauses are not + * all sortable, so that we can detect clauses that are redundant + * according to the pathkey machinery. + */ + sortop = get_ordering_op_for_equality_op(in_oper, false); + if (OidIsValid(sortop)) + { + Oid eqop; + SortGroupClause *sortcl; + + /* + * The Unique node will need equality operators. Normally + * these are the same as the IN clause operators, but if those + * are cross-type operators then the equality operators are + * the ones for the IN clause operators' RHS datatype. + */ + eqop = get_equality_op_for_ordering_op(sortop, NULL); + if (!OidIsValid(eqop)) /* shouldn't happen */ + elog(ERROR, "could not find equality operator for ordering operator %u", + sortop); + + sortcl = makeNode(SortGroupClause); + sortcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist); + sortcl->eqop = eqop; + sortcl->sortop = sortop; + sortcl->reverse_sort = false; + sortcl->nulls_first = false; + sortcl->hashable = false; /* no need to make this accurate */ + sortList = lappend(sortList, sortcl); + + /* + * At each step, convert the SortGroupClause list to pathkey + * form. If the just-added SortGroupClause is redundant, the + * result will be shorter than the SortGroupClause list. + */ + sortPathkeys = make_pathkeys_for_sortclauses(root, sortList, + newtlist); + if (list_length(sortPathkeys) != list_length(sortList)) + { + /* Drop the redundant SortGroupClause */ + sortList = list_delete_last(sortList); + Assert(list_length(sortPathkeys) == list_length(sortList)); + /* Undo tlist addition, if we made one */ + if (made_tle) + { + newtlist = list_delete_last(newtlist); + nextresno--; + } + /* We need not consider this clause for hashing, either */ + continue; + } + } + else if (sjinfo->semi_can_btree) /* shouldn't happen */ + elog(ERROR, "could not find ordering operator for equality operator %u", + in_oper); + + if (sjinfo->semi_can_hash) + { + /* Create a GROUP BY list for the Agg node to use */ + Oid eq_oper; + SortGroupClause *groupcl; + + /* + * Get the hashable equality operators for the Agg node to + * use. Normally these are the same as the IN clause + * operators, but if those are cross-type operators then the + * equality operators are the ones for the IN clause + * operators' RHS datatype. 
+ */ + if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper)) + elog(ERROR, "could not find compatible hash operator for operator %u", + in_oper); + + groupcl = makeNode(SortGroupClause); + groupcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist); + groupcl->eqop = eq_oper; + groupcl->sortop = sortop; + groupcl->reverse_sort = false; + groupcl->nulls_first = false; + groupcl->hashable = true; + groupClause = lappend(groupClause, groupcl); + } + } + + /* + * Done building the sortPathkeys and groupClause. But the + * sortPathkeys are bogus if not all the clauses were sortable. + */ + if (!sjinfo->semi_can_btree) + sortPathkeys = NIL; + + /* + * It can happen that all the RHS columns are equated to constants. + * We'd have to do something special to unique-ify in that case, and + * it's such an unlikely-in-the-real-world case that it's not worth + * the effort. So just punt if we found no columns to unique-ify. + */ + if (sortPathkeys == NIL && groupClause == NIL) + { + MemoryContextSwitchTo(oldcontext); + return NULL; + } + + /* Convert the required targetlist back to PathTarget form */ + unique_rel->reltarget = create_pathtarget(root, newtlist); + } + + /* build unique paths based on input rel's pathlist */ + create_final_unique_paths(root, rel, sortPathkeys, groupClause, + sjinfo, unique_rel); + + /* build unique paths based on input rel's partial_pathlist */ + create_partial_unique_paths(root, rel, sortPathkeys, groupClause, + sjinfo, unique_rel); + + /* Now choose the best path(s) */ + set_cheapest(unique_rel); + + /* + * There shouldn't be any partial paths for the unique relation; + * otherwise, we won't be able to properly guarantee uniqueness. + */ + Assert(unique_rel->partial_pathlist == NIL); + + /* Cache the result */ + rel->unique_rel = unique_rel; + rel->unique_pathkeys = sortPathkeys; + rel->unique_groupclause = groupClause; + + MemoryContextSwitchTo(oldcontext); + + return unique_rel; +} + +/* + * create_final_unique_paths + * Create unique paths in 'unique_rel' based on 'input_rel' pathlist + */ +static void +create_final_unique_paths(PlannerInfo *root, RelOptInfo *input_rel, + List *sortPathkeys, List *groupClause, + SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel) +{ + Path *cheapest_input_path = input_rel->cheapest_total_path; + + /* Estimate number of output rows */ + unique_rel->rows = estimate_num_groups(root, + sjinfo->semi_rhs_exprs, + cheapest_input_path->rows, + NULL, + NULL); + + /* Consider sort-based implementations, if possible. */ + if (sjinfo->semi_can_btree) + { + ListCell *lc; + + /* + * Use any available suitably-sorted path as input, and also consider + * sorting the cheapest-total path and incremental sort on any paths + * with presorted keys. + * + * To save planning time, we ignore parameterized input paths unless + * they are the cheapest-total path. + */ + foreach(lc, input_rel->pathlist) + { + Path *input_path = (Path *) lfirst(lc); + Path *path; + bool is_sorted; + int presorted_keys; + + /* + * Ignore parameterized paths that are not the cheapest-total + * path. + */ + if (input_path->param_info && + input_path != cheapest_input_path) + continue; + + is_sorted = pathkeys_count_contained_in(sortPathkeys, + input_path->pathkeys, + &presorted_keys); + + /* + * Ignore paths that are not suitably or partially sorted, unless + * they are the cheapest total path (no need to deal with paths + * which have presorted keys when incremental sort is disabled). 
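+ *
+ * For instance, if sortPathkeys is (x, y) and an input path is already
+ * sorted by (x), presorted_keys is 1 and an incremental sort need only
+ * sort groups of equal x by y.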
+ */ + if (!is_sorted && input_path != cheapest_input_path && + (presorted_keys == 0 || !enable_incremental_sort)) + continue; + + /* + * Make a separate ProjectionPath in case we need a Result node. + */ + path = (Path *) create_projection_path(root, + unique_rel, + input_path, + unique_rel->reltarget); + + if (!is_sorted) + { + /* + * We've no need to consider both a sort and incremental sort. + * We'll just do a sort if there are no presorted keys and an + * incremental sort when there are presorted keys. + */ + if (presorted_keys == 0 || !enable_incremental_sort) + path = (Path *) create_sort_path(root, + unique_rel, + path, + sortPathkeys, + -1.0); + else + path = (Path *) create_incremental_sort_path(root, + unique_rel, + path, + sortPathkeys, + presorted_keys, + -1.0); + } + + path = (Path *) create_unique_path(root, unique_rel, path, + list_length(sortPathkeys), + unique_rel->rows); + + add_path(unique_rel, path); + } + } + + /* Consider hash-based implementation, if possible. */ + if (sjinfo->semi_can_hash) + { + Path *path; + + /* + * Make a separate ProjectionPath in case we need a Result node. + */ + path = (Path *) create_projection_path(root, + unique_rel, + cheapest_input_path, + unique_rel->reltarget); + + path = (Path *) create_agg_path(root, + unique_rel, + path, + cheapest_input_path->pathtarget, + AGG_HASHED, + AGGSPLIT_SIMPLE, + groupClause, + NIL, + NULL, + unique_rel->rows); + + add_path(unique_rel, path); + } +} + +/* + * create_partial_unique_paths + * Create unique paths in 'unique_rel' based on 'input_rel' partial_pathlist + */ +static void +create_partial_unique_paths(PlannerInfo *root, RelOptInfo *input_rel, + List *sortPathkeys, List *groupClause, + SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel) +{ + RelOptInfo *partial_unique_rel; + Path *cheapest_partial_path; + + /* nothing to do when there are no partial paths in the input rel */ + if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL) + return; + + /* + * nothing to do if there's anything in the targetlist that's + * parallel-restricted. + */ + if (!is_parallel_safe(root, (Node *) unique_rel->reltarget->exprs)) + return; + + cheapest_partial_path = linitial(input_rel->partial_pathlist); + + partial_unique_rel = makeNode(RelOptInfo); + memcpy(partial_unique_rel, input_rel, sizeof(RelOptInfo)); + + /* + * clear path info + */ + partial_unique_rel->pathlist = NIL; + partial_unique_rel->ppilist = NIL; + partial_unique_rel->partial_pathlist = NIL; + partial_unique_rel->cheapest_startup_path = NULL; + partial_unique_rel->cheapest_total_path = NULL; + partial_unique_rel->cheapest_parameterized_paths = NIL; + + /* Estimate number of output rows */ + partial_unique_rel->rows = estimate_num_groups(root, + sjinfo->semi_rhs_exprs, + cheapest_partial_path->rows, + NULL, + NULL); + partial_unique_rel->reltarget = unique_rel->reltarget; + + /* Consider sort-based implementations, if possible. */ + if (sjinfo->semi_can_btree) + { + ListCell *lc; + + /* + * Use any available suitably-sorted path as input, and also consider + * sorting the cheapest partial path and incremental sort on any paths + * with presorted keys. 
+ */ + foreach(lc, input_rel->partial_pathlist) + { + Path *input_path = (Path *) lfirst(lc); + Path *path; + bool is_sorted; + int presorted_keys; + + is_sorted = pathkeys_count_contained_in(sortPathkeys, + input_path->pathkeys, + &presorted_keys); + + /* + * Ignore paths that are not suitably or partially sorted, unless + * they are the cheapest partial path (no need to deal with paths + * which have presorted keys when incremental sort is disabled). + */ + if (!is_sorted && input_path != cheapest_partial_path && + (presorted_keys == 0 || !enable_incremental_sort)) + continue; + + /* + * Make a separate ProjectionPath in case we need a Result node. + */ + path = (Path *) create_projection_path(root, + partial_unique_rel, + input_path, + partial_unique_rel->reltarget); + + if (!is_sorted) + { + /* + * We've no need to consider both a sort and incremental sort. + * We'll just do a sort if there are no presorted keys and an + * incremental sort when there are presorted keys. + */ + if (presorted_keys == 0 || !enable_incremental_sort) + path = (Path *) create_sort_path(root, + partial_unique_rel, + path, + sortPathkeys, + -1.0); + else + path = (Path *) create_incremental_sort_path(root, + partial_unique_rel, + path, + sortPathkeys, + presorted_keys, + -1.0); + } + + path = (Path *) create_unique_path(root, partial_unique_rel, path, + list_length(sortPathkeys), + partial_unique_rel->rows); + + add_partial_path(partial_unique_rel, path); + } + } + + /* Consider hash-based implementation, if possible. */ + if (sjinfo->semi_can_hash) + { + Path *path; + + /* + * Make a separate ProjectionPath in case we need a Result node. + */ + path = (Path *) create_projection_path(root, + partial_unique_rel, + cheapest_partial_path, + partial_unique_rel->reltarget); + + path = (Path *) create_agg_path(root, + partial_unique_rel, + path, + cheapest_partial_path->pathtarget, + AGG_HASHED, + AGGSPLIT_SIMPLE, + groupClause, + NIL, + NULL, + partial_unique_rel->rows); + + add_partial_path(partial_unique_rel, path); + } + + if (partial_unique_rel->partial_pathlist != NIL) + { + generate_useful_gather_paths(root, partial_unique_rel, true); + set_cheapest(partial_unique_rel); + + /* + * Finally, create paths to unique-ify the final result. This step is + * needed to remove any duplicates due to combining rows from parallel + * workers. 
+ */ + create_final_unique_paths(root, partial_unique_rel, + sortPathkeys, groupClause, + sjinfo, unique_rel); + } +} diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c index 846e44186c366..d706546f33264 100644 --- a/src/backend/optimizer/plan/setrefs.c +++ b/src/backend/optimizer/plan/setrefs.c @@ -307,6 +307,10 @@ set_plan_references(PlannerInfo *root, Plan *plan) PlanRowMark *rc = lfirst_node(PlanRowMark, lc); PlanRowMark *newrc; + /* sanity check on existing row marks */ + Assert(root->simple_rel_array[rc->rti] != NULL && + root->simple_rte_array[rc->rti] != NULL); + /* flat copy is enough since all fields are scalars */ newrc = (PlanRowMark *) palloc(sizeof(PlanRowMark)); memcpy(newrc, rc, sizeof(PlanRowMark)); diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c index d71ed958e31b3..fae18548e074e 100644 --- a/src/backend/optimizer/plan/subselect.c +++ b/src/backend/optimizer/plan/subselect.c @@ -1397,7 +1397,7 @@ convert_ANY_sublink_to_join(PlannerInfo *root, SubLink *sublink, */ nsitem = addRangeTableEntryForSubquery(pstate, subselect, - makeAlias("ANY_subquery", NIL), + NULL, use_lateral, false); rte = nsitem->p_rte; diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c index eab44da65b8f0..28a4ae6444068 100644 --- a/src/backend/optimizer/prep/prepunion.c +++ b/src/backend/optimizer/prep/prepunion.c @@ -929,11 +929,11 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root, make_pathkeys_for_sortclauses(root, groupList, tlist), -1.0); - path = (Path *) create_upper_unique_path(root, - result_rel, - path, - list_length(path->pathkeys), - dNumGroups); + path = (Path *) create_unique_path(root, + result_rel, + path, + list_length(path->pathkeys), + dNumGroups); add_path(result_rel, path); @@ -946,11 +946,11 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root, make_pathkeys_for_sortclauses(root, groupList, tlist), -1.0); - path = (Path *) create_upper_unique_path(root, - result_rel, - path, - list_length(path->pathkeys), - dNumGroups); + path = (Path *) create_unique_path(root, + result_rel, + path, + list_length(path->pathkeys), + dNumGroups); add_path(result_rel, path); } } @@ -970,11 +970,11 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root, NULL); /* and make the MergeAppend unique */ - path = (Path *) create_upper_unique_path(root, - result_rel, - path, - list_length(tlist), - dNumGroups); + path = (Path *) create_unique_path(root, + result_rel, + path, + list_length(tlist), + dNumGroups); add_path(result_rel, path); } diff --git a/src/backend/optimizer/util/inherit.c b/src/backend/optimizer/util/inherit.c index 30d158069e332..856d5959d1031 100644 --- a/src/backend/optimizer/util/inherit.c +++ b/src/backend/optimizer/util/inherit.c @@ -322,7 +322,6 @@ expand_partitioned_rtentry(PlannerInfo *root, RelOptInfo *relinfo, PlanRowMark *top_parentrc, LOCKMODE lockmode) { PartitionDesc partdesc; - Bitmapset *live_parts; int num_live_parts; int i; @@ -356,10 +355,10 @@ expand_partitioned_rtentry(PlannerInfo *root, RelOptInfo *relinfo, * that survive pruning. Below, we will initialize child objects for the * surviving partitions. */ - relinfo->live_parts = live_parts = prune_append_rel_partitions(relinfo); + relinfo->live_parts = prune_append_rel_partitions(relinfo); /* Expand simple_rel_array and friends to hold child objects. 
*/ - num_live_parts = bms_num_members(live_parts); + num_live_parts = bms_num_members(relinfo->live_parts); if (num_live_parts > 0) expand_planner_arrays(root, num_live_parts); @@ -378,7 +377,7 @@ expand_partitioned_rtentry(PlannerInfo *root, RelOptInfo *relinfo, * table itself, because it's not going to be scanned. */ i = -1; - while ((i = bms_next_member(live_parts, i)) >= 0) + while ((i = bms_next_member(relinfo->live_parts, i)) >= 0) { Oid childOID = partdesc->oids[i]; Relation childrel; diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c index 9cc602788eaae..b0da28150d32c 100644 --- a/src/backend/optimizer/util/pathnode.c +++ b/src/backend/optimizer/util/pathnode.c @@ -46,7 +46,6 @@ typedef enum */ #define STD_FUZZ_FACTOR 1.01 -static List *translate_sub_tlist(List *tlist, int relid); static int append_total_cost_compare(const ListCell *a, const ListCell *b); static int append_startup_cost_compare(const ListCell *a, const ListCell *b); static List *reparameterize_pathlist_by_child(PlannerInfo *root, @@ -381,7 +380,6 @@ set_cheapest(RelOptInfo *parent_rel) parent_rel->cheapest_startup_path = cheapest_startup_path; parent_rel->cheapest_total_path = cheapest_total_path; - parent_rel->cheapest_unique_path = NULL; /* computed only if needed */ parent_rel->cheapest_parameterized_paths = parameterized_paths; } @@ -1689,7 +1687,7 @@ create_material_path(RelOptInfo *rel, Path *subpath) MemoizePath * create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *param_exprs, List *hash_operators, - bool singlerow, bool binary_mode, double calls) + bool singlerow, bool binary_mode, Cardinality est_calls) { MemoizePath *pathnode = makeNode(MemoizePath); @@ -1710,7 +1708,6 @@ create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, pathnode->param_exprs = param_exprs; pathnode->singlerow = singlerow; pathnode->binary_mode = binary_mode; - pathnode->calls = clamp_row_est(calls); /* * For now we set est_entries to 0. cost_memoize_rescan() does all the @@ -1720,6 +1717,12 @@ create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, */ pathnode->est_entries = 0; + pathnode->est_calls = clamp_row_est(est_calls); + + /* These will also be set later in cost_memoize_rescan() */ + pathnode->est_unique_keys = 0.0; + pathnode->est_hit_ratio = 0.0; + /* we should not generate this path type when enable_memoize=false */ Assert(enable_memoize); pathnode->path.disabled_nodes = subpath->disabled_nodes; @@ -1735,246 +1738,6 @@ create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, return pathnode; } -/* - * create_unique_path - * Creates a path representing elimination of distinct rows from the - * input data. Distinct-ness is defined according to the needs of the - * semijoin represented by sjinfo. If it is not possible to identify - * how to make the data unique, NULL is returned. - * - * If used at all, this is likely to be called repeatedly on the same rel; - * and the input subpath should always be the same (the cheapest_total path - * for the rel). So we cache the result. - */ -UniquePath * -create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, - SpecialJoinInfo *sjinfo) -{ - UniquePath *pathnode; - Path sort_path; /* dummy for result of cost_sort */ - Path agg_path; /* dummy for result of cost_agg */ - MemoryContext oldcontext; - int numCols; - - /* Caller made a mistake if subpath isn't cheapest_total ... */ - Assert(subpath == rel->cheapest_total_path); - Assert(subpath->parent == rel); - /* ... 
or if SpecialJoinInfo is the wrong one */ - Assert(sjinfo->jointype == JOIN_SEMI); - Assert(bms_equal(rel->relids, sjinfo->syn_righthand)); - - /* If result already cached, return it */ - if (rel->cheapest_unique_path) - return (UniquePath *) rel->cheapest_unique_path; - - /* If it's not possible to unique-ify, return NULL */ - if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash)) - return NULL; - - /* - * When called during GEQO join planning, we are in a short-lived memory - * context. We must make sure that the path and any subsidiary data - * structures created for a baserel survive the GEQO cycle, else the - * baserel is trashed for future GEQO cycles. On the other hand, when we - * are creating those for a joinrel during GEQO, we don't want them to - * clutter the main planning context. Upshot is that the best solution is - * to explicitly allocate memory in the same context the given RelOptInfo - * is in. - */ - oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel)); - - pathnode = makeNode(UniquePath); - - pathnode->path.pathtype = T_Unique; - pathnode->path.parent = rel; - pathnode->path.pathtarget = rel->reltarget; - pathnode->path.param_info = subpath->param_info; - pathnode->path.parallel_aware = false; - pathnode->path.parallel_safe = rel->consider_parallel && - subpath->parallel_safe; - pathnode->path.parallel_workers = subpath->parallel_workers; - - /* - * Assume the output is unsorted, since we don't necessarily have pathkeys - * to represent it. (This might get overridden below.) - */ - pathnode->path.pathkeys = NIL; - - pathnode->subpath = subpath; - - /* - * Under GEQO and when planning child joins, the sjinfo might be - * short-lived, so we'd better make copies of data structures we extract - * from it. - */ - pathnode->in_operators = copyObject(sjinfo->semi_operators); - pathnode->uniq_exprs = copyObject(sjinfo->semi_rhs_exprs); - - /* - * If the input is a relation and it has a unique index that proves the - * semi_rhs_exprs are unique, then we don't need to do anything. Note - * that relation_has_unique_index_for automatically considers restriction - * clauses for the rel, as well. - */ - if (rel->rtekind == RTE_RELATION && sjinfo->semi_can_btree && - relation_has_unique_index_for(root, rel, NIL, - sjinfo->semi_rhs_exprs, - sjinfo->semi_operators)) - { - pathnode->umethod = UNIQUE_PATH_NOOP; - pathnode->path.rows = rel->rows; - pathnode->path.disabled_nodes = subpath->disabled_nodes; - pathnode->path.startup_cost = subpath->startup_cost; - pathnode->path.total_cost = subpath->total_cost; - pathnode->path.pathkeys = subpath->pathkeys; - - rel->cheapest_unique_path = (Path *) pathnode; - - MemoryContextSwitchTo(oldcontext); - - return pathnode; - } - - /* - * If the input is a subquery whose output must be unique already, then we - * don't need to do anything. The test for uniqueness has to consider - * exactly which columns we are extracting; for example "SELECT DISTINCT - * x,y" doesn't guarantee that x alone is distinct. So we cannot check for - * this optimization unless semi_rhs_exprs consists only of simple Vars - * referencing subquery outputs. (Possibly we could do something with - * expressions in the subquery outputs, too, but for now keep it simple.) 
- */ - if (rel->rtekind == RTE_SUBQUERY) - { - RangeTblEntry *rte = planner_rt_fetch(rel->relid, root); - - if (query_supports_distinctness(rte->subquery)) - { - List *sub_tlist_colnos; - - sub_tlist_colnos = translate_sub_tlist(sjinfo->semi_rhs_exprs, - rel->relid); - - if (sub_tlist_colnos && - query_is_distinct_for(rte->subquery, - sub_tlist_colnos, - sjinfo->semi_operators)) - { - pathnode->umethod = UNIQUE_PATH_NOOP; - pathnode->path.rows = rel->rows; - pathnode->path.disabled_nodes = subpath->disabled_nodes; - pathnode->path.startup_cost = subpath->startup_cost; - pathnode->path.total_cost = subpath->total_cost; - pathnode->path.pathkeys = subpath->pathkeys; - - rel->cheapest_unique_path = (Path *) pathnode; - - MemoryContextSwitchTo(oldcontext); - - return pathnode; - } - } - } - - /* Estimate number of output rows */ - pathnode->path.rows = estimate_num_groups(root, - sjinfo->semi_rhs_exprs, - rel->rows, - NULL, - NULL); - numCols = list_length(sjinfo->semi_rhs_exprs); - - if (sjinfo->semi_can_btree) - { - /* - * Estimate cost for sort+unique implementation - */ - cost_sort(&sort_path, root, NIL, - subpath->disabled_nodes, - subpath->total_cost, - rel->rows, - subpath->pathtarget->width, - 0.0, - work_mem, - -1.0); - - /* - * Charge one cpu_operator_cost per comparison per input tuple. We - * assume all columns get compared at most of the tuples. (XXX - * probably this is an overestimate.) This should agree with - * create_upper_unique_path. - */ - sort_path.total_cost += cpu_operator_cost * rel->rows * numCols; - } - - if (sjinfo->semi_can_hash) - { - /* - * Estimate the overhead per hashtable entry at 64 bytes (same as in - * planner.c). - */ - int hashentrysize = subpath->pathtarget->width + 64; - - if (hashentrysize * pathnode->path.rows > get_hash_memory_limit()) - { - /* - * We should not try to hash. Hack the SpecialJoinInfo to - * remember this, in case we come through here again. 
- */ - sjinfo->semi_can_hash = false; - } - else - cost_agg(&agg_path, root, - AGG_HASHED, NULL, - numCols, pathnode->path.rows, - NIL, - subpath->disabled_nodes, - subpath->startup_cost, - subpath->total_cost, - rel->rows, - subpath->pathtarget->width); - } - - if (sjinfo->semi_can_btree && sjinfo->semi_can_hash) - { - if (agg_path.disabled_nodes < sort_path.disabled_nodes || - (agg_path.disabled_nodes == sort_path.disabled_nodes && - agg_path.total_cost < sort_path.total_cost)) - pathnode->umethod = UNIQUE_PATH_HASH; - else - pathnode->umethod = UNIQUE_PATH_SORT; - } - else if (sjinfo->semi_can_btree) - pathnode->umethod = UNIQUE_PATH_SORT; - else if (sjinfo->semi_can_hash) - pathnode->umethod = UNIQUE_PATH_HASH; - else - { - /* we can get here only if we abandoned hashing above */ - MemoryContextSwitchTo(oldcontext); - return NULL; - } - - if (pathnode->umethod == UNIQUE_PATH_HASH) - { - pathnode->path.disabled_nodes = agg_path.disabled_nodes; - pathnode->path.startup_cost = agg_path.startup_cost; - pathnode->path.total_cost = agg_path.total_cost; - } - else - { - pathnode->path.disabled_nodes = sort_path.disabled_nodes; - pathnode->path.startup_cost = sort_path.startup_cost; - pathnode->path.total_cost = sort_path.total_cost; - } - - rel->cheapest_unique_path = (Path *) pathnode; - - MemoryContextSwitchTo(oldcontext); - - return pathnode; -} - /* * create_gather_merge_path * @@ -2026,36 +1789,6 @@ create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, return pathnode; } -/* - * translate_sub_tlist - get subquery column numbers represented by tlist - * - * The given targetlist usually contains only Vars referencing the given relid. - * Extract their varattnos (ie, the column numbers of the subquery) and return - * as an integer List. - * - * If any of the tlist items is not a simple Var, we cannot determine whether - * the subquery's uniqueness condition (if any) matches ours, so punt and - * return NIL. 
- */ -static List * -translate_sub_tlist(List *tlist, int relid) -{ - List *result = NIL; - ListCell *l; - - foreach(l, tlist) - { - Var *var = (Var *) lfirst(l); - - if (!var || !IsA(var, Var) || - var->varno != relid) - return NIL; /* punt */ - - result = lappend_int(result, var->varattno); - } - return result; -} - /* * create_gather_path * Creates a path corresponding to a gather scan, returning the @@ -2813,8 +2546,7 @@ create_projection_path(PlannerInfo *root, pathnode->path.pathtype = T_Result; pathnode->path.parent = rel; pathnode->path.pathtarget = target; - /* For now, assume we are above any joins, so no parameterization */ - pathnode->path.param_info = NULL; + pathnode->path.param_info = subpath->param_info; pathnode->path.parallel_aware = false; pathnode->path.parallel_safe = rel->consider_parallel && subpath->parallel_safe && @@ -3069,8 +2801,7 @@ create_incremental_sort_path(PlannerInfo *root, pathnode->path.parent = rel; /* Sort doesn't project, so use source path's pathtarget */ pathnode->path.pathtarget = subpath->pathtarget; - /* For now, assume we are above any joins, so no parameterization */ - pathnode->path.param_info = NULL; + pathnode->path.param_info = subpath->param_info; pathnode->path.parallel_aware = false; pathnode->path.parallel_safe = rel->consider_parallel && subpath->parallel_safe; @@ -3117,8 +2848,7 @@ create_sort_path(PlannerInfo *root, pathnode->path.parent = rel; /* Sort doesn't project, so use source path's pathtarget */ pathnode->path.pathtarget = subpath->pathtarget; - /* For now, assume we are above any joins, so no parameterization */ - pathnode->path.param_info = NULL; + pathnode->path.param_info = subpath->param_info; pathnode->path.parallel_aware = false; pathnode->path.parallel_safe = rel->consider_parallel && subpath->parallel_safe; @@ -3194,13 +2924,10 @@ create_group_path(PlannerInfo *root, } /* - * create_upper_unique_path + * create_unique_path * Creates a pathnode that represents performing an explicit Unique step * on presorted input. * - * This produces a Unique plan node, but the use-case is so different from - * create_unique_path that it doesn't seem worth trying to merge the two. 
- * * 'rel' is the parent relation associated with the result * 'subpath' is the path representing the source of data * 'numCols' is the number of grouping columns @@ -3209,21 +2936,20 @@ create_group_path(PlannerInfo *root, * The input path must be sorted on the grouping columns, plus possibly * additional columns; so the first numCols pathkeys are the grouping columns */ -UpperUniquePath * -create_upper_unique_path(PlannerInfo *root, - RelOptInfo *rel, - Path *subpath, - int numCols, - double numGroups) +UniquePath * +create_unique_path(PlannerInfo *root, + RelOptInfo *rel, + Path *subpath, + int numCols, + double numGroups) { - UpperUniquePath *pathnode = makeNode(UpperUniquePath); + UniquePath *pathnode = makeNode(UniquePath); pathnode->path.pathtype = T_Unique; pathnode->path.parent = rel; /* Unique doesn't project, so use source path's pathtarget */ pathnode->path.pathtarget = subpath->pathtarget; - /* For now, assume we are above any joins, so no parameterization */ - pathnode->path.param_info = NULL; + pathnode->path.param_info = subpath->param_info; pathnode->path.parallel_aware = false; pathnode->path.parallel_safe = rel->consider_parallel && subpath->parallel_safe; @@ -3279,8 +3005,7 @@ create_agg_path(PlannerInfo *root, pathnode->path.pathtype = T_Agg; pathnode->path.parent = rel; pathnode->path.pathtarget = target; - /* For now, assume we are above any joins, so no parameterization */ - pathnode->path.param_info = NULL; + pathnode->path.param_info = subpath->param_info; pathnode->path.parallel_aware = false; pathnode->path.parallel_safe = rel->consider_parallel && subpath->parallel_safe; @@ -4259,7 +3984,7 @@ reparameterize_path(PlannerInfo *root, Path *path, mpath->hash_operators, mpath->singlerow, mpath->binary_mode, - mpath->calls); + mpath->est_calls); } default: break; diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c index c6a58afc5e506..4536bdd6cb4d7 100644 --- a/src/backend/optimizer/util/plancat.c +++ b/src/backend/optimizer/util/plancat.c @@ -77,7 +77,8 @@ static List *get_relation_constraints(PlannerInfo *root, bool include_partition); static List *build_index_tlist(PlannerInfo *root, IndexOptInfo *index, Relation heapRelation); -static List *get_relation_statistics(RelOptInfo *rel, Relation relation); +static List *get_relation_statistics(PlannerInfo *root, RelOptInfo *rel, + Relation relation); static void set_relation_partition_info(PlannerInfo *root, RelOptInfo *rel, Relation relation); static PartitionScheme find_partition_scheme(PlannerInfo *root, @@ -508,7 +509,7 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, rel->indexlist = indexinfos; - rel->statlist = get_relation_statistics(rel, relation); + rel->statlist = get_relation_statistics(root, rel, relation); /* Grab foreign-table info using the relcache, while we have it */ if (relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE) @@ -1406,6 +1407,14 @@ get_relation_constraints(PlannerInfo *root, cexpr = stringToNode(constr->check[i].ccbin); + /* + * Fix Vars to have the desired varno. This must be done before + * const-simplification because eval_const_expressions reduces + * NullTest for Vars based on varno. + */ + if (varno != 1) + ChangeVarNodes(cexpr, 1, varno, 0); + /* * Run each expression through const-simplification and * canonicalization. 
This is not just an optimization, but is @@ -1420,10 +1429,6 @@ get_relation_constraints(PlannerInfo *root, cexpr = (Node *) canonicalize_qual((Expr *) cexpr, true); - /* Fix Vars to have the desired varno */ - if (varno != 1) - ChangeVarNodes(cexpr, 1, varno, 0); - /* * Finally, convert to implicit-AND format (that is, a List) and * append the resulting item(s) to our output list. @@ -1572,7 +1577,8 @@ get_relation_statistics_worker(List **stainfos, RelOptInfo *rel, * just the identifying metadata. Only stats actually built are considered. */ static List * -get_relation_statistics(RelOptInfo *rel, Relation relation) +get_relation_statistics(PlannerInfo *root, RelOptInfo *rel, + Relation relation) { Index varno = rel->relid; List *statoidlist; @@ -1604,8 +1610,8 @@ get_relation_statistics(RelOptInfo *rel, Relation relation) keys = bms_add_member(keys, staForm->stxkeys.values[i]); /* - * Preprocess expressions (if any). We read the expressions, run them - * through eval_const_expressions, and fix the varnos. + * Preprocess expressions (if any). We read the expressions, fix the + * varnos, and run them through eval_const_expressions. * * XXX We don't know yet if there are any data for this stats object, * with either stxdinherit value. But it's reasonable to assume there @@ -1628,6 +1634,18 @@ get_relation_statistics(RelOptInfo *rel, Relation relation) exprs = (List *) stringToNode(exprsString); pfree(exprsString); + /* + * Modify the copies we obtain from the relcache to have the + * correct varno for the parent relation, so that they match + * up correctly against qual clauses. + * + * This must be done before const-simplification because + * eval_const_expressions reduces NullTest for Vars based on + * varno. + */ + if (varno != 1) + ChangeVarNodes((Node *) exprs, 1, varno, 0); + /* * Run the expressions through eval_const_expressions. This is * not just an optimization, but is necessary, because the @@ -1636,18 +1654,10 @@ get_relation_statistics(RelOptInfo *rel, Relation relation) * We must not use canonicalize_qual, however, since these * aren't qual expressions. */ - exprs = (List *) eval_const_expressions(NULL, (Node *) exprs); + exprs = (List *) eval_const_expressions(root, (Node *) exprs); /* May as well fix opfuncids too */ fix_opfuncids((Node *) exprs); - - /* - * Modify the copies we obtain from the relcache to have the - * correct varno for the parent relation, so that they match - * up correctly against qual clauses. - */ - if (varno != 1) - ChangeVarNodes((Node *) exprs, 1, varno, 0); } } @@ -2388,6 +2398,60 @@ has_row_triggers(PlannerInfo *root, Index rti, CmdType event) return result; } +/* + * has_transition_tables + * + * Detect whether the specified relation has any transition tables for event. 
+ */ +bool +has_transition_tables(PlannerInfo *root, Index rti, CmdType event) +{ + RangeTblEntry *rte = planner_rt_fetch(rti, root); + Relation relation; + TriggerDesc *trigDesc; + bool result = false; + + Assert(rte->rtekind == RTE_RELATION); + + /* Currently foreign tables cannot have transition tables */ + if (rte->relkind == RELKIND_FOREIGN_TABLE) + return result; + + /* Assume we already have adequate lock */ + relation = table_open(rte->relid, NoLock); + + trigDesc = relation->trigdesc; + switch (event) + { + case CMD_INSERT: + if (trigDesc && + trigDesc->trig_insert_new_table) + result = true; + break; + case CMD_UPDATE: + if (trigDesc && + (trigDesc->trig_update_old_table || + trigDesc->trig_update_new_table)) + result = true; + break; + case CMD_DELETE: + if (trigDesc && + trigDesc->trig_delete_old_table) + result = true; + break; + /* There is no separate event for MERGE, only INSERT/UPDATE/DELETE */ + case CMD_MERGE: + result = false; + break; + default: + elog(ERROR, "unrecognized CmdType: %d", (int) event); + break; + } + + table_close(relation, NoLock); + return result; +} + /* * has_stored_generated_columns * diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c index ff507331a061a..0e523d2eb5b44 100644 --- a/src/backend/optimizer/util/relnode.c +++ b/src/backend/optimizer/util/relnode.c @@ -217,7 +217,6 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent) rel->partial_pathlist = NIL; rel->cheapest_startup_path = NULL; rel->cheapest_total_path = NULL; - rel->cheapest_unique_path = NULL; rel->cheapest_parameterized_paths = NIL; rel->relid = relid; rel->rtekind = rte->rtekind; @@ -269,6 +268,9 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent) rel->fdw_private = NULL; rel->unique_for_rels = NIL; rel->non_unique_for_rels = NIL; + rel->unique_rel = NULL; + rel->unique_pathkeys = NIL; + rel->unique_groupclause = NIL; rel->baserestrictinfo = NIL; rel->baserestrictcost.startup = 0; rel->baserestrictcost.per_tuple = 0; @@ -713,7 +715,6 @@ build_join_rel(PlannerInfo *root, joinrel->partial_pathlist = NIL; joinrel->cheapest_startup_path = NULL; joinrel->cheapest_total_path = NULL; - joinrel->cheapest_unique_path = NULL; joinrel->cheapest_parameterized_paths = NIL; /* init direct_lateral_relids from children; we'll finish it up below */ joinrel->direct_lateral_relids = @@ -748,6 +749,9 @@ build_join_rel(PlannerInfo *root, joinrel->fdw_private = NULL; joinrel->unique_for_rels = NIL; joinrel->non_unique_for_rels = NIL; + joinrel->unique_rel = NULL; + joinrel->unique_pathkeys = NIL; + joinrel->unique_groupclause = NIL; joinrel->baserestrictinfo = NIL; joinrel->baserestrictcost.startup = 0; joinrel->baserestrictcost.per_tuple = 0; @@ -906,7 +910,6 @@ build_child_join_rel(PlannerInfo *root, RelOptInfo *outer_rel, joinrel->partial_pathlist = NIL; joinrel->cheapest_startup_path = NULL; joinrel->cheapest_total_path = NULL; - joinrel->cheapest_unique_path = NULL; joinrel->cheapest_parameterized_paths = NIL; joinrel->direct_lateral_relids = NULL; joinrel->lateral_relids = NULL; @@ -933,6 +936,9 @@ build_child_join_rel(PlannerInfo *root, RelOptInfo *outer_rel, joinrel->useridiscurrent = false; joinrel->fdwroutine = NULL; joinrel->fdw_private = NULL; + joinrel->unique_rel = NULL; + joinrel->unique_pathkeys = NIL; + joinrel->unique_groupclause = NIL; joinrel->baserestrictinfo = NIL; joinrel->baserestrictcost.startup = 0; joinrel->baserestrictcost.per_tuple = 0; @@ -1488,7 +1494,6 @@ fetch_upper_rel(PlannerInfo *root, 
UpperRelationKind kind, Relids relids) upperrel->pathlist = NIL; upperrel->cheapest_startup_path = NULL; upperrel->cheapest_total_path = NULL; - upperrel->cheapest_unique_path = NULL; upperrel->cheapest_parameterized_paths = NIL; root->upper_rels[kind] = lappend(root->upper_rels[kind], upperrel); diff --git a/src/backend/parser/README b/src/backend/parser/README index e0c986a41efea..e26eb437a9f35 100644 --- a/src/backend/parser/README +++ b/src/backend/parser/README @@ -20,6 +20,7 @@ parse_cte.c handle Common Table Expressions (WITH clauses) parse_expr.c handle expressions like col, col + 3, x = 3 or x = 4 parse_enr.c handle ephemeral named rels (trigger transition tables, ...) parse_func.c handle functions, table.column and column identifiers +parse_jsontable.c handle JSON_TABLE parse_merge.c handle MERGE parse_node.c create nodes for various structures parse_oper.c handle operators in expressions diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c index 34f7c17f576ef..b9763ea17144c 100644 --- a/src/backend/parser/analyze.c +++ b/src/backend/parser/analyze.c @@ -777,7 +777,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt) */ nsitem = addRangeTableEntryForSubquery(pstate, selectQuery, - makeAlias("*SELECT*", NIL), + NULL, false, false); addNSItemToQuery(pstate, nsitem, true, false, false); @@ -2100,7 +2100,6 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt, { /* Process leaf SELECT */ Query *selectQuery; - char selectName[32]; ParseNamespaceItem *nsitem; RangeTblRef *rtr; ListCell *tl; @@ -2156,11 +2155,9 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt, /* * Make the leaf query be a subquery in the top-level rangetable. */ - snprintf(selectName, sizeof(selectName), "*SELECT* %d", - list_length(pstate->p_rtable) + 1); nsitem = addRangeTableEntryForSubquery(pstate, selectQuery, - makeAlias(selectName, NIL), + NULL, false, false); diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y index db43034b9db57..9fd48acb1f8e7 100644 --- a/src/backend/parser/gram.y +++ b/src/backend/parser/gram.y @@ -3442,6 +3442,7 @@ CopyStmt: COPY opt_binary qualified_name opt_column_list ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("WHERE clause not allowed with COPY TO"), + errhint("Try the COPY (SELECT ... WHERE ...) 
TO variant."), parser_errposition(@11))); n->options = NIL; diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c index d66276801c67b..e1979a80c198a 100644 --- a/src/backend/parser/parse_expr.c +++ b/src/backend/parser/parse_expr.c @@ -17,7 +17,6 @@ #include "catalog/pg_aggregate.h" #include "catalog/pg_type.h" -#include "commands/dbcommands.h" #include "miscadmin.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" diff --git a/src/backend/parser/parse_node.c b/src/backend/parser/parse_node.c index d6feb16aef375..203b7a321782d 100644 --- a/src/backend/parser/parse_node.c +++ b/src/backend/parser/parse_node.c @@ -408,7 +408,7 @@ make_const(ParseState *pstate, A_Const *aconst) typeid = INT8OID; typelen = sizeof(int64); - typebyval = FLOAT8PASSBYVAL; /* int8 and float8 alike */ + typebyval = true; } } else diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c index 4aba0d9d4d5cc..905c975d83b56 100644 --- a/src/backend/parser/parse_target.c +++ b/src/backend/parser/parse_target.c @@ -16,7 +16,6 @@ #include "catalog/namespace.h" #include "catalog/pg_type.h" -#include "commands/dbcommands.h" #include "funcapi.h" #include "miscadmin.h" #include "nodes/makefuncs.h" diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c index afcf54169c3b3..e96b38a59d503 100644 --- a/src/backend/parser/parse_utilcmd.c +++ b/src/backend/parser/parse_utilcmd.c @@ -1461,7 +1461,6 @@ expandTableLikeClause(RangeVar *heapRel, TableLikeClause *table_like_clause) char *ccname = constr->check[ccnum].ccname; char *ccbin = constr->check[ccnum].ccbin; bool ccenforced = constr->check[ccnum].ccenforced; - bool ccvalid = constr->check[ccnum].ccvalid; bool ccnoinherit = constr->check[ccnum].ccnoinherit; Node *ccbin_node; bool found_whole_row; @@ -1492,7 +1491,7 @@ expandTableLikeClause(RangeVar *heapRel, TableLikeClause *table_like_clause) n->conname = pstrdup(ccname); n->location = -1; n->is_enforced = ccenforced; - n->initially_valid = ccvalid; + n->initially_valid = ccenforced; /* sic */ n->is_no_inherit = ccnoinherit; n->raw_expr = NULL; n->cooked_expr = nodeToString(ccbin_node); diff --git a/src/backend/partitioning/partbounds.c b/src/backend/partitioning/partbounds.c index 4bdc2941efb21..822cf4ec451a4 100644 --- a/src/backend/partitioning/partbounds.c +++ b/src/backend/partitioning/partbounds.c @@ -1007,9 +1007,6 @@ partition_bounds_copy(PartitionBoundInfo src, int ndatums; int nindexes; int partnatts; - bool hash_part; - int natts; - Datum *boundDatums; dest = (PartitionBoundInfo) palloc(sizeof(PartitionBoundInfoData)); @@ -1023,7 +1020,7 @@ partition_bounds_copy(PartitionBoundInfo src, dest->datums = (Datum **) palloc(sizeof(Datum *) * ndatums); - if (src->kind != NULL) + if (src->kind != NULL && ndatums > 0) { PartitionRangeDatumKind *boundKinds; @@ -1058,36 +1055,40 @@ partition_bounds_copy(PartitionBoundInfo src, * For hash partitioning, datums array will have two elements - modulus * and remainder. */ - hash_part = (key->strategy == PARTITION_STRATEGY_HASH); - natts = hash_part ? 2 : partnatts; - boundDatums = palloc(ndatums * natts * sizeof(Datum)); - - for (i = 0; i < ndatums; i++) + if (ndatums > 0) { - int j; - - dest->datums[i] = &boundDatums[i * natts]; + bool hash_part = (key->strategy == PARTITION_STRATEGY_HASH); + int natts = hash_part ? 
2 : partnatts; + Datum *boundDatums = palloc(ndatums * natts * sizeof(Datum)); - for (j = 0; j < natts; j++) + for (i = 0; i < ndatums; i++) { - bool byval; - int typlen; + int j; - if (hash_part) - { - typlen = sizeof(int32); /* Always int4 */ - byval = true; /* int4 is pass-by-value */ - } - else + dest->datums[i] = &boundDatums[i * natts]; + + for (j = 0; j < natts; j++) { - byval = key->parttypbyval[j]; - typlen = key->parttyplen[j]; - } + if (dest->kind == NULL || + dest->kind[i][j] == PARTITION_RANGE_DATUM_VALUE) + { + bool byval; + int typlen; - if (dest->kind == NULL || - dest->kind[i][j] == PARTITION_RANGE_DATUM_VALUE) - dest->datums[i][j] = datumCopy(src->datums[i][j], - byval, typlen); + if (hash_part) + { + typlen = sizeof(int32); /* Always int4 */ + byval = true; /* int4 is pass-by-value */ + } + else + { + byval = key->parttypbyval[j]; + typlen = key->parttyplen[j]; + } + dest->datums[i][j] = datumCopy(src->datums[i][j], + byval, typlen); + } + } } } diff --git a/src/backend/port/sysv_sema.c b/src/backend/port/sysv_sema.c index 423b2b4f9d6d1..6ac83ea1a821a 100644 --- a/src/backend/port/sysv_sema.c +++ b/src/backend/port/sysv_sema.c @@ -69,7 +69,7 @@ static int nextSemaNumber; /* next free sem num in last sema set */ static IpcSemaphoreId InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey, - int numSems); + int numSems, bool retry_ok); static void IpcSemaphoreInitialize(IpcSemaphoreId semId, int semNum, int value); static void IpcSemaphoreKill(IpcSemaphoreId semId); @@ -88,9 +88,13 @@ static void ReleaseSemaphores(int status, Datum arg); * If we fail with a failure code other than collision-with-existing-set, * print out an error and abort. Other types of errors suggest nonrecoverable * problems. + * + * Unfortunately, it's sometimes hard to tell whether errors are + * nonrecoverable. Our caller keeps track of whether continuing to retry + * is sane or not; if not, we abort on failure regardless of the errno. */ static IpcSemaphoreId -InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey, int numSems) +InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey, int numSems, bool retry_ok) { int semId; @@ -101,16 +105,27 @@ InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey, int numSems) int saved_errno = errno; /* - * Fail quietly if error indicates a collision with existing set. One - * would expect EEXIST, given that we said IPC_EXCL, but perhaps we - * could get a permission violation instead? Also, EIDRM might occur - * if an old set is slated for destruction but not gone yet. + * Fail quietly if error suggests a collision with an existing set and + * our caller has not lost patience. + * + * One would expect EEXIST, given that we said IPC_EXCL, but perhaps + * we could get a permission violation instead. On some platforms + * EINVAL will be reported if the existing set has too few semaphores. + * Also, EIDRM might occur if an old set is slated for destruction but + * not gone yet. + * + * EINVAL is the key reason why we need the caller-level loop limit, + * as it can also mean that the platform's SEMMSL is less than + * numSems, and that condition can't be fixed by trying another key. 
*/ - if (saved_errno == EEXIST || saved_errno == EACCES + if (retry_ok && + (saved_errno == EEXIST + || saved_errno == EACCES + || saved_errno == EINVAL #ifdef EIDRM - || saved_errno == EIDRM + || saved_errno == EIDRM #endif - ) + )) return -1; /* @@ -207,17 +222,22 @@ IpcSemaphoreGetLastPID(IpcSemaphoreId semId, int semNum) static IpcSemaphoreId IpcSemaphoreCreate(int numSems) { + int num_tries = 0; IpcSemaphoreId semId; union semun semun; PGSemaphoreData mysema; /* Loop till we find a free IPC key */ - for (nextSemaKey++;; nextSemaKey++) + for (nextSemaKey++;; nextSemaKey++, num_tries++) { pid_t creatorPID; - /* Try to create new semaphore set */ - semId = InternalIpcSemaphoreCreate(nextSemaKey, numSems + 1); + /* + * Try to create new semaphore set. Give up after trying 1000 + * distinct IPC keys. + */ + semId = InternalIpcSemaphoreCreate(nextSemaKey, numSems + 1, + num_tries < 1000); if (semId >= 0) break; /* successful create */ @@ -254,7 +274,7 @@ IpcSemaphoreCreate(int numSems) /* * Now try again to create the sema set. */ - semId = InternalIpcSemaphoreCreate(nextSemaKey, numSems + 1); + semId = InternalIpcSemaphoreCreate(nextSemaKey, numSems + 1, true); if (semId >= 0) break; /* successful create */ diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index 9474095f271a1..dce4c8c45b9b6 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -77,7 +77,6 @@ #include "catalog/namespace.h" #include "catalog/pg_database.h" #include "catalog/pg_namespace.h" -#include "commands/dbcommands.h" #include "commands/vacuum.h" #include "common/int.h" #include "lib/ilist.h" @@ -310,6 +309,16 @@ static AutoVacuumShmemStruct *AutoVacuumShmem; static dlist_head DatabaseList = DLIST_STATIC_INIT(DatabaseList); static MemoryContext DatabaseListCxt = NULL; +/* + * Dummy pointer to persuade Valgrind that we've not leaked the array of + * avl_dbase structs. Make it global to ensure the compiler doesn't + * optimize it away. + */ +#ifdef USE_VALGRIND +extern avl_dbase *avl_dbase_array; +avl_dbase *avl_dbase_array; +#endif + /* Pointer to my own WorkerInfo, valid on each worker */ static WorkerInfo MyWorkerInfo = NULL; @@ -562,10 +571,10 @@ AutoVacLauncherMain(const void *startup_data, size_t startup_data_len) /* * Create the initial database list. The invariant we want this list to - * keep is that it's ordered by decreasing next_time. As soon as an entry - * is updated to a higher time, it will be moved to the front (which is - * correct because the only operation is to add autovacuum_naptime to the - * entry, and time always increases). + * keep is that it's ordered by decreasing next_worker. As soon as an + * entry is updated to a higher time, it will be moved to the front (which + * is correct because the only operation is to add autovacuum_naptime to + * the entry, and time always increases). */ rebuild_database_list(InvalidOid); @@ -1020,6 +1029,10 @@ rebuild_database_list(Oid newdb) /* put all the hash elements into an array */ dbary = palloc(nelems * sizeof(avl_dbase)); + /* keep Valgrind quiet */ +#ifdef USE_VALGRIND + avl_dbase_array = dbary; +#endif i = 0; hash_seq_init(&seq, dbhash); @@ -2565,8 +2578,18 @@ do_autovacuum(void) /* * We leak table_toast_map here (among other things), but since we're - * going away soon, it's not a problem. + * going away soon, it's not a problem normally. But when using Valgrind, + * release some stuff to reduce complaints about leaked storage. 
*/ +#ifdef USE_VALGRIND + hash_destroy(table_toast_map); + FreeTupleDesc(pg_class_desc); + if (bstrategy) + pfree(bstrategy); +#endif + + /* Run the rest in xact context, mainly to avoid Valgrind leak warnings */ + MemoryContextSwitchTo(TopTransactionContext); /* * Update pg_database.datfrozenxid, and truncate pg_xact if possible. We diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c index 8490148a47d52..e84e8663e966b 100644 --- a/src/backend/postmaster/checkpointer.c +++ b/src/backend/postmaster/checkpointer.c @@ -953,11 +953,14 @@ CheckpointerShmemSize(void) Size size; /* - * Currently, the size of the requests[] array is arbitrarily set equal to - * NBuffers. This may prove too large or small ... + * The size of the requests[] array is arbitrarily set equal to NBuffers. + * But there is a cap of MAX_CHECKPOINT_REQUESTS to prevent accumulating + * too many checkpoint requests in the ring buffer. */ size = offsetof(CheckpointerShmemStruct, requests); - size = add_size(size, mul_size(NBuffers, sizeof(CheckpointerRequest))); + size = add_size(size, mul_size(Min(NBuffers, + MAX_CHECKPOINT_REQUESTS), + sizeof(CheckpointerRequest))); return size; } diff --git a/src/backend/postmaster/launch_backend.c b/src/backend/postmaster/launch_backend.c index bf6b55ee83048..a38979c50e4bb 100644 --- a/src/backend/postmaster/launch_backend.c +++ b/src/backend/postmaster/launch_backend.c @@ -101,7 +101,8 @@ typedef struct struct InjectionPointsCtl *ActiveInjectionPoints; #endif int NamedLWLockTrancheRequests; - NamedLWLockTranche *NamedLWLockTrancheArray; + char **LWLockTrancheNames; + int *LWLockCounter; LWLockPadded *MainLWLockArray; slock_t *ProcStructLock; PROC_HDR *ProcGlobal; @@ -760,7 +761,8 @@ save_backend_variables(BackendParameters *param, #endif param->NamedLWLockTrancheRequests = NamedLWLockTrancheRequests; - param->NamedLWLockTrancheArray = NamedLWLockTrancheArray; + param->LWLockTrancheNames = LWLockTrancheNames; + param->LWLockCounter = LWLockCounter; param->MainLWLockArray = MainLWLockArray; param->ProcStructLock = ProcStructLock; param->ProcGlobal = ProcGlobal; @@ -1020,7 +1022,8 @@ restore_backend_variables(BackendParameters *param) #endif NamedLWLockTrancheRequests = param->NamedLWLockTrancheRequests; - NamedLWLockTrancheArray = param->NamedLWLockTrancheArray; + LWLockTrancheNames = param->LWLockTrancheNames; + LWLockCounter = param->LWLockCounter; MainLWLockArray = param->MainLWLockArray; ProcStructLock = param->ProcStructLock; ProcGlobal = param->ProcGlobal; diff --git a/src/backend/postmaster/pmchild.c b/src/backend/postmaster/pmchild.c index cde1d23a4ca8b..584bb58c8abaf 100644 --- a/src/backend/postmaster/pmchild.c +++ b/src/backend/postmaster/pmchild.c @@ -59,6 +59,17 @@ NON_EXEC_STATIC int num_pmchild_slots = 0; */ dlist_head ActiveChildList; +/* + * Dummy pointer to persuade Valgrind that we've not leaked the array of + * PMChild structs. Make it global to ensure the compiler doesn't + * optimize it away. 
+ */ +#ifdef USE_VALGRIND +extern PMChild *pmchild_array; +PMChild *pmchild_array; +#endif + + /* * MaxLivePostmasterChildren * @@ -125,8 +136,13 @@ InitPostmasterChildSlots(void) for (int i = 0; i < BACKEND_NUM_TYPES; i++) num_pmchild_slots += pmchild_pools[i].size; - /* Initialize them */ + /* Allocate enough slots, and make sure Valgrind doesn't complain */ slots = palloc(num_pmchild_slots * sizeof(PMChild)); +#ifdef USE_VALGRIND + pmchild_array = slots; +#endif + + /* Initialize them */ slotno = 0; for (int btype = 0; btype < BACKEND_NUM_TYPES; btype++) { diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index e01d9f0cfe81e..e1d643b013d77 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -854,6 +854,9 @@ PostmasterMain(int argc, char *argv[]) if (summarize_wal && wal_level == WAL_LEVEL_MINIMAL) ereport(ERROR, (errmsg("WAL cannot be summarized when \"wal_level\" is \"minimal\""))); + if (sync_replication_slots && wal_level < WAL_LEVEL_LOGICAL) + ereport(ERROR, + (errmsg("replication slot synchronization (\"sync_replication_slots\" = on) requires \"wal_level\" >= \"logical\""))); /* * Other one-time internal sanity checks can go here, if they are fast. diff --git a/src/backend/postmaster/walsummarizer.c b/src/backend/postmaster/walsummarizer.c index 777c9a8d5553b..e1f142f20c7a0 100644 --- a/src/backend/postmaster/walsummarizer.c +++ b/src/backend/postmaster/walsummarizer.c @@ -644,7 +644,7 @@ WakeupWalSummarizer(void) if (WalSummarizerCtl == NULL) return; - LWLockAcquire(WALSummarizerLock, LW_EXCLUSIVE); + LWLockAcquire(WALSummarizerLock, LW_SHARED); pgprocno = WalSummarizerCtl->summarizer_pgprocno; LWLockRelease(WALSummarizerLock); @@ -685,7 +685,7 @@ WaitForWalSummarization(XLogRecPtr lsn) /* * If the LSN summarized on disk has reached the target value, stop. */ - LWLockAcquire(WALSummarizerLock, LW_EXCLUSIVE); + LWLockAcquire(WALSummarizerLock, LW_SHARED); summarized_lsn = WalSummarizerCtl->summarized_lsn; pending_lsn = WalSummarizerCtl->pending_lsn; LWLockRelease(WALSummarizerLock); diff --git a/src/backend/replication/logical/applyparallelworker.c b/src/backend/replication/logical/applyparallelworker.c index 1fa931a74229d..31a92d1a24abd 100644 --- a/src/backend/replication/logical/applyparallelworker.c +++ b/src/backend/replication/logical/applyparallelworker.c @@ -778,10 +778,10 @@ LogicalParallelApplyLoop(shm_mq_handle *mqh) /* * The first byte of messages sent from leader apply worker to - * parallel apply workers can only be 'w'. + * parallel apply workers can only be PqReplMsg_WALData. */ c = pq_getmsgbyte(&s); - if (c != 'w') + if (c != PqReplMsg_WALData) elog(ERROR, "unexpected message \"%c\"", c); /* @@ -1007,7 +1007,7 @@ ProcessParallelApplyMessage(StringInfo msg) switch (msgtype) { - case 'E': /* ErrorResponse */ + case PqMsg_ErrorResponse: { ErrorData edata; @@ -1044,11 +1044,11 @@ ProcessParallelApplyMessage(StringInfo msg) /* * Don't need to do anything about NoticeResponse and - * NotifyResponse as the logical replication worker doesn't need - * to send messages to the client. + * NotificationResponse as the logical replication worker doesn't + * need to send messages to the client. 
*/ - case 'N': - case 'A': + case PqMsg_NoticeResponse: + case PqMsg_NotificationResponse: break; default: diff --git a/src/backend/replication/logical/conflict.c b/src/backend/replication/logical/conflict.c index 97c4e26b58654..166955922650f 100644 --- a/src/backend/replication/logical/conflict.c +++ b/src/backend/replication/logical/conflict.c @@ -29,6 +29,7 @@ static const char *const ConflictTypeNames[] = { [CT_UPDATE_EXISTS] = "update_exists", [CT_UPDATE_MISSING] = "update_missing", [CT_DELETE_ORIGIN_DIFFERS] = "delete_origin_differs", + [CT_UPDATE_DELETED] = "update_deleted", [CT_DELETE_MISSING] = "delete_missing", [CT_MULTIPLE_UNIQUE_CONFLICTS] = "multiple_unique_conflicts" }; @@ -54,7 +55,7 @@ static char *build_index_value_desc(EState *estate, Relation localrel, /* * Get the xmin and commit timestamp data (origin and timestamp) associated - * with the provided local tuple. + * with the provided local row. * * Return true if the commit timestamp data was found, false otherwise. */ @@ -88,12 +89,12 @@ GetTupleTransactionInfo(TupleTableSlot *localslot, TransactionId *xmin, * This function is used to report a conflict while applying replication * changes. * - * 'searchslot' should contain the tuple used to search the local tuple to be + * 'searchslot' should contain the tuple used to search the local row to be * updated or deleted. * * 'remoteslot' should contain the remote new tuple, if any. * - * conflicttuples is a list of local tuples that caused the conflict and the + * conflicttuples is a list of local rows that caused the conflict and the * conflict related information. See ConflictTupleInfo. * * The caller must ensure that all the indexes passed in ConflictTupleInfo are @@ -176,6 +177,7 @@ errcode_apply_conflict(ConflictType type) case CT_UPDATE_ORIGIN_DIFFERS: case CT_UPDATE_MISSING: case CT_DELETE_ORIGIN_DIFFERS: + case CT_UPDATE_DELETED: case CT_DELETE_MISSING: return errcode(ERRCODE_T_R_SERIALIZATION_FAILURE); } @@ -189,9 +191,9 @@ errcode_apply_conflict(ConflictType type) * * The DETAIL line comprises of two parts: * 1. Explanation of the conflict type, including the origin and commit - * timestamp of the existing local tuple. - * 2. Display of conflicting key, existing local tuple, remote new tuple, and - * replica identity columns, if any. The remote old tuple is excluded as its + * timestamp of the existing local row. + * 2. Display of conflicting key, existing local row, remote new row, and + * replica identity columns, if any. The remote old row is excluded as its * information is covered in the replica identity columns. */ static void @@ -261,6 +263,26 @@ errdetail_apply_conflict(EState *estate, ResultRelInfo *relinfo, break; + case CT_UPDATE_DELETED: + if (localts) + { + if (localorigin == InvalidRepOriginId) + appendStringInfo(&err_detail, _("The row to be updated was deleted locally in transaction %u at %s."), + localxmin, timestamptz_to_str(localts)); + else if (replorigin_by_oid(localorigin, true, &origin_name)) + appendStringInfo(&err_detail, _("The row to be updated was deleted by a different origin \"%s\" in transaction %u at %s."), + origin_name, localxmin, timestamptz_to_str(localts)); + + /* The origin that modified this row has been removed. 
*/ + else + appendStringInfo(&err_detail, _("The row to be updated was deleted by a non-existent origin in transaction %u at %s."), + localxmin, timestamptz_to_str(localts)); + } + else + appendStringInfo(&err_detail, _("The row to be updated was deleted.")); + + break; + case CT_UPDATE_MISSING: appendStringInfoString(&err_detail, _("Could not find the row to be updated.")); break; @@ -291,7 +313,7 @@ errdetail_apply_conflict(EState *estate, ResultRelInfo *relinfo, localslot, remoteslot, indexoid); /* - * Next, append the key values, existing local tuple, remote tuple and + * Next, append the key values, existing local row, remote row, and * replica identity columns after the message. */ if (val_desc) @@ -309,7 +331,7 @@ errdetail_apply_conflict(EState *estate, ResultRelInfo *relinfo, /* * Helper function to build the additional details for conflicting key, - * existing local tuple, remote tuple, and replica identity columns. + * existing local row, remote row, and replica identity columns. * * If the return value is NULL, it indicates that the current user lacks * permissions to view the columns involved. @@ -351,7 +373,7 @@ build_tuple_value_details(EState *estate, ResultRelInfo *relinfo, { /* * The 'modifiedCols' only applies to the new tuple, hence we pass - * NULL for the existing local tuple. + * NULL for the existing local row. */ desc = ExecBuildSlotValueDescription(relid, localslot, tupdesc, NULL, 64); @@ -361,12 +383,12 @@ build_tuple_value_details(EState *estate, ResultRelInfo *relinfo, if (tuple_value.len > 0) { appendStringInfoString(&tuple_value, "; "); - appendStringInfo(&tuple_value, _("existing local tuple %s"), + appendStringInfo(&tuple_value, _("existing local row %s"), desc); } else { - appendStringInfo(&tuple_value, _("Existing local tuple %s"), + appendStringInfo(&tuple_value, _("Existing local row %s"), desc); } } @@ -393,11 +415,11 @@ build_tuple_value_details(EState *estate, ResultRelInfo *relinfo, if (tuple_value.len > 0) { appendStringInfoString(&tuple_value, "; "); - appendStringInfo(&tuple_value, _("remote tuple %s"), desc); + appendStringInfo(&tuple_value, _("remote row %s"), desc); } else { - appendStringInfo(&tuple_value, _("Remote tuple %s"), desc); + appendStringInfo(&tuple_value, _("Remote row %s"), desc); } } } diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c index 742d9ba68e900..add2e2e066c38 100644 --- a/src/backend/replication/logical/launcher.c +++ b/src/backend/replication/logical/launcher.c @@ -43,6 +43,7 @@ #include "utils/memutils.h" #include "utils/pg_lsn.h" #include "utils/snapmgr.h" +#include "utils/syscache.h" /* max sleep time between cycles (3min) */ #define DEFAULT_NAPTIME_PER_CYCLE 180000L @@ -102,7 +103,8 @@ static void ApplyLauncherSetWorkerStartTime(Oid subid, TimestampTz start_time); static TimestampTz ApplyLauncherGetWorkerStartTime(Oid subid); static void compute_min_nonremovable_xid(LogicalRepWorker *worker, TransactionId *xmin); static bool acquire_conflict_slot_if_exists(void); -static void advance_conflict_slot_xmin(TransactionId new_xmin); +static void update_conflict_slot_xmin(TransactionId new_xmin); +static void init_conflict_slot_xmin(void); /* @@ -152,6 +154,7 @@ get_subscription_list(void) sub->enabled = subform->subenabled; sub->name = pstrdup(NameStr(subform->subname)); sub->retaindeadtuples = subform->subretaindeadtuples; + sub->retentionactive = subform->subretentionactive; /* We don't fill fields we are not interested in. 
 */
 		res = lappend(res, sub);
 	}

@@ -790,6 +793,8 @@ logicalrep_worker_detach(void)
 	}

 	LWLockRelease(LogicalRepWorkerLock);
+
+	list_free(workers);
 }

 /* Block concurrent access. */
@@ -1179,7 +1184,7 @@ ApplyLauncherMain(Datum main_arg)
 		MemoryContext subctx;
 		MemoryContext oldctx;
 		long		wait_time = DEFAULT_NAPTIME_PER_CYCLE;
-		bool		can_advance_xmin = true;
+		bool		can_update_xmin = true;
 		bool		retain_dead_tuples = false;
 		TransactionId xmin = InvalidTransactionId;
@@ -1212,17 +1217,6 @@ ApplyLauncherMain(Datum main_arg)
 			{
 				retain_dead_tuples = true;

-				/*
-				 * Can't advance xmin of the slot unless all the subscriptions
-				 * with retain_dead_tuples are enabled. This is required to
-				 * ensure that we don't advance the xmin of
-				 * CONFLICT_DETECTION_SLOT if one of the subscriptions is not
-				 * enabled. Otherwise, we won't be able to detect conflicts
-				 * reliably for such a subscription even though it has set the
-				 * retain_dead_tuples option.
-				 */
-				can_advance_xmin &= sub->enabled;
-
 				/*
 				 * Create a replication slot to retain information necessary
 				 * for conflict detection such as dead tuples, commit
@@ -1238,6 +1232,28 @@ ApplyLauncherMain(Datum main_arg)
 				 * subscription was enabled.
 				 */
 				CreateConflictDetectionSlot();
+
+				if (sub->retentionactive)
+				{
+					/*
+					 * Can't advance xmin of the slot unless all the
+					 * subscriptions actively retaining dead tuples are
+					 * enabled. This is required to ensure that we don't
+					 * advance the xmin of CONFLICT_DETECTION_SLOT if one of
+					 * the subscriptions is not enabled. Otherwise, we won't
+					 * be able to detect conflicts reliably for such a
+					 * subscription even though it has set the
+					 * retain_dead_tuples option.
+					 */
+					can_update_xmin &= sub->enabled;
+
+					/*
+					 * Initialize the slot once the subscription activates
+					 * retention.
+					 */
+					if (!TransactionIdIsValid(MyReplicationSlot->data.xmin))
+						init_conflict_slot_xmin();
+				}
 			}

 			if (!sub->enabled)
@@ -1252,9 +1268,11 @@ ApplyLauncherMain(Datum main_arg)
 			/*
 			 * Compute the minimum xmin required to protect dead tuples
 			 * required for conflict detection among all running apply
-			 * workers that enables retain_dead_tuples.
+			 * workers.
 			 */
-			if (sub->retaindeadtuples && can_advance_xmin)
+			if (sub->retaindeadtuples &&
+				sub->retentionactive &&
+				can_update_xmin)
 				compute_min_nonremovable_xid(w, &xmin);

 			/* worker is running already */
@@ -1263,12 +1281,12 @@ ApplyLauncherMain(Datum main_arg)

 			/*
 			 * Can't advance xmin of the slot unless all the workers
-			 * corresponding to subscriptions with retain_dead_tuples are
-			 * running, disabling the further computation of the minimum
+			 * corresponding to subscriptions actively retaining dead tuples
+			 * are running, disabling the further computation of the minimum
 			 * nonremovable xid.
 			 */
-			if (sub->retaindeadtuples)
-				can_advance_xmin = false;
+			if (sub->retaindeadtuples && sub->retentionactive)
+				can_update_xmin = false;

 			/*
 			 * If the worker is eligible to start now, launch it. Otherwise,
@@ -1293,7 +1311,8 @@ ApplyLauncherMain(Datum main_arg)
 									 sub->dbid, sub->oid, sub->name,
 									 sub->owner, InvalidOid,
 									 DSM_HANDLE_INVALID,
-									 sub->retaindeadtuples))
+									 sub->retaindeadtuples &&
+									 sub->retentionactive))
 			{
 				/*
 				 * We get here either if we failed to launch a worker
@@ -1318,13 +1337,18 @@ ApplyLauncherMain(Datum main_arg)
 		 * that requires us to retain dead tuples. Otherwise, if required,
 		 * advance the slot's xmin to protect dead tuples required for the
 		 * conflict detection.
+ * + * Additionally, if all apply workers for subscriptions with + * retain_dead_tuples enabled have requested to stop retention, the + * slot's xmin will be set to InvalidTransactionId, allowing the + * removal of dead tuples. */ if (MyReplicationSlot) { if (!retain_dead_tuples) ReplicationSlotDropAcquired(); - else if (can_advance_xmin) - advance_conflict_slot_xmin(xmin); + else if (can_update_xmin) + update_conflict_slot_xmin(xmin); } /* Switch back to original memory context. */ @@ -1376,7 +1400,15 @@ compute_min_nonremovable_xid(LogicalRepWorker *worker, TransactionId *xmin) nonremovable_xid = worker->oldest_nonremovable_xid; SpinLockRelease(&worker->relmutex); - Assert(TransactionIdIsValid(nonremovable_xid)); + /* + * Return if the apply worker has stopped retention concurrently. + * + * Although this function is invoked only when retentionactive is true, + * the apply worker might stop retention after the launcher fetches the + * retentionactive flag. + */ + if (!TransactionIdIsValid(nonremovable_xid)) + return; if (!TransactionIdIsValid(*xmin) || TransactionIdPrecedes(nonremovable_xid, *xmin)) @@ -1400,17 +1432,17 @@ acquire_conflict_slot_if_exists(void) } /* - * Advance the xmin the replication slot used to retain information required + * Update the xmin of the replication slot used to retain information required * for conflict detection. */ static void -advance_conflict_slot_xmin(TransactionId new_xmin) +update_conflict_slot_xmin(TransactionId new_xmin) { Assert(MyReplicationSlot); - Assert(TransactionIdIsValid(new_xmin)); - Assert(TransactionIdPrecedesOrEquals(MyReplicationSlot->data.xmin, new_xmin)); + Assert(!TransactionIdIsValid(new_xmin) || + TransactionIdPrecedesOrEquals(MyReplicationSlot->data.xmin, new_xmin)); - /* Return if the xmin value of the slot cannot be advanced */ + /* Return if the xmin value of the slot cannot be updated */ if (TransactionIdEquals(MyReplicationSlot->data.xmin, new_xmin)) return; @@ -1437,23 +1469,16 @@ advance_conflict_slot_xmin(TransactionId new_xmin) } /* - * Create and acquire the replication slot used to retain information for - * conflict detection, if not yet. + * Initialize the xmin for the conflict detection slot. */ -void -CreateConflictDetectionSlot(void) +static void +init_conflict_slot_xmin(void) { TransactionId xmin_horizon; - /* Exit early, if the replication slot is already created and acquired */ - if (MyReplicationSlot) - return; - - ereport(LOG, - errmsg("creating replication conflict detection slot")); - - ReplicationSlotCreate(CONFLICT_DETECTION_SLOT, false, RS_PERSISTENT, false, - false, false); + /* Replication slot must exist but shouldn't be initialized. */ + Assert(MyReplicationSlot && + !TransactionIdIsValid(MyReplicationSlot->data.xmin)); LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); @@ -1473,6 +1498,26 @@ CreateConflictDetectionSlot(void) ReplicationSlotSave(); } +/* + * Create and acquire the replication slot used to retain information for + * conflict detection, if not yet. + */ +void +CreateConflictDetectionSlot(void) +{ + /* Exit early, if the replication slot is already created and acquired */ + if (MyReplicationSlot) + return; + + ereport(LOG, + errmsg("creating replication conflict detection slot")); + + ReplicationSlotCreate(CONFLICT_DETECTION_SLOT, false, RS_PERSISTENT, false, + false, false); + + init_conflict_slot_xmin(); +} + /* * Is current process the logical replication launcher?
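
The launcher-side protocol above is compact but easy to misread: each actively retaining worker contributes its oldest_nonremovable_xid, workers that stopped retention concurrently contribute nothing, and an all-invalid result ends up clearing the slot's xmin. Below is a minimal standalone sketch of that computation; the mock types and the wraparound-unaware "<" comparison are illustrative only (the backend uses TransactionIdPrecedes() and shared-memory state), and it is not part of the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;
#define InvalidTransactionId	((TransactionId) 0)
#define TransactionIdIsValid(xid) ((xid) != InvalidTransactionId)

/* Mock worker state; the real fields live in shared memory. */
struct MockWorker
{
	bool		retention_active;
	TransactionId oldest_nonremovable_xid;	/* invalid once retention stops */
};

/*
 * Fold each retaining worker's xid into the minimum, skipping workers whose
 * xid is already invalid because they stopped retention concurrently.  An
 * invalid result models clearing the conflict slot's xmin.
 */
static TransactionId
compute_slot_xmin(const struct MockWorker *workers, int n)
{
	TransactionId xmin = InvalidTransactionId;

	for (int i = 0; i < n; i++)
	{
		TransactionId xid = workers[i].oldest_nonremovable_xid;

		if (!workers[i].retention_active || !TransactionIdIsValid(xid))
			continue;
		if (!TransactionIdIsValid(xmin) || xid < xmin)	/* no wraparound handling */
			xmin = xid;
	}
	return xmin;
}

int
main(void)
{
	struct MockWorker workers[] = {{true, 4123}, {true, 4100}, {false, 0}};

	printf("slot xmin -> %u\n", (unsigned) compute_slot_xmin(workers, 3));	/* 4100 */
	return 0;
}
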
*/ diff --git a/src/backend/replication/logical/proto.c b/src/backend/replication/logical/proto.c index 1a352b542dc56..1b3d9eb49dd70 100644 --- a/src/backend/replication/logical/proto.c +++ b/src/backend/replication/logical/proto.c @@ -809,7 +809,7 @@ logicalrep_write_tuple(StringInfo out, Relation rel, TupleTableSlot *slot, continue; } - if (att->attlen == -1 && VARATT_IS_EXTERNAL_ONDISK(values[i])) + if (att->attlen == -1 && VARATT_IS_EXTERNAL_ONDISK(DatumGetPointer(values[i]))) { /* * Unchanged toasted datum. (Note that we don't promise to detect diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 5febd154b6bae..34cf05668ae84 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -2599,7 +2599,7 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn, if (++changes_count >= CHANGES_THRESHOLD) { - rb->update_progress_txn(rb, txn, change->lsn); + rb->update_progress_txn(rb, txn, prev_lsn); changes_count = 0; } } diff --git a/src/backend/replication/logical/slotsync.c b/src/backend/replication/logical/slotsync.c index 2f0c08b8fbd33..9d0072a49ed6d 100644 --- a/src/backend/replication/logical/slotsync.c +++ b/src/backend/replication/logical/slotsync.c @@ -52,7 +52,6 @@ #include "access/xlog_internal.h" #include "access/xlogrecovery.h" #include "catalog/pg_database.h" -#include "commands/dbcommands.h" #include "libpq/pqsignal.h" #include "pgstat.h" #include "postmaster/interrupt.h" @@ -1059,14 +1058,14 @@ ValidateSlotSyncParams(int elevel) { /* * Logical slot sync/creation requires wal_level >= logical. - * - * Since altering the wal_level requires a server restart, so error out in - * this case regardless of elevel provided by caller. */ if (wal_level < WAL_LEVEL_LOGICAL) - ereport(ERROR, + { + ereport(elevel, errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("replication slot synchronization requires \"wal_level\" >= \"logical\"")); + return false; + } /* * A physical replication slot(primary_slot_name) is required on the @@ -1171,7 +1170,7 @@ slotsync_reread_config(void) * Interrupt handler for main loop of slot sync worker. */ static void -ProcessSlotSyncInterrupts(WalReceiverConn *wrconn) +ProcessSlotSyncInterrupts(void) { CHECK_FOR_INTERRUPTS(); @@ -1477,7 +1476,6 @@ ReplSlotSyncWorkerMain(const void *startup_data, size_t startup_data_len) */ wrconn = walrcv_connect(PrimaryConnInfo, false, false, false, app_name.data, &err); - pfree(app_name.data); if (!wrconn) ereport(ERROR, @@ -1485,6 +1483,8 @@ ReplSlotSyncWorkerMain(const void *startup_data, size_t startup_data_len) errmsg("synchronization worker \"%s\" could not connect to the primary server: %s", app_name.data, err)); + pfree(app_name.data); + /* * Register the disconnection callback. 
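
The ValidateSlotSyncParams() hunk above replaces an unconditional ERROR with a report at the caller-supplied elevel plus a false return, so callers running below ERROR can back off and retry. A self-contained sketch of that pattern, with a stand-in report() in place of ereport() and made-up level constants; in the backend, ereport(ERROR) throws rather than returning:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum mock_elevel {ELEVEL_WARNING, ELEVEL_ERROR};

static void
report(enum mock_elevel elevel, const char *msg)
{
	fprintf(stderr, "%s: %s\n",
			elevel == ELEVEL_ERROR ? "ERROR" : "WARNING", msg);
	if (elevel == ELEVEL_ERROR)
		exit(1);				/* models ereport(ERROR)'s non-local exit */
}

/*
 * Validate-at-elevel pattern: the check reports at the severity the caller
 * chose and returns false, so a WARNING-level caller can sleep and retry
 * instead of aborting.
 */
static bool
check_wal_level(int wal_level, int required_level, enum mock_elevel elevel)
{
	if (wal_level < required_level)
	{
		report(elevel, "replication slot synchronization requires \"wal_level\" >= \"logical\"");
		return false;
	}
	return true;
}

int
main(void)
{
	/* hypothetical numeric levels: 1 = replica, 2 = logical */
	if (!check_wal_level(1, 2, ELEVEL_WARNING))
		puts("validation failed; caller may retry after a config reload");
	return 0;
}
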
* @@ -1506,7 +1506,7 @@ ReplSlotSyncWorkerMain(const void *startup_data, size_t startup_data_len) { bool some_slot_updated = false; - ProcessSlotSyncInterrupts(wrconn); + ProcessSlotSyncInterrupts(); some_slot_updated = synchronize_slots(wrconn); diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c index 8532bfd27e53f..98ddee2092905 100644 --- a/src/backend/replication/logical/snapbuild.c +++ b/src/backend/replication/logical/snapbuild.c @@ -2061,7 +2061,7 @@ SnapBuildSnapshotExists(XLogRecPtr lsn) int ret; struct stat stat_buf; - sprintf(path, "%s/%08X-%08X.snap", + sprintf(path, "%s/%X-%X.snap", PG_LOGICAL_SNAPSHOTS_DIR, LSN_FORMAT_ARGS(lsn)); diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c index 3fea0a0206ed3..e6da4028d392e 100644 --- a/src/backend/replication/logical/tablesync.c +++ b/src/backend/replication/logical/tablesync.c @@ -316,7 +316,8 @@ process_syncing_tables_for_sync(XLogRecPtr current_lsn) UpdateSubscriptionRelState(MyLogicalRepWorker->subid, MyLogicalRepWorker->relid, MyLogicalRepWorker->relstate, - MyLogicalRepWorker->relstate_lsn); + MyLogicalRepWorker->relstate_lsn, + false); /* * End streaming so that LogRepWorkerWalRcvConn can be used to drop @@ -425,6 +426,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) ListCell *lc; bool started_tx = false; bool should_exit = false; + Relation rel = NULL; Assert(!IsTransactionState()); @@ -492,7 +494,17 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) * worker to remove the origin tracking as if there is any * error while dropping we won't restart it to drop the * origin. So passing missing_ok = true. + * + * Lock the subscription and origin in the same order as we + * are doing during DDL commands to avoid deadlocks. See + * AlterSubscription_refresh. */ + LockSharedObject(SubscriptionRelationId, MyLogicalRepWorker->subid, + 0, AccessShareLock); + + if (!rel) + rel = table_open(SubscriptionRelRelationId, RowExclusiveLock); + ReplicationOriginNameForLogicalRep(MyLogicalRepWorker->subid, rstate->relid, originname, @@ -504,7 +516,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) */ UpdateSubscriptionRelState(MyLogicalRepWorker->subid, rstate->relid, rstate->state, - rstate->lsn); + rstate->lsn, true); } } else @@ -555,7 +567,14 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) * This is required to avoid any undetected deadlocks * due to any existing lock as deadlock detector won't * be able to detect the waits on the latch. + * + * Also close any tables prior to the commit. 
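
The comment added in tablesync.c pins down a single lock-acquisition order (subscription object first, catalog relation second), matching AlterSubscription_refresh(). A toy pthread illustration of the principle, not backend code: as long as every path takes the two locks in the same order, no pair of backends can wait on each other in a cycle.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t subscription_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t catalog_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Every caller takes the subscription lock before the catalog lock; with
 * one global order a deadlock cycle cannot form.
 */
static void
with_both_locks(const char *who)
{
	pthread_mutex_lock(&subscription_lock);	/* always first */
	pthread_mutex_lock(&catalog_lock);		/* always second */
	printf("%s: acquired both locks in the agreed order\n", who);
	pthread_mutex_unlock(&catalog_lock);
	pthread_mutex_unlock(&subscription_lock);
}

int
main(void)
{
	with_both_locks("apply worker");
	with_both_locks("DDL backend");
	return 0;
}
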
*/ + if (rel) + { + table_close(rel, NoLock); + rel = NULL; + } CommitTransactionCommand(); pgstat_report_stat(false); } @@ -623,6 +642,11 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) } } + /* Close table if opened */ + if (rel) + table_close(rel, NoLock); + + if (started_tx) { /* @@ -1414,7 +1438,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) UpdateSubscriptionRelState(MyLogicalRepWorker->subid, MyLogicalRepWorker->relid, MyLogicalRepWorker->relstate, - MyLogicalRepWorker->relstate_lsn); + MyLogicalRepWorker->relstate_lsn, + false); CommitTransactionCommand(); pgstat_report_stat(true); @@ -1547,7 +1572,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) UpdateSubscriptionRelState(MyLogicalRepWorker->subid, MyLogicalRepWorker->relid, SUBREL_STATE_FINISHEDCOPY, - MyLogicalRepWorker->relstate_lsn); + MyLogicalRepWorker->relstate_lsn, + false); CommitTransactionCommand(); @@ -1762,6 +1788,32 @@ AllTablesyncsReady(void) return has_subrels && (table_states_not_ready == NIL); } +/* + * Return whether the subscription currently has any relations. + * + * Note: Unlike HasSubscriptionRelations(), this function relies on cached + * information for subscription relations. Additionally, it should not be + * invoked outside of apply or tablesync workers, as MySubscription must be + * initialized first. + */ +bool +HasSubscriptionRelationsCached(void) +{ + bool started_tx; + bool has_subrels; + + /* We need up-to-date subscription tables info here */ + has_subrels = FetchTableStates(&started_tx); + + if (started_tx) + { + CommitTransactionCommand(); + pgstat_report_stat(true); + } + + return has_subrels; +} + /* * Update the two_phase state of the specified subscription in pg_subscription. */ diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index b59221c4d0636..ee6ac22329fdc 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -138,9 +138,9 @@ * Each apply worker that enabled retain_dead_tuples option maintains a * non-removable transaction ID (oldest_nonremovable_xid) in shared memory to * prevent dead rows from being removed prematurely when the apply worker still - * needs them to detect conflicts reliably. This helps to retain the required - * commit_ts module information, which further helps to detect - * update_origin_differs and delete_origin_differs conflicts reliably, as + * needs them to detect update_deleted conflicts. Additionally, this helps to + * retain the required commit_ts module information, which further helps to + * detect update_origin_differs and delete_origin_differs conflicts reliably, as * otherwise, vacuum freeze could remove the required information. * * The logical replication launcher manages an internal replication slot named @@ -173,6 +173,14 @@ * Advance the non-removable transaction ID if the current flush location has * reached or surpassed the last received WAL position. * + * - RDT_STOP_CONFLICT_INFO_RETENTION: + * This phase is required only when max_retention_duration is defined. We + * enter this phase if the wait time in either the + * RDT_WAIT_FOR_PUBLISHER_STATUS or RDT_WAIT_FOR_LOCAL_FLUSH phase exceeds + * the configured max_retention_duration. In this phase, + * pg_subscription.subretentionactive is updated to false within a new + * transaction, and oldest_nonremovable_xid is set to InvalidTransactionId.
+ * * The overall state progression is: GET_CANDIDATE_XID -> * REQUEST_PUBLISHER_STATUS -> WAIT_FOR_PUBLISHER_STATUS -> (loop to * REQUEST_PUBLISHER_STATUS till concurrent remote transactions end) -> @@ -185,10 +193,10 @@ * transactions that occurred concurrently with the tuple DELETE, any * subsequent UPDATE from a remote node should have a later timestamp. In such * cases, it is acceptable to detect an update_missing scenario and convert the - * UPDATE to an INSERT when applying it. But, detecting concurrent remote - * transactions with earlier timestamps than the DELETE is necessary, as the - * UPDATEs in remote transactions should be ignored if their timestamp is - * earlier than that of the dead tuples. + * UPDATE to an INSERT when applying it. But, for concurrent remote + * transactions with earlier timestamps than the DELETE, detecting + * update_deleted is necessary, as the UPDATEs in remote transactions should be + * ignored if their timestamp is earlier than that of the dead tuples. * * Note that advancing the non-removable transaction ID is not supported if the * publisher is also a physical standby. This is because the logical walsender @@ -268,7 +276,6 @@ #include "storage/procarray.h" #include "tcop/tcopprot.h" #include "utils/acl.h" -#include "utils/dynahash.h" #include "utils/guc.h" #include "utils/inval.h" #include "utils/lsyscache.h" @@ -373,7 +380,8 @@ typedef enum RDT_GET_CANDIDATE_XID, RDT_REQUEST_PUBLISHER_STATUS, RDT_WAIT_FOR_PUBLISHER_STATUS, - RDT_WAIT_FOR_LOCAL_FLUSH + RDT_WAIT_FOR_LOCAL_FLUSH, + RDT_STOP_CONFLICT_INFO_RETENTION } RetainDeadTuplesPhase; /* @@ -415,6 +423,9 @@ typedef struct RetainDeadTuplesData * updated in final phase * (RDT_WAIT_FOR_LOCAL_FLUSH) */ + long table_sync_wait_time; /* time spent waiting for table sync + * to finish */ + /* * The following fields are used to determine the timing for the next * round of transaction ID advancement. @@ -555,6 +566,9 @@ static void request_publisher_status(RetainDeadTuplesData *rdt_data); static void wait_for_publisher_status(RetainDeadTuplesData *rdt_data, bool status_received); static void wait_for_local_flush(RetainDeadTuplesData *rdt_data); +static bool should_stop_conflict_info_retention(RetainDeadTuplesData *rdt_data); +static void stop_conflict_info_retention(RetainDeadTuplesData *rdt_data); +static void reset_retention_data_fields(RetainDeadTuplesData *rdt_data); static void adjust_xid_advance_interval(RetainDeadTuplesData *rdt_data, bool new_xid_found); @@ -576,6 +590,12 @@ static bool FindReplTupleInLocalRel(ApplyExecutionData *edata, Relation localrel Oid localidxoid, TupleTableSlot *remoteslot, TupleTableSlot **localslot); +static bool FindDeletedTupleInLocalRel(Relation localrel, + Oid localidxoid, + TupleTableSlot *remoteslot, + TransactionId *delete_xid, + RepOriginId *delete_origin, + TimestampTz *delete_time); static void apply_handle_tuple_routing(ApplyExecutionData *edata, TupleTableSlot *remoteslot, LogicalRepTupleData *newtup, @@ -2912,17 +2932,31 @@ apply_handle_update_internal(ApplyExecutionData *edata, } else { + ConflictType type; TupleTableSlot *newslot = localslot; + /* + * Detecting whether the tuple was recently deleted or never existed + * is crucial to avoid misleading the user during conflict handling. 
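
The classification that the new hunks apply in both the plain and the partition-routing update paths can be restated in isolation. A simplified standalone mirror of the decision (mock origin type; in the patch the second condition compares conflicttuple.origin against replorigin_session_origin):

#include <stdbool.h>
#include <stdio.h>

typedef int RepOriginId;

/*
 * A missing row is reported as update_deleted only when a recently deleted
 * matching row exists and the deletion came from an origin other than the
 * one applying the current change; otherwise it stays update_missing.
 */
static const char *
classify_missing_update(bool deleted_row_found,
						RepOriginId delete_origin,
						RepOriginId session_origin)
{
	if (deleted_row_found && delete_origin != session_origin)
		return "update_deleted";
	return "update_missing";
}

int
main(void)
{
	printf("%s\n", classify_missing_update(true, 1, 2));	/* update_deleted */
	printf("%s\n", classify_missing_update(true, 2, 2));	/* update_missing */
	printf("%s\n", classify_missing_update(false, 0, 2));	/* update_missing */
	return 0;
}
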
+ */ + if (FindDeletedTupleInLocalRel(localrel, localindexoid, remoteslot, + &conflicttuple.xmin, + &conflicttuple.origin, + &conflicttuple.ts) && + conflicttuple.origin != replorigin_session_origin) + type = CT_UPDATE_DELETED; + else + type = CT_UPDATE_MISSING; + /* Store the new tuple for conflict reporting */ slot_store_data(newslot, relmapentry, newtup); /* - * The tuple to be updated could not be found. Do nothing except for - * emitting a log message. + * The tuple to be updated could not be found or was deleted. Do + * nothing except for emitting a log message. */ - ReportApplyConflict(estate, relinfo, LOG, CT_UPDATE_MISSING, - remoteslot, newslot, list_make1(&conflicttuple)); + ReportApplyConflict(estate, relinfo, LOG, type, remoteslot, newslot, + list_make1(&conflicttuple)); } /* Cleanup. */ @@ -3142,6 +3176,134 @@ FindReplTupleInLocalRel(ApplyExecutionData *edata, Relation localrel, return found; } +/* + * Determine whether the index can reliably locate the deleted tuple in the + * local relation. + * + * An index may exclude deleted tuples if it was re-indexed or re-created during + * change application. Therefore, an index is considered usable only if the + * conflict detection slot's xmin (conflict_detection_xmin) is greater than the + * index tuple's xmin. This ensures that any tuples deleted prior to the index + * creation or re-indexing are not relevant for conflict detection in the + * current apply worker. + * + * Note that indexes may also be excluded if they were modified by other DDL + * operations, such as ALTER INDEX. However, this is acceptable, as the + * likelihood of such DDL changes coinciding with the need to scan dead + * tuples for update_deleted detection is low. + */ +static bool +IsIndexUsableForFindingDeletedTuple(Oid localindexoid, + TransactionId conflict_detection_xmin) +{ + HeapTuple index_tuple; + TransactionId index_xmin; + + index_tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(localindexoid)); + + if (!HeapTupleIsValid(index_tuple)) /* should not happen */ + elog(ERROR, "cache lookup failed for index %u", localindexoid); + + /* + * No need to check for a frozen transaction ID, as + * TransactionIdPrecedes() manages it internally, treating it as falling + * behind the conflict_detection_xmin. + */ + index_xmin = HeapTupleHeaderGetXmin(index_tuple->t_data); + + ReleaseSysCache(index_tuple); + + return TransactionIdPrecedes(index_xmin, conflict_detection_xmin); +} + +/* + * Attempts to locate a deleted tuple in the local relation that matches the + * values of the tuple received from the publication side (in 'remoteslot'). + * The search is performed using either the replica identity index, primary + * key, other available index, or a sequential scan if necessary. + * + * Returns true if the deleted tuple is found. If found, the transaction ID, + * origin, and commit timestamp of the deletion are stored in '*delete_xid', + * '*delete_origin', and '*delete_time' respectively. + */ +static bool +FindDeletedTupleInLocalRel(Relation localrel, Oid localidxoid, + TupleTableSlot *remoteslot, + TransactionId *delete_xid, RepOriginId *delete_origin, + TimestampTz *delete_time) +{ + TransactionId oldestxmin; + + /* + * Return false if either dead tuples are not retained or commit timestamp + * data is not available.
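
The index-usability test above boils down to one transaction-ID comparison. A wraparound-unaware standalone sketch; the backend uses TransactionIdPrecedes(), which also handles frozen xids:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;

/*
 * An index created or rebuilt at index_xmin may lack entries for rows
 * deleted before it existed, so it is trusted for locating deleted rows
 * only when it predates everything the conflict slot still retains.
 */
static bool
index_usable_for_deleted_rows(TransactionId index_xmin,
							  TransactionId conflict_detection_xmin)
{
	return index_xmin < conflict_detection_xmin;	/* no wraparound handling */
}

int
main(void)
{
	printf("old index usable:   %d\n", index_usable_for_deleted_rows(90, 100));
	printf("fresh index usable: %d\n", index_usable_for_deleted_rows(105, 100));
	return 0;
}
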
+ */ + if (!MySubscription->retaindeadtuples || !track_commit_timestamp) + return false; + + /* + * For conflict detection, we use the leader worker's + * oldest_nonremovable_xid value instead of invoking + * GetOldestNonRemovableTransactionId() or using the conflict detection + * slot's xmin. The oldest_nonremovable_xid acts as a threshold to + * identify tuples that were recently deleted. These deleted tuples are no + * longer visible to concurrent transactions. However, if a remote update + * matches such a tuple, we log an update_deleted conflict. + * + * While GetOldestNonRemovableTransactionId() and slot.xmin may return + * transaction IDs older than oldest_nonremovable_xid, for our current + * purpose, it is acceptable to treat tuples deleted by transactions prior + * to oldest_nonremovable_xid as update_missing conflicts. + */ + if (am_leader_apply_worker()) + { + oldestxmin = MyLogicalRepWorker->oldest_nonremovable_xid; + } + else + { + LogicalRepWorker *leader; + + /* + * Obtain the information from the leader apply worker as only the + * leader manages oldest_nonremovable_xid (see + * maybe_advance_nonremovable_xid() for details). + */ + LWLockAcquire(LogicalRepWorkerLock, LW_SHARED); + leader = logicalrep_worker_find(MyLogicalRepWorker->subid, + InvalidOid, false); + if (!leader) + { + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("could not detect conflict as the leader apply worker has exited"))); + } + + SpinLockAcquire(&leader->relmutex); + oldestxmin = leader->oldest_nonremovable_xid; + SpinLockRelease(&leader->relmutex); + LWLockRelease(LogicalRepWorkerLock); + } + + /* + * Return false if the leader apply worker has stopped retaining + * information for detecting conflicts. This implies that update_deleted + * can no longer be reliably detected. + */ + if (!TransactionIdIsValid(oldestxmin)) + return false; + + if (OidIsValid(localidxoid) && + IsIndexUsableForFindingDeletedTuple(localidxoid, oldestxmin)) + return RelationFindDeletedTupleInfoByIndex(localrel, localidxoid, + remoteslot, oldestxmin, + delete_xid, delete_origin, + delete_time); + else + return RelationFindDeletedTupleInfoSeq(localrel, remoteslot, + oldestxmin, delete_xid, + delete_origin, delete_time); +} + /* * This handles insert, update, delete on a partitioned table. */ @@ -3260,18 +3422,35 @@ apply_handle_tuple_routing(ApplyExecutionData *edata, remoteslot_part, &localslot); if (!found) { + ConflictType type; TupleTableSlot *newslot = localslot; + /* + * Detecting whether the tuple was recently deleted or + * never existed is crucial to avoid misleading the user + * during conflict handling. + */ + if (FindDeletedTupleInLocalRel(partrel, + part_entry->localindexoid, + remoteslot_part, + &conflicttuple.xmin, + &conflicttuple.origin, + &conflicttuple.ts) && + conflicttuple.origin != replorigin_session_origin) + type = CT_UPDATE_DELETED; + else + type = CT_UPDATE_MISSING; + /* Store the new tuple for conflict reporting */ slot_store_data(newslot, part_entry, newtup); /* - * The tuple to be updated could not be found. Do nothing - * except for emitting a log message. + * The tuple to be updated could not be found or was + * deleted. Do nothing except for emitting a log message. 
*/ ReportApplyConflict(estate, partrelinfo, LOG, - CT_UPDATE_MISSING, remoteslot_part, - newslot, list_make1(&conflicttuple)); + type, remoteslot_part, newslot, + list_make1(&conflicttuple)); return; } @@ -3851,7 +4030,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received) c = pq_getmsgbyte(&s); - if (c == 'w') + if (c == PqReplMsg_WALData) { XLogRecPtr start_lsn; XLogRecPtr end_lsn; @@ -3873,7 +4052,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received) maybe_advance_nonremovable_xid(&rdt_data, false); } - else if (c == 'k') + else if (c == PqReplMsg_Keepalive) { XLogRecPtr end_lsn; TimestampTz timestamp; @@ -3892,7 +4071,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received) UpdateWorkerStats(last_received, timestamp, true); } - else if (c == 's') /* Primary status update */ + else if (c == PqReplMsg_PrimaryStatusUpdate) { rdt_data.remote_lsn = pq_getmsgint64(&s); rdt_data.remote_oldestxid = FullTransactionIdFromU64((uint64) pq_getmsgint64(&s)); @@ -3965,11 +4144,17 @@ LogicalRepApplyLoop(XLogRecPtr last_received) /* * Ensure to wake up when it's possible to advance the non-removable - * transaction ID. + * transaction ID, or when the retention duration may have exceeded + * max_retention_duration. */ - if (rdt_data.phase == RDT_GET_CANDIDATE_XID && - rdt_data.xid_advance_interval) - wait_time = Min(wait_time, rdt_data.xid_advance_interval); + if (MySubscription->retentionactive) + { + if (rdt_data.phase == RDT_GET_CANDIDATE_XID && + rdt_data.xid_advance_interval) + wait_time = Min(wait_time, rdt_data.xid_advance_interval); + else if (MySubscription->maxretention > 0) + wait_time = Min(wait_time, MySubscription->maxretention); + } rc = WaitLatchOrSocket(MyLatch, WL_SOCKET_READABLE | WL_LATCH_SET | @@ -4124,7 +4309,7 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply) else resetStringInfo(reply_message); - pq_sendbyte(reply_message, 'r'); + pq_sendbyte(reply_message, PqReplMsg_StandbyStatusUpdate); pq_sendint64(reply_message, recvpos); /* write */ pq_sendint64(reply_message, flushpos); /* flush */ pq_sendint64(reply_message, writepos); /* apply */ @@ -4172,8 +4357,8 @@ can_advance_nonremovable_xid(RetainDeadTuplesData *rdt_data) { /* * It is sufficient to manage non-removable transaction ID for a - * subscription by the main apply worker to detect conflicts reliably even - * for table sync or parallel apply workers. + * subscription by the main apply worker to detect update_deleted reliably + * even for table sync or parallel apply workers. */ if (!am_leader_apply_worker()) return false; @@ -4182,6 +4367,10 @@ can_advance_nonremovable_xid(RetainDeadTuplesData *rdt_data) if (!MySubscription->retaindeadtuples) return false; + /* No need to advance if we have already stopped retaining */ + if (!MySubscription->retentionactive) + return false; + return true; } @@ -4207,6 +4396,9 @@ process_rdt_phase_transition(RetainDeadTuplesData *rdt_data, case RDT_WAIT_FOR_LOCAL_FLUSH: wait_for_local_flush(rdt_data); break; + case RDT_STOP_CONFLICT_INFO_RETENTION: + stop_conflict_info_retention(rdt_data); + break; } } @@ -4295,7 +4487,7 @@ request_publisher_status(RetainDeadTuplesData *rdt_data) * Send the current time to update the remote walsender's latest reply * message received time. 
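
The byte values behind the new PqReplMsg_* names can be read directly off the substitutions in this diff ('w', 'k', 's', 'p', 'r', 'h'); the authoritative definitions live in src/include/libpq/protocol.h. A self-contained dispatch sketch using those values:

#include <stdio.h>

/* Values mirror the literals these names replace in the hunks above. */
#define PqReplMsg_WALData				'w'
#define PqReplMsg_Keepalive				'k'
#define PqReplMsg_PrimaryStatusUpdate	's'
#define PqReplMsg_PrimaryStatusRequest	'p'
#define PqReplMsg_StandbyStatusUpdate	'r'
#define PqReplMsg_HotStandbyFeedback	'h'

static const char *
replication_msg_name(char type)
{
	switch (type)
	{
		case PqReplMsg_WALData:
			return "WAL data";
		case PqReplMsg_Keepalive:
			return "keepalive";
		case PqReplMsg_PrimaryStatusUpdate:
			return "primary status update";
		case PqReplMsg_PrimaryStatusRequest:
			return "primary status request";
		case PqReplMsg_StandbyStatusUpdate:
			return "standby status update";
		case PqReplMsg_HotStandbyFeedback:
			return "hot standby feedback";
		default:
			return "unknown";
	}
}

int
main(void)
{
	printf("'w' => %s\n", replication_msg_name('w'));
	printf("'p' => %s\n", replication_msg_name('p'));
	return 0;
}
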
*/ - pq_sendbyte(request_message, 'p'); + pq_sendbyte(request_message, PqReplMsg_PrimaryStatusRequest); pq_sendint64(request_message, GetCurrentTimestamp()); elog(DEBUG2, "sending publisher status request message"); @@ -4325,6 +4517,13 @@ wait_for_publisher_status(RetainDeadTuplesData *rdt_data, if (!status_received) return; + /* + * We don't need to maintain oldest_nonremovable_xid if we decide to stop + * retaining conflict information for this worker. + */ + if (should_stop_conflict_info_retention(rdt_data)) + return; + if (!FullTransactionIdIsValid(rdt_data->remote_wait_for)) rdt_data->remote_wait_for = rdt_data->remote_nextxid; @@ -4374,10 +4573,11 @@ wait_for_local_flush(RetainDeadTuplesData *rdt_data) * We expect the publisher and subscriber clocks to be in sync using time * sync service like NTP. Otherwise, we will advance this worker's * oldest_nonremovable_xid prematurely, leading to the removal of rows - * required to detect conflicts reliably. This check primarily addresses - * scenarios where the publisher's clock falls behind; if the publisher's - * clock is ahead, subsequent transactions will naturally bear later - * commit timestamps, conforming to the design outlined atop worker.c. + * required to detect update_deleted reliably. This check primarily + * addresses scenarios where the publisher's clock falls behind; if the + * publisher's clock is ahead, subsequent transactions will naturally bear + * later commit timestamps, conforming to the design outlined atop + * worker.c. * * XXX Consider waiting for the publisher's clock to catch up with the * subscriber's before proceeding to the next phase. @@ -4400,11 +4600,49 @@ wait_for_local_flush(RetainDeadTuplesData *rdt_data) * workers is complex and not worth the effort, so we simply return if not * all tables are in the READY state. * - * It is safe to add new tables with initial states to the subscription - * after this check because any changes applied to these tables should - * have a WAL position greater than the rdt_data->remote_lsn. + * Advancing the transaction ID is necessary even when no tables are + * currently subscribed, to avoid retaining dead tuples unnecessarily. + * While it might seem safe to skip all phases and directly assign + * candidate_xid to oldest_nonremovable_xid during the + * RDT_GET_CANDIDATE_XID phase in such cases, this is unsafe. If users + * concurrently add tables to the subscription, the apply worker may not + * process invalidations in time. Consequently, + * HasSubscriptionRelationsCached() might miss the new tables, leading to + * premature advancement of oldest_nonremovable_xid. + * + * Performing the check during RDT_WAIT_FOR_LOCAL_FLUSH is safe, as + * invalidations are guaranteed to be processed before applying changes + * from newly added tables while waiting for the local flush to reach + * remote_lsn. + * + * Additionally, even if we check for subscription tables during + * RDT_GET_CANDIDATE_XID, they might be dropped before reaching + * RDT_WAIT_FOR_LOCAL_FLUSH. Therefore, it's still necessary to verify + * subscription tables at this stage to prevent unnecessary tuple + * retention. */ - if (!AllTablesyncsReady()) + if (HasSubscriptionRelationsCached() && !AllTablesyncsReady()) + { + TimestampTz now; + + now = rdt_data->last_recv_time + ? rdt_data->last_recv_time : GetCurrentTimestamp(); + + /* + * Record the time spent waiting for table sync; it is needed for the + * timeout check in should_stop_conflict_info_retention().
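
Stated arithmetically, the timeout these call sites feed into is simple: stop retaining once the time since the candidate xid was chosen exceeds max_retention_duration plus whatever was spent waiting for table sync. A standalone sketch in milliseconds (the backend uses TimestampTz microseconds and TimestampDifferenceExceeds(); treating 0 as "disabled" follows the !MySubscription->maxretention test in the hunks below):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Retention is abandoned once the time since the candidate xid was chosen
 * exceeds the retention budget; time spent waiting for table sync extends
 * the budget, and a limit of 0 disables the check entirely.
 */
static bool
retention_exceeded(int64_t candidate_xid_time_ms, int64_t now_ms,
				   int64_t max_retention_ms, int64_t table_sync_wait_ms)
{
	if (max_retention_ms == 0)
		return false;
	return now_ms - candidate_xid_time_ms >
		max_retention_ms + table_sync_wait_ms;
}

int
main(void)
{
	/* 70s elapsed, 60s limit, 15s of it spent in table sync: keep going */
	printf("%d\n", retention_exceeded(0, 70000, 60000, 15000));	/* 0 */
	/* 80s elapsed under the same limits: stop retaining */
	printf("%d\n", retention_exceeded(0, 80000, 60000, 15000));	/* 1 */
	return 0;
}
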
+ */ + rdt_data->table_sync_wait_time = + TimestampDifferenceMilliseconds(rdt_data->candidate_xid_time, now); + + return; + } + + /* + * We don't need to maintain oldest_nonremovable_xid if we decide to stop + * retaining conflict information for this worker. + */ + if (should_stop_conflict_info_retention(rdt_data)) return; /* @@ -4443,19 +4681,121 @@ wait_for_local_flush(RetainDeadTuplesData *rdt_data) MyLogicalRepWorker->oldest_nonremovable_xid = rdt_data->candidate_xid; SpinLockRelease(&MyLogicalRepWorker->relmutex); - elog(DEBUG2, "confirmed flush up to remote lsn %X/%X: new oldest_nonremovable_xid %u", + elog(DEBUG2, "confirmed flush up to remote lsn %X/%08X: new oldest_nonremovable_xid %u", LSN_FORMAT_ARGS(rdt_data->remote_lsn), rdt_data->candidate_xid); /* Notify launcher to update the xmin of the conflict slot */ ApplyLauncherWakeup(); + reset_retention_data_fields(rdt_data); + + /* process the next phase */ + process_rdt_phase_transition(rdt_data, false); +} + +/* + * Check whether conflict information retention should be stopped due to + * exceeding the maximum wait time (max_retention_duration). + * + * If retention should be stopped, transition to the + * RDT_STOP_CONFLICT_INFO_RETENTION phase and return true. Otherwise, return + * false. + * + * Note: Retention won't be resumed automatically. The user must manually + * disable retain_dead_tuples and re-enable it after confirming that the + * replication slot maintained by the launcher has been dropped. + */ +static bool +should_stop_conflict_info_retention(RetainDeadTuplesData *rdt_data) +{ + TimestampTz now; + + Assert(TransactionIdIsValid(rdt_data->candidate_xid)); + Assert(rdt_data->phase == RDT_WAIT_FOR_PUBLISHER_STATUS || + rdt_data->phase == RDT_WAIT_FOR_LOCAL_FLUSH); + + if (!MySubscription->maxretention) + return false; + + /* + * Use last_recv_time when applying changes in the loop to avoid + * unnecessary system time retrieval. If last_recv_time is not available, + * obtain the current timestamp. + */ + now = rdt_data->last_recv_time ? rdt_data->last_recv_time : GetCurrentTimestamp(); + /* - * Reset all data fields except those used to determine the timing for the - * next round of transaction ID advancement. We can even use - * flushpos_update_time in the next round to decide whether to get the - * latest flush position. + * Return early if the wait time has not exceeded the configured maximum + * (max_retention_duration). Time spent waiting for table synchronization + * is excluded from this calculation, as it occurs infrequently. */ + if (!TimestampDifferenceExceeds(rdt_data->candidate_xid_time, now, + MySubscription->maxretention + + rdt_data->table_sync_wait_time)) + return false; + + rdt_data->phase = RDT_STOP_CONFLICT_INFO_RETENTION; + + /* process the next phase */ + process_rdt_phase_transition(rdt_data, false); + + return true; +} + +/* + * Workhorse for the RDT_STOP_CONFLICT_INFO_RETENTION phase. + */ +static void +stop_conflict_info_retention(RetainDeadTuplesData *rdt_data) +{ + /* + * Do not update the catalog during an active transaction. The transaction + * may be started during change application, leading to a possible + * rollback of catalog updates if the application fails subsequently. + */ + if (IsTransactionState()) + return; + + StartTransactionCommand(); + + /* + * Updating pg_subscription might involve TOAST table access, so ensure we + * have a valid snapshot. 
+ */ + PushActiveSnapshot(GetTransactionSnapshot()); + + /* Set pg_subscription.subretentionactive to false */ + UpdateDeadTupleRetentionStatus(MySubscription->oid, false); + + PopActiveSnapshot(); + CommitTransactionCommand(); + + SpinLockAcquire(&MyLogicalRepWorker->relmutex); + MyLogicalRepWorker->oldest_nonremovable_xid = InvalidTransactionId; + SpinLockRelease(&MyLogicalRepWorker->relmutex); + + ereport(LOG, + errmsg("logical replication worker for subscription \"%s\" has stopped retaining the information for detecting conflicts", + MySubscription->name), + errdetail("Retention of information used for conflict detection has exceeded max_retention_duration of %u ms.", + MySubscription->maxretention)); + + /* Notify launcher to update the conflict slot */ + ApplyLauncherWakeup(); + + reset_retention_data_fields(rdt_data); +} + +/* + * Reset all data fields of RetainDeadTuplesData except those used to + * determine the timing for the next round of transaction ID advancement. We + * can even use flushpos_update_time in the next round to decide whether to get + * the latest flush position. + */ +static void +reset_retention_data_fields(RetainDeadTuplesData *rdt_data) +{ rdt_data->phase = RDT_GET_CANDIDATE_XID; rdt_data->remote_lsn = InvalidXLogRecPtr; rdt_data->remote_oldestxid = InvalidFullTransactionId; @@ -4463,22 +4803,25 @@ wait_for_local_flush(RetainDeadTuplesData *rdt_data) rdt_data->reply_time = 0; rdt_data->remote_wait_for = InvalidFullTransactionId; rdt_data->candidate_xid = InvalidTransactionId; - - /* process the next phase */ - process_rdt_phase_transition(rdt_data, false); + rdt_data->table_sync_wait_time = 0; } /* * Adjust the interval for advancing non-removable transaction IDs. * - * We double the interval to try advancing the non-removable transaction IDs - * if there is no activity on the node. The maximum value of the interval is - * capped by wal_receiver_status_interval if it is not zero, otherwise to a - * 3 minutes which should be sufficient to avoid using CPU or network - * resources without much benefit. + * If there is no activity on the node, we progressively double the interval + * used to advance the non-removable transaction ID. This helps conserve CPU + * and network resources when there's little benefit to frequent updates. * - * The interval is reset to a minimum value of 100ms once there is some - * activity on the node. + * The interval is capped by the lowest of the following: + * - wal_receiver_status_interval (if set), + * - a default maximum of 3 minutes, + * - max_retention_duration. + * + * This ensures the interval never exceeds the retention boundary, even if + * other limits are higher. Once activity resumes on the node, the interval + * is reset to the lesser of 100ms and max_retention_duration, allowing timely + * advancement of the non-removable transaction ID. * * XXX The use of wal_receiver_status_interval is a bit arbitrary so we can * consider the other interval or a separate GUC if the need arises.
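
A compact standalone model of the doubling-and-capping behaviour the rewritten comment describes. The constants mirror worker.c's 100ms floor and 3 minute default cap; treating a zero max_retention_duration as "no limit" is an assumption drawn from the timeout check above, and the reply-request interactions are ignored:

#include <stdint.h>
#include <stdio.h>

#define MIN_XID_ADVANCE_INTERVAL	100		/* ms */
#define MAX_XID_ADVANCE_INTERVAL	180000	/* ms, the 3 minute default */

static int64_t
min64(int64_t a, int64_t b)
{
	return a < b ? a : b;
}

/*
 * Double the interval while the node is idle, capped by the status-report
 * interval (when set) and the 3 minute default; reset on activity; and
 * finally clamp to max_retention_duration so the timeout can fire on time.
 */
static int64_t
next_interval(int64_t cur, int new_xid_found,
			  int64_t status_interval_ms, int64_t max_retention_ms)
{
	int64_t		cap = status_interval_ms > 0
		? min64(status_interval_ms, MAX_XID_ADVANCE_INTERVAL)
		: MAX_XID_ADVANCE_INTERVAL;
	int64_t		next = new_xid_found
		? MIN_XID_ADVANCE_INTERVAL
		: min64(cur * 2, cap);

	return max_retention_ms > 0 ? min64(next, max_retention_ms) : next;
}

int
main(void)
{
	int64_t		iv = MIN_XID_ADVANCE_INTERVAL;

	for (int i = 0; i < 5; i++)
	{
		iv = next_interval(iv, 0, 10000, 5000);
		printf("idle round %d: wait %lld ms\n", i + 1, (long long) iv);
	}
	return 0;
}
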
@@ -4507,6 +4850,10 @@ adjust_xid_advance_interval(RetainDeadTuplesData *rdt_data, bool new_xid_found) */ rdt_data->xid_advance_interval = MIN_XID_ADVANCE_INTERVAL; } + + /* Ensure the wait time remains within the maximum limit */ + rdt_data->xid_advance_interval = Min(rdt_data->xid_advance_interval, + MySubscription->maxretention); } /* @@ -4767,7 +5114,7 @@ subxact_info_read(Oid subid, TransactionId xid) len = sizeof(SubXactInfo) * subxact_data.nsubxacts; /* we keep the maximum as a power of 2 */ - subxact_data.nsubxacts_max = 1 << my_log2(subxact_data.nsubxacts); + subxact_data.nsubxacts_max = 1 << pg_ceil_log2_32(subxact_data.nsubxacts); /* * Allocate subxact information in the logical streaming context. We need @@ -5271,6 +5618,13 @@ InitializeLogRepWorker(void) StartTransactionCommand(); oldctx = MemoryContextSwitchTo(ApplyContext); + /* + * Lock the subscription to prevent it from being concurrently dropped, + * then re-verify its existence. After the initialization, the worker will + * be terminated gracefully if the subscription is dropped. + */ + LockSharedObject(SubscriptionRelationId, MyLogicalRepWorker->subid, 0, + AccessShareLock); MySubscription = GetSubscription(MyLogicalRepWorker->subid, true); if (!MySubscription) { @@ -5307,11 +5661,12 @@ InitializeLogRepWorker(void) * dropped, a restart is initiated. * * The oldest_nonremovable_xid should be initialized only when the - * retain_dead_tuples is enabled before launching the worker. See + * subscription's retention is active before launching the worker. See * logicalrep_worker_launch. */ if (am_leader_apply_worker() && MySubscription->retaindeadtuples && + MySubscription->retentionactive && !TransactionIdIsValid(MyLogicalRepWorker->oldest_nonremovable_xid)) { ereport(LOG, @@ -5482,8 +5837,9 @@ DisableSubscriptionAndExit(void) * an error, as verifying commit timestamps is unnecessary in this * context. */ - if (MySubscription->retaindeadtuples) - CheckSubDeadTupleRetention(false, true, WARNING); + CheckSubDeadTupleRetention(false, true, WARNING, + MySubscription->retaindeadtuples, + MySubscription->retentionactive, false); proc_exit(0); } diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index f4c977262c5a4..80540c017bd3a 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -1374,8 +1374,8 @@ pgoutput_row_filter(Relation relation, TupleTableSlot *old_slot, * VARTAG_INDIRECT. See ReorderBufferToastReplace. 
*/ if (att->attlen == -1 && - VARATT_IS_EXTERNAL_ONDISK(new_slot->tts_values[i]) && - !VARATT_IS_EXTERNAL_ONDISK(old_slot->tts_values[i])) + VARATT_IS_EXTERNAL_ONDISK(DatumGetPointer(new_slot->tts_values[i])) && + !VARATT_IS_EXTERNAL_ONDISK(DatumGetPointer(old_slot->tts_values[i]))) { if (!tmp_new_slot) { diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c index 8605776ad8631..fd0fdb96d4246 100644 --- a/src/backend/replication/slot.c +++ b/src/backend/replication/slot.c @@ -653,7 +653,7 @@ ReplicationSlotAcquire(const char *name, bool nowait, bool error_if_invalid) } else { - active_pid = MyProcPid; + s->active_pid = active_pid = MyProcPid; ReplicationSlotSetInactiveSince(s, 0, true); } LWLockRelease(ReplicationSlotControlLock); diff --git a/src/backend/replication/slotfuncs.c b/src/backend/replication/slotfuncs.c index 69f4c6157c518..b8f21153e7bb3 100644 --- a/src/backend/replication/slotfuncs.c +++ b/src/backend/replication/slotfuncs.c @@ -921,7 +921,6 @@ pg_sync_replication_slots(PG_FUNCTION_ARGS) /* Connect to the primary server. */ wrconn = walrcv_connect(PrimaryConnInfo, false, false, false, app_name.data, &err); - pfree(app_name.data); if (!wrconn) ereport(ERROR, @@ -929,6 +928,8 @@ pg_sync_replication_slots(PG_FUNCTION_ARGS) errmsg("synchronization worker \"%s\" could not connect to the primary server: %s", app_name.data, err)); + pfree(app_name.data); + SyncReplicationSlots(wrconn); walrcv_disconnect(wrconn); diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c index b62811017116f..7361ffc9dcf5e 100644 --- a/src/backend/replication/walreceiver.c +++ b/src/backend/replication/walreceiver.c @@ -826,7 +826,7 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len, TimeLineID tli) switch (type) { - case 'w': /* WAL records */ + case PqReplMsg_WALData: { StringInfoData incoming_message; @@ -850,7 +850,7 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len, TimeLineID tli) XLogWalRcvWrite(buf, len, dataStart, tli); break; } - case 'k': /* Keepalive */ + case PqReplMsg_Keepalive: { StringInfoData incoming_message; @@ -1130,7 +1130,7 @@ XLogWalRcvSendReply(bool force, bool requestReply) applyPtr = GetXLogReplayRecPtr(NULL); resetStringInfo(&reply_message); - pq_sendbyte(&reply_message, 'r'); + pq_sendbyte(&reply_message, PqReplMsg_StandbyStatusUpdate); pq_sendint64(&reply_message, writePtr); pq_sendint64(&reply_message, flushPtr); pq_sendint64(&reply_message, applyPtr); @@ -1234,7 +1234,7 @@ XLogWalRcvSendHSFeedback(bool immed) /* Construct the message and send it. 
*/ resetStringInfo(&reply_message); - pq_sendbyte(&reply_message, 'h'); + pq_sendbyte(&reply_message, PqReplMsg_HotStandbyFeedback); pq_sendint64(&reply_message, GetCurrentTimestamp()); pq_sendint32(&reply_message, xmin); pq_sendint32(&reply_message, xmin_epoch); diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c index ee911394a23c6..59822f22b8d06 100644 --- a/src/backend/replication/walsender.c +++ b/src/backend/replication/walsender.c @@ -51,6 +51,7 @@ #include "access/timeline.h" #include "access/transam.h" +#include "access/twophase.h" #include "access/xact.h" #include "access/xlog_internal.h" #include "access/xlogreader.h" @@ -60,7 +61,6 @@ #include "backup/basebackup_incremental.h" #include "catalog/pg_authid.h" #include "catalog/pg_type.h" -#include "commands/dbcommands.h" #include "commands/defrem.h" #include "funcapi.h" #include "libpq/libpq.h" @@ -91,6 +91,7 @@ #include "utils/acl.h" #include "utils/builtins.h" #include "utils/guc.h" +#include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/pg_lsn.h" #include "utils/pgstat_internal.h" @@ -1534,7 +1535,7 @@ WalSndPrepareWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xi resetStringInfo(ctx->out); - pq_sendbyte(ctx->out, 'w'); + pq_sendbyte(ctx->out, PqReplMsg_WALData); pq_sendint64(ctx->out, lsn); /* dataStart */ pq_sendint64(ctx->out, lsn); /* walEnd */ @@ -2292,7 +2293,8 @@ ProcessRepliesIfAny(void) switch (firstchar) { /* - * 'd' means a standby reply wrapped in a CopyData packet. + * PqMsg_CopyData means a standby reply wrapped in a CopyData + * packet. */ case PqMsg_CopyData: ProcessStandbyMessage(); @@ -2300,8 +2302,9 @@ ProcessRepliesIfAny(void) break; /* - * CopyDone means the standby requested to finish streaming. - * Reply with CopyDone, if we had not sent that already. + * PqMsg_CopyDone means the standby requested to finish + * streaming. Reply with CopyDone, if we had not sent that + * already. */ case PqMsg_CopyDone: if (!streamingDoneSending) @@ -2315,7 +2318,8 @@ ProcessRepliesIfAny(void) break; /* - * 'X' means that the standby is closing down the socket. + * PqMsg_Terminate means that the standby is closing down the + * socket. */ case PqMsg_Terminate: proc_exit(0); @@ -2350,15 +2354,15 @@ ProcessStandbyMessage(void) switch (msgtype) { - case 'r': + case PqReplMsg_StandbyStatusUpdate: ProcessStandbyReplyMessage(); break; - case 'h': + case PqReplMsg_HotStandbyFeedback: ProcessStandbyHSFeedbackMessage(); break; - case 'p': + case PqReplMsg_PrimaryStatusRequest: ProcessStandbyPSRequestMessage(); break; @@ -2716,6 +2720,7 @@ ProcessStandbyPSRequestMessage(void) { XLogRecPtr lsn = InvalidXLogRecPtr; TransactionId oldestXidInCommit; + TransactionId oldestGXidInCommit; FullTransactionId nextFullXid; FullTransactionId fullOldestXidInCommit; WalSnd *walsnd = MyWalSnd; @@ -2743,6 +2748,16 @@ ProcessStandbyPSRequestMessage(void) * ones replicated. */ oldestXidInCommit = GetOldestActiveTransactionId(true, false); + oldestGXidInCommit = TwoPhaseGetOldestXidInCommit(); + + /* + * Update the oldest xid for standby transmission if an older prepared + * transaction exists and is currently in commit phase. 
+ */ + if (TransactionIdIsValid(oldestGXidInCommit) && + TransactionIdPrecedes(oldestGXidInCommit, oldestXidInCommit)) + oldestXidInCommit = oldestGXidInCommit; + nextFullXid = ReadNextFullTransactionId(); fullOldestXidInCommit = FullTransactionIdFromAllowableAt(nextFullXid, oldestXidInCommit); @@ -2752,7 +2767,7 @@ ProcessStandbyPSRequestMessage(void) /* construct the message... */ resetStringInfo(&output_message); - pq_sendbyte(&output_message, 's'); + pq_sendbyte(&output_message, PqReplMsg_PrimaryStatusUpdate); pq_sendint64(&output_message, lsn); pq_sendint64(&output_message, (int64) U64FromFullTransactionId(fullOldestXidInCommit)); pq_sendint64(&output_message, (int64) U64FromFullTransactionId(nextFullXid)); @@ -3364,7 +3379,7 @@ XLogSendPhysical(void) * OK to read and send the slice. */ resetStringInfo(&output_message); - pq_sendbyte(&output_message, 'w'); + pq_sendbyte(&output_message, PqReplMsg_WALData); pq_sendint64(&output_message, startptr); /* dataStart */ pq_sendint64(&output_message, SendRqstPtr); /* walEnd */ @@ -4135,7 +4150,7 @@ WalSndKeepalive(bool requestReply, XLogRecPtr writePtr) /* construct the message... */ resetStringInfo(&output_message); - pq_sendbyte(&output_message, 'k'); + pq_sendbyte(&output_message, PqReplMsg_Keepalive); pq_sendint64(&output_message, XLogRecPtrIsInvalid(writePtr) ? sentPtr : writePtr); pq_sendint64(&output_message, GetCurrentTimestamp()); pq_sendbyte(&output_message, requestReply ? 1 : 0); diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c index 8aa90b0d6fb75..a96fbdc1ddd64 100644 --- a/src/backend/rewrite/rewriteDefine.c +++ b/src/backend/rewrite/rewriteDefine.c @@ -725,10 +725,9 @@ EnableDisableRule(Relation rel, const char *rulename, /* * Change ev_enabled if it is different from the desired new state. 
*/ - if (DatumGetChar(ruleform->ev_enabled) != - fires_when) + if (ruleform->ev_enabled != fires_when) { - ruleform->ev_enabled = CharGetDatum(fires_when); + ruleform->ev_enabled = fires_when; CatalogTupleUpdate(pg_rewrite_desc, &ruletup->t_self, ruletup); changed = true; diff --git a/src/backend/rewrite/rewriteSearchCycle.c b/src/backend/rewrite/rewriteSearchCycle.c index 19b89dee0d096..5202ef43d1068 100644 --- a/src/backend/rewrite/rewriteSearchCycle.c +++ b/src/backend/rewrite/rewriteSearchCycle.c @@ -282,8 +282,8 @@ rewriteSearchAndCycle(CommonTableExpr *cte) newrte = makeNode(RangeTblEntry); newrte->rtekind = RTE_SUBQUERY; - newrte->alias = makeAlias("*TLOCRN*", cte->ctecolnames); - newrte->eref = newrte->alias; + newrte->alias = NULL; + newrte->eref = makeAlias("*TLOCRN*", cte->ctecolnames); newsubquery = copyObject(rte1->subquery); IncrementVarSublevelsUp((Node *) newsubquery, 1, 1); newrte->subquery = newsubquery; @@ -320,7 +320,7 @@ rewriteSearchAndCycle(CommonTableExpr *cte) if (cte->search_clause->search_breadth_first) { search_col_rowexpr->args = lcons(makeConst(INT8OID, -1, InvalidOid, sizeof(int64), - Int64GetDatum(0), false, FLOAT8PASSBYVAL), + Int64GetDatum(0), false, true), search_col_rowexpr->args); search_col_rowexpr->colnames = lcons(makeString("*DEPTH*"), search_col_rowexpr->colnames); texpr = (Expr *) search_col_rowexpr; @@ -379,8 +379,8 @@ rewriteSearchAndCycle(CommonTableExpr *cte) ewcl = lappend(ewcl, makeString(cte->cycle_clause->cycle_mark_column)); ewcl = lappend(ewcl, makeString(cte->cycle_clause->cycle_path_column)); } - newrte->alias = makeAlias("*TROCRN*", ewcl); - newrte->eref = newrte->alias; + newrte->alias = NULL; + newrte->eref = makeAlias("*TROCRN*", ewcl); /* * Find the reference to the recursive CTE in the right UNION subquery's diff --git a/src/backend/statistics/attribute_stats.c b/src/backend/statistics/attribute_stats.c index ab198076401b0..1db6a7f784c58 100644 --- a/src/backend/statistics/attribute_stats.c +++ b/src/backend/statistics/attribute_stats.c @@ -199,7 +199,7 @@ attribute_statistics_update(FunctionCallInfo fcinfo) if (!PG_ARGISNULL(ATTNUM_ARG)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("cannot specify both attname and attnum"))); + errmsg("cannot specify both \"%s\" and \"%s\"", "attname", "attnum"))); attname = TextDatumGetCString(PG_GETARG_DATUM(ATTNAME_ARG)); attnum = get_attnum(reloid, attname); /* note that this test covers attisdropped cases too: */ @@ -225,7 +225,7 @@ attribute_statistics_update(FunctionCallInfo fcinfo) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("must specify either attname or attnum"))); + errmsg("must specify either \"%s\" or \"%s\"", "attname", "attnum"))); attname = NULL; /* keep compiler quiet */ attnum = 0; } @@ -297,8 +297,9 @@ attribute_statistics_update(FunctionCallInfo fcinfo) &elemtypid, &elem_eq_opr)) { ereport(WARNING, - (errmsg("unable to determine element type of attribute \"%s\"", attname), - errdetail("Cannot set STATISTIC_KIND_MCELEM or STATISTIC_KIND_DECHIST."))); + (errmsg("could not determine element type of column \"%s\"", attname), + errdetail("Cannot set %s or %s.", + "STATISTIC_KIND_MCELEM", "STATISTIC_KIND_DECHIST"))); elemtypid = InvalidOid; elem_eq_opr = InvalidOid; @@ -313,8 +314,9 @@ attribute_statistics_update(FunctionCallInfo fcinfo) { ereport(WARNING, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("could not determine less-than operator for attribute \"%s\"", attname), - errdetail("Cannot set STATISTIC_KIND_HISTOGRAM or 
STATISTIC_KIND_CORRELATION."))); + errmsg("could not determine less-than operator for column \"%s\"", attname), + errdetail("Cannot set %s or %s.", + "STATISTIC_KIND_HISTOGRAM", "STATISTIC_KIND_CORRELATION"))); do_histogram = false; do_correlation = false; @@ -327,8 +329,9 @@ attribute_statistics_update(FunctionCallInfo fcinfo) { ereport(WARNING, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("attribute \"%s\" is not a range type", attname), - errdetail("Cannot set STATISTIC_KIND_RANGE_LENGTH_HISTOGRAM or STATISTIC_KIND_BOUNDS_HISTOGRAM."))); + errmsg("column \"%s\" is not a range type", attname), + errdetail("Cannot set %s or %s.", + "STATISTIC_KIND_RANGE_LENGTH_HISTOGRAM", "STATISTIC_KIND_BOUNDS_HISTOGRAM"))); do_bounds_histogram = false; do_range_length_histogram = false; @@ -339,7 +342,7 @@ attribute_statistics_update(FunctionCallInfo fcinfo) starel = table_open(StatisticRelationId, RowExclusiveLock); - statup = SearchSysCache3(STATRELATTINH, reloid, attnum, inherited); + statup = SearchSysCache3(STATRELATTINH, ObjectIdGetDatum(reloid), Int16GetDatum(attnum), BoolGetDatum(inherited)); /* initialize from existing tuple if exists */ if (HeapTupleIsValid(statup)) @@ -587,7 +590,7 @@ get_attr_stat_type(Oid reloid, AttrNumber attnum, if (!HeapTupleIsValid(atup)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("attribute %d of relation \"%s\" does not exist", + errmsg("column %d of relation \"%s\" does not exist", attnum, RelationGetRelationName(rel)))); attr = (Form_pg_attribute) GETSTRUCT(atup); @@ -595,7 +598,7 @@ get_attr_stat_type(Oid reloid, AttrNumber attnum, if (attr->attisdropped) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("attribute %d of relation \"%s\" does not exist", + errmsg("column %d of relation \"%s\" does not exist", attnum, RelationGetRelationName(rel)))); expr = get_attr_expr(rel, attr->attnum); @@ -729,7 +732,7 @@ text_to_stavalues(const char *staname, FmgrInfo *array_in, Datum d, Oid typid, { ereport(WARNING, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("\"%s\" array cannot contain NULL values", staname))); + errmsg("\"%s\" array must not contain null values", staname))); *ok = false; return (Datum) 0; } @@ -895,9 +898,9 @@ init_empty_stats_tuple(Oid reloid, int16 attnum, bool inherited, { values[Anum_pg_statistic_stakind1 + slotnum - 1] = (Datum) 0; nulls[Anum_pg_statistic_stakind1 + slotnum - 1] = false; - values[Anum_pg_statistic_staop1 + slotnum - 1] = InvalidOid; + values[Anum_pg_statistic_staop1 + slotnum - 1] = ObjectIdGetDatum(InvalidOid); nulls[Anum_pg_statistic_staop1 + slotnum - 1] = false; - values[Anum_pg_statistic_stacoll1 + slotnum - 1] = InvalidOid; + values[Anum_pg_statistic_stacoll1 + slotnum - 1] = ObjectIdGetDatum(InvalidOid); nulls[Anum_pg_statistic_stacoll1 + slotnum - 1] = false; } } diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c index a8b63ec0884a9..3c3d2d315c6f4 100644 --- a/src/backend/statistics/extended_stats.c +++ b/src/backend/statistics/extended_stats.c @@ -986,10 +986,9 @@ build_sorted_items(StatsBuildData *data, int *nitems, { int i, j, - len, nrows; int nvalues = data->numrows * numattrs; - + Size len; SortItem *items; Datum *values; bool *isnull; @@ -997,14 +996,16 @@ build_sorted_items(StatsBuildData *data, int *nitems, int *typlen; /* Compute the total amount of memory we need (both items and values). 
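
The build_sorted_items() change is about carving two arrays out of a single allocation. A minimal standalone illustration of why the MAXALIGN rounding matters; MAXIMUM_ALIGNOF is hardcoded to 8 for the demo, and the 12-byte item size stands in for a 32-bit sizeof(SortItem):

#include <stdint.h>
#include <stdio.h>

#define MAXIMUM_ALIGNOF 8		/* hardcoded for the demo */
#define MAXALIGN(LEN) \
	(((uintptr_t) (LEN) + (MAXIMUM_ALIGNOF - 1)) & ~((uintptr_t) (MAXIMUM_ALIGNOF - 1)))

/*
 * When several arrays are carved out of one allocation, each array's start
 * offset must be rounded up to the platform's maximum alignment; otherwise
 * the Datum array can begin at a misaligned address whenever
 * nrows * sizeof(SortItem) is not a multiple of MAXIMUM_ALIGNOF.
 */
int
main(void)
{
	size_t		item_size = 12;	/* stand-in for a 32-bit sizeof(SortItem) */
	size_t		nrows = 3;
	size_t		raw = nrows * item_size;

	printf("raw offset %zu -> maxaligned offset %zu\n",
		   raw, (size_t) MAXALIGN(raw));	/* 36 -> 40 */
	return 0;
}
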
*/ - len = data->numrows * sizeof(SortItem) + nvalues * (sizeof(Datum) + sizeof(bool)); + len = MAXALIGN(data->numrows * sizeof(SortItem)) + + nvalues * (sizeof(Datum) + sizeof(bool)); /* Allocate the memory and split it into the pieces. */ ptr = palloc0(len); /* items to sort */ items = (SortItem *) ptr; - ptr += data->numrows * sizeof(SortItem); + /* MAXALIGN ensures that the following Datums are suitably aligned */ + ptr += MAXALIGN(data->numrows * sizeof(SortItem)); /* values and null flags */ values = (Datum *) ptr; @@ -1317,6 +1318,9 @@ choose_best_statistics(List *stats, char requiredkind, bool inh, * so we can't cope with system columns. * *exprs: input/output parameter collecting primitive subclauses within * the clause tree + * *leakproof: input/output parameter recording the leakproofness of the + * clause tree. This should be true initially, and will be set to false + * if any operator function used in an OpExpr is not leakproof. * * Returns false if there is something we definitively can't handle. * On true return, we can proceed to match the *exprs against statistics. @@ -1324,7 +1328,7 @@ choose_best_statistics(List *stats, char requiredkind, bool inh, static bool statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause, Index relid, Bitmapset **attnums, - List **exprs) + List **exprs, bool *leakproof) { /* Look inside any binary-compatible relabeling (as in examine_variable) */ if (IsA(clause, RelabelType)) @@ -1359,7 +1363,6 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause, /* (Var/Expr op Const) or (Const op Var/Expr) */ if (is_opclause(clause)) { - RangeTblEntry *rte = root->simple_rte_array[relid]; OpExpr *expr = (OpExpr *) clause; Node *clause_expr; @@ -1394,24 +1397,15 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause, return false; } - /* - * If there are any securityQuals on the RTE from security barrier - * views or RLS policies, then the user may not have access to all the - * table's data, and we must check that the operator is leakproof. - * - * If the operator is leaky, then we must ignore this clause for the - * purposes of estimating with MCV lists, otherwise the operator might - * reveal values from the MCV list that the user doesn't have - * permission to see. - */ - if (rte->securityQuals != NIL && - !get_func_leakproof(get_opcode(expr->opno))) - return false; + /* Check if the operator is leakproof */ + if (*leakproof) + *leakproof = get_func_leakproof(get_opcode(expr->opno)); /* Check (Var op Const) or (Const op Var) clauses by recursing. */ if (IsA(clause_expr, Var)) return statext_is_compatible_clause_internal(root, clause_expr, - relid, attnums, exprs); + relid, attnums, + exprs, leakproof); /* Otherwise we have (Expr op Const) or (Const op Expr). */ *exprs = lappend(*exprs, clause_expr); @@ -1421,7 +1415,6 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause, /* Var/Expr IN Array */ if (IsA(clause, ScalarArrayOpExpr)) { - RangeTblEntry *rte = root->simple_rte_array[relid]; ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause; Node *clause_expr; bool expronleft; @@ -1461,24 +1454,15 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause, return false; } - /* - * If there are any securityQuals on the RTE from security barrier - * views or RLS policies, then the user may not have access to all the - * table's data, and we must check that the operator is leakproof. 
- * - * If the operator is leaky, then we must ignore this clause for the - * purposes of estimating with MCV lists, otherwise the operator might - * reveal values from the MCV list that the user doesn't have - * permission to see. - */ - if (rte->securityQuals != NIL && - !get_func_leakproof(get_opcode(expr->opno))) - return false; + /* Check if the operator is leakproof */ + if (*leakproof) + *leakproof = get_func_leakproof(get_opcode(expr->opno)); /* Check Var IN Array clauses by recursing. */ if (IsA(clause_expr, Var)) return statext_is_compatible_clause_internal(root, clause_expr, - relid, attnums, exprs); + relid, attnums, + exprs, leakproof); /* Otherwise we have Expr IN Array. */ *exprs = lappend(*exprs, clause_expr); @@ -1515,7 +1499,8 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause, */ if (!statext_is_compatible_clause_internal(root, (Node *) lfirst(lc), - relid, attnums, exprs)) + relid, attnums, exprs, + leakproof)) return false; } @@ -1529,8 +1514,10 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause, /* Check Var IS NULL clauses by recursing. */ if (IsA(nt->arg, Var)) - return statext_is_compatible_clause_internal(root, (Node *) (nt->arg), - relid, attnums, exprs); + return statext_is_compatible_clause_internal(root, + (Node *) (nt->arg), + relid, attnums, + exprs, leakproof); /* Otherwise we have Expr IS NULL. */ *exprs = lappend(*exprs, nt->arg); @@ -1569,11 +1556,9 @@ static bool statext_is_compatible_clause(PlannerInfo *root, Node *clause, Index relid, Bitmapset **attnums, List **exprs) { - RangeTblEntry *rte = root->simple_rte_array[relid]; - RelOptInfo *rel = root->simple_rel_array[relid]; RestrictInfo *rinfo; int clause_relid; - Oid userid; + bool leakproof; /* * Special-case handling for bare BoolExpr AND clauses, because the @@ -1613,18 +1598,31 @@ statext_is_compatible_clause(PlannerInfo *root, Node *clause, Index relid, clause_relid != relid) return false; - /* Check the clause and determine what attributes it references. */ + /* + * Check the clause, determine what attributes it references, and whether + * it includes any non-leakproof operators. + */ + leakproof = true; if (!statext_is_compatible_clause_internal(root, (Node *) rinfo->clause, - relid, attnums, exprs)) + relid, attnums, exprs, + &leakproof)) return false; /* - * Check that the user has permission to read all required attributes. + * If the clause includes any non-leakproof operators, check that the user + * has permission to read all required attributes, otherwise the operators + * might reveal values from the MCV list that the user doesn't have + * permission to see. We require all rows to be selectable --- there must + * be no securityQuals from security barrier views or RLS policies. See + * similar code in examine_variable(), examine_simple_variable(), and + * statistic_proc_security_check(). + * + * Note that for an inheritance child, the permission checks are performed + * on the inheritance root parent, and whole-table select privilege on the + * parent doesn't guarantee that the user could read all columns of the + * child. Therefore we must check all referenced columns. */ - userid = OidIsValid(rel->userid) ? 
rel->userid : GetUserId(); - - /* Table-level SELECT privilege is sufficient for all columns */ - if (pg_class_aclcheck(rte->relid, userid, ACL_SELECT) != ACLCHECK_OK) + if (!leakproof) { Bitmapset *clause_attnums = NULL; int attnum = -1; @@ -1649,26 +1647,9 @@ statext_is_compatible_clause(PlannerInfo *root, Node *clause, Index relid, if (*exprs != NIL) pull_varattnos((Node *) *exprs, relid, &clause_attnums); - attnum = -1; - while ((attnum = bms_next_member(clause_attnums, attnum)) >= 0) - { - /* Undo the offset */ - AttrNumber attno = attnum + FirstLowInvalidHeapAttributeNumber; - - if (attno == InvalidAttrNumber) - { - /* Whole-row reference, so must have access to all columns */ - if (pg_attribute_aclcheck_all(rte->relid, userid, ACL_SELECT, - ACLMASK_ALL) != ACLCHECK_OK) - return false; - } - else - { - if (pg_attribute_aclcheck(rte->relid, attno, userid, - ACL_SELECT) != ACLCHECK_OK) - return false; - } - } + /* Must have permission to read all rows from these columns */ + if (!all_rows_selectable(root, relid, clause_attnums)) + return false; } /* If we reach here, the clause is OK */ @@ -2618,7 +2599,7 @@ make_build_data(Relation rel, StatExtEntry *stat, int numrows, HeapTuple *rows, } else { - result->values[idx][i] = (Datum) datum; + result->values[idx][i] = datum; result->nulls[idx][i] = false; } diff --git a/src/backend/statistics/mcv.c b/src/backend/statistics/mcv.c index d98cda698d941..f59fb82154370 100644 --- a/src/backend/statistics/mcv.c +++ b/src/backend/statistics/mcv.c @@ -767,7 +767,7 @@ statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats) values[dim][i] = PointerGetDatum(PG_DETOAST_DATUM(values[dim][i])); /* serialized length (uint32 length + data) */ - len = VARSIZE_ANY_EXHDR(values[dim][i]); + len = VARSIZE_ANY_EXHDR(DatumGetPointer(values[dim][i])); info[dim].nbytes += sizeof(uint32); /* length */ info[dim].nbytes += len; /* value (no header) */ diff --git a/src/backend/statistics/relation_stats.c b/src/backend/statistics/relation_stats.c index cd3a75b621a0c..a59f0c519a474 100644 --- a/src/backend/statistics/relation_stats.c +++ b/src/backend/statistics/relation_stats.c @@ -112,7 +112,7 @@ relation_statistics_update(FunctionCallInfo fcinfo) { ereport(WARNING, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("reltuples cannot be < -1.0"))); + errmsg("argument \"%s\" must not be less than -1.0", "reltuples"))); result = false; } else diff --git a/src/backend/statistics/stat_utils.c b/src/backend/statistics/stat_utils.c index a9a3224efe6fd..ef7e5168bed40 100644 --- a/src/backend/statistics/stat_utils.c +++ b/src/backend/statistics/stat_utils.c @@ -41,7 +41,7 @@ stats_check_required_arg(FunctionCallInfo fcinfo, if (PG_ARGISNULL(argnum)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("\"%s\" cannot be NULL", + errmsg("argument \"%s\" must not be null", arginfo[argnum].argname))); } @@ -68,7 +68,7 @@ stats_check_arg_array(FunctionCallInfo fcinfo, { ereport(WARNING, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("\"%s\" cannot be a multidimensional array", + errmsg("argument \"%s\" must not be a multidimensional array", arginfo[argnum].argname))); return false; } @@ -77,7 +77,7 @@ stats_check_arg_array(FunctionCallInfo fcinfo, { ereport(WARNING, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("\"%s\" array cannot contain NULL values", + errmsg("argument \"%s\" array must not contain null values", arginfo[argnum].argname))); return false; } @@ -108,7 +108,7 @@ stats_check_arg_pair(FunctionCallInfo fcinfo, ereport(WARNING, 
(errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("\"%s\" must be specified when \"%s\" is specified", + errmsg("argument \"%s\" must be specified when argument \"%s\" is specified", arginfo[nullarg].argname, arginfo[otherarg].argname))); @@ -263,7 +263,7 @@ stats_check_arg_type(const char *argname, Oid argtype, Oid expectedtype) if (argtype != expectedtype) { ereport(WARNING, - (errmsg("argument \"%s\" has type \"%s\", expected type \"%s\"", + (errmsg("argument \"%s\" has type %s, expected type %s", argname, format_type_be(argtype), format_type_be(expectedtype)))); return false; @@ -319,11 +319,11 @@ stats_fill_fcinfo_from_arg_pairs(FunctionCallInfo pairs_fcinfo, if (argnulls[i]) ereport(ERROR, - (errmsg("name at variadic position %d is NULL", i + 1))); + (errmsg("name at variadic position %d is null", i + 1))); if (types[i] != TEXTOID) ereport(ERROR, - (errmsg("name at variadic position %d has type \"%s\", expected type \"%s\"", + (errmsg("name at variadic position %d has type %s, expected type %s", i + 1, format_type_be(types[i]), format_type_be(TEXTOID)))); diff --git a/src/backend/storage/aio/aio.c b/src/backend/storage/aio/aio.c index 3643f27ad6e1b..87d7136a93647 100644 --- a/src/backend/storage/aio/aio.c +++ b/src/backend/storage/aio/aio.c @@ -275,7 +275,7 @@ pgaio_io_release_resowner(dlist_node *ioh_node, bool on_error) ResourceOwnerForgetAioHandle(ioh->resowner, &ioh->resowner_node); ioh->resowner = NULL; - switch (ioh->state) + switch ((PgAioHandleState) ioh->state) { case PGAIO_HS_IDLE: elog(ERROR, "unexpected"); @@ -600,7 +600,7 @@ pgaio_io_wait(PgAioHandle *ioh, uint64 ref_generation) if (pgaio_io_was_recycled(ioh, ref_generation, &state)) return; - switch (state) + switch ((PgAioHandleState) state) { case PGAIO_HS_IDLE: case PGAIO_HS_HANDED_OUT: @@ -825,7 +825,7 @@ pgaio_io_wait_for_free(void) &pgaio_my_backend->in_flight_ios); uint64 generation = ioh->generation; - switch (ioh->state) + switch ((PgAioHandleState) ioh->state) { /* should not be in in-flight list */ case PGAIO_HS_IDLE: @@ -905,7 +905,7 @@ static const char * pgaio_io_state_get_name(PgAioHandleState s) { #define PGAIO_HS_TOSTR_CASE(sym) case PGAIO_HS_##sym: return #sym - switch (s) + switch ((PgAioHandleState) s) { PGAIO_HS_TOSTR_CASE(IDLE); PGAIO_HS_TOSTR_CASE(HANDED_OUT); @@ -930,7 +930,7 @@ pgaio_io_get_state_name(PgAioHandle *ioh) const char * pgaio_result_status_string(PgAioResultStatus rs) { - switch (rs) + switch ((PgAioResultStatus) rs) { case PGAIO_RS_UNKNOWN: return "UNKNOWN"; diff --git a/src/backend/storage/aio/aio_funcs.c b/src/backend/storage/aio/aio_funcs.c index 584e683371a31..d7977387b8f1a 100644 --- a/src/backend/storage/aio/aio_funcs.c +++ b/src/backend/storage/aio/aio_funcs.c @@ -56,7 +56,7 @@ pg_get_aios(PG_FUNCTION_ARGS) for (uint64 i = 0; i < pgaio_ctl->io_handle_count; i++) { PgAioHandle *live_ioh = &pgaio_ctl->io_handles[i]; - uint32 ioh_id = pgaio_io_get_id(live_ioh); + int ioh_id = pgaio_io_get_id(live_ioh); Datum values[PG_GET_AIOS_COLS] = {0}; bool nulls[PG_GET_AIOS_COLS] = {0}; ProcNumber owner; @@ -152,7 +152,7 @@ pg_get_aios(PG_FUNCTION_ARGS) nulls[0] = false; /* column: IO's id */ - values[1] = ioh_id; + values[1] = Int32GetDatum(ioh_id); /* column: IO's generation */ values[2] = Int64GetDatum(start_generation); @@ -175,7 +175,7 @@ pg_get_aios(PG_FUNCTION_ARGS) values[4] = CStringGetTextDatum(pgaio_io_get_op_name(&ioh_copy)); /* columns: details about the IO's operation (offset, length) */ - switch (ioh_copy.op) + switch ((PgAioOp) ioh_copy.op) { case PGAIO_OP_INVALID: 
nulls[5] = true; diff --git a/src/backend/storage/aio/aio_io.c b/src/backend/storage/aio/aio_io.c index 520b5077df25a..7d11d40284ada 100644 --- a/src/backend/storage/aio/aio_io.c +++ b/src/backend/storage/aio/aio_io.c @@ -121,7 +121,7 @@ pgaio_io_perform_synchronously(PgAioHandle *ioh) START_CRIT_SECTION(); /* Perform IO. */ - switch (ioh->op) + switch ((PgAioOp) ioh->op) { case PGAIO_OP_READV: pgstat_report_wait_start(WAIT_EVENT_DATA_FILE_READ); @@ -176,7 +176,7 @@ pgaio_io_get_op_name(PgAioHandle *ioh) { Assert(ioh->op >= 0 && ioh->op < PGAIO_OP_COUNT); - switch (ioh->op) + switch ((PgAioOp) ioh->op) { case PGAIO_OP_INVALID: return "invalid"; @@ -198,7 +198,7 @@ pgaio_io_uses_fd(PgAioHandle *ioh, int fd) { Assert(ioh->state >= PGAIO_HS_DEFINED); - switch (ioh->op) + switch ((PgAioOp) ioh->op) { case PGAIO_OP_READV: return ioh->op_data.read.fd == fd; @@ -222,7 +222,7 @@ pgaio_io_get_iovec_length(PgAioHandle *ioh, struct iovec **iov) *iov = &pgaio_ctl->iovecs[ioh->iovec_off]; - switch (ioh->op) + switch ((PgAioOp) ioh->op) { case PGAIO_OP_READV: return ioh->op_data.read.iov_length; diff --git a/src/backend/storage/aio/method_io_uring.c b/src/backend/storage/aio/method_io_uring.c index 0a8c054162f06..bb06da63a8e02 100644 --- a/src/backend/storage/aio/method_io_uring.c +++ b/src/backend/storage/aio/method_io_uring.c @@ -377,7 +377,7 @@ pgaio_uring_shmem_init(bool first_time) else if (-ret == ENOSYS) { err = ERRCODE_FEATURE_NOT_SUPPORTED; - hint = _("Kernel does not support io_uring."); + hint = _("The kernel does not support io_uring."); } /* update errno to allow %m to work */ @@ -660,7 +660,7 @@ pgaio_uring_sq_from_io(PgAioHandle *ioh, struct io_uring_sqe *sqe) { struct iovec *iov; - switch (ioh->op) + switch ((PgAioOp) ioh->op) { case PGAIO_OP_READV: iov = &pgaio_ctl->iovecs[ioh->iovec_off]; diff --git a/src/backend/storage/aio/method_worker.c b/src/backend/storage/aio/method_worker.c index bf8f77e6ff606..b5ac073a910df 100644 --- a/src/backend/storage/aio/method_worker.c +++ b/src/backend/storage/aio/method_worker.c @@ -58,7 +58,7 @@ typedef struct PgAioWorkerSubmissionQueue uint32 mask; uint32 head; uint32 tail; - uint32 sqes[FLEXIBLE_ARRAY_MEMBER]; + int sqes[FLEXIBLE_ARRAY_MEMBER]; } PgAioWorkerSubmissionQueue; typedef struct PgAioWorkerSlot @@ -107,7 +107,7 @@ pgaio_worker_queue_shmem_size(int *queue_size) *queue_size = pg_nextpower2_32(io_worker_queue_size); return offsetof(PgAioWorkerSubmissionQueue, sqes) + - sizeof(uint32) * *queue_size; + sizeof(int) * *queue_size; } static size_t @@ -198,15 +198,15 @@ pgaio_worker_submission_queue_insert(PgAioHandle *ioh) return true; } -static uint32 +static int pgaio_worker_submission_queue_consume(void) { PgAioWorkerSubmissionQueue *queue; - uint32 result; + int result; queue = io_worker_submission_queue; if (queue->tail == queue->head) - return UINT32_MAX; /* empty */ + return -1; /* empty */ result = queue->sqes[queue->tail]; queue->tail = (queue->tail + 1) & (queue->size - 1); @@ -470,7 +470,7 @@ IoWorkerMain(const void *startup_data, size_t startup_data_len) * to ensure that we don't see an outdated data in the handle. */ LWLockAcquire(AioWorkerSubmissionQueueLock, LW_EXCLUSIVE); - if ((io_index = pgaio_worker_submission_queue_consume()) == UINT32_MAX) + if ((io_index = pgaio_worker_submission_queue_consume()) == -1) { /* * Nothing to do. Mark self idle. 
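
The uint32-to-int change above replaces the UINT32_MAX "empty" sentinel with -1, matching pgaio_io_get_id(), which already returns an int. A minimal self-contained sketch of the power-of-two ring discipline the submission queue relies on; the names here are invented, not the actual PgAioWorkerSubmissionQueue API (uint32 is the PostgreSQL typedef from c.h):

    typedef struct Ring
    {
        uint32      size;           /* must be a power of two */
        uint32      head;
        uint32      tail;
        int         items[8];
    } Ring;

    static int
    ring_consume(Ring *q)
    {
        int         result;

        if (q->tail == q->head)
            return -1;              /* empty, like the -1 sentinel above */
        result = q->items[q->tail];
        q->tail = (q->tail + 1) & (q->size - 1);    /* cheap power-of-two modulo */
        return result;
    }
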
@@ -500,7 +500,7 @@ IoWorkerMain(const void *startup_data, size_t startup_data_len) for (int i = 0; i < nlatches; ++i) SetLatch(latches[i]); - if (io_index != UINT32_MAX) + if (io_index != -1) { PgAioHandle *ioh = NULL; diff --git a/src/backend/storage/aio/read_stream.c b/src/backend/storage/aio/read_stream.c index 0e7f5557f5cb9..031fde9f4cbef 100644 --- a/src/backend/storage/aio/read_stream.c +++ b/src/backend/storage/aio/read_stream.c @@ -247,12 +247,33 @@ read_stream_start_pending_read(ReadStream *stream) Assert(stream->pinned_buffers + stream->pending_read_nblocks <= stream->max_pinned_buffers); +#ifdef USE_ASSERT_CHECKING /* We had better not be overwriting an existing pinned buffer. */ if (stream->pinned_buffers > 0) Assert(stream->next_buffer_index != stream->oldest_buffer_index); else Assert(stream->next_buffer_index == stream->oldest_buffer_index); + /* + * Pinned buffers forwarded by a preceding StartReadBuffers() call that + * had to split the operation should match the leading blocks of this + * following StartReadBuffers() call. + */ + Assert(stream->forwarded_buffers <= stream->pending_read_nblocks); + for (int i = 0; i < stream->forwarded_buffers; ++i) + Assert(BufferGetBlockNumber(stream->buffers[stream->next_buffer_index + i]) == + stream->pending_read_blocknum + i); + + /* + * Check that we've cleared the queue/overflow entries corresponding to + * the rest of the blocks covered by this read, unless it's the first go + * around and we haven't even initialized them yet. + */ + for (int i = stream->forwarded_buffers; i < stream->pending_read_nblocks; ++i) + Assert(stream->next_buffer_index + i >= stream->initialized_buffers || + stream->buffers[stream->next_buffer_index + i] == InvalidBuffer); +#endif + /* Do we need to issue read-ahead advice? */ flags = stream->read_buffers_flags; if (stream->advice_enabled) @@ -979,6 +1000,19 @@ read_stream_next_buffer(ReadStream *stream, void **per_buffer_data) stream->pending_read_nblocks == 0 && stream->per_buffer_data_size == 0) { + /* + * The fast path spins on one buffer entry repeatedly instead of + * rotating through the whole queue and clearing the entries behind + * it. If the buffer it starts with happened to be forwarded between + * StartReadBuffers() calls and also wrapped around the circular queue + * partway through, then a copy also exists in the overflow zone, and + * it won't clear it out as the regular path would. Do that now, so + * it doesn't need code for that. + */ + if (stream->oldest_buffer_index < stream->io_combine_limit - 1) + stream->buffers[stream->queue_size + stream->oldest_buffer_index] = + InvalidBuffer; + stream->fast_path = true; } #endif diff --git a/src/backend/storage/buffer/README b/src/backend/storage/buffer/README index a182fcd660ccb..119f31b5d6584 100644 --- a/src/backend/storage/buffer/README +++ b/src/backend/storage/buffer/README @@ -128,11 +128,11 @@ independently. If it is necessary to lock more than one partition at a time, they must be locked in partition-number order to avoid risk of deadlock. * A separate system-wide spinlock, buffer_strategy_lock, provides mutual -exclusion for operations that access the buffer free list or select -buffers for replacement. A spinlock is used here rather than a lightweight -lock for efficiency; no other locks of any sort should be acquired while -buffer_strategy_lock is held. This is essential to allow buffer replacement -to happen in multiple backends with reasonable concurrency. +exclusion for operations that select buffers for replacement. 
A spinlock is +used here rather than a lightweight lock for efficiency; no other locks of any +sort should be acquired while buffer_strategy_lock is held. This is essential +to allow buffer replacement to happen in multiple backends with reasonable +concurrency. * Each buffer header contains a spinlock that must be taken when examining or changing fields of that buffer header. This allows operations such as @@ -158,18 +158,8 @@ unset by sleeping on the buffer's condition variable. Normal Buffer Replacement Strategy ---------------------------------- -There is a "free list" of buffers that are prime candidates for replacement. -In particular, buffers that are completely free (contain no valid page) are -always in this list. We could also throw buffers into this list if we -consider their pages unlikely to be needed soon; however, the current -algorithm never does that. The list is singly-linked using fields in the -buffer headers; we maintain head and tail pointers in global variables. -(Note: although the list links are in the buffer headers, they are -considered to be protected by the buffer_strategy_lock, not the buffer-header -spinlocks.) To choose a victim buffer to recycle when there are no free -buffers available, we use a simple clock-sweep algorithm, which avoids the -need to take system-wide locks during common operations. It works like -this: +To choose a victim buffer to recycle we use a simple clock-sweep algorithm. It +works like this: Each buffer header contains a usage counter, which is incremented (up to a small limit value) whenever the buffer is pinned. (This requires only the @@ -184,20 +174,14 @@ The algorithm for a process that needs to obtain a victim buffer is: 1. Obtain buffer_strategy_lock. -2. If buffer free list is nonempty, remove its head buffer. Release -buffer_strategy_lock. If the buffer is pinned or has a nonzero usage count, -it cannot be used; ignore it go back to step 1. Otherwise, pin the buffer, -and return it. +2. Select the buffer pointed to by nextVictimBuffer, and circularly advance +nextVictimBuffer for next time. Release buffer_strategy_lock. -3. Otherwise, the buffer free list is empty. Select the buffer pointed to by -nextVictimBuffer, and circularly advance nextVictimBuffer for next time. -Release buffer_strategy_lock. - -4. If the selected buffer is pinned or has a nonzero usage count, it cannot +3. If the selected buffer is pinned or has a nonzero usage count, it cannot be used. Decrement its usage count (if nonzero), reacquire -buffer_strategy_lock, and return to step 3 to examine the next buffer. +buffer_strategy_lock, and return to step 2 to examine the next buffer. -5. Pin the selected buffer, and return. +4. Pin the selected buffer, and return. (Note that if the selected buffer is dirty, we will have to write it out before we can recycle it; if someone else pins the buffer meanwhile we will @@ -211,9 +195,9 @@ Buffer Ring Replacement Strategy When running a query that needs to access a large number of pages just once, such as VACUUM or a large sequential scan, a different strategy is used. A page that has been touched only by such a scan is unlikely to be needed -again soon, so instead of running the normal clock sweep algorithm and +again soon, so instead of running the normal clock-sweep algorithm and blowing out the entire buffer cache, a small ring of buffers is allocated -using the normal clock sweep algorithm and those buffers are reused for the +using the normal clock-sweep algorithm and those buffers are reused for the whole scan.
This also implies that much of the write traffic caused by such a statement will be done by the backend itself and not pushed off onto other processes. @@ -234,7 +218,7 @@ the ring strategy effectively degrades to the normal strategy. VACUUM uses a ring like sequential scans, however, the size of this ring is controlled by the vacuum_buffer_usage_limit GUC. Dirty pages are not removed -from the ring. Instead, WAL is flushed if needed to allow reuse of the +from the ring. Instead, the WAL is flushed if needed to allow reuse of the buffers. Before introducing the buffer ring strategy in 8.3, VACUUM's buffers were sent to the freelist, which was effectively a buffer ring of 1 buffer, resulting in excessive WAL flushing. diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c index ed1dc488a42b4..6fd3a6bbac5ea 100644 --- a/src/backend/storage/buffer/buf_init.c +++ b/src/backend/storage/buffer/buf_init.c @@ -128,20 +128,11 @@ BufferManagerShmemInit(void) pgaio_wref_clear(&buf->io_wref); - /* - * Initially link all the buffers together as unused. Subsequent - * management of this list is done by freelist.c. - */ - buf->freeNext = i + 1; - LWLockInitialize(BufferDescriptorGetContentLock(buf), LWTRANCHE_BUFFER_CONTENT); ConditionVariableInit(BufferDescriptorGetIOCV(buf)); } - - /* Correct last entry of linked list */ - GetBufferDescriptor(NBuffers - 1)->freeNext = FREENEXT_END_OF_LIST; } /* Init other shared buffer-management stuff */ diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 9c6fe587ec940..fe470de63f20c 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -1484,11 +1484,6 @@ StartReadBuffersImpl(ReadBuffersOperation *operation, * buffers must remain valid until WaitReadBuffers() is called, and any * forwarded buffers must also be preserved for a continuing call unless * they are explicitly released. - * - * Currently the I/O is only started with optional operating system advice if - * requested by the caller with READ_BUFFERS_ISSUE_ADVICE, and the real I/O - * happens synchronously in WaitReadBuffers(). In future work, true I/O could - * be initiated here. */ bool StartReadBuffers(ReadBuffersOperation *operation, @@ -2099,12 +2094,6 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, */ UnpinBuffer(victim_buf_hdr); - /* - * The victim buffer we acquired previously is clean and unused, let - * it be found again quickly - */ - StrategyFreeBuffer(victim_buf_hdr); - /* remaining code should match code at top of routine */ existing_buf_hdr = GetBufferDescriptor(existing_buf_id); @@ -2163,8 +2152,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, } /* - * InvalidateBuffer -- mark a shared buffer invalid and return it to the - * freelist. + * InvalidateBuffer -- mark a shared buffer invalid. * * The buffer header spinlock must be held at entry. We drop it before * returning. (This is sane because the caller must have locked the @@ -2262,11 +2250,6 @@ InvalidateBuffer(BufferDesc *buf) * Done with mapping lock. */ LWLockRelease(oldPartitionLock); - - /* - * Insert the buffer at the head of the list of free buffers. 
- */ - StrategyFreeBuffer(buf); } /* @@ -2684,11 +2667,6 @@ ExtendBufferedRelShared(BufferManagerRelation bmr, { BufferDesc *buf_hdr = GetBufferDescriptor(buffers[i] - 1); - /* - * The victim buffer we acquired previously is clean and unused, - * let it be found again quickly - */ - StrategyFreeBuffer(buf_hdr); UnpinBuffer(buf_hdr); } @@ -2744,9 +2722,9 @@ ExtendBufferedRelShared(BufferManagerRelation bmr, * zero_damaged_pages is ON) and so a previous attempt to read a block * beyond EOF could have left a "valid" zero-filled buffer. * - * This has also been observed when relation was overwritten by external - * process. Since the legitimate cases should always have left a - * zero-filled buffer, complain if not PageIsNew. + * This has also been observed when the relation was overwritten by an + * external process. Since the legitimate cases should always have + * left a zero-filled buffer, complain if not PageIsNew. */ if (existing_id >= 0) { @@ -2761,12 +2739,6 @@ ExtendBufferedRelShared(BufferManagerRelation bmr, valid = PinBuffer(existing_hdr, strategy); LWLockRelease(partition_lock); - - /* - * The victim buffer we acquired previously is clean and unused, - * let it be found again quickly - */ - StrategyFreeBuffer(victim_buf_hdr); UnpinBuffer(victim_buf_hdr); buffers[i] = BufferDescriptorGetBuffer(existing_hdr); @@ -2774,7 +2746,7 @@ ExtendBufferedRelShared(BufferManagerRelation bmr, if (valid && !PageIsNew((Page) buf_block)) ereport(ERROR, - (errmsg("unexpected data beyond EOF in block %u of relation %s", + (errmsg("unexpected data beyond EOF in block %u of relation \"%s\"", existing_hdr->tag.blockNum, relpath(bmr.smgr->smgr_rlocator, fork).str))); @@ -3613,7 +3585,7 @@ BufferSync(int flags) * This is called periodically by the background writer process. * * Returns true if it's appropriate for the bgwriter process to go into - * low-power hibernation mode. (This happens if the strategy clock sweep + * low-power hibernation mode. (This happens if the strategy clock-sweep * has been "lapped" and no buffer allocations have occurred recently, * or if the bgwriter has been effectively disabled by setting * bgwriter_lru_maxpages to 0.) @@ -3663,8 +3635,8 @@ BgBufferSync(WritebackContext *wb_context) uint32 new_recent_alloc; /* - * Find out where the freelist clock sweep currently is, and how many - * buffer allocations have happened since our last call. + * Find out where the clock-sweep currently is, and how many buffer + * allocations have happened since our last call. */ strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc); @@ -3684,8 +3656,8 @@ BgBufferSync(WritebackContext *wb_context) /* * Compute strategy_delta = how many buffers have been scanned by the - * clock sweep since last time. If first time through, assume none. Then - * see if we are still ahead of the clock sweep, and if so, how many + * clock-sweep since last time. If first time through, assume none. Then - * see if we are still ahead of the clock-sweep, and if so, how many * buffers we could scan before we'd catch up with it and "lap" it. Note: * weird-looking coding of xxx_passes comparisons are to avoid bogus * behavior when the passes counts wrap around.
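
The "weird-looking coding" mentioned above boils down to doing the pass arithmetic in a signed width so that wraparound of the unsigned passes counters cancels out. A sketch of the computation, following the variable names in BgBufferSync() but not reproducing it verbatim:

    int32       passes_delta = strategy_passes - prev_strategy_passes;
    long        strategy_delta;

    /* whole laps times NBuffers, plus the partial lap since last call */
    strategy_delta = strategy_buf_id - prev_strategy_buf_id;
    strategy_delta += (long) passes_delta * NBuffers;

    Assert(strategy_delta >= 0);
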
@@ -6196,7 +6168,7 @@ shared_buffer_write_error_callback(void *arg) /* Buffer is pinned, so we can read the tag without locking the spinlock */ if (bufHdr != NULL) - errcontext("writing block %u of relation %s", + errcontext("writing block %u of relation \"%s\"", bufHdr->tag.blockNum, relpathperm(BufTagGetRelFileLocator(&bufHdr->tag), BufTagGetForkNum(&bufHdr->tag)).str); @@ -6211,7 +6183,7 @@ local_buffer_write_error_callback(void *arg) BufferDesc *bufHdr = (BufferDesc *) arg; if (bufHdr != NULL) - errcontext("writing block %u of relation %s", + errcontext("writing block %u of relation \"%s\"", bufHdr->tag.blockNum, relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag), MyProcNumber, @@ -6370,8 +6342,8 @@ ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b) static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg) { - CkptTsStatus *sa = (CkptTsStatus *) a; - CkptTsStatus *sb = (CkptTsStatus *) b; + CkptTsStatus *sa = (CkptTsStatus *) DatumGetPointer(a); + CkptTsStatus *sb = (CkptTsStatus *) DatumGetPointer(b); /* we want a min-heap, so return 1 for the a < b */ if (sa->progress < sb->progress) @@ -7310,13 +7282,15 @@ buffer_readv_report(PgAioResult result, const PgAioTargetData *td, ereport(elevel, errcode(ERRCODE_DATA_CORRUPTED), - errmsg("zeroing %u page(s) and ignoring %u checksum failure(s) among blocks %u..%u of relation %s", + errmsg("zeroing %u page(s) and ignoring %u checksum failure(s) among blocks %u..%u of relation \"%s\"", affected_count, checkfail_count, first, last, rpath.str), affected_count > 1 ? - errdetail("Block %u held first zeroed page.", + errdetail("Block %u held the first zeroed page.", first + first_off) : 0, - errhint("See server log for details about the other %d invalid block(s).", - affected_count + checkfail_count - 1)); + errhint_plural("See server log for details about the other %d invalid block.", + "See server log for details about the other %d invalid blocks.", + affected_count + checkfail_count - 1, + affected_count + checkfail_count - 1)); return; } @@ -7329,25 +7303,25 @@ buffer_readv_report(PgAioResult result, const PgAioTargetData *td, { Assert(!zeroed_any); /* can't have invalid pages when zeroing them */ affected_count = zeroed_or_error_count; - msg_one = _("invalid page in block %u of relation %s"); - msg_mult = _("%u invalid pages among blocks %u..%u of relation %s"); - det_mult = _("Block %u held first invalid page."); + msg_one = _("invalid page in block %u of relation \"%s\""); + msg_mult = _("%u invalid pages among blocks %u..%u of relation \"%s\""); + det_mult = _("Block %u held the first invalid page."); hint_mult = _("See server log for the other %u invalid block(s)."); } else if (zeroed_any && !ignored_any) { affected_count = zeroed_or_error_count; - msg_one = _("invalid page in block %u of relation %s; zeroing out page"); - msg_mult = _("zeroing out %u invalid pages among blocks %u..%u of relation %s"); - det_mult = _("Block %u held first zeroed page."); + msg_one = _("invalid page in block %u of relation \"%s\"; zeroing out page"); + msg_mult = _("zeroing out %u invalid pages among blocks %u..%u of relation \"%s\""); + det_mult = _("Block %u held the first zeroed page."); hint_mult = _("See server log for the other %u zeroed block(s)."); } else if (!zeroed_any && ignored_any) { affected_count = checkfail_count; - msg_one = _("ignoring checksum failure in block %u of relation %s"); - msg_mult = _("ignoring %u checksum failures among blocks %u..%u of relation %s"); - det_mult = _("Block %u held first ignored 
page."); + msg_one = _("ignoring checksum failure in block %u of relation \"%s\""); + msg_mult = _("ignoring %u checksum failures among blocks %u..%u of relation \"%s\""); + det_mult = _("Block %u held the first ignored page."); hint_mult = _("See server log for the other %u ignored block(s)."); } else diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c index 01909be027258..7d59a92bd1a88 100644 --- a/src/backend/storage/buffer/freelist.c +++ b/src/backend/storage/buffer/freelist.c @@ -33,25 +33,17 @@ typedef struct slock_t buffer_strategy_lock; /* - * Clock sweep hand: index of next buffer to consider grabbing. Note that + * clock-sweep hand: index of next buffer to consider grabbing. Note that * this isn't a concrete buffer - we only ever increase the value. So, to * get an actual buffer, it needs to be used modulo NBuffers. */ pg_atomic_uint32 nextVictimBuffer; - int firstFreeBuffer; /* Head of list of unused buffers */ - int lastFreeBuffer; /* Tail of list of unused buffers */ - - /* - * NOTE: lastFreeBuffer is undefined when firstFreeBuffer is -1 (that is, - * when the list is empty) - */ - /* * Statistics. These counters should be wide enough that they can't * overflow during a single bgwriter cycle. */ - uint32 completePasses; /* Complete cycles of the clock sweep */ + uint32 completePasses; /* Complete cycles of the clock-sweep */ pg_atomic_uint32 numBufferAllocs; /* Buffers allocated since last reset */ /* @@ -163,23 +155,6 @@ ClockSweepTick(void) return victim; } -/* - * have_free_buffer -- a lockless check to see if there is a free buffer in - * buffer pool. - * - * If the result is true that will become stale once free buffers are moved out - * by other operations, so the caller who strictly want to use a free buffer - * should not call this. - */ -bool -have_free_buffer(void) -{ - if (StrategyControl->firstFreeBuffer >= 0) - return true; - else - return false; -} - /* * StrategyGetBuffer * @@ -249,69 +224,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_r */ pg_atomic_fetch_add_u32(&StrategyControl->numBufferAllocs, 1); - /* - * First check, without acquiring the lock, whether there's buffers in the - * freelist. Since we otherwise don't require the spinlock in every - * StrategyGetBuffer() invocation, it'd be sad to acquire it here - - * uselessly in most cases. That obviously leaves a race where a buffer is - * put on the freelist but we don't see the store yet - but that's pretty - * harmless, it'll just get used during the next buffer acquisition. - * - * If there's buffers on the freelist, acquire the spinlock to pop one - * buffer of the freelist. Then check whether that buffer is usable and - * repeat if not. - * - * Note that the freeNext fields are considered to be protected by the - * buffer_strategy_lock not the individual buffer spinlocks, so it's OK to - * manipulate them without holding the spinlock. 
- */ - if (StrategyControl->firstFreeBuffer >= 0) - { - while (true) - { - /* Acquire the spinlock to remove element from the freelist */ - SpinLockAcquire(&StrategyControl->buffer_strategy_lock); - - if (StrategyControl->firstFreeBuffer < 0) - { - SpinLockRelease(&StrategyControl->buffer_strategy_lock); - break; - } - - buf = GetBufferDescriptor(StrategyControl->firstFreeBuffer); - Assert(buf->freeNext != FREENEXT_NOT_IN_LIST); - - /* Unconditionally remove buffer from freelist */ - StrategyControl->firstFreeBuffer = buf->freeNext; - buf->freeNext = FREENEXT_NOT_IN_LIST; - - /* - * Release the lock so someone else can access the freelist while - * we check out this buffer. - */ - SpinLockRelease(&StrategyControl->buffer_strategy_lock); - - /* - * If the buffer is pinned or has a nonzero usage_count, we cannot - * use it; discard it and retry. (This can only happen if VACUUM - * put a valid buffer in the freelist and then someone else used - * it before we got to it. It's probably impossible altogether as - * of 8.3, but we'd better check anyway.) - */ - local_buf_state = LockBufHdr(buf); - if (BUF_STATE_GET_REFCOUNT(local_buf_state) == 0 - && BUF_STATE_GET_USAGECOUNT(local_buf_state) == 0) - { - if (strategy != NULL) - AddBufferToRing(strategy, buf); - *buf_state = local_buf_state; - return buf; - } - UnlockBufHdr(buf, local_buf_state); - } - } - - /* Nothing on the freelist, so run the "clock sweep" algorithm */ + /* Use the "clock sweep" algorithm to find a free buffer */ trycounter = NBuffers; for (;;) { @@ -356,29 +269,6 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_r } } -/* - * StrategyFreeBuffer: put a buffer on the freelist - */ -void -StrategyFreeBuffer(BufferDesc *buf) -{ - SpinLockAcquire(&StrategyControl->buffer_strategy_lock); - - /* - * It is possible that we are told to put something in the freelist that - * is already in it; don't screw up the list if so. - */ - if (buf->freeNext == FREENEXT_NOT_IN_LIST) - { - buf->freeNext = StrategyControl->firstFreeBuffer; - if (buf->freeNext < 0) - StrategyControl->lastFreeBuffer = buf->buf_id; - StrategyControl->firstFreeBuffer = buf->buf_id; - } - - SpinLockRelease(&StrategyControl->buffer_strategy_lock); -} - /* * StrategySyncStart -- tell BgBufferSync where to start syncing * @@ -504,14 +394,7 @@ StrategyInitialize(bool init) SpinLockInit(&StrategyControl->buffer_strategy_lock); - /* - * Grab the whole linked list of free buffers for our strategy. We - * assume it was previously set up by BufferManagerShmemInit(). - */ - StrategyControl->firstFreeBuffer = 0; - StrategyControl->lastFreeBuffer = NBuffers - 1; - - /* Initialize the clock sweep pointer */ + /* Initialize the clock-sweep pointer */ pg_atomic_init_u32(&StrategyControl->nextVictimBuffer, 0); /* Clear statistics */ @@ -759,7 +642,7 @@ GetBufferFromRing(BufferAccessStrategy strategy, uint32 *buf_state) * * If usage_count is 0 or 1 then the buffer is fair game (we expect 1, * since our own previous usage of the ring element would have left it - * there, but it might've been decremented by clock sweep since then). A + * there, but it might've been decremented by clock-sweep since then). A * higher usage_count indicates someone else has touched the buffer, so we * shouldn't re-use it. 
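
With the freelist gone, the loop that remains implements exactly the README steps above: tick the clock hand, skip pinned or recently used buffers, and give up only after sweeping a full lap without finding anything unpinned. A condensed sketch of that loop (ring-strategy handling and some bookkeeping elided, not the verbatim StrategyGetBuffer() body):

    int         trycounter = NBuffers;

    for (;;)
    {
        BufferDesc *buf = GetBufferDescriptor(ClockSweepTick());
        uint32      local_buf_state = LockBufHdr(buf);

        if (BUF_STATE_GET_REFCOUNT(local_buf_state) == 0)
        {
            if (BUF_STATE_GET_USAGECOUNT(local_buf_state) != 0)
            {
                /* recently used: age it and keep sweeping */
                local_buf_state -= BUF_USAGECOUNT_ONE;
                trycounter = NBuffers;
            }
            else
            {
                /* victim found; return it with the header lock held */
                *buf_state = local_buf_state;
                return buf;
            }
        }
        else if (--trycounter == 0)
        {
            /* a whole lap without one unpinned buffer */
            UnlockBufHdr(buf, local_buf_state);
            elog(ERROR, "no unpinned buffers available");
        }
        UnlockBufHdr(buf, local_buf_state);
    }
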
*/ diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c index 3da9c41ee1d7a..04fef13409b02 100644 --- a/src/backend/storage/buffer/localbuf.c +++ b/src/backend/storage/buffer/localbuf.c @@ -229,7 +229,7 @@ GetLocalVictimBuffer(void) ResourceOwnerEnlarge(CurrentResourceOwner); /* - * Need to get a new buffer. We use a clock sweep algorithm (essentially + * Need to get a new buffer. We use a clock-sweep algorithm (essentially * the same as what freelist.c does now...) */ trycounter = NLocBuffer; @@ -932,10 +932,11 @@ GetLocalBufferStorage(void) num_bufs = Min(num_bufs, MaxAllocSize / BLCKSZ); /* Buffers should be I/O aligned. */ - cur_block = (char *) - TYPEALIGN(PG_IO_ALIGN_SIZE, - MemoryContextAlloc(LocalBufferContext, - num_bufs * BLCKSZ + PG_IO_ALIGN_SIZE)); + cur_block = MemoryContextAllocAligned(LocalBufferContext, + num_bufs * BLCKSZ, + PG_IO_ALIGN_SIZE, + 0); + next_buf_in_block = 0; num_bufs_in_block = num_bufs; } diff --git a/src/backend/storage/file/fileset.c b/src/backend/storage/file/fileset.c index 64141c7cb91c9..4d5ee353fd7a0 100644 --- a/src/backend/storage/file/fileset.c +++ b/src/backend/storage/file/fileset.c @@ -185,7 +185,7 @@ FileSetPath(char *path, FileSet *fileset, Oid tablespace) static Oid ChooseTablespace(const FileSet *fileset, const char *name) { - uint32 hash = hash_any((const unsigned char *) name, strlen(name)); + uint32 hash = hash_bytes((const unsigned char *) name, strlen(name)); return fileset->tablespaces[hash % fileset->ntablespaces]; } diff --git a/src/backend/storage/ipc/dsm_registry.c b/src/backend/storage/ipc/dsm_registry.c index 1682cc6d34c7f..971309251062d 100644 --- a/src/backend/storage/ipc/dsm_registry.c +++ b/src/backend/storage/ipc/dsm_registry.c @@ -48,12 +48,6 @@ #include "utils/builtins.h" #include "utils/memutils.h" -#define DSMR_NAME_LEN 128 - -#define DSMR_DSA_TRANCHE_SUFFIX " DSA" -#define DSMR_DSA_TRANCHE_SUFFIX_LEN (sizeof(DSMR_DSA_TRANCHE_SUFFIX) - 1) -#define DSMR_DSA_TRANCHE_NAME_LEN (DSMR_NAME_LEN + DSMR_DSA_TRANCHE_SUFFIX_LEN) - typedef struct DSMRegistryCtxStruct { dsa_handle dsah; @@ -72,15 +66,13 @@ typedef struct NamedDSAState { dsa_handle handle; int tranche; - char tranche_name[DSMR_DSA_TRANCHE_NAME_LEN]; } NamedDSAState; typedef struct NamedDSHState { - NamedDSAState dsa; - dshash_table_handle handle; + dsa_handle dsa_handle; + dshash_table_handle dsh_handle; int tranche; - char tranche_name[DSMR_NAME_LEN]; } NamedDSHState; typedef enum DSMREntryType @@ -99,7 +91,7 @@ static const char *const DSMREntryTypeNames[] = typedef struct DSMRegistryEntry { - char name[DSMR_NAME_LEN]; + char name[NAMEDATALEN]; DSMREntryType type; union { @@ -307,9 +299,7 @@ GetNamedDSA(const char *name, bool *found) entry->type = DSMR_ENTRY_TYPE_DSA; /* Initialize the LWLock tranche for the DSA. */ - state->tranche = LWLockNewTrancheId(); - strcpy(state->tranche_name, name); - LWLockRegisterTranche(state->tranche, state->tranche_name); + state->tranche = LWLockNewTrancheId(name); /* Initialize the DSA. */ ret = dsa_create(state->tranche); @@ -330,9 +320,6 @@ GetNamedDSA(const char *name, bool *found) ereport(ERROR, (errmsg("requested DSA already attached to current process"))); - /* Initialize existing LWLock tranche for the DSA. */ - LWLockRegisterTranche(state->tranche, state->tranche_name); - /* Attach to existing DSA. */ ret = dsa_attach(state->handle); dsa_pin_mapping(ret); @@ -348,11 +335,10 @@ GetNamedDSA(const char *name, bool *found) * Initialize or attach a named dshash table. 
* * This routine returns the address of the table. The tranche_id member of - * params is ignored; new tranche IDs will be generated if needed. Note that - * the DSA lock tranche will be registered with the provided name with " DSA" - * appended. The dshash lock tranche will be registered with the provided - * name. Also note that this should be called at most once for a given table - * in each backend. + * params is ignored; a new LWLock tranche ID will be generated if needed. + * Note that the lock tranche will be registered with the provided name. Also + * note that this should be called at most once for a given table in each + * backend. */ dshash_table * GetNamedDSHash(const char *name, const dshash_parameters *params, bool *found) @@ -381,25 +367,17 @@ GetNamedDSHash(const char *name, const dshash_parameters *params, bool *found) entry = dshash_find_or_insert(dsm_registry_table, name, found); if (!(*found)) { - NamedDSAState *dsa_state = &entry->data.dsh.dsa; NamedDSHState *dsh_state = &entry->data.dsh; dshash_parameters params_copy; dsa_area *dsa; entry->type = DSMR_ENTRY_TYPE_DSH; - /* Initialize the LWLock tranche for the DSA. */ - dsa_state->tranche = LWLockNewTrancheId(); - sprintf(dsa_state->tranche_name, "%s%s", name, DSMR_DSA_TRANCHE_SUFFIX); - LWLockRegisterTranche(dsa_state->tranche, dsa_state->tranche_name); - - /* Initialize the LWLock tranche for the dshash table. */ - dsh_state->tranche = LWLockNewTrancheId(); - strcpy(dsh_state->tranche_name, name); - LWLockRegisterTranche(dsh_state->tranche, dsh_state->tranche_name); + /* Initialize the LWLock tranche for the hash table. */ + dsh_state->tranche = LWLockNewTrancheId(name); /* Initialize the DSA for the hash table. */ - dsa = dsa_create(dsa_state->tranche); + dsa = dsa_create(dsh_state->tranche); dsa_pin(dsa); dsa_pin_mapping(dsa); @@ -409,34 +387,29 @@ GetNamedDSHash(const char *name, const dshash_parameters *params, bool *found) ret = dshash_create(dsa, ¶ms_copy, NULL); /* Store handles for other backends to use. */ - dsa_state->handle = dsa_get_handle(dsa); - dsh_state->handle = dshash_get_hash_table_handle(ret); + dsh_state->dsa_handle = dsa_get_handle(dsa); + dsh_state->dsh_handle = dshash_get_hash_table_handle(ret); } else if (entry->type != DSMR_ENTRY_TYPE_DSH) ereport(ERROR, (errmsg("requested DSHash does not match type of existing entry"))); else { - NamedDSAState *dsa_state = &entry->data.dsh.dsa; NamedDSHState *dsh_state = &entry->data.dsh; dsa_area *dsa; /* XXX: Should we verify params matches what table was created with? */ - if (dsa_is_attached(dsa_state->handle)) + if (dsa_is_attached(dsh_state->dsa_handle)) ereport(ERROR, (errmsg("requested DSHash already attached to current process"))); - /* Initialize existing LWLock tranches for the DSA and dshash table. */ - LWLockRegisterTranche(dsa_state->tranche, dsa_state->tranche_name); - LWLockRegisterTranche(dsh_state->tranche, dsh_state->tranche_name); - /* Attach to existing DSA for the hash table. */ - dsa = dsa_attach(dsa_state->handle); + dsa = dsa_attach(dsh_state->dsa_handle); dsa_pin_mapping(dsa); /* Attach to existing dshash table. 
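
For extension code, the upshot of the simplification above is that one registry entry now owns a single tranche covering both the DSA and the dshash table, with no per-backend tranche registration. A minimal usage sketch; the "widget" names are hypothetical, and the comparison/hash/copy helpers are the stock ones from lib/dshash.h:

    #include "lib/dshash.h"
    #include "storage/dsm_registry.h"

    typedef struct WidgetEntry
    {
        uint32      key;
        int         value;
    } WidgetEntry;

    static const dshash_parameters widget_params = {
        sizeof(uint32),             /* key_size */
        sizeof(WidgetEntry),        /* entry_size */
        dshash_memcmp,
        dshash_memhash,
        dshash_memcpy,
        0                           /* tranche_id, ignored per the comment above */
    };

    static dshash_table *
    widget_table_attach(void)
    {
        bool        found;

        /* creates the table on first call, attaches on later calls */
        return GetNamedDSHash("widget_table", &widget_params, &found);
    }
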
*/ - ret = dshash_attach(dsa, params, dsh_state->handle, NULL); + ret = dshash_attach(dsa, params, dsh_state->dsh_handle, NULL); } dshash_release_lock(dsm_registry_table, entry); diff --git a/src/backend/storage/ipc/ipc.c b/src/backend/storage/ipc/ipc.c index 567739b5be93a..2704e80b3a7d9 100644 --- a/src/backend/storage/ipc/ipc.c +++ b/src/backend/storage/ipc/ipc.c @@ -399,7 +399,7 @@ cancel_before_shmem_exit(pg_on_exit_callback function, Datum arg) before_shmem_exit_list[before_shmem_exit_index - 1].arg == arg) --before_shmem_exit_index; else - elog(ERROR, "before_shmem_exit callback (%p,0x%" PRIxPTR ") is not the latest entry", + elog(ERROR, "before_shmem_exit callback (%p,0x%" PRIx64 ") is not the latest entry", function, arg); } diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c index bf987aed8d327..200f72c6e2565 100644 --- a/src/backend/storage/ipc/procarray.c +++ b/src/backend/storage/ipc/procarray.c @@ -54,7 +54,6 @@ #include "access/xlogutils.h" #include "catalog/catalog.h" #include "catalog/pg_authid.h" -#include "commands/dbcommands.h" #include "miscadmin.h" #include "pgstat.h" #include "port/pg_lfind.h" @@ -62,6 +61,7 @@ #include "storage/procarray.h" #include "utils/acl.h" #include "utils/builtins.h" +#include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/snapmgr.h" diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c index a9bb540b55ac2..087821311cceb 100644 --- a/src/backend/storage/ipc/procsignal.c +++ b/src/backend/storage/ipc/procsignal.c @@ -728,7 +728,11 @@ procsignal_sigusr1_handler(SIGNAL_ARGS) void SendCancelRequest(int backendPID, const uint8 *cancel_key, int cancel_key_len) { - Assert(backendPID != 0); + if (backendPID == 0) + { + ereport(LOG, (errmsg("invalid cancel request with PID 0"))); + return; + } /* * See if we have a matching backend. 
Reading the pss_pid and diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c index ca3656fc76f43..a0770e867968a 100644 --- a/src/backend/storage/ipc/shmem.c +++ b/src/backend/storage/ipc/shmem.c @@ -330,8 +330,8 @@ InitShmemIndex(void) */ HTAB * ShmemInitHash(const char *name, /* table string name for shmem index */ - long init_size, /* initial table size */ - long max_size, /* max size of the table */ + int64 init_size, /* initial table size */ + int64 max_size, /* max size of the table */ HASHCTL *infoP, /* info about key and bucket size */ int hash_flags) /* info about infoP */ { @@ -714,7 +714,7 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS) for (i = 0; i <= max_nodes; i++) { values[0] = CStringGetTextDatum(ent->key); - values[1] = i; + values[1] = Int32GetDatum(i); values[2] = Int64GetDatum(nodes[i] * os_page_size); tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, diff --git a/src/backend/storage/large_object/inv_api.c b/src/backend/storage/large_object/inv_api.c index 68b76f2cc18a0..a874000c8ca26 100644 --- a/src/backend/storage/large_object/inv_api.c +++ b/src/backend/storage/large_object/inv_api.c @@ -561,7 +561,7 @@ inv_write(LargeObjectDesc *obj_desc, const char *buf, int nbytes) char data[LOBLKSIZE + VARHDRSZ]; /* ensure union is aligned well enough: */ int32 align_it; - } workbuf; + } workbuf = {0}; char *workb = VARDATA(&workbuf.hdr); HeapTuple newtup; Datum values[Natts_pg_largeobject]; @@ -752,7 +752,7 @@ inv_truncate(LargeObjectDesc *obj_desc, int64 len) char data[LOBLKSIZE + VARHDRSZ]; /* ensure union is aligned well enough: */ int32 align_it; - } workbuf; + } workbuf = {0}; char *workb = VARDATA(&workbuf.hdr); HeapTuple newtup; Datum values[Natts_pg_largeobject]; diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c index 3f6bf70bd3c24..4798eb7900379 100644 --- a/src/backend/storage/lmgr/lmgr.c +++ b/src/backend/storage/lmgr/lmgr.c @@ -55,7 +55,7 @@ typedef struct XactLockTableWaitInfo { XLTW_Oper oper; Relation rel; - ItemPointer ctid; + const ItemPointerData *ctid; } XactLockTableWaitInfo; static void XactLockTableWaitErrorCb(void *arg); @@ -559,7 +559,7 @@ UnlockPage(Relation relation, BlockNumber blkno, LOCKMODE lockmode) * tuple. See heap_lock_tuple before using this! */ void -LockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode) +LockTuple(Relation relation, const ItemPointerData *tid, LOCKMODE lockmode) { LOCKTAG tag; @@ -579,7 +579,7 @@ LockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode) * Returns true iff the lock was acquired. */ bool -ConditionalLockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode, +ConditionalLockTuple(Relation relation, const ItemPointerData *tid, LOCKMODE lockmode, bool logLockFailure) { LOCKTAG tag; @@ -598,7 +598,7 @@ ConditionalLockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode, * UnlockTuple */ void -UnlockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode) +UnlockTuple(Relation relation, const ItemPointerData *tid, LOCKMODE lockmode) { LOCKTAG tag; @@ -660,7 +660,7 @@ XactLockTableDelete(TransactionId xid) * and if so wait for its parent. 
*/ void -XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, +XactLockTableWait(TransactionId xid, Relation rel, const ItemPointerData *ctid, XLTW_Oper oper) { LOCKTAG tag; diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index 62f3471448ebc..4cc7f645c3171 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -415,6 +415,7 @@ static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner); static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode); static void FinishStrongLockAcquire(void); static ProcWaitStatus WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner); +static void waitonlock_error_callback(void *arg); static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock); static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent); static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode, @@ -443,7 +444,7 @@ void LockManagerShmemInit(void) { HASHCTL info; - long init_table_size, + int64 init_table_size, max_table_size; bool found; @@ -589,7 +590,7 @@ proclock_hash(const void *key, Size keysize) * intermediate variable to suppress cast-pointer-to-int warnings. */ procptr = PointerGetDatum(proclocktag->myProc); - lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS; + lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS; return lockhash; } @@ -610,7 +611,7 @@ ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode) * This must match proclock_hash()! */ procptr = PointerGetDatum(proclocktag->myProc); - lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS; + lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS; return lockhash; } @@ -1931,6 +1932,7 @@ static ProcWaitStatus WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner) { ProcWaitStatus result; + ErrorContextCallback waiterrcontext; TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1, locallock->tag.lock.locktag_field2, @@ -1939,6 +1941,12 @@ WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner) locallock->tag.lock.locktag_type, locallock->tag.mode); + /* Setup error traceback support for ereport() */ + waiterrcontext.callback = waitonlock_error_callback; + waiterrcontext.arg = (void *) locallock; + waiterrcontext.previous = error_context_stack; + error_context_stack = &waiterrcontext; + /* adjust the process title to indicate that it's waiting */ set_ps_display_suffix("waiting"); @@ -1990,6 +1998,8 @@ WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner) /* reset ps display to remove the suffix */ set_ps_display_remove_suffix(); + error_context_stack = waiterrcontext.previous; + TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1, locallock->tag.lock.locktag_field2, locallock->tag.lock.locktag_field3, @@ -2000,6 +2010,28 @@ WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner) return result; } +/* + * error context callback for failures in WaitOnLock + * + * We report which lock was being waited on, in the same style used in + * deadlock reports. This helps with lock timeout errors in particular. 
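
With the callback below pushed onto error_context_stack for the duration of the wait, anything that errors out mid-wait (a lock_timeout cancel, for instance) now reports what was being waited for. A hypothetical rendering for a transaction lock, assuming DescribeLockTag's usual deadlock-report phrasing; the exact text depends on the lock tag:

    ERROR:  canceling statement due to lock timeout
    CONTEXT:  waiting for ShareLock on transaction 12345
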
+ */ +static void +waitonlock_error_callback(void *arg) +{ + LOCALLOCK *locallock = (LOCALLOCK *) arg; + const LOCKTAG *tag = &locallock->tag.lock; + LOCKMODE mode = locallock->tag.mode; + StringInfoData locktagbuf; + + initStringInfo(&locktagbuf); + DescribeLockTag(&locktagbuf, tag); + + errcontext("waiting for %s on %s", + GetLockmodeName(tag->locktag_lockmethodid, mode), + locktagbuf.data); +} + /* * Remove a proc from the wait-queue it is on (caller must know it is on one). * This is only used when the proc has failed to get the lock, so we set its diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index ec9c345ffdfb8..fcbac5213a5c0 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -126,8 +126,8 @@ StaticAssertDecl((LW_VAL_EXCLUSIVE & LW_FLAG_MASK) == 0, * in lwlocklist.h. We absorb the names of these tranches, too. * * 3. Extensions can create new tranches, via either RequestNamedLWLockTranche - * or LWLockRegisterTranche. The names of these that are known in the current - * process appear in LWLockTrancheNames[]. + * or LWLockNewTrancheId. These names are stored in shared memory and can be + * accessed via LWLockTrancheNames. * * All these names are user-visible as wait event names, so choose with care * ... and do not forget to update the documentation's list of wait events. @@ -146,11 +146,12 @@ StaticAssertDecl(lengthof(BuiltinTrancheNames) == /* * This is indexed by tranche ID minus LWTRANCHE_FIRST_USER_DEFINED, and - * stores the names of all dynamically-created tranches known to the current - * process. Any unused entries in the array will contain NULL. + * points to the shared memory locations of the names of all + * dynamically-created tranches. Backends inherit the pointer by fork from the + * postmaster (except in the EXEC_BACKEND case, where we have special measures + * to pass it down). */ -static const char **LWLockTrancheNames = NULL; -static int LWLockTrancheNamesAllocated = 0; +char **LWLockTrancheNames = NULL; /* * This points to the main array of LWLocks in shared memory. Backends inherit @@ -162,8 +163,7 @@ LWLockPadded *MainLWLockArray = NULL; /* * We use this structure to keep track of locked LWLocks for release * during error recovery. Normally, only a few will be held at once, but - * occasionally the number can be much higher; for example, the pg_buffercache - * extension locks all buffer partitions simultaneously. + * occasionally the number can be much higher. */ #define MAX_SIMUL_LWLOCKS 200 @@ -185,18 +185,21 @@ typedef struct NamedLWLockTrancheRequest } NamedLWLockTrancheRequest; static NamedLWLockTrancheRequest *NamedLWLockTrancheRequestArray = NULL; -static int NamedLWLockTrancheRequestsAllocated = 0; /* - * NamedLWLockTrancheRequests is both the valid length of the request array, - * and the length of the shared-memory NamedLWLockTrancheArray later on. - * This variable and NamedLWLockTrancheArray are non-static so that - * postmaster.c can copy them to child processes in EXEC_BACKEND builds. + * NamedLWLockTrancheRequests is the valid length of the request array. This + * variable is non-static so that postmaster.c can copy it to child processes + * in EXEC_BACKEND builds.
*/ int NamedLWLockTrancheRequests = 0; -/* points to data in shared memory: */ -NamedLWLockTranche *NamedLWLockTrancheArray = NULL; +/* shared memory counter of registered tranches */ +int *LWLockCounter = NULL; + +/* backend-local counter of registered tranches */ +static int LocalLWLockCounter; + +#define MAX_NAMED_TRANCHES 256 static void InitializeLWLocks(void); static inline void LWLockReportWaitStart(LWLock *lock); @@ -392,31 +395,28 @@ Size LWLockShmemSize(void) { Size size; - int i; int numLocks = NUM_FIXED_LWLOCKS; /* Calculate total number of locks needed in the main array. */ numLocks += NumLWLocksForNamedTranches(); - /* Space for the LWLock array. */ - size = mul_size(numLocks, sizeof(LWLockPadded)); + /* Space for dynamic allocation counter. */ + size = MAXALIGN(sizeof(int)); - /* Space for dynamic allocation counter, plus room for alignment. */ - size = add_size(size, sizeof(int) + LWLOCK_PADDED_SIZE); + /* Space for named tranches. */ + size = add_size(size, mul_size(MAX_NAMED_TRANCHES, sizeof(char *))); + size = add_size(size, mul_size(MAX_NAMED_TRANCHES, NAMEDATALEN)); - /* space for named tranches. */ - size = add_size(size, mul_size(NamedLWLockTrancheRequests, sizeof(NamedLWLockTranche))); - - /* space for name of each tranche. */ - for (i = 0; i < NamedLWLockTrancheRequests; i++) - size = add_size(size, strlen(NamedLWLockTrancheRequestArray[i].tranche_name) + 1); + /* Space for the LWLock array, plus room for cache line alignment. */ + size = add_size(size, LWLOCK_PADDED_SIZE); + size = add_size(size, mul_size(numLocks, sizeof(LWLockPadded))); return size; } /* * Allocate shmem space for the main LWLock array and all tranches and - * initialize it. We also register extension LWLock tranches here. + * initialize it. */ void CreateLWLocks(void) @@ -424,35 +424,32 @@ CreateLWLocks(void) if (!IsUnderPostmaster) { Size spaceLocks = LWLockShmemSize(); - int *LWLockCounter; char *ptr; /* Allocate space */ ptr = (char *) ShmemAlloc(spaceLocks); - /* Leave room for dynamic allocation of tranches */ - ptr += sizeof(int); + /* Initialize the dynamic-allocation counter for tranches */ + LWLockCounter = (int *) ptr; + *LWLockCounter = LWTRANCHE_FIRST_USER_DEFINED; + ptr += MAXALIGN(sizeof(int)); + + /* Initialize tranche names */ + LWLockTrancheNames = (char **) ptr; + ptr += MAX_NAMED_TRANCHES * sizeof(char *); + for (int i = 0; i < MAX_NAMED_TRANCHES; i++) + { + LWLockTrancheNames[i] = ptr; + ptr += NAMEDATALEN; + } /* Ensure desired alignment of LWLock array */ ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE; - MainLWLockArray = (LWLockPadded *) ptr; - /* - * Initialize the dynamic-allocation counter for tranches, which is - * stored just before the first LWLock. - */ - LWLockCounter = (int *) ((char *) MainLWLockArray - sizeof(int)); - *LWLockCounter = LWTRANCHE_FIRST_USER_DEFINED; - /* Initialize all LWLocks */ InitializeLWLocks(); } - - /* Register named extension LWLock tranches in the current process. 
*/ - for (int i = 0; i < NamedLWLockTrancheRequests; i++) - LWLockRegisterTranche(NamedLWLockTrancheArray[i].trancheId, - NamedLWLockTrancheArray[i].trancheName); } /* @@ -461,7 +458,6 @@ CreateLWLocks(void) static void InitializeLWLocks(void) { - int numNamedLocks = NumLWLocksForNamedTranches(); int id; int i; int j; @@ -492,32 +488,18 @@ InitializeLWLocks(void) */ if (NamedLWLockTrancheRequests > 0) { - char *trancheNames; - - NamedLWLockTrancheArray = (NamedLWLockTranche *) - &MainLWLockArray[NUM_FIXED_LWLOCKS + numNamedLocks]; - - trancheNames = (char *) NamedLWLockTrancheArray + - (NamedLWLockTrancheRequests * sizeof(NamedLWLockTranche)); lock = &MainLWLockArray[NUM_FIXED_LWLOCKS]; for (i = 0; i < NamedLWLockTrancheRequests; i++) { NamedLWLockTrancheRequest *request; - NamedLWLockTranche *tranche; - char *name; + int tranche; request = &NamedLWLockTrancheRequestArray[i]; - tranche = &NamedLWLockTrancheArray[i]; - - name = trancheNames; - trancheNames += strlen(request->tranche_name) + 1; - strcpy(name, request->tranche_name); - tranche->trancheId = LWLockNewTrancheId(); - tranche->trancheName = name; + tranche = LWLockNewTrancheId(request->tranche_name); for (j = 0; j < request->num_lwlocks; j++, lock++) - LWLockInitialize(&lock->lock, tranche->trancheId); + LWLockInitialize(&lock->lock, tranche); } } } @@ -569,61 +551,47 @@ GetNamedLWLockTranche(const char *tranche_name) } /* - * Allocate a new tranche ID. + * Allocate a new tranche ID with the provided name. */ int -LWLockNewTrancheId(void) +LWLockNewTrancheId(const char *name) { int result; - int *LWLockCounter; - LWLockCounter = (int *) ((char *) MainLWLockArray - sizeof(int)); - /* We use the ShmemLock spinlock to protect LWLockCounter */ - SpinLockAcquire(ShmemLock); - result = (*LWLockCounter)++; - SpinLockRelease(ShmemLock); + if (!name) + ereport(ERROR, + (errcode(ERRCODE_INVALID_NAME), + errmsg("tranche name cannot be NULL"))); - return result; -} + if (strlen(name) >= NAMEDATALEN) + ereport(ERROR, + (errcode(ERRCODE_NAME_TOO_LONG), + errmsg("tranche name too long"), + errdetail("LWLock tranche names must be no longer than %d bytes.", + NAMEDATALEN - 1))); -/* - * Register a dynamic tranche name in the lookup table of the current process. - * - * This routine will save a pointer to the tranche name passed as an argument, - * so the name should be allocated in a backend-lifetime context - * (shared memory, TopMemoryContext, static constant, or similar). - * - * The tranche name will be user-visible as a wait event name, so try to - * use a name that fits the style for those. - */ -void -LWLockRegisterTranche(int tranche_id, const char *tranche_name) -{ - /* This should only be called for user-defined tranches. */ - if (tranche_id < LWTRANCHE_FIRST_USER_DEFINED) - return; - - /* Convert to array index. */ - tranche_id -= LWTRANCHE_FIRST_USER_DEFINED; + /* + * We use the ShmemLock spinlock to protect LWLockCounter and + * LWLockTrancheNames. + */ + SpinLockAcquire(ShmemLock); - /* If necessary, create or enlarge array. 
*/ - if (tranche_id >= LWLockTrancheNamesAllocated) + if (*LWLockCounter - LWTRANCHE_FIRST_USER_DEFINED >= MAX_NAMED_TRANCHES) { - int newalloc; + SpinLockRelease(ShmemLock); + ereport(ERROR, + (errmsg("maximum number of tranches already registered"), + errdetail("No more than %d tranches may be registered.", + MAX_NAMED_TRANCHES))); + } - newalloc = pg_nextpower2_32(Max(8, tranche_id + 1)); + result = (*LWLockCounter)++; + LocalLWLockCounter = *LWLockCounter; + strlcpy(LWLockTrancheNames[result - LWTRANCHE_FIRST_USER_DEFINED], name, NAMEDATALEN); - if (LWLockTrancheNames == NULL) - LWLockTrancheNames = (const char **) - MemoryContextAllocZero(TopMemoryContext, - newalloc * sizeof(char *)); - else - LWLockTrancheNames = - repalloc0_array(LWLockTrancheNames, const char *, LWLockTrancheNamesAllocated, newalloc); - LWLockTrancheNamesAllocated = newalloc; - } + SpinLockRelease(ShmemLock); - LWLockTrancheNames[tranche_id] = tranche_name; + return result; } /* @@ -642,10 +610,23 @@ void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks) { NamedLWLockTrancheRequest *request; + static int NamedLWLockTrancheRequestsAllocated; if (!process_shmem_requests_in_progress) elog(FATAL, "cannot request additional LWLocks outside shmem_request_hook"); + if (!tranche_name) + ereport(ERROR, + (errcode(ERRCODE_INVALID_NAME), + errmsg("tranche name cannot be NULL"))); + + if (strlen(tranche_name) >= NAMEDATALEN) + ereport(ERROR, + (errcode(ERRCODE_NAME_TOO_LONG), + errmsg("tranche name too long"), + errdetail("LWLock tranche names must be no longer than %d bytes.", + NAMEDATALEN - 1))); + if (NamedLWLockTrancheRequestArray == NULL) { NamedLWLockTrancheRequestsAllocated = 16; @@ -666,7 +647,6 @@ RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks) } request = &NamedLWLockTrancheRequestArray[NamedLWLockTrancheRequests]; - Assert(strlen(tranche_name) + 1 <= NAMEDATALEN); strlcpy(request->tranche_name, tranche_name, NAMEDATALEN); request->num_lwlocks = num_lwlocks; NamedLWLockTrancheRequests++; @@ -678,6 +658,9 @@ void LWLockInitialize(LWLock *lock, int tranche_id) { + /* verify the tranche_id is valid */ + (void) GetLWTrancheName(tranche_id); + pg_atomic_init_u32(&lock->state, LW_FLAG_RELEASE_OK); #ifdef LOCK_DEBUG pg_atomic_init_u32(&lock->nwaiters, 0); @@ -719,15 +702,27 @@ GetLWTrancheName(uint16 trancheId) return BuiltinTrancheNames[trancheId]; /* - * It's an extension tranche, so look in LWLockTrancheNames[]. However, - * it's possible that the tranche has never been registered in the current - * process, in which case give up and return "extension". + * We only ever add new entries to LWLockTrancheNames, so most lookups can + * avoid taking the spinlock as long as the backend-local counter + * (LocalLWLockCounter) is greater than the requested tranche ID. + * Otherwise, we must first update the backend-local counter with + * ShmemLock held and then attempt the lookup again. In practice, the + * latter case is probably rare. */ - trancheId -= LWTRANCHE_FIRST_USER_DEFINED; + if (trancheId >= LocalLWLockCounter) + { + SpinLockAcquire(ShmemLock); + LocalLWLockCounter = *LWLockCounter; + SpinLockRelease(ShmemLock); + + if (trancheId >= LocalLWLockCounter) + elog(ERROR, "tranche %d is not registered", trancheId); + } - if (trancheId >= LWLockTrancheNamesAllocated || - LWLockTrancheNames[trancheId] == NULL) - return "extension"; + /* + * It's an extension tranche, so look in LWLockTrancheNames. 
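The dynamic path changes shape as well: LWLockNewTrancheId() now takes the tranche name and copies it into the shared registry, so the per-backend LWLockRegisterTranche() step disappears. A hedged sketch of an extension using the revised API; the lock placement and names are hypothetical:

/* Hypothetical extension code, for illustration only. */
#include "postgres.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"

static LWLock *my_lock;			/* lives in shared memory */

static void
my_shmem_startup(void)			/* registered as shmem_startup_hook */
{
	bool		found;

	my_lock = (LWLock *) ShmemInitStruct("my_extension_lock",
										 sizeof(LWLock), &found);
	if (!found)
	{
		/* name is copied into shared memory, capped at NAMEDATALEN - 1 */
		int			tranche_id = LWLockNewTrancheId("my_extension");

		LWLockInitialize(my_lock, tranche_id);
	}
	/* no per-backend LWLockRegisterTranche() call is needed anymore */
}

Any backend can then resolve the tranche name through the shared LWLockTrancheNames table, which is what GetLWTrancheName() below relies on.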
+ */ + trancheId -= LWTRANCHE_FIRST_USER_DEFINED; return LWLockTrancheNames[trancheId]; } diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c index c07fb58835557..c1d8511ad17a9 100644 --- a/src/backend/storage/lmgr/predicate.c +++ b/src/backend/storage/lmgr/predicate.c @@ -1145,7 +1145,7 @@ void PredicateLockShmemInit(void) { HASHCTL info; - long max_table_size; + int64 max_table_size; Size requestSize; bool found; diff --git a/src/backend/storage/page/meson.build b/src/backend/storage/page/meson.build index c3e4a805862a9..112f00ff36552 100644 --- a/src/backend/storage/page/meson.build +++ b/src/backend/storage/page/meson.build @@ -1,7 +1,15 @@ # Copyright (c) 2022-2025, PostgreSQL Global Development Group +checksum_backend_lib = static_library('checksum_backend_lib', + 'checksum.c', + dependencies: backend_build_deps, + kwargs: internal_lib_args, + c_args: vectorize_cflags + unroll_loops_cflags, +) + +backend_link_with += checksum_backend_lib + backend_sources += files( 'bufpage.c', - 'checksum.c', 'itemptr.c', ) diff --git a/src/backend/tcop/backend_startup.c b/src/backend/tcop/backend_startup.c index ad0af5edc1f21..14d5fc0b1965a 100644 --- a/src/backend/tcop/backend_startup.c +++ b/src/backend/tcop/backend_startup.c @@ -492,7 +492,7 @@ static int ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) { int32 len; - char *buf; + char *buf = NULL; ProtocolVersion proto; MemoryContext oldcontext; @@ -516,7 +516,7 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) * scanners, which may be less benign, but it's not really our job to * notice those.) */ - return STATUS_ERROR; + goto fail; } if (pq_getbytes(((char *) &len) + 1, 3) == EOF) @@ -526,7 +526,7 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("incomplete startup packet"))); - return STATUS_ERROR; + goto fail; } len = pg_ntoh32(len); @@ -538,7 +538,7 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid length of startup packet"))); - return STATUS_ERROR; + goto fail; } /* @@ -554,7 +554,7 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("incomplete startup packet"))); - return STATUS_ERROR; + goto fail; } pq_endmsgread(); @@ -568,7 +568,7 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) { ProcessCancelRequestPacket(port, buf, len); /* Not really an error, but we don't want to proceed further */ - return STATUS_ERROR; + goto fail; } if (proto == NEGOTIATE_SSL_CODE && !ssl_done) @@ -607,14 +607,16 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) ereport(COMMERROR, (errcode_for_socket_access(), errmsg("failed to send SSL negotiation response: %m"))); - return STATUS_ERROR; /* close the connection */ + goto fail; /* close the connection */ } #ifdef USE_SSL if (SSLok == 'S' && secure_open_server(port) == -1) - return STATUS_ERROR; + goto fail; #endif + pfree(buf); + /* * At this point we should have no data already buffered. 
If we do, * it was received before we performed the SSL handshake, so it wasn't @@ -661,14 +663,16 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) ereport(COMMERROR, (errcode_for_socket_access(), errmsg("failed to send GSSAPI negotiation response: %m"))); - return STATUS_ERROR; /* close the connection */ + goto fail; /* close the connection */ } #ifdef ENABLE_GSS if (GSSok == 'G' && secure_open_gssapi(port) == -1) - return STATUS_ERROR; + goto fail; #endif + pfree(buf); + /* * At this point we should have no data already buffered. If we do, * it was received before we performed the GSS handshake, so it wasn't @@ -863,7 +867,16 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) */ MemoryContextSwitchTo(oldcontext); + pfree(buf); + return STATUS_OK; + +fail: + /* be tidy, just to avoid Valgrind complaints */ + if (buf) + pfree(buf); + + return STATUS_ERROR; } /* diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c index a297606cdd7fa..d356830f756be 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c @@ -649,6 +649,10 @@ pg_parse_query(const char *query_string) TRACE_POSTGRESQL_QUERY_PARSE_DONE(query_string); + if (Debug_print_raw_parse) + elog_node_display(LOG, "raw parse tree", raw_parsetree_list, + Debug_pretty_print); + return raw_parsetree_list; } @@ -988,7 +992,7 @@ pg_plan_queries(List *querytrees, const char *query_string, int cursorOptions, stmt->stmt_location = query->stmt_location; stmt->stmt_len = query->stmt_len; stmt->queryId = query->queryId; - stmt->cached_plan_type = PLAN_CACHE_NONE; + stmt->planOrigin = PLAN_STMT_INTERNAL; } else { @@ -3697,7 +3701,10 @@ set_debug_options(int debug_flag, GucContext context, GucSource source) if (debug_flag >= 2) SetConfigOption("log_statement", "all", context, source); if (debug_flag >= 3) + { + SetConfigOption("debug_print_raw_parse", "true", context, source); SetConfigOption("debug_print_parse", "true", context, source); + } if (debug_flag >= 4) SetConfigOption("debug_print_plan", "true", context, source); if (debug_flag >= 5) diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c index babc34d0cbe1d..5f442bc3bd4e1 100644 --- a/src/backend/tcop/utility.c +++ b/src/backend/tcop/utility.c @@ -1234,7 +1234,7 @@ ProcessUtilitySlow(ParseState *pstate, wrapper->utilityStmt = stmt; wrapper->stmt_location = pstmt->stmt_location; wrapper->stmt_len = pstmt->stmt_len; - wrapper->cached_plan_type = PLAN_CACHE_NONE; + wrapper->planOrigin = PLAN_STMT_INTERNAL; ProcessUtility(wrapper, queryString, @@ -1874,7 +1874,8 @@ ProcessUtilitySlow(ParseState *pstate, if (!IsA(rel, RangeVar)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("only a single relation is allowed in CREATE STATISTICS"))); + errmsg("cannot create statistics on the specified relation"), + errdetail("CREATE STATISTICS only supports tables, foreign tables and materialized views."))); /* * CREATE STATISTICS will influence future execution plans @@ -1965,7 +1966,7 @@ ProcessUtilityForAlterTable(Node *stmt, AlterTableUtilityContext *context) wrapper->utilityStmt = stmt; wrapper->stmt_location = context->pstmt->stmt_location; wrapper->stmt_len = context->pstmt->stmt_len; - wrapper->cached_plan_type = PLAN_CACHE_NONE; + wrapper->planOrigin = PLAN_STMT_INTERNAL; ProcessUtility(wrapper, context->queryString, diff --git a/src/backend/tsearch/dict_ispell.c b/src/backend/tsearch/dict_ispell.c index 63bd193a78a89..debfbf956cc1f 100644 --- a/src/backend/tsearch/dict_ispell.c +++ 
b/src/backend/tsearch/dict_ispell.c @@ -47,24 +47,30 @@ dispell_init(PG_FUNCTION_ARGS) if (strcmp(defel->defname, "dictfile") == 0) { + char *filename; + if (dictloaded) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("multiple DictFile parameters"))); - NIImportDictionary(&(d->obj), - get_tsearch_config_filename(defGetString(defel), - "dict")); + filename = get_tsearch_config_filename(defGetString(defel), + "dict"); + NIImportDictionary(&(d->obj), filename); + pfree(filename); dictloaded = true; } else if (strcmp(defel->defname, "afffile") == 0) { + char *filename; + if (affloaded) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("multiple AffFile parameters"))); - NIImportAffixes(&(d->obj), - get_tsearch_config_filename(defGetString(defel), - "affix")); + filename = get_tsearch_config_filename(defGetString(defel), + "affix"); + NIImportAffixes(&(d->obj), filename); + pfree(filename); affloaded = true; } else if (strcmp(defel->defname, "stopwords") == 0) diff --git a/src/backend/tsearch/dict_synonym.c b/src/backend/tsearch/dict_synonym.c index 0da5a9d686802..c2773eb01adee 100644 --- a/src/backend/tsearch/dict_synonym.c +++ b/src/backend/tsearch/dict_synonym.c @@ -199,6 +199,7 @@ dsynonym_init(PG_FUNCTION_ARGS) } tsearch_readline_end(&trst); + pfree(filename); d->len = cur; qsort(d->syn, d->len, sizeof(Syn), compareSyn); diff --git a/src/backend/tsearch/dict_thesaurus.c b/src/backend/tsearch/dict_thesaurus.c index 1bebe36a6910e..1e6bbde1ca7d8 100644 --- a/src/backend/tsearch/dict_thesaurus.c +++ b/src/backend/tsearch/dict_thesaurus.c @@ -167,17 +167,17 @@ addWrd(DictThesaurus *d, char *b, char *e, uint32 idsubst, uint16 nwrd, uint16 p static void thesaurusRead(const char *filename, DictThesaurus *d) { + char *real_filename = get_tsearch_config_filename(filename, "ths"); tsearch_readline_state trst; uint32 idsubst = 0; bool useasis = false; char *line; - filename = get_tsearch_config_filename(filename, "ths"); - if (!tsearch_readline_begin(&trst, filename)) + if (!tsearch_readline_begin(&trst, real_filename)) ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("could not open thesaurus file \"%s\": %m", - filename))); + real_filename))); while ((line = tsearch_readline(&trst)) != NULL) { @@ -297,6 +297,7 @@ thesaurusRead(const char *filename, DictThesaurus *d) d->nsubst = idsubst; tsearch_readline_end(&trst); + pfree(real_filename); } static TheLexeme * diff --git a/src/backend/tsearch/ts_parse.c b/src/backend/tsearch/ts_parse.c index e5da6cf17ec19..cba421892bf45 100644 --- a/src/backend/tsearch/ts_parse.c +++ b/src/backend/tsearch/ts_parse.c @@ -218,7 +218,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem) * position and go to multiword mode */ - ld->curDictId = DatumGetObjectId(map->dictIds[i]); + ld->curDictId = map->dictIds[i]; ld->posDict = i + 1; ld->curSub = curVal->next; if (res) @@ -275,7 +275,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem) * dictionaries ? 
*/ for (i = 0; i < map->len && !dictExists; i++) - if (ld->curDictId == DatumGetObjectId(map->dictIds[i])) + if (ld->curDictId == map->dictIds[i]) dictExists = true; if (!dictExists) diff --git a/src/backend/tsearch/ts_selfuncs.c b/src/backend/tsearch/ts_selfuncs.c index 0c1d2bc1109da..453a5e5c2ea06 100644 --- a/src/backend/tsearch/ts_selfuncs.c +++ b/src/backend/tsearch/ts_selfuncs.c @@ -233,7 +233,7 @@ mcelem_tsquery_selec(TSQuery query, Datum *mcelem, int nmcelem, * The text Datums came from an array, so it cannot be compressed or * stored out-of-line -- it's safe to use VARSIZE_ANY*. */ - Assert(!VARATT_IS_COMPRESSED(mcelem[i]) && !VARATT_IS_EXTERNAL(mcelem[i])); + Assert(!VARATT_IS_COMPRESSED(DatumGetPointer(mcelem[i])) && !VARATT_IS_EXTERNAL(DatumGetPointer(mcelem[i]))); lookup[i].element = (text *) DatumGetPointer(mcelem[i]); lookup[i].frequency = numbers[i]; } diff --git a/src/backend/utils/.gitignore b/src/backend/utils/.gitignore index 068555695946f..303c01d051512 100644 --- a/src/backend/utils/.gitignore +++ b/src/backend/utils/.gitignore @@ -2,5 +2,6 @@ /fmgroids.h /fmgrprotos.h /fmgr-stamp +/guc_tables.inc.c /probes.h /errcodes.h diff --git a/src/backend/utils/Makefile b/src/backend/utils/Makefile index 140fbba5c222a..985ef52e7e318 100644 --- a/src/backend/utils/Makefile +++ b/src/backend/utils/Makefile @@ -43,7 +43,7 @@ generated-header-symlinks: $(top_builddir)/src/include/utils/header-stamp submak submake-adt-headers: $(MAKE) -C adt jsonpath_gram.h -$(SUBDIRS:%=%-recursive): fmgr-stamp errcodes.h +$(SUBDIRS:%=%-recursive): fmgr-stamp errcodes.h guc_tables.inc.c # fmgr-stamp records the last time we ran Gen_fmgrtab.pl. We don't rely on # the timestamps of the individual output files, because the Perl script @@ -55,6 +55,9 @@ fmgr-stamp: Gen_fmgrtab.pl $(catalogdir)/Catalog.pm $(top_srcdir)/src/include/ca errcodes.h: $(top_srcdir)/src/backend/utils/errcodes.txt generate-errcodes.pl $(PERL) $(srcdir)/generate-errcodes.pl --outfile $@ $< +guc_tables.inc.c: $(top_srcdir)/src/backend/utils/misc/guc_parameters.dat $(top_srcdir)/src/backend/utils/misc/gen_guc_tables.pl + $(PERL) $(top_srcdir)/src/backend/utils/misc/gen_guc_tables.pl $< $@ + ifeq ($(enable_dtrace), yes) probes.h: postprocess_dtrace.sed probes.h.tmp sed -f $^ >$@ @@ -70,8 +73,8 @@ endif # These generated headers must be symlinked into src/include/. # We use header-stamp to record that we've done this because the symlinks # themselves may appear older than fmgr-stamp. -$(top_builddir)/src/include/utils/header-stamp: fmgr-stamp errcodes.h probes.h - cd '$(dir $@)' && for file in fmgroids.h fmgrprotos.h errcodes.h probes.h; do \ +$(top_builddir)/src/include/utils/header-stamp: fmgr-stamp errcodes.h probes.h guc_tables.inc.c + cd '$(dir $@)' && for file in fmgroids.h fmgrprotos.h errcodes.h probes.h guc_tables.inc.c; do \ rm -f $$file && $(LN_S) "../../../$(subdir)/$$file" . 
; \ done touch $@ @@ -89,4 +92,4 @@ uninstall-data: clean: rm -f probes.h probes.h.tmp - rm -f fmgroids.h fmgrprotos.h fmgrtab.c fmgr-stamp errcodes.h + rm -f fmgroids.h fmgrprotos.h fmgrtab.c fmgr-stamp errcodes.h guc_tables.inc.c diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c index 6bc91ce0dadda..f8e91484e36be 100644 --- a/src/backend/utils/activity/pgstat.c +++ b/src/backend/utils/activity/pgstat.c @@ -821,7 +821,7 @@ pgstat_force_next_flush(void) static bool match_db_entries(PgStatShared_HashEntry *entry, Datum match_data) { - return entry->key.dboid == DatumGetObjectId(MyDatabaseId); + return entry->key.dboid == MyDatabaseId; } /* @@ -1975,6 +1975,17 @@ pgstat_read_statsfile(void) header = pgstat_init_entry(key.kind, p); dshash_release_lock(pgStatLocal.shared_hash, p); + if (header == NULL) + { + /* + * It would be tempting to switch this ERROR to a + * WARNING, but it would mean that all the statistics + * are discarded if the system runs out of memory. + */ + elog(ERROR, "could not allocate entry %u/%u/%" PRIu64 " of type %c", + key.kind, key.dboid, + key.objid, t); + } if (!read_chunk(fpin, pgstat_get_entry_data(key.kind, header), diff --git a/src/backend/utils/activity/pgstat_backend.c b/src/backend/utils/activity/pgstat_backend.c index 8714a85e2d936..07a1116671b18 100644 --- a/src/backend/utils/activity/pgstat_backend.c +++ b/src/backend/utils/activity/pgstat_backend.c @@ -41,9 +41,9 @@ static bool backend_has_iostats = false; /* * WAL usage counters saved from pgWalUsage at the previous call to - * pgstat_report_wal(). This is used to calculate how much WAL usage - * happens between pgstat_report_wal() calls, by subtracting the previous - * counters from the current ones. + * pgstat_flush_backend(). This is used to calculate how much WAL usage + * happens between pgstat_flush_backend() calls, by subtracting the + * previous counters from the current ones. */ static WalUsage prevBackendWalUsage; diff --git a/src/backend/utils/activity/pgstat_shmem.c b/src/backend/utils/activity/pgstat_shmem.c index 53e7d534270ac..9dc3212f7dd01 100644 --- a/src/backend/utils/activity/pgstat_shmem.c +++ b/src/backend/utils/activity/pgstat_shmem.c @@ -180,7 +180,6 @@ StatsShmemInit(void) * provides a small efficiency win. */ ctl->raw_dsa_area = p; - p += MAXALIGN(pgstat_dsa_init_size()); dsa = dsa_create_in_place(ctl->raw_dsa_area, pgstat_dsa_init_size(), LWTRANCHE_PGSTATS_DSA, NULL); @@ -290,6 +289,13 @@ pgstat_detach_shmem(void) * ------------------------------------------------------------ */ +/* + * Initialize a newly-created entry. + * + * Returns NULL on allocation failure, so that callers can take cleanup + * actions, since the entry has already been inserted into the shared + * hashtable. 
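The NULL return added to pgstat_init_entry() rests on dsa_allocate_extended()'s flag bits rather than any new mechanism. A generic sketch of those semantics; the dsa_area pointer is assumed to come from elsewhere:

/* Illustration only: soft-failure DSA allocation. */
#include "postgres.h"
#include "utils/dsa.h"

static bool
try_soft_alloc(dsa_area *area)
{
	/*
	 * DSA_ALLOC_ZERO zero-fills the chunk (like dsa_allocate0), and
	 * DSA_ALLOC_NO_OOM makes out-of-memory return InvalidDsaPointer
	 * instead of raising an ERROR.
	 */
	dsa_pointer p = dsa_allocate_extended(area, 1024,
										  DSA_ALLOC_ZERO | DSA_ALLOC_NO_OOM);

	if (p == InvalidDsaPointer)
		return false;			/* caller takes its fallback path */
	dsa_free(area, p);
	return true;
}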
+ */ PgStatShared_Common * pgstat_init_entry(PgStat_Kind kind, PgStatShared_HashEntry *shhashent) @@ -312,7 +318,12 @@ pgstat_init_entry(PgStat_Kind kind, pg_atomic_init_u32(&shhashent->generation, 0); shhashent->dropped = false; - chunk = dsa_allocate0(pgStatLocal.dsa, pgstat_get_kind_info(kind)->shared_size); + chunk = dsa_allocate_extended(pgStatLocal.dsa, + pgstat_get_kind_info(kind)->shared_size, + DSA_ALLOC_ZERO | DSA_ALLOC_NO_OOM); + if (chunk == InvalidDsaPointer) + return NULL; + shheader = dsa_get_address(pgStatLocal.dsa, chunk); shheader->magic = 0xdeadbeef; @@ -510,6 +521,20 @@ pgstat_get_entry_ref(PgStat_Kind kind, Oid dboid, uint64 objid, bool create, if (!shfound) { shheader = pgstat_init_entry(kind, shhashent); + if (shheader == NULL) + { + /* + * Failed the allocation of a new entry, so clean up the + * shared hashtable before giving up. + */ + dshash_delete_entry(pgStatLocal.shared_hash, shhashent); + + ereport(ERROR, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("out of memory"), + errdetail("Failed while allocating entry %u/%u/%" PRIu64 ".", + key.kind, key.dboid, key.objid))); + } pgstat_acquire_entry_ref(entry_ref, shhashent, shheader); if (created_entry != NULL) @@ -874,11 +899,12 @@ pgstat_drop_entry_internal(PgStatShared_HashEntry *shent, */ if (shent->dropped) elog(ERROR, - "trying to drop stats entry already dropped: kind=%s dboid=%u objid=%" PRIu64 " refcount=%u", + "trying to drop stats entry already dropped: kind=%s dboid=%u objid=%" PRIu64 " refcount=%u generation=%u", pgstat_get_kind_info(shent->key.kind)->name, shent->key.dboid, shent->key.objid, - pg_atomic_read_u32(&shent->refcount)); + pg_atomic_read_u32(&shent->refcount), + pg_atomic_read_u32(&shent->generation)); shent->dropped = true; /* release refcount marking entry as not dropped */ diff --git a/src/backend/utils/activity/pgstat_slru.c b/src/backend/utils/activity/pgstat_slru.c index 7bd8744accb0e..da50f8a04578c 100644 --- a/src/backend/utils/activity/pgstat_slru.c +++ b/src/backend/utils/activity/pgstat_slru.c @@ -55,47 +55,33 @@ pgstat_reset_slru(const char *name) * SLRU statistics count accumulation functions --- called from slru.c */ -void -pgstat_count_slru_page_zeroed(int slru_idx) -{ - get_slru_entry(slru_idx)->blocks_zeroed += 1; +#define PGSTAT_COUNT_SLRU(stat) \ +void \ +CppConcat(pgstat_count_slru_,stat)(int slru_idx) \ +{ \ + get_slru_entry(slru_idx)->stat += 1; \ } -void -pgstat_count_slru_page_hit(int slru_idx) -{ - get_slru_entry(slru_idx)->blocks_hit += 1; -} +/* pgstat_count_slru_blocks_zeroed */ +PGSTAT_COUNT_SLRU(blocks_zeroed) -void -pgstat_count_slru_page_exists(int slru_idx) -{ - get_slru_entry(slru_idx)->blocks_exists += 1; -} +/* pgstat_count_slru_blocks_hit */ +PGSTAT_COUNT_SLRU(blocks_hit) -void -pgstat_count_slru_page_read(int slru_idx) -{ - get_slru_entry(slru_idx)->blocks_read += 1; -} +/* pgstat_count_slru_blocks_exists */ +PGSTAT_COUNT_SLRU(blocks_exists) -void -pgstat_count_slru_page_written(int slru_idx) -{ - get_slru_entry(slru_idx)->blocks_written += 1; -} +/* pgstat_count_slru_blocks_read */ +PGSTAT_COUNT_SLRU(blocks_read) -void -pgstat_count_slru_flush(int slru_idx) -{ - get_slru_entry(slru_idx)->flush += 1; -} +/* pgstat_count_slru_blocks_written */ +PGSTAT_COUNT_SLRU(blocks_written) -void -pgstat_count_slru_truncate(int slru_idx) -{ - get_slru_entry(slru_idx)->truncate += 1; -} +/* pgstat_count_slru_flush */ +PGSTAT_COUNT_SLRU(flush) + +/* pgstat_count_slru_truncate */ +PGSTAT_COUNT_SLRU(truncate) /* * Support function for the SQL-callable pgstat* functions. 
Returns diff --git a/src/backend/utils/activity/wait_event_names.txt b/src/backend/utils/activity/wait_event_names.txt index 0be307d2ca04b..7553f6eacef7b 100644 --- a/src/backend/utils/activity/wait_event_names.txt +++ b/src/backend/utils/activity/wait_event_names.txt @@ -156,7 +156,6 @@ REPLICATION_SLOT_DROP "Waiting for a replication slot to become inactive so it c RESTORE_COMMAND "Waiting for <xref linkend="guc-restore-command"/> to complete." SAFE_SNAPSHOT "Waiting to obtain a valid snapshot for a READ ONLY DEFERRABLE transaction." SYNC_REP "Waiting for confirmation from a remote server during synchronous replication." -WAL_BUFFER_INIT "Waiting on WAL buffer to be initialized." WAL_RECEIVER_EXIT "Waiting for the WAL receiver to exit." WAL_RECEIVER_WAIT_START "Waiting for startup process to send initial data for streaming replication." WAL_SUMMARY_READY "Waiting for a new WAL summary to be generated." @@ -303,9 +302,12 @@ ABI_compatibility: # This class of wait events has its own set of C structure, so these are # only used for the documentation. # -# NB: Predefined LWLocks (i.e., those declared in lwlocklist.h) must be -# listed in the top section of locks and must be listed in the same order as in -# lwlocklist.h. +# NB: Predefined LWLocks (i.e., those declared with PG_LWLOCK in lwlocklist.h) +# must be listed before the "END OF PREDEFINED LWLOCKS" comment and must be +# listed in the same order as in lwlocklist.h. Likewise, the built-in LWLock +# tranches (i.e., those declared with PG_LWLOCKTRANCHE in lwlocklist.h) must be +# listed after the "END OF PREDEFINED LWLOCKS" comment and must be listed in +# the same order as in lwlocklist.h. # Section: ClassName - WaitEventLWLock @@ -316,6 +318,7 @@ XidGen "Waiting to allocate a new transaction ID." ProcArray "Waiting to access the shared per-process data structures (typically, to get a snapshot or report a session's transaction ID)." SInvalRead "Waiting to retrieve messages from the shared catalog invalidation queue." SInvalWrite "Waiting to add a message to the shared catalog invalidation queue." +WALBufMapping "Waiting to replace a page in WAL buffers." WALWrite "Waiting for WAL buffers to be written to disk." ControlFile "Waiting to read or update the pg_control file or create a new WAL file." MultiXactGen "Waiting to read or update shared multixact state." @@ -356,14 +359,6 @@ AioWorkerSubmissionQueue "Waiting to access AIO worker submission queue." # # END OF PREDEFINED LWLOCKS (DO NOT CHANGE THIS LINE) # -# Predefined LWLocks (i.e., those declared at the top of lwlocknames.h) must be -# listed in the section above and must be listed in the same order as in -# lwlocknames.h. -# -# Likewise, the built-in LWLock tranches (i.e., those declared at the bottom of -# lwlocknames.h) must be listed in the section below and must be listed in the -# same order as in lwlocknames.h. -# XactBuffer "Waiting for I/O on a transaction status SLRU buffer." CommitTsBuffer "Waiting for I/O on a commit timestamp SLRU buffer." 
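For readability, here is what one instance of the PGSTAT_COUNT_SLRU macro introduced in pgstat_slru.c above expands to; CppConcat() is the token-pasting helper from c.h, so the generated names follow the PgStat_SLRUStats field names rather than the old page_* wording:

/* PGSTAT_COUNT_SLRU(blocks_hit) expands to: */
void
pgstat_count_slru_blocks_hit(int slru_idx)
{
	get_slru_entry(slru_idx)->blocks_hit += 1;
}

Callers of the old pgstat_count_slru_page_* names presumably get renamed to match elsewhere in the patch.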
diff --git a/src/backend/utils/adt/Makefile b/src/backend/utils/adt/Makefile index ffeacf2b819f3..cc68ac545a5f0 100644 --- a/src/backend/utils/adt/Makefile +++ b/src/backend/utils/adt/Makefile @@ -68,6 +68,7 @@ OBJS = \ misc.o \ multirangetypes.o \ multirangetypes_selfuncs.o \ + multixactfuncs.o \ name.o \ network.o \ network_gist.o \ diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c index 1213f9106d515..7dadaefdfc1b8 100644 --- a/src/backend/utils/adt/acl.c +++ b/src/backend/utils/adt/acl.c @@ -31,7 +31,6 @@ #include "catalog/pg_proc.h" #include "catalog/pg_tablespace.h" #include "catalog/pg_type.h" -#include "commands/dbcommands.h" #include "commands/proclang.h" #include "commands/tablespace.h" #include "common/hashfn.h" diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c index c8f53c6fbe788..c833e7df1fd9e 100644 --- a/src/backend/utils/adt/arrayfuncs.c +++ b/src/backend/utils/adt/arrayfuncs.c @@ -3406,7 +3406,7 @@ construct_array_builtin(Datum *elems, int nelems, Oid elmtype) case FLOAT8OID: elmlen = sizeof(float8); - elmbyval = FLOAT8PASSBYVAL; + elmbyval = true; elmalign = TYPALIGN_DOUBLE; break; @@ -3424,7 +3424,7 @@ construct_array_builtin(Datum *elems, int nelems, Oid elmtype) case INT8OID: elmlen = sizeof(int64); - elmbyval = FLOAT8PASSBYVAL; + elmbyval = true; elmalign = TYPALIGN_DOUBLE; break; @@ -3718,7 +3718,7 @@ deconstruct_array_builtin(ArrayType *array, case FLOAT8OID: elmlen = sizeof(float8); - elmbyval = FLOAT8PASSBYVAL; + elmbyval = true; elmalign = TYPALIGN_DOUBLE; break; diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c index fcd5b1653dd3e..614644a4e2a06 100644 --- a/src/backend/utils/adt/datum.c +++ b/src/backend/utils/adt/datum.c @@ -299,9 +299,9 @@ datum_image_eq(Datum value1, Datum value2, bool typByVal, int typLen) len1 - VARHDRSZ) == 0); /* Only free memory if it's a copy made here. */ - if ((Pointer) arg1val != (Pointer) value1) + if ((Pointer) arg1val != DatumGetPointer(value1)) pfree(arg1val); - if ((Pointer) arg2val != (Pointer) value2) + if ((Pointer) arg2val != DatumGetPointer(value2)) pfree(arg2val); } } @@ -355,7 +355,7 @@ datum_image_hash(Datum value, bool typByVal, int typLen) result = hash_bytes((unsigned char *) VARDATA_ANY(val), len - VARHDRSZ); /* Only free memory if it's a copy made here. */ - if ((Pointer) val != (Pointer) value) + if ((Pointer) val != DatumGetPointer(value)) pfree(val); } else if (typLen == -2) diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c index 25865b660ef83..894d226541f23 100644 --- a/src/backend/utils/adt/dbsize.c +++ b/src/backend/utils/adt/dbsize.c @@ -19,12 +19,12 @@ #include "catalog/pg_authid.h" #include "catalog/pg_database.h" #include "catalog/pg_tablespace.h" -#include "commands/dbcommands.h" #include "commands/tablespace.h" #include "miscadmin.h" #include "storage/fd.h" #include "utils/acl.h" #include "utils/builtins.h" +#include "utils/lsyscache.h" #include "utils/numeric.h" #include "utils/rel.h" #include "utils/relfilenumbermap.h" @@ -938,6 +938,9 @@ pg_relation_filenode(PG_FUNCTION_ARGS) * * We don't fail but return NULL if we cannot find a mapping. * + * Temporary relations are not detected, so NULL is returned for them (see + * RelidByRelfilenumber() for the reasons). + * * InvalidOid can be passed instead of the current database's default * tablespace. 
*/ diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c index 1d05481181db7..78e19ac39ac17 100644 --- a/src/backend/utils/adt/formatting.c +++ b/src/backend/utils/adt/formatting.c @@ -1565,6 +1565,8 @@ get_th(char *num, int type) int len = strlen(num), last; + Assert(len > 0); + last = *(num + (len - 1)); if (!isdigit((unsigned char) last)) ereport(ERROR, @@ -6387,12 +6389,12 @@ numeric_to_char(PG_FUNCTION_ARGS) if (IS_ROMAN(&Num)) { int32 intvalue; - bool err; + ErrorSaveContext escontext = {T_ErrorSaveContext}; /* Round and convert to int */ - intvalue = numeric_int4_opt_error(value, &err); + intvalue = numeric_int4_safe(value, (Node *) &escontext); /* On overflow, just use PG_INT32_MAX; int_to_roman will cope */ - if (err) + if (escontext.error_occurred) intvalue = PG_INT32_MAX; numstr = int_to_roman(intvalue); } diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c index 9dd5889f34c62..bdea490202a64 100644 --- a/src/backend/utils/adt/int8.c +++ b/src/backend/utils/adt/int8.c @@ -718,76 +718,29 @@ int8lcm(PG_FUNCTION_ARGS) Datum int8inc(PG_FUNCTION_ARGS) { - /* - * When int8 is pass-by-reference, we provide this special case to avoid - * palloc overhead for COUNT(): when called as an aggregate, we know that - * the argument is modifiable local storage, so just update it in-place. - * (If int8 is pass-by-value, then of course this is useless as well as - * incorrect, so just ifdef it out.) - */ -#ifndef USE_FLOAT8_BYVAL /* controls int8 too */ - if (AggCheckCallContext(fcinfo, NULL)) - { - int64 *arg = (int64 *) PG_GETARG_POINTER(0); - - if (unlikely(pg_add_s64_overflow(*arg, 1, arg))) - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("bigint out of range"))); - - PG_RETURN_POINTER(arg); - } - else -#endif - { - /* Not called as an aggregate, so just do it the dumb way */ - int64 arg = PG_GETARG_INT64(0); - int64 result; + int64 arg = PG_GETARG_INT64(0); + int64 result; - if (unlikely(pg_add_s64_overflow(arg, 1, &result))) - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("bigint out of range"))); + if (unlikely(pg_add_s64_overflow(arg, 1, &result))) + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), + errmsg("bigint out of range"))); - PG_RETURN_INT64(result); - } + PG_RETURN_INT64(result); } Datum int8dec(PG_FUNCTION_ARGS) { - /* - * When int8 is pass-by-reference, we provide this special case to avoid - * palloc overhead for COUNT(): when called as an aggregate, we know that - * the argument is modifiable local storage, so just update it in-place. - * (If int8 is pass-by-value, then of course this is useless as well as - * incorrect, so just ifdef it out.) 
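With USE_FLOAT8_BYVAL gone, int8 is always passed by value, so the in-place aggregate shortcut above no longer buys anything and only the overflow-checked path survives. As a reminder of the helper's contract, pg_add_s64_overflow() from common/int.h returns true on overflow and otherwise stores the sum through its out parameter; a freestanding sketch:

/* Illustration only: overflow-checked 64-bit increment. */
#include "postgres.h"
#include "common/int.h"

static int64
checked_inc(int64 arg)
{
	int64		result;

	/* true return means the addition overflowed */
	if (unlikely(pg_add_s64_overflow(arg, 1, &result)))
		ereport(ERROR,
				(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
				 errmsg("bigint out of range")));
	return result;
}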
- */ -#ifndef USE_FLOAT8_BYVAL /* controls int8 too */ - if (AggCheckCallContext(fcinfo, NULL)) - { - int64 *arg = (int64 *) PG_GETARG_POINTER(0); - - if (unlikely(pg_sub_s64_overflow(*arg, 1, arg))) - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("bigint out of range"))); - PG_RETURN_POINTER(arg); - } - else -#endif - { - /* Not called as an aggregate, so just do it the dumb way */ - int64 arg = PG_GETARG_INT64(0); - int64 result; + int64 arg = PG_GETARG_INT64(0); + int64 result; - if (unlikely(pg_sub_s64_overflow(arg, 1, &result))) - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("bigint out of range"))); + if (unlikely(pg_sub_s64_overflow(arg, 1, &result))) + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), + errmsg("bigint out of range"))); - PG_RETURN_INT64(result); - } + PG_RETURN_INT64(result); } diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c index 51452755f5868..e9d370cb3da8e 100644 --- a/src/backend/utils/adt/json.c +++ b/src/backend/utils/adt/json.c @@ -904,7 +904,7 @@ json_unique_hash(const void *key, Size keysize) hash ^= hash_bytes((const unsigned char *) entry->key, entry->key_len); - return DatumGetUInt32(hash); + return hash; } static int diff --git a/src/backend/utils/adt/jsonb_gin.c b/src/backend/utils/adt/jsonb_gin.c index c1950792b5aea..9b56248cf0bee 100644 --- a/src/backend/utils/adt/jsonb_gin.c +++ b/src/backend/utils/adt/jsonb_gin.c @@ -896,8 +896,8 @@ gin_extract_jsonb_query(PG_FUNCTION_ARGS) continue; /* We rely on the array elements not being toasted */ entries[j++] = make_text_key(JGINFLAG_KEY, - VARDATA_ANY(key_datums[i]), - VARSIZE_ANY_EXHDR(key_datums[i])); + VARDATA_ANY(DatumGetPointer(key_datums[i])), + VARSIZE_ANY_EXHDR(DatumGetPointer(key_datums[i]))); } *nentries = j; diff --git a/src/backend/utils/adt/jsonb_op.c b/src/backend/utils/adt/jsonb_op.c index fa5603f26e1d6..51d38e321fb2f 100644 --- a/src/backend/utils/adt/jsonb_op.c +++ b/src/backend/utils/adt/jsonb_op.c @@ -63,8 +63,8 @@ jsonb_exists_any(PG_FUNCTION_ARGS) strVal.type = jbvString; /* We rely on the array elements not being toasted */ - strVal.val.string.val = VARDATA_ANY(key_datums[i]); - strVal.val.string.len = VARSIZE_ANY_EXHDR(key_datums[i]); + strVal.val.string.val = VARDATA_ANY(DatumGetPointer(key_datums[i])); + strVal.val.string.len = VARSIZE_ANY_EXHDR(DatumGetPointer(key_datums[i])); if (findJsonbValueFromContainer(&jb->root, JB_FOBJECT | JB_FARRAY, @@ -96,8 +96,8 @@ jsonb_exists_all(PG_FUNCTION_ARGS) strVal.type = jbvString; /* We rely on the array elements not being toasted */ - strVal.val.string.val = VARDATA_ANY(key_datums[i]); - strVal.val.string.len = VARSIZE_ANY_EXHDR(key_datums[i]); + strVal.val.string.val = VARDATA_ANY(DatumGetPointer(key_datums[i])); + strVal.val.string.len = VARSIZE_ANY_EXHDR(DatumGetPointer(key_datums[i])); if (findJsonbValueFromContainer(&jb->root, JB_FOBJECT | JB_FARRAY, diff --git a/src/backend/utils/adt/jsonbsubs.c b/src/backend/utils/adt/jsonbsubs.c index de64d49851251..e8626d3b4fc6e 100644 --- a/src/backend/utils/adt/jsonbsubs.c +++ b/src/backend/utils/adt/jsonbsubs.c @@ -51,7 +51,7 @@ jsonb_subscript_transform(SubscriptingRef *sbsref, /* * Transform and convert the subscript expressions. Jsonb subscripting - * does not support slices, look only and the upper index. + * does not support slices, look only at the upper index. 
*/ foreach(idx, indirection) { diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index bcb1720b6cde2..c5e1a027956bc 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -2027,7 +2027,7 @@ each_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, bool as_text) { /* a json null is an sql null in text mode */ nulls[1] = true; - values[1] = (Datum) NULL; + values[1] = (Datum) 0; } else values[1] = PointerGetDatum(JsonbValueAsText(&v)); @@ -2266,7 +2266,7 @@ elements_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, { /* a json null is an sql null in text mode */ nulls[0] = true; - values[0] = (Datum) NULL; + values[0] = (Datum) 0; } else values[0] = PointerGetDatum(JsonbValueAsText(&v)); @@ -2389,7 +2389,7 @@ elements_array_element_end(void *state, bool isnull) if (isnull && _state->normalize_results) { nulls[0] = true; - values[0] = (Datum) NULL; + values[0] = (Datum) 0; } else if (_state->next_scalar) { @@ -4766,8 +4766,8 @@ jsonb_delete_array(PG_FUNCTION_ARGS) continue; /* We rely on the array elements not being toasted */ - keyptr = VARDATA_ANY(keys_elems[i]); - keylen = VARSIZE_ANY_EXHDR(keys_elems[i]); + keyptr = VARDATA_ANY(DatumGetPointer(keys_elems[i])); + keylen = VARSIZE_ANY_EXHDR(DatumGetPointer(keys_elems[i])); if (keylen == v.val.string.len && memcmp(keyptr, v.val.string.val, keylen) == 0) { diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c index dbab24737ef1f..8156695e97e09 100644 --- a/src/backend/utils/adt/jsonpath_exec.c +++ b/src/backend/utils/adt/jsonpath_exec.c @@ -252,7 +252,8 @@ typedef JsonPathBool (*JsonPathPredicateCallback) (JsonPathItem *jsp, JsonbValue *larg, JsonbValue *rarg, void *param); -typedef Numeric (*BinaryArithmFunc) (Numeric num1, Numeric num2, bool *error); +typedef Numeric (*BinaryArithmFunc) (Numeric num1, Numeric num2, + Node *escontext); static JsonPathExecResult executeJsonPath(JsonPath *path, void *vars, JsonPathGetVarCallback getVar, @@ -808,23 +809,23 @@ executeItemOptUnwrapTarget(JsonPathExecContext *cxt, JsonPathItem *jsp, case jpiAdd: return executeBinaryArithmExpr(cxt, jsp, jb, - numeric_add_opt_error, found); + numeric_add_safe, found); case jpiSub: return executeBinaryArithmExpr(cxt, jsp, jb, - numeric_sub_opt_error, found); + numeric_sub_safe, found); case jpiMul: return executeBinaryArithmExpr(cxt, jsp, jb, - numeric_mul_opt_error, found); + numeric_mul_safe, found); case jpiDiv: return executeBinaryArithmExpr(cxt, jsp, jb, - numeric_div_opt_error, found); + numeric_div_safe, found); case jpiMod: return executeBinaryArithmExpr(cxt, jsp, jb, - numeric_mod_opt_error, found); + numeric_mod_safe, found); case jpiPlus: return executeUnaryArithmExpr(cxt, jsp, jb, NULL, found); @@ -1269,11 +1270,12 @@ executeItemOptUnwrapTarget(JsonPathExecContext *cxt, JsonPathItem *jsp, if (jb->type == jbvNumeric) { - bool have_error; + ErrorSaveContext escontext = {T_ErrorSaveContext}; int64 val; - val = numeric_int8_opt_error(jb->val.numeric, &have_error); - if (have_error) + val = numeric_int8_safe(jb->val.numeric, + (Node *) &escontext); + if (escontext.error_occurred) RETURN_ERROR(ereport(ERROR, (errcode(ERRCODE_NON_NUMERIC_SQL_JSON_ITEM), errmsg("argument \"%s\" of jsonpath item method .%s() is invalid for type %s", @@ -1466,7 +1468,6 @@ executeItemOptUnwrapTarget(JsonPathExecContext *cxt, JsonPathItem *jsp, Datum dtypmod; int32 precision; int32 scale = 0; - bool have_error; bool noerr; ArrayType *arrtypmod; Datum datums[2]; @@ 
-1478,9 +1479,9 @@ executeItemOptUnwrapTarget(JsonPathExecContext *cxt, JsonPathItem *jsp, if (elem.type != jpiNumeric) elog(ERROR, "invalid jsonpath item type for .decimal() precision"); - precision = numeric_int4_opt_error(jspGetNumeric(&elem), - &have_error); - if (have_error) + precision = numeric_int4_safe(jspGetNumeric(&elem), + (Node *) &escontext); + if (escontext.error_occurred) RETURN_ERROR(ereport(ERROR, (errcode(ERRCODE_NON_NUMERIC_SQL_JSON_ITEM), errmsg("precision of jsonpath item method .%s() is out of range for type integer", @@ -1492,9 +1493,9 @@ executeItemOptUnwrapTarget(JsonPathExecContext *cxt, JsonPathItem *jsp, if (elem.type != jpiNumeric) elog(ERROR, "invalid jsonpath item type for .decimal() scale"); - scale = numeric_int4_opt_error(jspGetNumeric(&elem), - &have_error); - if (have_error) + scale = numeric_int4_safe(jspGetNumeric(&elem), + (Node *) &escontext); + if (escontext.error_occurred) RETURN_ERROR(ereport(ERROR, (errcode(ERRCODE_NON_NUMERIC_SQL_JSON_ITEM), errmsg("scale of jsonpath item method .%s() is out of range for type integer", @@ -1517,7 +1518,7 @@ executeItemOptUnwrapTarget(JsonPathExecContext *cxt, JsonPathItem *jsp, /* Convert numstr to Numeric with typmod */ Assert(numstr != NULL); noerr = DirectInputFunctionCallSafe(numeric_in, numstr, - InvalidOid, dtypmod, + InvalidOid, DatumGetInt32(dtypmod), (Node *) &escontext, &numdatum); @@ -1550,11 +1551,12 @@ executeItemOptUnwrapTarget(JsonPathExecContext *cxt, JsonPathItem *jsp, if (jb->type == jbvNumeric) { - bool have_error; int32 val; + ErrorSaveContext escontext = {T_ErrorSaveContext}; - val = numeric_int4_opt_error(jb->val.numeric, &have_error); - if (have_error) + val = numeric_int4_safe(jb->val.numeric, + (Node *) &escontext); + if (escontext.error_occurred) RETURN_ERROR(ereport(ERROR, (errcode(ERRCODE_NON_NUMERIC_SQL_JSON_ITEM), errmsg("argument \"%s\" of jsonpath item method .%s() is invalid for type %s", @@ -2149,11 +2151,11 @@ executeBinaryArithmExpr(JsonPathExecContext *cxt, JsonPathItem *jsp, } else { - bool error = false; + ErrorSaveContext escontext = {T_ErrorSaveContext}; - res = func(lval->val.numeric, rval->val.numeric, &error); + res = func(lval->val.numeric, rval->val.numeric, (Node *) &escontext); - if (error) + if (escontext.error_occurred) return jperError; } @@ -2433,7 +2435,7 @@ executeDateTimeMethod(JsonPathExecContext *cxt, JsonPathItem *jsp, if (jsp->type != jpiDatetime && jsp->type != jpiDate && jsp->content.arg) { - bool have_error; + ErrorSaveContext escontext = {T_ErrorSaveContext}; jspGetArg(jsp, &elem); @@ -2441,9 +2443,9 @@ executeDateTimeMethod(JsonPathExecContext *cxt, JsonPathItem *jsp, elog(ERROR, "invalid jsonpath item type for %s argument", jspOperationName(jsp->type)); - time_precision = numeric_int4_opt_error(jspGetNumeric(&elem), - &have_error); - if (have_error) + time_precision = numeric_int4_safe(jspGetNumeric(&elem), + (Node *) &escontext); + if (escontext.error_occurred) RETURN_ERROR(ereport(ERROR, (errcode(ERRCODE_INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION), errmsg("time precision of jsonpath item method .%s() is out of range for type integer", @@ -3074,8 +3076,8 @@ JsonItemFromDatum(Datum val, Oid typid, int32 typmod, JsonbValue *res) case TEXTOID: case VARCHAROID: res->type = jbvString; - res->val.string.val = VARDATA_ANY(val); - res->val.string.len = VARSIZE_ANY_EXHDR(val); + res->val.string.val = VARDATA_ANY(DatumGetPointer(val)); + res->val.string.len = VARSIZE_ANY_EXHDR(DatumGetPointer(val)); break; case DATEOID: case TIMEOID: @@ -3462,7 
+3464,7 @@ getArrayIndex(JsonPathExecContext *cxt, JsonPathItem *jsp, JsonbValue *jb, JsonValueList found = {0}; JsonPathExecResult res = executeItem(cxt, jsp, jb, &found); Datum numeric_index; - bool have_error = false; + ErrorSaveContext escontext = {T_ErrorSaveContext}; if (jperIsError(res)) return res; @@ -3477,10 +3479,10 @@ getArrayIndex(JsonPathExecContext *cxt, JsonPathItem *jsp, JsonbValue *jb, NumericGetDatum(jbv->val.numeric), Int32GetDatum(0)); - *index = numeric_int4_opt_error(DatumGetNumeric(numeric_index), - &have_error); + *index = numeric_int4_safe(DatumGetNumeric(numeric_index), + (Node *) &escontext); - if (have_error) + if (escontext.error_occurred) RETURN_ERROR(ereport(ERROR, (errcode(ERRCODE_INVALID_SQL_JSON_SUBSCRIPT), errmsg("jsonpath array subscript is out of integer range")))); diff --git a/src/backend/utils/adt/lockfuncs.c b/src/backend/utils/adt/lockfuncs.c index 00e67fb46d074..df938812dd368 100644 --- a/src/backend/utils/adt/lockfuncs.c +++ b/src/backend/utils/adt/lockfuncs.c @@ -398,15 +398,15 @@ pg_lock_status(PG_FUNCTION_ARGS) values[0] = CStringGetTextDatum(PredicateLockTagTypeNames[lockType]); /* lock target */ - values[1] = GET_PREDICATELOCKTARGETTAG_DB(*predTag); - values[2] = GET_PREDICATELOCKTARGETTAG_RELATION(*predTag); + values[1] = ObjectIdGetDatum(GET_PREDICATELOCKTARGETTAG_DB(*predTag)); + values[2] = ObjectIdGetDatum(GET_PREDICATELOCKTARGETTAG_RELATION(*predTag)); if (lockType == PREDLOCKTAG_TUPLE) - values[4] = GET_PREDICATELOCKTARGETTAG_OFFSET(*predTag); + values[4] = UInt16GetDatum(GET_PREDICATELOCKTARGETTAG_OFFSET(*predTag)); else nulls[4] = true; if ((lockType == PREDLOCKTAG_TUPLE) || (lockType == PREDLOCKTAG_PAGE)) - values[3] = GET_PREDICATELOCKTARGETTAG_PAGE(*predTag); + values[3] = UInt32GetDatum(GET_PREDICATELOCKTARGETTAG_PAGE(*predTag)); else nulls[3] = true; diff --git a/src/backend/utils/adt/mac.c b/src/backend/utils/adt/mac.c index 3644e9735f5d0..bb38ef2f5e440 100644 --- a/src/backend/utils/adt/mac.c +++ b/src/backend/utils/adt/mac.c @@ -481,33 +481,26 @@ macaddr_abbrev_convert(Datum original, SortSupport ssup) Datum res; /* - * On a 64-bit machine, zero out the 8-byte datum and copy the 6 bytes of - * the MAC address in. There will be two bytes of zero padding on the end - * of the least significant bits. + * Zero out the 8-byte Datum and copy in the 6 bytes of the MAC address. + * There will be two bytes of zero padding on the end of the least + * significant bits. */ -#if SIZEOF_DATUM == 8 - memset(&res, 0, SIZEOF_DATUM); + StaticAssertStmt(sizeof(res) >= sizeof(macaddr), + "Datum is too small for macaddr"); + memset(&res, 0, sizeof(res)); memcpy(&res, authoritative, sizeof(macaddr)); -#else /* SIZEOF_DATUM != 8 */ - memcpy(&res, authoritative, SIZEOF_DATUM); -#endif uss->input_count += 1; /* - * Cardinality estimation. The estimate uses uint32, so on a 64-bit - * architecture, XOR the two 32-bit halves together to produce slightly - * more entropy. The two zeroed bytes won't have any practical impact on - * this operation. + * Cardinality estimation. The estimate uses uint32, so XOR the two 32-bit + * halves together to produce slightly more entropy. The two zeroed bytes + * won't have any practical impact on this operation. 
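The numeric_*_safe() calls used throughout the jsonpath changes above follow the soft-error protocol from nodes/miscnodes.h: the callee records the failure in the passed ErrorSaveContext instead of throwing. A minimal sketch of the calling side, assuming a Numeric value in hand:

/* Illustration only: capturing a conversion error softly. */
#include "postgres.h"
#include "nodes/miscnodes.h"
#include "utils/numeric.h"

static bool
to_int32_soft(Numeric num, int32 *out)
{
	ErrorSaveContext escontext = {T_ErrorSaveContext};

	*out = numeric_int4_safe(num, (Node *) &escontext);

	/* on failure the error was captured, not thrown */
	return !escontext.error_occurred;
}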
*/ if (uss->estimating) { uint32 tmp; -#if SIZEOF_DATUM == 8 - tmp = (uint32) res ^ (uint32) ((uint64) res >> 32); -#else /* SIZEOF_DATUM != 8 */ - tmp = (uint32) res; -#endif + tmp = DatumGetUInt32(res) ^ (uint32) (DatumGetUInt64(res) >> 32); addHyperLogLog(&uss->abbr_card, DatumGetUInt32(hash_uint32(tmp))); } diff --git a/src/backend/utils/adt/meson.build b/src/backend/utils/adt/meson.build index ed9bbd7b9266b..12fa0c209127c 100644 --- a/src/backend/utils/adt/meson.build +++ b/src/backend/utils/adt/meson.build @@ -1,5 +1,15 @@ # Copyright (c) 2022-2025, PostgreSQL Global Development Group +# Some code in numeric.c benefits from auto-vectorization +numeric_backend_lib = static_library('numeric_backend_lib', + 'numeric.c', + dependencies: backend_build_deps, + kwargs: internal_lib_args, + c_args: vectorize_cflags, +) + +backend_link_with += numeric_backend_lib + backend_sources += files( 'acl.c', 'amutils.c', @@ -55,12 +65,12 @@ backend_sources += files( 'misc.c', 'multirangetypes.c', 'multirangetypes_selfuncs.c', + 'multixactfuncs.c', 'name.c', 'network.c', 'network_gist.c', 'network_selfuncs.c', 'network_spgist.c', - 'numeric.c', 'numutils.c', 'oid.c', 'oracle_compat.c', diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c index 6fcfd031428ed..6c5e3438447b0 100644 --- a/src/backend/utils/adt/misc.c +++ b/src/backend/utils/adt/misc.c @@ -26,7 +26,6 @@ #include "catalog/pg_tablespace.h" #include "catalog/pg_type.h" #include "catalog/system_fk_info.h" -#include "commands/dbcommands.h" #include "commands/tablespace.h" #include "common/keywords.h" #include "funcapi.h" diff --git a/src/backend/utils/adt/multirangetypes.c b/src/backend/utils/adt/multirangetypes.c index cd84ced5b487c..84733dc50195b 100644 --- a/src/backend/utils/adt/multirangetypes.c +++ b/src/backend/utils/adt/multirangetypes.c @@ -394,12 +394,13 @@ multirange_send(PG_FUNCTION_ARGS) for (int i = 0; i < range_count; i++) { Datum range; + bytea *outputbytes; range = RangeTypePGetDatum(ranges[i]); - range = PointerGetDatum(SendFunctionCall(&cache->typioproc, range)); + outputbytes = SendFunctionCall(&cache->typioproc, range); - pq_sendint32(buf, VARSIZE(range) - VARHDRSZ); - pq_sendbytes(buf, VARDATA(range), VARSIZE(range) - VARHDRSZ); + pq_sendint32(buf, VARSIZE(outputbytes) - VARHDRSZ); + pq_sendbytes(buf, VARDATA(outputbytes), VARSIZE(outputbytes) - VARHDRSZ); } PG_RETURN_BYTEA_P(pq_endtypsend(buf)); @@ -2081,15 +2082,14 @@ range_overleft_multirange_internal(TypeCacheEntry *rangetyp, bool empty; if (RangeIsEmpty(r) || MultirangeIsEmpty(mr)) - PG_RETURN_BOOL(false); - + return false; range_deserialize(rangetyp, r, &lower1, &upper1, &empty); Assert(!empty); multirange_get_bounds(rangetyp, mr, mr->rangeCount - 1, &lower2, &upper2); - PG_RETURN_BOOL(range_cmp_bounds(rangetyp, &upper1, &upper2) <= 0); + return (range_cmp_bounds(rangetyp, &upper1, &upper2) <= 0); } Datum @@ -2166,7 +2166,7 @@ range_overright_multirange_internal(TypeCacheEntry *rangetyp, bool empty; if (RangeIsEmpty(r) || MultirangeIsEmpty(mr)) - PG_RETURN_BOOL(false); + return false; range_deserialize(rangetyp, r, &lower1, &upper1, &empty); Assert(!empty); @@ -2523,7 +2523,7 @@ multirange_adjacent_range(PG_FUNCTION_ARGS) TypeCacheEntry *typcache; if (RangeIsEmpty(r) || MultirangeIsEmpty(mr)) - return false; + PG_RETURN_BOOL(false); typcache = multirange_get_typcache(fcinfo, MultirangeTypeGetOid(mr)); @@ -2544,7 +2544,7 @@ multirange_adjacent_multirange(PG_FUNCTION_ARGS) upper2; if (MultirangeIsEmpty(mr1) || MultirangeIsEmpty(mr2)) - return false; 
+ PG_RETURN_BOOL(false); typcache = multirange_get_typcache(fcinfo, MultirangeTypeGetOid(mr1)); @@ -2639,7 +2639,7 @@ multirange_cmp(PG_FUNCTION_ARGS) Datum multirange_lt(PG_FUNCTION_ARGS) { - int cmp = multirange_cmp(fcinfo); + int cmp = DatumGetInt32(multirange_cmp(fcinfo)); PG_RETURN_BOOL(cmp < 0); } @@ -2647,7 +2647,7 @@ multirange_lt(PG_FUNCTION_ARGS) Datum multirange_le(PG_FUNCTION_ARGS) { - int cmp = multirange_cmp(fcinfo); + int cmp = DatumGetInt32(multirange_cmp(fcinfo)); PG_RETURN_BOOL(cmp <= 0); } @@ -2655,7 +2655,7 @@ multirange_le(PG_FUNCTION_ARGS) Datum multirange_ge(PG_FUNCTION_ARGS) { - int cmp = multirange_cmp(fcinfo); + int cmp = DatumGetInt32(multirange_cmp(fcinfo)); PG_RETURN_BOOL(cmp >= 0); } @@ -2663,7 +2663,7 @@ multirange_ge(PG_FUNCTION_ARGS) Datum multirange_gt(PG_FUNCTION_ARGS) { - int cmp = multirange_cmp(fcinfo); + int cmp = DatumGetInt32(multirange_cmp(fcinfo)); PG_RETURN_BOOL(cmp > 0); } @@ -2833,7 +2833,7 @@ hash_multirange(PG_FUNCTION_ARGS) upper_hash = 0; /* Merge hashes of flags and bounds */ - range_hash = hash_uint32((uint32) flags); + range_hash = hash_bytes_uint32((uint32) flags); range_hash ^= lower_hash; range_hash = pg_rotate_left32(range_hash, 1); range_hash ^= upper_hash; diff --git a/src/backend/utils/adt/multixactfuncs.c b/src/backend/utils/adt/multixactfuncs.c new file mode 100644 index 0000000000000..e74ea93834860 --- /dev/null +++ b/src/backend/utils/adt/multixactfuncs.c @@ -0,0 +1,87 @@ +/*------------------------------------------------------------------------- + * + * multixactfuncs.c + * Functions for accessing multixact-related data. + * + * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * IDENTIFICATION + * src/backend/utils/adt/multixactfuncs.c + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/multixact.h" +#include "funcapi.h" +#include "utils/builtins.h" + +/* + * pg_get_multixact_members + * + * Returns information about the MultiXactMembers of the specified + * MultiXactId. 
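The multirange fixes above all concern the fmgr calling convention: PG_RETURN_BOOL() belongs only in functions returning Datum, internal helpers returning bool must use a plain return, and a Datum produced by calling multirange_cmp() directly has to be unwrapped with DatumGetInt32(). A compact sketch with hypothetical names:

/* Illustration only. */
#include "postgres.h"
#include "fmgr.h"

static bool
my_helper(int32 x)				/* internal helper: plain C types */
{
	return x > 0;				/* not PG_RETURN_BOOL() */
}

PG_FUNCTION_INFO_V1(my_sql_func);

Datum
my_sql_func(PG_FUNCTION_ARGS)	/* fmgr-callable: traffics in Datum */
{
	int32		arg = PG_GETARG_INT32(0);

	PG_RETURN_BOOL(my_helper(arg));		/* wrap only at the fmgr boundary */
}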
+ */ +Datum +pg_get_multixact_members(PG_FUNCTION_ARGS) +{ + typedef struct + { + MultiXactMember *members; + int nmembers; + int iter; + } mxact; + MultiXactId mxid = PG_GETARG_TRANSACTIONID(0); + mxact *multi; + FuncCallContext *funccxt; + + if (mxid < FirstMultiXactId) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid MultiXactId: %u", mxid))); + + if (SRF_IS_FIRSTCALL()) + { + MemoryContext oldcxt; + TupleDesc tupdesc; + + funccxt = SRF_FIRSTCALL_INIT(); + oldcxt = MemoryContextSwitchTo(funccxt->multi_call_memory_ctx); + + multi = palloc(sizeof(mxact)); + /* no need to allow for old values here */ + multi->nmembers = GetMultiXactIdMembers(mxid, &multi->members, false, + false); + multi->iter = 0; + + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); + funccxt->tuple_desc = tupdesc; + funccxt->attinmeta = TupleDescGetAttInMetadata(tupdesc); + funccxt->user_fctx = multi; + + MemoryContextSwitchTo(oldcxt); + } + + funccxt = SRF_PERCALL_SETUP(); + multi = (mxact *) funccxt->user_fctx; + + while (multi->iter < multi->nmembers) + { + HeapTuple tuple; + char *values[2]; + + values[0] = psprintf("%u", multi->members[multi->iter].xid); + values[1] = mxstatus_to_string(multi->members[multi->iter].status); + + tuple = BuildTupleFromCStrings(funccxt->attinmeta, values); + + multi->iter++; + pfree(values[0]); + SRF_RETURN_NEXT(funccxt, HeapTupleGetDatum(tuple)); + } + + SRF_RETURN_DONE(funccxt); +} diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c index 9fd211b2d4576..3cb0ab6829ae8 100644 --- a/src/backend/utils/adt/network.c +++ b/src/backend/utils/adt/network.c @@ -567,24 +567,11 @@ network_abbrev_abort(int memtupcount, SortSupport ssup) * * When generating abbreviated keys for SortSupport, we pack as much as we can * into a datum while ensuring that when comparing those keys as integers, - * these rules will be respected. Exact contents depend on IP family and datum - * size. + * these rules will be respected. Exact contents depend on IP family: * * IPv4 * ---- * - * 4 byte datums: - * - * Start with 1 bit for the IP family (IPv4 or IPv6; this bit is present in - * every case below) followed by all but 1 of the netmasked bits. - * - * +----------+---------------------+ - * | 1 bit IP | 31 bits network | (1 bit network - * | family | (truncated) | omitted) - * +----------+---------------------+ - * - * 8 byte datums: - * * We have space to store all netmasked bits, followed by the netmask size, * followed by 25 bits of the subnet (25 bits is usually more than enough in * practice). cidr datums always have all-zero subnet bits. @@ -597,15 +584,6 @@ network_abbrev_abort(int memtupcount, SortSupport ssup) * IPv6 * ---- * - * 4 byte datums: - * - * +----------+---------------------+ - * | 1 bit IP | 31 bits network | (up to 97 bits - * | family | (truncated) | network omitted) - * +----------+---------------------+ - * - * 8 byte datums: - * * +----------+---------------------------------+ * | 1 bit IP | 63 bits network | (up to 65 bits * | family | (truncated) | network omitted) @@ -628,8 +606,7 @@ network_abbrev_convert(Datum original, SortSupport ssup) /* * Get an unsigned integer representation of the IP address by taking its * first 4 or 8 bytes. Always take all 4 bytes of an IPv4 address. Take - * the first 8 bytes of an IPv6 address with an 8 byte datum and 4 bytes - * otherwise. + * the first 8 bytes of an IPv6 address. 
* * We're consuming an array of unsigned char, so byteswap on little endian * systems (an inet's ipaddr field stores the most significant byte @@ -659,7 +636,7 @@ network_abbrev_convert(Datum original, SortSupport ssup) ipaddr_datum = DatumBigEndianToNative(ipaddr_datum); /* Initialize result with ipfamily (most significant) bit set */ - res = ((Datum) 1) << (SIZEOF_DATUM * BITS_PER_BYTE - 1); + res = ((Datum) 1) << (sizeof(Datum) * BITS_PER_BYTE - 1); } /* @@ -668,8 +645,7 @@ network_abbrev_convert(Datum original, SortSupport ssup) * while low order bits go in "subnet" component when there is space for * one. This is often accomplished by generating a temp datum subnet * bitmask, which we may reuse later when generating the subnet bits - * themselves. (Note that subnet bits are only used with IPv4 datums on - * platforms where datum is 8 bytes.) + * themselves. * * The number of bits in subnet is used to generate a datum subnet * bitmask. For example, with a /24 IPv4 datum there are 8 subnet bits @@ -681,14 +657,14 @@ network_abbrev_convert(Datum original, SortSupport ssup) subnet_size = ip_maxbits(authoritative) - ip_bits(authoritative); Assert(subnet_size >= 0); /* subnet size must work with prefix ipaddr cases */ - subnet_size %= SIZEOF_DATUM * BITS_PER_BYTE; + subnet_size %= sizeof(Datum) * BITS_PER_BYTE; if (ip_bits(authoritative) == 0) { /* Fit as many ipaddr bits as possible into subnet */ subnet_bitmask = ((Datum) 0) - 1; network = 0; } - else if (ip_bits(authoritative) < SIZEOF_DATUM * BITS_PER_BYTE) + else if (ip_bits(authoritative) < sizeof(Datum) * BITS_PER_BYTE) { /* Split ipaddr bits between network and subnet */ subnet_bitmask = (((Datum) 1) << subnet_size) - 1; @@ -701,12 +677,11 @@ network_abbrev_convert(Datum original, SortSupport ssup) network = ipaddr_datum; } -#if SIZEOF_DATUM == 8 if (ip_family(authoritative) == PGSQL_AF_INET) { /* - * IPv4 with 8 byte datums: keep all 32 netmasked bits, netmask size, - * and most significant 25 subnet bits + * IPv4: keep all 32 netmasked bits, netmask size, and most + * significant 25 subnet bits */ Datum netmask_size = (Datum) ip_bits(authoritative); Datum subnet; @@ -750,12 +725,11 @@ network_abbrev_convert(Datum original, SortSupport ssup) res |= network | netmask_size | subnet; } else -#endif { /* - * 4 byte datums, or IPv6 with 8 byte datums: Use as many of the - * netmasked bits as will fit in final abbreviated key. Avoid - * clobbering the ipfamily bit that was set earlier. + * IPv6: Use as many of the netmasked bits as will fit in final + * abbreviated key. Avoid clobbering the ipfamily bit that was set + * earlier. 
*/ res |= network >> 1; } @@ -767,11 +741,7 @@ network_abbrev_convert(Datum original, SortSupport ssup) { uint32 tmp; -#if SIZEOF_DATUM == 8 - tmp = (uint32) res ^ (uint32) ((uint64) res >> 32); -#else /* SIZEOF_DATUM != 8 */ - tmp = (uint32) res; -#endif + tmp = DatumGetUInt32(res) ^ (uint32) (DatumGetUInt64(res) >> 32); addHyperLogLog(&uss->abbr_card, DatumGetUInt32(hash_uint32(tmp))); } diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index c9233565d57a7..76269918593d7 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -28,6 +28,7 @@ #include "common/hashfn.h" #include "common/int.h" +#include "common/int128.h" #include "funcapi.h" #include "lib/hyperloglog.h" #include "libpq/pqformat.h" @@ -391,30 +392,21 @@ typedef struct NumericSumAccum /* * We define our own macros for packing and unpacking abbreviated-key - * representations for numeric values in order to avoid depending on - * USE_FLOAT8_BYVAL. The type of abbreviation we use is based only on - * the size of a datum, not the argument-passing convention for float8. + * representations, just to have a notational indication that that's + * what we're doing. Now that sizeof(Datum) is always 8, we can rely + * on fitting an int64 into Datum. * - * The range of abbreviations for finite values is from +PG_INT64/32_MAX - * to -PG_INT64/32_MAX. NaN has the abbreviation PG_INT64/32_MIN, and we + * The range of abbreviations for finite values is from +PG_INT64_MAX + * to -PG_INT64_MAX. NaN has the abbreviation PG_INT64_MIN, and we * define the sort ordering to make that work out properly (see further * comments below). PINF and NINF share the abbreviations of the largest * and smallest finite abbreviation classes. */ -#define NUMERIC_ABBREV_BITS (SIZEOF_DATUM * BITS_PER_BYTE) -#if SIZEOF_DATUM == 8 -#define NumericAbbrevGetDatum(X) ((Datum) (X)) -#define DatumGetNumericAbbrev(X) ((int64) (X)) +#define NumericAbbrevGetDatum(X) Int64GetDatum(X) +#define DatumGetNumericAbbrev(X) DatumGetInt64(X) #define NUMERIC_ABBREV_NAN NumericAbbrevGetDatum(PG_INT64_MIN) #define NUMERIC_ABBREV_PINF NumericAbbrevGetDatum(-PG_INT64_MAX) #define NUMERIC_ABBREV_NINF NumericAbbrevGetDatum(PG_INT64_MAX) -#else -#define NumericAbbrevGetDatum(X) ((Datum) (X)) -#define DatumGetNumericAbbrev(X) ((int32) (X)) -#define NUMERIC_ABBREV_NAN NumericAbbrevGetDatum(PG_INT32_MIN) -#define NUMERIC_ABBREV_PINF NumericAbbrevGetDatum(-PG_INT32_MAX) -#define NUMERIC_ABBREV_NINF NumericAbbrevGetDatum(PG_INT32_MAX) -#endif /* ---------- @@ -525,7 +517,7 @@ static void numericvar_deserialize(StringInfo buf, NumericVar *var); static Numeric duplicate_numeric(Numeric num); static Numeric make_result(const NumericVar *var); -static Numeric make_result_opt_error(const NumericVar *var, bool *have_error); +static Numeric make_result_safe(const NumericVar *var, Node *escontext); static bool apply_typmod(NumericVar *var, int32 typmod, Node *escontext); static bool apply_typmod_special(Numeric num, int32 typmod, Node *escontext); @@ -534,10 +526,7 @@ static bool numericvar_to_int32(const NumericVar *var, int32 *result); static bool numericvar_to_int64(const NumericVar *var, int64 *result); static void int64_to_numericvar(int64 val, NumericVar *var); static bool numericvar_to_uint64(const NumericVar *var, uint64 *result); -#ifdef HAVE_INT128 -static bool numericvar_to_int128(const NumericVar *var, int128 *result); -static void int128_to_numericvar(int128 val, NumericVar *var); -#endif +static void int128_to_numericvar(INT128 val, 
NumericVar *var); static double numericvar_to_double_no_overflow(const NumericVar *var); static Datum numeric_abbrev_convert(Datum original_datum, SortSupport ssup); @@ -728,7 +717,6 @@ numeric_in(PG_FUNCTION_ARGS) */ NumericVar value; int base; - bool have_error; init_var(&value); @@ -787,12 +775,7 @@ numeric_in(PG_FUNCTION_ARGS) if (!apply_typmod(&value, typmod, escontext)) PG_RETURN_NULL(); - res = make_result_opt_error(&value, &have_error); - - if (have_error) - ereturn(escontext, (Datum) 0, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("value overflows numeric format"))); + res = make_result_safe(&value, escontext); free_var(&value); } @@ -2098,12 +2081,11 @@ compute_bucket(Numeric operand, Numeric bound1, Numeric bound2, * while this could be worked on itself, the abbreviation strategy gives more * speedup in many common cases. * - * Two different representations are used for the abbreviated form, one in - * int32 and one in int64, whichever fits into a by-value Datum. In both cases - * the representation is negated relative to the original value, because we use - * the largest negative value for NaN, which sorts higher than other values. We - * convert the absolute value of the numeric to a 31-bit or 63-bit positive - * value, and then negate it if the original number was positive. + * The abbreviated format is an int64. The representation is negated relative + * to the original value, because we use the largest negative value for NaN, + * which sorts higher than other values. We convert the absolute value of the + * numeric to a 63-bit positive value, and then negate it if the original + * number was positive. * * We abort the abbreviation process if the abbreviation cardinality is below * 0.01% of the row count (1 per 10k non-null rows). The actual break-even @@ -2330,7 +2312,7 @@ numeric_cmp_abbrev(Datum x, Datum y, SortSupport ssup) } /* - * Abbreviate a NumericVar according to the available bit size. + * Abbreviate a NumericVar into the 64-bit sortsupport size. * * The 31-bit value is constructed as: * @@ -2374,9 +2356,6 @@ numeric_cmp_abbrev(Datum x, Datum y, SortSupport ssup) * with all bits zero. This allows simple comparisons to work on the composite * value. */ - -#if NUMERIC_ABBREV_BITS == 64 - static Datum numeric_abbrev_convert_var(const NumericVar *var, NumericSortSupport *nss) { @@ -2428,84 +2407,6 @@ numeric_abbrev_convert_var(const NumericVar *var, NumericSortSupport *nss) return NumericAbbrevGetDatum(result); } -#endif /* NUMERIC_ABBREV_BITS == 64 */ - -#if NUMERIC_ABBREV_BITS == 32 - -static Datum -numeric_abbrev_convert_var(const NumericVar *var, NumericSortSupport *nss) -{ - int ndigits = var->ndigits; - int weight = var->weight; - int32 result; - - if (ndigits == 0 || weight < -11) - { - result = 0; - } - else if (weight > 20) - { - result = PG_INT32_MAX; - } - else - { - NumericDigit nxt1 = (ndigits > 1) ? var->digits[1] : 0; - - weight = (weight + 11) * 4; - - result = var->digits[0]; - - /* - * "result" now has 1 to 4 nonzero decimal digits. We pack in more - * digits to make 7 in total (largest we can fit in 24 bits) - */ - - if (result > 999) - { - /* already have 4 digits, add 3 more */ - result = (result * 1000) + (nxt1 / 10); - weight += 3; - } - else if (result > 99) - { - /* already have 3 digits, add 4 more */ - result = (result * 10000) + nxt1; - weight += 2; - } - else if (result > 9) - { - NumericDigit nxt2 = (ndigits > 2) ? 
var->digits[2] : 0; - - /* already have 2 digits, add 5 more */ - result = (result * 100000) + (nxt1 * 10) + (nxt2 / 1000); - weight += 1; - } - else - { - NumericDigit nxt2 = (ndigits > 2) ? var->digits[2] : 0; - - /* already have 1 digit, add 6 more */ - result = (result * 1000000) + (nxt1 * 100) + (nxt2 / 100); - } - - result = result | (weight << 24); - } - - /* the abbrev is negated relative to the original */ - if (var->sign == NUMERIC_POS) - result = -result; - - if (nss->estimating) - { - uint32 tmp = (uint32) result; - - addHyperLogLog(&nss->abbr_card, DatumGetUInt32(hash_uint32(tmp))); - } - - return NumericAbbrevGetDatum(result); -} - -#endif /* NUMERIC_ABBREV_BITS == 32 */ /* * Ordinary (non-sortsupport) comparisons follow. @@ -2967,20 +2868,18 @@ numeric_add(PG_FUNCTION_ARGS) Numeric num2 = PG_GETARG_NUMERIC(1); Numeric res; - res = numeric_add_opt_error(num1, num2, NULL); + res = numeric_add_safe(num1, num2, NULL); PG_RETURN_NUMERIC(res); } /* - * numeric_add_opt_error() - + * numeric_add_safe() - * - * Internal version of numeric_add(). If "*have_error" flag is provided, - * on error it's set to true, NULL returned. This is helpful when caller - * need to handle errors by itself. + * Internal version of numeric_add() with support for soft error reporting. */ Numeric -numeric_add_opt_error(Numeric num1, Numeric num2, bool *have_error) +numeric_add_safe(Numeric num1, Numeric num2, Node *escontext) { NumericVar arg1; NumericVar arg2; @@ -3024,7 +2923,7 @@ numeric_add_opt_error(Numeric num1, Numeric num2, bool *have_error) init_var(&result); add_var(&arg1, &arg2, &result); - res = make_result_opt_error(&result, have_error); + res = make_result_safe(&result, escontext); free_var(&result); @@ -3044,21 +2943,19 @@ numeric_sub(PG_FUNCTION_ARGS) Numeric num2 = PG_GETARG_NUMERIC(1); Numeric res; - res = numeric_sub_opt_error(num1, num2, NULL); + res = numeric_sub_safe(num1, num2, NULL); PG_RETURN_NUMERIC(res); } /* - * numeric_sub_opt_error() - + * numeric_sub_safe() - * - * Internal version of numeric_sub(). If "*have_error" flag is provided, - * on error it's set to true, NULL returned. This is helpful when caller - * need to handle errors by itself. + * Internal version of numeric_sub() with support for soft error reporting. */ Numeric -numeric_sub_opt_error(Numeric num1, Numeric num2, bool *have_error) +numeric_sub_safe(Numeric num1, Numeric num2, Node *escontext) { NumericVar arg1; NumericVar arg2; @@ -3102,7 +2999,7 @@ numeric_sub_opt_error(Numeric num1, Numeric num2, bool *have_error) init_var(&result); sub_var(&arg1, &arg2, &result); - res = make_result_opt_error(&result, have_error); + res = make_result_safe(&result, escontext); free_var(&result); @@ -3122,21 +3019,19 @@ numeric_mul(PG_FUNCTION_ARGS) Numeric num2 = PG_GETARG_NUMERIC(1); Numeric res; - res = numeric_mul_opt_error(num1, num2, NULL); + res = numeric_mul_safe(num1, num2, NULL); PG_RETURN_NUMERIC(res); } /* - * numeric_mul_opt_error() - + * numeric_mul_safe() - * - * Internal version of numeric_mul(). If "*have_error" flag is provided, - * on error it's set to true, NULL returned. This is helpful when caller - * need to handle errors by itself. + * Internal version of numeric_mul() with support for soft error reporting. 
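+ *
+ * For example, a caller that wants to trap overflow rather than throw
+ * could use an error-saving context (a sketch, not code from this patch):
+ *
+ *		ErrorSaveContext escontext = {T_ErrorSaveContext};
+ *		Numeric		prod = numeric_mul_safe(a, b, (Node *) &escontext);
+ *
+ *		if (escontext.error_occurred)
+ *			... prod is NULL; substitute a fallback or re-report ...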
*/ Numeric -numeric_mul_opt_error(Numeric num1, Numeric num2, bool *have_error) +numeric_mul_safe(Numeric num1, Numeric num2, Node *escontext) { NumericVar arg1; NumericVar arg2; @@ -3223,7 +3118,7 @@ numeric_mul_opt_error(Numeric num1, Numeric num2, bool *have_error) if (result.dscale > NUMERIC_DSCALE_MAX) round_var(&result, NUMERIC_DSCALE_MAX); - res = make_result_opt_error(&result, have_error); + res = make_result_safe(&result, escontext); free_var(&result); @@ -3243,21 +3138,19 @@ numeric_div(PG_FUNCTION_ARGS) Numeric num2 = PG_GETARG_NUMERIC(1); Numeric res; - res = numeric_div_opt_error(num1, num2, NULL); + res = numeric_div_safe(num1, num2, NULL); PG_RETURN_NUMERIC(res); } /* - * numeric_div_opt_error() - + * numeric_div_safe() - * - * Internal version of numeric_div(). If "*have_error" flag is provided, - * on error it's set to true, NULL returned. This is helpful when caller - * need to handle errors by itself. + * Internal version of numeric_div() with support for soft error reporting. */ Numeric -numeric_div_opt_error(Numeric num1, Numeric num2, bool *have_error) +numeric_div_safe(Numeric num1, Numeric num2, Node *escontext) { NumericVar arg1; NumericVar arg2; @@ -3265,9 +3158,6 @@ numeric_div_opt_error(Numeric num1, Numeric num2, bool *have_error) Numeric res; int rscale; - if (have_error) - *have_error = false; - /* * Handle NaN and infinities */ @@ -3282,15 +3172,7 @@ numeric_div_opt_error(Numeric num1, Numeric num2, bool *have_error) switch (numeric_sign_internal(num2)) { case 0: - if (have_error) - { - *have_error = true; - return NULL; - } - ereport(ERROR, - (errcode(ERRCODE_DIVISION_BY_ZERO), - errmsg("division by zero"))); - break; + goto division_by_zero; case 1: return make_result(&const_pinf); case -1: @@ -3305,15 +3187,7 @@ numeric_div_opt_error(Numeric num1, Numeric num2, bool *have_error) switch (numeric_sign_internal(num2)) { case 0: - if (have_error) - { - *have_error = true; - return NULL; - } - ereport(ERROR, - (errcode(ERRCODE_DIVISION_BY_ZERO), - errmsg("division by zero"))); - break; + goto division_by_zero; case 1: return make_result(&const_ninf); case -1: @@ -3344,25 +3218,25 @@ numeric_div_opt_error(Numeric num1, Numeric num2, bool *have_error) */ rscale = select_div_scale(&arg1, &arg2); - /* - * If "have_error" is provided, check for division by zero here - */ - if (have_error && (arg2.ndigits == 0 || arg2.digits[0] == 0)) - { - *have_error = true; - return NULL; - } + /* Check for division by zero */ + if (arg2.ndigits == 0 || arg2.digits[0] == 0) + goto division_by_zero; /* * Do the divide and return the result */ div_var(&arg1, &arg2, &result, rscale, true, true); - res = make_result_opt_error(&result, have_error); + res = make_result_safe(&result, escontext); free_var(&result); return res; + +division_by_zero: + ereturn(escontext, NULL, + errcode(ERRCODE_DIVISION_BY_ZERO), + errmsg("division by zero")); } @@ -3467,30 +3341,25 @@ numeric_mod(PG_FUNCTION_ARGS) Numeric num2 = PG_GETARG_NUMERIC(1); Numeric res; - res = numeric_mod_opt_error(num1, num2, NULL); + res = numeric_mod_safe(num1, num2, NULL); PG_RETURN_NUMERIC(res); } /* - * numeric_mod_opt_error() - + * numeric_mod_safe() - * - * Internal version of numeric_mod(). If "*have_error" flag is provided, - * on error it's set to true, NULL returned. This is helpful when caller - * need to handle errors by itself. + * Internal version of numeric_mod() with support for soft error reporting. 
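+ *
+ * For example, mod('inf', 2) yields NaN, while a zero divisor is reported
+ * through the shared division_by_zero exit, softly when escontext allows.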
 */
 Numeric
-numeric_mod_opt_error(Numeric num1, Numeric num2, bool *have_error)
+numeric_mod_safe(Numeric num1, Numeric num2, Node *escontext)
 {
 	Numeric		res;
 	NumericVar	arg1;
 	NumericVar	arg2;
 	NumericVar	result;
 
-	if (have_error)
-		*have_error = false;
-
 	/*
 	 * Handle NaN and infinities.  We follow POSIX fmod() on this, except that
 	 * POSIX treats x-is-infinite and y-is-zero identically, raising EDOM and
@@ -3503,16 +3372,8 @@ numeric_mod_opt_error(Numeric num1, Numeric num2, bool *have_error)
 	if (NUMERIC_IS_INF(num1))
 	{
 		if (numeric_sign_internal(num2) == 0)
-		{
-			if (have_error)
-			{
-				*have_error = true;
-				return NULL;
-			}
-			ereport(ERROR,
-					(errcode(ERRCODE_DIVISION_BY_ZERO),
-					 errmsg("division by zero")));
-		}
+			goto division_by_zero;
+
 		/* Inf % any nonzero = NaN */
 		return make_result(&const_nan);
 	}
@@ -3525,22 +3386,22 @@ numeric_mod_opt_error(Numeric num1, Numeric num2, bool *have_error)
 
 	init_var(&result);
 
-	/*
-	 * If "have_error" is provided, check for division by zero here
-	 */
-	if (have_error && (arg2.ndigits == 0 || arg2.digits[0] == 0))
-	{
-		*have_error = true;
-		return NULL;
-	}
+	/* Check for division by zero */
+	if (arg2.ndigits == 0 || arg2.digits[0] == 0)
+		goto division_by_zero;
 
 	mod_var(&arg1, &arg2, &result);
 
-	res = make_result_opt_error(&result, NULL);
+	res = make_result_safe(&result, escontext);
 
 	free_var(&result);
 
 	return res;
+
+division_by_zero:
+	ereturn(escontext, NULL,
+			errcode(ERRCODE_DIVISION_BY_ZERO),
+			errmsg("division by zero"));
 }
 
 
@@ -4463,25 +4324,13 @@ int64_div_fast_to_numeric(int64 val1, int log10val2)
 
 	if (unlikely(pg_mul_s64_overflow(val1, factor, &new_val1)))
 	{
-#ifdef HAVE_INT128
 		/* do the multiplication using 128-bit integers */
-		int128		tmp;
+		INT128		tmp;
 
-		tmp = (int128) val1 * (int128) factor;
+		tmp = int64_to_int128(0);
+		int128_add_int64_mul_int64(&tmp, val1, factor);
 
 		int128_to_numericvar(tmp, &result);
-#else
-		/* do the multiplication using numerics */
-		NumericVar	tmp;
-
-		init_var(&tmp);
-
-		int64_to_numericvar(val1, &result);
-		int64_to_numericvar(factor, &tmp);
-		mul_var(&result, &tmp, &result, 0);
-
-		free_var(&tmp);
-#endif
 	}
 	else
 		int64_to_numericvar(new_val1, &result);
@@ -4509,52 +4358,34 @@ int4_numeric(PG_FUNCTION_ARGS)
 	PG_RETURN_NUMERIC(int64_to_numeric(val));
 }
 
+/*
+ * Internal version of numeric_int4() with support for soft error reporting.
+ */
 int32
-numeric_int4_opt_error(Numeric num, bool *have_error)
+numeric_int4_safe(Numeric num, Node *escontext)
 {
 	NumericVar	x;
 	int32		result;
 
-	if (have_error)
-		*have_error = false;
-
 	if (NUMERIC_IS_SPECIAL(num))
 	{
-		if (have_error)
-		{
-			*have_error = true;
-			return 0;
-		}
+		if (NUMERIC_IS_NAN(num))
+			ereturn(escontext, 0,
+					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+					 errmsg("cannot convert NaN to %s", "integer")));
 		else
-		{
-			if (NUMERIC_IS_NAN(num))
-				ereport(ERROR,
-						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-						 errmsg("cannot convert NaN to %s", "integer")));
-			else
-				ereport(ERROR,
-						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-						 errmsg("cannot convert infinity to %s", "integer")));
-		}
+			ereturn(escontext, 0,
+					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+					 errmsg("cannot convert infinity to %s", "integer")));
 	}
 
 	/* Convert to variable format, then convert to int4 */
 	init_var_from_num(num, &x);
 
 	if (!numericvar_to_int32(&x, &result))
-	{
-		if (have_error)
-		{
-			*have_error = true;
-			return 0;
-		}
-		else
-		{
-			ereport(ERROR,
-					(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
-					 errmsg("integer out of range")));
-		}
-	}
+		ereturn(escontext, 0,
+				(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
+				 errmsg("integer out of range")));
 
 	return result;
 }
@@ -4564,7 +4395,7 @@ numeric_int4(PG_FUNCTION_ARGS)
 {
 	Numeric		num = PG_GETARG_NUMERIC(0);
 
-	PG_RETURN_INT32(numeric_int4_opt_error(num, NULL));
+	PG_RETURN_INT32(numeric_int4_safe(num, NULL));
 }
 
 /*
@@ -4597,52 +4428,34 @@ int8_numeric(PG_FUNCTION_ARGS)
 	PG_RETURN_NUMERIC(int64_to_numeric(val));
 }
 
+/*
+ * Internal version of numeric_int8() with support for soft error reporting.
+ */
 int64
-numeric_int8_opt_error(Numeric num, bool *have_error)
+numeric_int8_safe(Numeric num, Node *escontext)
 {
 	NumericVar	x;
 	int64		result;
 
-	if (have_error)
-		*have_error = false;
-
 	if (NUMERIC_IS_SPECIAL(num))
 	{
-		if (have_error)
-		{
-			*have_error = true;
-			return 0;
-		}
+		if (NUMERIC_IS_NAN(num))
+			ereturn(escontext, 0,
+					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+					 errmsg("cannot convert NaN to %s", "bigint")));
 		else
-		{
-			if (NUMERIC_IS_NAN(num))
-				ereport(ERROR,
-						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-						 errmsg("cannot convert NaN to %s", "bigint")));
-			else
-				ereport(ERROR,
-						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-						 errmsg("cannot convert infinity to %s", "bigint")));
-		}
+			ereturn(escontext, 0,
+					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+					 errmsg("cannot convert infinity to %s", "bigint")));
 	}
 
 	/* Convert to variable format, then convert to int8 */
 	init_var_from_num(num, &x);
 
 	if (!numericvar_to_int64(&x, &result))
-	{
-		if (have_error)
-		{
-			*have_error = true;
-			return 0;
-		}
-		else
-		{
-			ereport(ERROR,
-					(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
-					 errmsg("bigint out of range")));
-		}
-	}
+		ereturn(escontext, 0,
+				(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
+				 errmsg("bigint out of range")));
 
 	return result;
 }
@@ -4652,7 +4465,7 @@ numeric_int8(PG_FUNCTION_ARGS)
 {
 	Numeric		num = PG_GETARG_NUMERIC(0);
 
-	PG_RETURN_INT64(numeric_int8_opt_error(num, NULL));
+	PG_RETURN_INT64(numeric_int8_safe(num, NULL));
 }
 
 
@@ -4901,8 +4714,8 @@ numeric_pg_lsn(PG_FUNCTION_ARGS)
  * Actually, it's a pointer to a NumericAggState allocated in the aggregate
  * context.  The digit buffers for the NumericVars will be there too.
 *
- * On platforms which support 128-bit integers some aggregates instead use a
- * 128-bit integer based transition datatype to speed up calculations.
+ * For integer inputs, some aggregates use special-purpose 64-bit or 128-bit + * integer based transition datatypes to speed up calculations. * * ---------------------------------------------------------------------- */ @@ -5566,26 +5379,27 @@ numeric_accum_inv(PG_FUNCTION_ARGS) /* - * Integer data types in general use Numeric accumulators to share code - * and avoid risk of overflow. + * Integer data types in general use Numeric accumulators to share code and + * avoid risk of overflow. However for performance reasons optimized + * special-purpose accumulator routines are used when possible: * - * However for performance reasons optimized special-purpose accumulator - * routines are used when possible. + * For 16-bit and 32-bit inputs, N and sum(X) fit into 64-bit, so 64-bit + * accumulators are used for SUM and AVG of these data types. * - * On platforms with 128-bit integer support, the 128-bit routines will be - * used when sum(X) or sum(X*X) fit into 128-bit. + * For 16-bit and 32-bit inputs, sum(X^2) fits into 128-bit, so 128-bit + * accumulators are used for STDDEV_POP, STDDEV_SAMP, VAR_POP, and VAR_SAMP of + * these data types. * - * For 16 and 32 bit inputs, the N and sum(X) fit into 64-bit so the 64-bit - * accumulators will be used for SUM and AVG of these data types. + * For 64-bit inputs, sum(X) fits into 128-bit, so a 128-bit accumulator is + * used for SUM(int8) and AVG(int8). */ -#ifdef HAVE_INT128 typedef struct Int128AggState { bool calcSumX2; /* if true, calculate sumX2 */ int64 N; /* count of processed numbers */ - int128 sumX; /* sum of processed numbers */ - int128 sumX2; /* sum of squares of processed numbers */ + INT128 sumX; /* sum of processed numbers */ + INT128 sumX2; /* sum of squares of processed numbers */ } Int128AggState; /* @@ -5631,12 +5445,12 @@ makeInt128AggStateCurrentContext(bool calcSumX2) * Accumulate a new input value for 128-bit aggregate functions. */ static void -do_int128_accum(Int128AggState *state, int128 newval) +do_int128_accum(Int128AggState *state, int64 newval) { if (state->calcSumX2) - state->sumX2 += newval * newval; + int128_add_int64_mul_int64(&state->sumX2, newval, newval); - state->sumX += newval; + int128_add_int64(&state->sumX, newval); state->N++; } @@ -5644,43 +5458,28 @@ do_int128_accum(Int128AggState *state, int128 newval) * Remove an input value from the aggregated state. */ static void -do_int128_discard(Int128AggState *state, int128 newval) +do_int128_discard(Int128AggState *state, int64 newval) { if (state->calcSumX2) - state->sumX2 -= newval * newval; + int128_sub_int64_mul_int64(&state->sumX2, newval, newval); - state->sumX -= newval; + int128_sub_int64(&state->sumX, newval); state->N--; } -typedef Int128AggState PolyNumAggState; -#define makePolyNumAggState makeInt128AggState -#define makePolyNumAggStateCurrentContext makeInt128AggStateCurrentContext -#else -typedef NumericAggState PolyNumAggState; -#define makePolyNumAggState makeNumericAggState -#define makePolyNumAggStateCurrentContext makeNumericAggStateCurrentContext -#endif - Datum int2_accum(PG_FUNCTION_ARGS) { - PolyNumAggState *state; + Int128AggState *state; - state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0); + state = PG_ARGISNULL(0) ? 
NULL : (Int128AggState *) PG_GETARG_POINTER(0); /* Create the state data on the first call */ if (state == NULL) - state = makePolyNumAggState(fcinfo, true); + state = makeInt128AggState(fcinfo, true); if (!PG_ARGISNULL(1)) - { -#ifdef HAVE_INT128 - do_int128_accum(state, (int128) PG_GETARG_INT16(1)); -#else - do_numeric_accum(state, int64_to_numeric(PG_GETARG_INT16(1))); -#endif - } + do_int128_accum(state, PG_GETARG_INT16(1)); PG_RETURN_POINTER(state); } @@ -5688,22 +5487,16 @@ int2_accum(PG_FUNCTION_ARGS) Datum int4_accum(PG_FUNCTION_ARGS) { - PolyNumAggState *state; + Int128AggState *state; - state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0); + state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0); /* Create the state data on the first call */ if (state == NULL) - state = makePolyNumAggState(fcinfo, true); + state = makeInt128AggState(fcinfo, true); if (!PG_ARGISNULL(1)) - { -#ifdef HAVE_INT128 - do_int128_accum(state, (int128) PG_GETARG_INT32(1)); -#else - do_numeric_accum(state, int64_to_numeric(PG_GETARG_INT32(1))); -#endif - } + do_int128_accum(state, PG_GETARG_INT32(1)); PG_RETURN_POINTER(state); } @@ -5726,21 +5519,21 @@ int8_accum(PG_FUNCTION_ARGS) } /* - * Combine function for numeric aggregates which require sumX2 + * Combine function for Int128AggState for aggregates which require sumX2 */ Datum numeric_poly_combine(PG_FUNCTION_ARGS) { - PolyNumAggState *state1; - PolyNumAggState *state2; + Int128AggState *state1; + Int128AggState *state2; MemoryContext agg_context; MemoryContext old_context; if (!AggCheckCallContext(fcinfo, &agg_context)) elog(ERROR, "aggregate function called in non-aggregate context"); - state1 = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0); - state2 = PG_ARGISNULL(1) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(1); + state1 = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0); + state2 = PG_ARGISNULL(1) ? NULL : (Int128AggState *) PG_GETARG_POINTER(1); if (state2 == NULL) PG_RETURN_POINTER(state1); @@ -5750,16 +5543,10 @@ numeric_poly_combine(PG_FUNCTION_ARGS) { old_context = MemoryContextSwitchTo(agg_context); - state1 = makePolyNumAggState(fcinfo, true); + state1 = makeInt128AggState(fcinfo, true); state1->N = state2->N; - -#ifdef HAVE_INT128 state1->sumX = state2->sumX; state1->sumX2 = state2->sumX2; -#else - accum_sum_copy(&state1->sumX, &state2->sumX); - accum_sum_copy(&state1->sumX2, &state2->sumX2); -#endif MemoryContextSwitchTo(old_context); @@ -5769,54 +5556,51 @@ numeric_poly_combine(PG_FUNCTION_ARGS) if (state2->N > 0) { state1->N += state2->N; + int128_add_int128(&state1->sumX, state2->sumX); + int128_add_int128(&state1->sumX2, state2->sumX2); + } + PG_RETURN_POINTER(state1); +} -#ifdef HAVE_INT128 - state1->sumX += state2->sumX; - state1->sumX2 += state2->sumX2; -#else - /* The rest of this needs to work in the aggregate context */ - old_context = MemoryContextSwitchTo(agg_context); - - /* Accumulate sums */ - accum_sum_combine(&state1->sumX, &state2->sumX); - accum_sum_combine(&state1->sumX2, &state2->sumX2); +/* + * int128_serialize - serialize a 128-bit integer to binary format + */ +static inline void +int128_serialize(StringInfo buf, INT128 val) +{ + pq_sendint64(buf, PG_INT128_HI_INT64(val)); + pq_sendint64(buf, PG_INT128_LO_UINT64(val)); +} - MemoryContextSwitchTo(old_context); -#endif +/* + * int128_deserialize - deserialize binary format to a 128-bit integer. 
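+ *
+ * The hi/lo read order matches the write order in int128_serialize() above,
+ * so the pair round-trips any INT128 value exactly.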
+ */ +static inline INT128 +int128_deserialize(StringInfo buf) +{ + int64 hi = pq_getmsgint64(buf); + uint64 lo = pq_getmsgint64(buf); - } - PG_RETURN_POINTER(state1); + return make_int128(hi, lo); } /* * numeric_poly_serialize - * Serialize PolyNumAggState into bytea for aggregate functions which + * Serialize Int128AggState into bytea for aggregate functions which * require sumX2. */ Datum numeric_poly_serialize(PG_FUNCTION_ARGS) { - PolyNumAggState *state; + Int128AggState *state; StringInfoData buf; bytea *result; - NumericVar tmp_var; /* Ensure we disallow calling when not in aggregate context */ if (!AggCheckCallContext(fcinfo, NULL)) elog(ERROR, "aggregate function called in non-aggregate context"); - state = (PolyNumAggState *) PG_GETARG_POINTER(0); - - /* - * If the platform supports int128 then sumX and sumX2 will be a 128 bit - * integer type. Here we'll convert that into a numeric type so that the - * combine state is in the same format for both int128 enabled machines - * and machines which don't support that type. The logic here is that one - * day we might like to send these over to another server for further - * processing and we want a standard format to work with. - */ - - init_var(&tmp_var); + state = (Int128AggState *) PG_GETARG_POINTER(0); pq_begintypsend(&buf); @@ -5824,48 +5608,33 @@ numeric_poly_serialize(PG_FUNCTION_ARGS) pq_sendint64(&buf, state->N); /* sumX */ -#ifdef HAVE_INT128 - int128_to_numericvar(state->sumX, &tmp_var); -#else - accum_sum_final(&state->sumX, &tmp_var); -#endif - numericvar_serialize(&buf, &tmp_var); + int128_serialize(&buf, state->sumX); /* sumX2 */ -#ifdef HAVE_INT128 - int128_to_numericvar(state->sumX2, &tmp_var); -#else - accum_sum_final(&state->sumX2, &tmp_var); -#endif - numericvar_serialize(&buf, &tmp_var); + int128_serialize(&buf, state->sumX2); result = pq_endtypsend(&buf); - free_var(&tmp_var); - PG_RETURN_BYTEA_P(result); } /* * numeric_poly_deserialize - * Deserialize PolyNumAggState from bytea for aggregate functions which + * Deserialize Int128AggState from bytea for aggregate functions which * require sumX2. */ Datum numeric_poly_deserialize(PG_FUNCTION_ARGS) { bytea *sstate; - PolyNumAggState *result; + Int128AggState *result; StringInfoData buf; - NumericVar tmp_var; if (!AggCheckCallContext(fcinfo, NULL)) elog(ERROR, "aggregate function called in non-aggregate context"); sstate = PG_GETARG_BYTEA_PP(0); - init_var(&tmp_var); - /* * Initialize a StringInfo so that we can "receive" it using the standard * recv-function infrastructure. @@ -5873,31 +5642,19 @@ numeric_poly_deserialize(PG_FUNCTION_ARGS) initReadOnlyStringInfo(&buf, VARDATA_ANY(sstate), VARSIZE_ANY_EXHDR(sstate)); - result = makePolyNumAggStateCurrentContext(false); + result = makeInt128AggStateCurrentContext(false); /* N */ result->N = pq_getmsgint64(&buf); /* sumX */ - numericvar_deserialize(&buf, &tmp_var); -#ifdef HAVE_INT128 - numericvar_to_int128(&tmp_var, &result->sumX); -#else - accum_sum_add(&result->sumX, &tmp_var); -#endif + result->sumX = int128_deserialize(&buf); /* sumX2 */ - numericvar_deserialize(&buf, &tmp_var); -#ifdef HAVE_INT128 - numericvar_to_int128(&tmp_var, &result->sumX2); -#else - accum_sum_add(&result->sumX2, &tmp_var); -#endif + result->sumX2 = int128_deserialize(&buf); pq_getmsgend(&buf); - free_var(&tmp_var); - PG_RETURN_POINTER(result); } @@ -5907,43 +5664,37 @@ numeric_poly_deserialize(PG_FUNCTION_ARGS) Datum int8_avg_accum(PG_FUNCTION_ARGS) { - PolyNumAggState *state; + Int128AggState *state; - state = PG_ARGISNULL(0) ? 
NULL : (PolyNumAggState *) PG_GETARG_POINTER(0); + state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0); /* Create the state data on the first call */ if (state == NULL) - state = makePolyNumAggState(fcinfo, false); + state = makeInt128AggState(fcinfo, false); if (!PG_ARGISNULL(1)) - { -#ifdef HAVE_INT128 - do_int128_accum(state, (int128) PG_GETARG_INT64(1)); -#else - do_numeric_accum(state, int64_to_numeric(PG_GETARG_INT64(1))); -#endif - } + do_int128_accum(state, PG_GETARG_INT64(1)); PG_RETURN_POINTER(state); } /* - * Combine function for PolyNumAggState for aggregates which don't require + * Combine function for Int128AggState for aggregates which don't require * sumX2 */ Datum int8_avg_combine(PG_FUNCTION_ARGS) { - PolyNumAggState *state1; - PolyNumAggState *state2; + Int128AggState *state1; + Int128AggState *state2; MemoryContext agg_context; MemoryContext old_context; if (!AggCheckCallContext(fcinfo, &agg_context)) elog(ERROR, "aggregate function called in non-aggregate context"); - state1 = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0); - state2 = PG_ARGISNULL(1) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(1); + state1 = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0); + state2 = PG_ARGISNULL(1) ? NULL : (Int128AggState *) PG_GETARG_POINTER(1); if (state2 == NULL) PG_RETURN_POINTER(state1); @@ -5953,14 +5704,10 @@ int8_avg_combine(PG_FUNCTION_ARGS) { old_context = MemoryContextSwitchTo(agg_context); - state1 = makePolyNumAggState(fcinfo, false); + state1 = makeInt128AggState(fcinfo, false); state1->N = state2->N; - -#ifdef HAVE_INT128 state1->sumX = state2->sumX; -#else - accum_sum_copy(&state1->sumX, &state2->sumX); -#endif + MemoryContextSwitchTo(old_context); PG_RETURN_POINTER(state1); @@ -5969,52 +5716,28 @@ int8_avg_combine(PG_FUNCTION_ARGS) if (state2->N > 0) { state1->N += state2->N; - -#ifdef HAVE_INT128 - state1->sumX += state2->sumX; -#else - /* The rest of this needs to work in the aggregate context */ - old_context = MemoryContextSwitchTo(agg_context); - - /* Accumulate sums */ - accum_sum_combine(&state1->sumX, &state2->sumX); - - MemoryContextSwitchTo(old_context); -#endif - + int128_add_int128(&state1->sumX, state2->sumX); } PG_RETURN_POINTER(state1); } /* * int8_avg_serialize - * Serialize PolyNumAggState into bytea using the standard - * recv-function infrastructure. + * Serialize Int128AggState into bytea for aggregate functions which + * don't require sumX2. */ Datum int8_avg_serialize(PG_FUNCTION_ARGS) { - PolyNumAggState *state; + Int128AggState *state; StringInfoData buf; bytea *result; - NumericVar tmp_var; /* Ensure we disallow calling when not in aggregate context */ if (!AggCheckCallContext(fcinfo, NULL)) elog(ERROR, "aggregate function called in non-aggregate context"); - state = (PolyNumAggState *) PG_GETARG_POINTER(0); - - /* - * If the platform supports int128 then sumX will be a 128 integer type. - * Here we'll convert that into a numeric type so that the combine state - * is in the same format for both int128 enabled machines and machines - * which don't support that type. The logic here is that one day we might - * like to send these over to another server for further processing and we - * want a standard format to work with. 
- */ - - init_var(&tmp_var); + state = (Int128AggState *) PG_GETARG_POINTER(0); pq_begintypsend(&buf); @@ -6022,39 +5745,30 @@ int8_avg_serialize(PG_FUNCTION_ARGS) pq_sendint64(&buf, state->N); /* sumX */ -#ifdef HAVE_INT128 - int128_to_numericvar(state->sumX, &tmp_var); -#else - accum_sum_final(&state->sumX, &tmp_var); -#endif - numericvar_serialize(&buf, &tmp_var); + int128_serialize(&buf, state->sumX); result = pq_endtypsend(&buf); - free_var(&tmp_var); - PG_RETURN_BYTEA_P(result); } /* * int8_avg_deserialize - * Deserialize bytea back into PolyNumAggState. + * Deserialize Int128AggState from bytea for aggregate functions which + * don't require sumX2. */ Datum int8_avg_deserialize(PG_FUNCTION_ARGS) { bytea *sstate; - PolyNumAggState *result; + Int128AggState *result; StringInfoData buf; - NumericVar tmp_var; if (!AggCheckCallContext(fcinfo, NULL)) elog(ERROR, "aggregate function called in non-aggregate context"); sstate = PG_GETARG_BYTEA_PP(0); - init_var(&tmp_var); - /* * Initialize a StringInfo so that we can "receive" it using the standard * recv-function infrastructure. @@ -6062,23 +5776,16 @@ int8_avg_deserialize(PG_FUNCTION_ARGS) initReadOnlyStringInfo(&buf, VARDATA_ANY(sstate), VARSIZE_ANY_EXHDR(sstate)); - result = makePolyNumAggStateCurrentContext(false); + result = makeInt128AggStateCurrentContext(false); /* N */ result->N = pq_getmsgint64(&buf); /* sumX */ - numericvar_deserialize(&buf, &tmp_var); -#ifdef HAVE_INT128 - numericvar_to_int128(&tmp_var, &result->sumX); -#else - accum_sum_add(&result->sumX, &tmp_var); -#endif + result->sumX = int128_deserialize(&buf); pq_getmsgend(&buf); - free_var(&tmp_var); - PG_RETURN_POINTER(result); } @@ -6089,24 +5796,16 @@ int8_avg_deserialize(PG_FUNCTION_ARGS) Datum int2_accum_inv(PG_FUNCTION_ARGS) { - PolyNumAggState *state; + Int128AggState *state; - state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0); + state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0); /* Should not get here with no state */ if (state == NULL) elog(ERROR, "int2_accum_inv called with NULL state"); if (!PG_ARGISNULL(1)) - { -#ifdef HAVE_INT128 - do_int128_discard(state, (int128) PG_GETARG_INT16(1)); -#else - /* Should never fail, all inputs have dscale 0 */ - if (!do_numeric_discard(state, int64_to_numeric(PG_GETARG_INT16(1)))) - elog(ERROR, "do_numeric_discard failed unexpectedly"); -#endif - } + do_int128_discard(state, PG_GETARG_INT16(1)); PG_RETURN_POINTER(state); } @@ -6114,24 +5813,16 @@ int2_accum_inv(PG_FUNCTION_ARGS) Datum int4_accum_inv(PG_FUNCTION_ARGS) { - PolyNumAggState *state; + Int128AggState *state; - state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0); + state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0); /* Should not get here with no state */ if (state == NULL) elog(ERROR, "int4_accum_inv called with NULL state"); if (!PG_ARGISNULL(1)) - { -#ifdef HAVE_INT128 - do_int128_discard(state, (int128) PG_GETARG_INT32(1)); -#else - /* Should never fail, all inputs have dscale 0 */ - if (!do_numeric_discard(state, int64_to_numeric(PG_GETARG_INT32(1)))) - elog(ERROR, "do_numeric_discard failed unexpectedly"); -#endif - } + do_int128_discard(state, PG_GETARG_INT32(1)); PG_RETURN_POINTER(state); } @@ -6160,24 +5851,16 @@ int8_accum_inv(PG_FUNCTION_ARGS) Datum int8_avg_accum_inv(PG_FUNCTION_ARGS) { - PolyNumAggState *state; + Int128AggState *state; - state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0); + state = PG_ARGISNULL(0) ? 
NULL : (Int128AggState *) PG_GETARG_POINTER(0); /* Should not get here with no state */ if (state == NULL) elog(ERROR, "int8_avg_accum_inv called with NULL state"); if (!PG_ARGISNULL(1)) - { -#ifdef HAVE_INT128 - do_int128_discard(state, (int128) PG_GETARG_INT64(1)); -#else - /* Should never fail, all inputs have dscale 0 */ - if (!do_numeric_discard(state, int64_to_numeric(PG_GETARG_INT64(1)))) - elog(ERROR, "do_numeric_discard failed unexpectedly"); -#endif - } + do_int128_discard(state, PG_GETARG_INT64(1)); PG_RETURN_POINTER(state); } @@ -6185,12 +5868,11 @@ int8_avg_accum_inv(PG_FUNCTION_ARGS) Datum numeric_poly_sum(PG_FUNCTION_ARGS) { -#ifdef HAVE_INT128 - PolyNumAggState *state; + Int128AggState *state; Numeric res; NumericVar result; - state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0); + state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0); /* If there were no non-null inputs, return NULL */ if (state == NULL || state->N == 0) @@ -6205,21 +5887,17 @@ numeric_poly_sum(PG_FUNCTION_ARGS) free_var(&result); PG_RETURN_NUMERIC(res); -#else - return numeric_sum(fcinfo); -#endif } Datum numeric_poly_avg(PG_FUNCTION_ARGS) { -#ifdef HAVE_INT128 - PolyNumAggState *state; + Int128AggState *state; NumericVar result; Datum countd, sumd; - state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0); + state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0); /* If there were no non-null inputs, return NULL */ if (state == NULL || state->N == 0) @@ -6235,9 +5913,6 @@ numeric_poly_avg(PG_FUNCTION_ARGS) free_var(&result); PG_RETURN_DATUM(DirectFunctionCall2(numeric_div, sumd, countd)); -#else - return numeric_avg(fcinfo); -#endif } Datum @@ -6470,7 +6145,6 @@ numeric_stddev_pop(PG_FUNCTION_ARGS) PG_RETURN_NUMERIC(res); } -#ifdef HAVE_INT128 static Numeric numeric_poly_stddev_internal(Int128AggState *state, bool variance, bool sample, @@ -6514,17 +6188,15 @@ numeric_poly_stddev_internal(Int128AggState *state, return res; } -#endif Datum numeric_poly_var_samp(PG_FUNCTION_ARGS) { -#ifdef HAVE_INT128 - PolyNumAggState *state; + Int128AggState *state; Numeric res; bool is_null; - state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0); + state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0); res = numeric_poly_stddev_internal(state, true, true, &is_null); @@ -6532,20 +6204,16 @@ numeric_poly_var_samp(PG_FUNCTION_ARGS) PG_RETURN_NULL(); else PG_RETURN_NUMERIC(res); -#else - return numeric_var_samp(fcinfo); -#endif } Datum numeric_poly_stddev_samp(PG_FUNCTION_ARGS) { -#ifdef HAVE_INT128 - PolyNumAggState *state; + Int128AggState *state; Numeric res; bool is_null; - state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0); + state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0); res = numeric_poly_stddev_internal(state, false, true, &is_null); @@ -6553,20 +6221,16 @@ numeric_poly_stddev_samp(PG_FUNCTION_ARGS) PG_RETURN_NULL(); else PG_RETURN_NUMERIC(res); -#else - return numeric_stddev_samp(fcinfo); -#endif } Datum numeric_poly_var_pop(PG_FUNCTION_ARGS) { -#ifdef HAVE_INT128 - PolyNumAggState *state; + Int128AggState *state; Numeric res; bool is_null; - state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0); + state = PG_ARGISNULL(0) ? 
NULL : (Int128AggState *) PG_GETARG_POINTER(0); res = numeric_poly_stddev_internal(state, true, false, &is_null); @@ -6574,20 +6238,16 @@ numeric_poly_var_pop(PG_FUNCTION_ARGS) PG_RETURN_NULL(); else PG_RETURN_NUMERIC(res); -#else - return numeric_var_pop(fcinfo); -#endif } Datum numeric_poly_stddev_pop(PG_FUNCTION_ARGS) { -#ifdef HAVE_INT128 - PolyNumAggState *state; + Int128AggState *state; Numeric res; bool is_null; - state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0); + state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0); res = numeric_poly_stddev_internal(state, false, false, &is_null); @@ -6595,9 +6255,6 @@ numeric_poly_stddev_pop(PG_FUNCTION_ARGS) PG_RETURN_NULL(); else PG_RETURN_NUMERIC(res); -#else - return numeric_stddev_pop(fcinfo); -#endif } /* @@ -6623,6 +6280,7 @@ numeric_poly_stddev_pop(PG_FUNCTION_ARGS) Datum int2_sum(PG_FUNCTION_ARGS) { + int64 oldsum; int64 newval; if (PG_ARGISNULL(0)) @@ -6635,43 +6293,22 @@ int2_sum(PG_FUNCTION_ARGS) PG_RETURN_INT64(newval); } - /* - * If we're invoked as an aggregate, we can cheat and modify our first - * parameter in-place to avoid palloc overhead. If not, we need to return - * the new value of the transition variable. (If int8 is pass-by-value, - * then of course this is useless as well as incorrect, so just ifdef it - * out.) - */ -#ifndef USE_FLOAT8_BYVAL /* controls int8 too */ - if (AggCheckCallContext(fcinfo, NULL)) - { - int64 *oldsum = (int64 *) PG_GETARG_POINTER(0); - - /* Leave the running sum unchanged in the new input is null */ - if (!PG_ARGISNULL(1)) - *oldsum = *oldsum + (int64) PG_GETARG_INT16(1); + oldsum = PG_GETARG_INT64(0); - PG_RETURN_POINTER(oldsum); - } - else -#endif - { - int64 oldsum = PG_GETARG_INT64(0); - - /* Leave sum unchanged if new input is null. */ - if (PG_ARGISNULL(1)) - PG_RETURN_INT64(oldsum); + /* Leave sum unchanged if new input is null. */ + if (PG_ARGISNULL(1)) + PG_RETURN_INT64(oldsum); - /* OK to do the addition. */ - newval = oldsum + (int64) PG_GETARG_INT16(1); + /* OK to do the addition. */ + newval = oldsum + (int64) PG_GETARG_INT16(1); - PG_RETURN_INT64(newval); - } + PG_RETURN_INT64(newval); } Datum int4_sum(PG_FUNCTION_ARGS) { + int64 oldsum; int64 newval; if (PG_ARGISNULL(0)) @@ -6684,38 +6321,16 @@ int4_sum(PG_FUNCTION_ARGS) PG_RETURN_INT64(newval); } - /* - * If we're invoked as an aggregate, we can cheat and modify our first - * parameter in-place to avoid palloc overhead. If not, we need to return - * the new value of the transition variable. (If int8 is pass-by-value, - * then of course this is useless as well as incorrect, so just ifdef it - * out.) - */ -#ifndef USE_FLOAT8_BYVAL /* controls int8 too */ - if (AggCheckCallContext(fcinfo, NULL)) - { - int64 *oldsum = (int64 *) PG_GETARG_POINTER(0); - - /* Leave the running sum unchanged in the new input is null */ - if (!PG_ARGISNULL(1)) - *oldsum = *oldsum + (int64) PG_GETARG_INT32(1); - - PG_RETURN_POINTER(oldsum); - } - else -#endif - { - int64 oldsum = PG_GETARG_INT64(0); + oldsum = PG_GETARG_INT64(0); - /* Leave sum unchanged if new input is null. */ - if (PG_ARGISNULL(1)) - PG_RETURN_INT64(oldsum); + /* Leave sum unchanged if new input is null. */ + if (PG_ARGISNULL(1)) + PG_RETURN_INT64(oldsum); - /* OK to do the addition. */ - newval = oldsum + (int64) PG_GETARG_INT32(1); + /* OK to do the addition. 
*/ + newval = oldsum + (int64) PG_GETARG_INT32(1); - PG_RETURN_INT64(newval); - } + PG_RETURN_INT64(newval); } /* @@ -7886,16 +7501,13 @@ duplicate_numeric(Numeric num) } /* - * make_result_opt_error() - + * make_result_safe() - * * Create the packed db numeric format in palloc()'d memory from * a variable. This will handle NaN and Infinity cases. - * - * If "have_error" isn't NULL, on overflow *have_error is set to true and - * NULL is returned. This is helpful when caller needs to handle errors. */ static Numeric -make_result_opt_error(const NumericVar *var, bool *have_error) +make_result_safe(const NumericVar *var, Node *escontext) { Numeric result; NumericDigit *digits = var->digits; @@ -7904,9 +7516,6 @@ make_result_opt_error(const NumericVar *var, bool *have_error) int n; Size len; - if (have_error) - *have_error = false; - if ((sign & NUMERIC_SIGN_MASK) == NUMERIC_SPECIAL) { /* @@ -7979,19 +7588,9 @@ make_result_opt_error(const NumericVar *var, bool *have_error) /* Check for overflow of int16 fields */ if (NUMERIC_WEIGHT(result) != weight || NUMERIC_DSCALE(result) != var->dscale) - { - if (have_error) - { - *have_error = true; - return NULL; - } - else - { - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("value overflows numeric format"))); - } - } + ereturn(escontext, NULL, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), + errmsg("value overflows numeric format"))); dump_numeric("make_result()", result); return result; @@ -8001,12 +7600,12 @@ make_result_opt_error(const NumericVar *var, bool *have_error) /* * make_result() - * - * An interface to make_result_opt_error() without "have_error" argument. + * An interface to make_result_safe() without "escontext" argument. */ static Numeric make_result(const NumericVar *var) { - return make_result_opt_error(var, NULL); + return make_result_safe(var, NULL); } @@ -8330,105 +7929,23 @@ numericvar_to_uint64(const NumericVar *var, uint64 *result) return true; } -#ifdef HAVE_INT128 -/* - * Convert numeric to int128, rounding if needed. - * - * If overflow, return false (no error is raised). Return true if okay. - */ -static bool -numericvar_to_int128(const NumericVar *var, int128 *result) -{ - NumericDigit *digits; - int ndigits; - int weight; - int i; - int128 val, - oldval; - bool neg; - NumericVar rounded; - - /* Round to nearest integer */ - init_var(&rounded); - set_var_from_var(var, &rounded); - round_var(&rounded, 0); - - /* Check for zero input */ - strip_var(&rounded); - ndigits = rounded.ndigits; - if (ndigits == 0) - { - *result = 0; - free_var(&rounded); - return true; - } - - /* - * For input like 10000000000, we must treat stripped digits as real. So - * the loop assumes there are weight+1 digits before the decimal point. - */ - weight = rounded.weight; - Assert(weight >= 0 && ndigits <= weight + 1); - - /* Construct the result */ - digits = rounded.digits; - neg = (rounded.sign == NUMERIC_NEG); - val = digits[0]; - for (i = 1; i <= weight; i++) - { - oldval = val; - val *= NBASE; - if (i < ndigits) - val += digits[i]; - - /* - * The overflow check is a bit tricky because we want to accept - * INT128_MIN, which will overflow the positive accumulator. We can - * detect this case easily though because INT128_MIN is the only - * nonzero value for which -val == val (on a two's complement machine, - * anyway). - */ - if ((val / NBASE) != oldval) /* possible overflow? 
*/ - { - if (!neg || (-val) != val || val == 0 || oldval < 0) - { - free_var(&rounded); - return false; - } - } - } - - free_var(&rounded); - - *result = neg ? -val : val; - return true; -} - /* * Convert 128 bit integer to numeric. */ static void -int128_to_numericvar(int128 val, NumericVar *var) +int128_to_numericvar(INT128 val, NumericVar *var) { - uint128 uval, - newuval; + int sign; NumericDigit *ptr; int ndigits; + int32 dig; /* int128 can require at most 39 decimal digits; add one for safety */ alloc_var(var, 40 / DEC_DIGITS); - if (val < 0) - { - var->sign = NUMERIC_NEG; - uval = -val; - } - else - { - var->sign = NUMERIC_POS; - uval = val; - } + sign = int128_sign(val); + var->sign = sign < 0 ? NUMERIC_NEG : NUMERIC_POS; var->dscale = 0; - if (val == 0) + if (sign == 0) { var->ndigits = 0; var->weight = 0; @@ -8440,15 +7957,13 @@ int128_to_numericvar(int128 val, NumericVar *var) { ptr--; ndigits++; - newuval = uval / NBASE; - *ptr = uval - newuval * NBASE; - uval = newuval; - } while (uval); + int128_div_mod_int32(&val, NBASE, &dig); + *ptr = (NumericDigit) abs(dig); + } while (!int128_is_zero(val)); var->digits = ptr; var->ndigits = ndigits; var->weight = ndigits - 1; } -#endif /* * Convert a NumericVar to float8; if out of range, return +/- HUGE_VAL diff --git a/src/backend/utils/adt/orderedsetaggs.c b/src/backend/utils/adt/orderedsetaggs.c index 9457d23971581..c41b191be6217 100644 --- a/src/backend/utils/adt/orderedsetaggs.c +++ b/src/backend/utils/adt/orderedsetaggs.c @@ -1007,7 +1007,7 @@ percentile_cont_float8_multi_final(PG_FUNCTION_ARGS) FLOAT8OID, /* hard-wired info on type float8 */ sizeof(float8), - FLOAT8PASSBYVAL, + true, TYPALIGN_DOUBLE, float8_lerp); } diff --git a/src/backend/utils/adt/pg_lsn.c b/src/backend/utils/adt/pg_lsn.c index 12de2446f5b69..e1ec5f3bc69cf 100644 --- a/src/backend/utils/adt/pg_lsn.c +++ b/src/backend/utils/adt/pg_lsn.c @@ -25,8 +25,11 @@ * Formatting and conversion routines. *---------------------------------------------------------*/ +/* + * Internal version of pg_lsn_in() with support for soft error reporting. + */ XLogRecPtr -pg_lsn_in_internal(const char *str, bool *have_error) +pg_lsn_in_safe(const char *str, Node *escontext) { int len1, len2; @@ -34,22 +37,14 @@ pg_lsn_in_internal(const char *str, bool *have_error) off; XLogRecPtr result; - Assert(have_error != NULL); - *have_error = false; - /* Sanity check input format. */ len1 = strspn(str, "0123456789abcdefABCDEF"); if (len1 < 1 || len1 > MAXPG_LSNCOMPONENT || str[len1] != '/') - { - *have_error = true; - return InvalidXLogRecPtr; - } + goto syntax_error; + len2 = strspn(str + len1 + 1, "0123456789abcdefABCDEF"); if (len2 < 1 || len2 > MAXPG_LSNCOMPONENT || str[len1 + 1 + len2] != '\0') - { - *have_error = true; - return InvalidXLogRecPtr; - } + goto syntax_error; /* Decode result. 
*/ id = (uint32) strtoul(str, NULL, 16); @@ -57,6 +52,12 @@ pg_lsn_in_internal(const char *str, bool *have_error) result = ((uint64) id << 32) | off; return result; + +syntax_error: + ereturn(escontext, InvalidXLogRecPtr, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input syntax for type %s: \"%s\"", + "pg_lsn", str))); } Datum @@ -64,14 +65,8 @@ pg_lsn_in(PG_FUNCTION_ARGS) { char *str = PG_GETARG_CSTRING(0); XLogRecPtr result; - bool have_error = false; - - result = pg_lsn_in_internal(str, &have_error); - if (have_error) - ereturn(fcinfo->context, (Datum) 0, - (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), - errmsg("invalid input syntax for type %s: \"%s\"", - "pg_lsn", str))); + + result = pg_lsn_in_safe(str, fcinfo->context); PG_RETURN_LSN(result); } diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c index 1c12ddbae493c..c756c2bebaaa0 100644 --- a/src/backend/utils/adt/pgstatfuncs.c +++ b/src/backend/utils/adt/pgstatfuncs.c @@ -2171,7 +2171,7 @@ pg_stat_get_replication_slot(PG_FUNCTION_ARGS) Datum pg_stat_get_subscription_stats(PG_FUNCTION_ARGS) { -#define PG_STAT_GET_SUBSCRIPTION_STATS_COLS 11 +#define PG_STAT_GET_SUBSCRIPTION_STATS_COLS 12 Oid subid = PG_GETARG_OID(0); TupleDesc tupdesc; Datum values[PG_STAT_GET_SUBSCRIPTION_STATS_COLS] = {0}; @@ -2197,15 +2197,17 @@ pg_stat_get_subscription_stats(PG_FUNCTION_ARGS) INT8OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 6, "confl_update_exists", INT8OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 7, "confl_update_missing", + TupleDescInitEntry(tupdesc, (AttrNumber) 7, "confl_update_deleted", INT8OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 8, "confl_delete_origin_differs", + TupleDescInitEntry(tupdesc, (AttrNumber) 8, "confl_update_missing", INT8OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 9, "confl_delete_missing", + TupleDescInitEntry(tupdesc, (AttrNumber) 9, "confl_delete_origin_differs", INT8OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 10, "confl_multiple_unique_conflicts", + TupleDescInitEntry(tupdesc, (AttrNumber) 10, "confl_delete_missing", INT8OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 11, "stats_reset", + TupleDescInitEntry(tupdesc, (AttrNumber) 11, "confl_multiple_unique_conflicts", + INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber) 12, "stats_reset", TIMESTAMPTZOID, -1, 0); BlessTupleDesc(tupdesc); diff --git a/src/backend/utils/adt/pseudorandomfuncs.c b/src/backend/utils/adt/pseudorandomfuncs.c index e7b8045f92508..1d2a981491bf5 100644 --- a/src/backend/utils/adt/pseudorandomfuncs.c +++ b/src/backend/utils/adt/pseudorandomfuncs.c @@ -17,6 +17,7 @@ #include "common/pg_prng.h" #include "miscadmin.h" +#include "utils/date.h" #include "utils/fmgrprotos.h" #include "utils/numeric.h" #include "utils/timestamp.h" @@ -25,6 +26,18 @@ static pg_prng_state prng_state; static bool prng_seed_set = false; +/* + * Macro for checking the range bounds of random(min, max) functions. Throws + * an error if they're the wrong way round. 
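+ *
+ * For example, random(10, 1) trips this check and fails with
+ * ERRCODE_INVALID_PARAMETER_VALUE before the PRNG state is touched.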
+ */ +#define CHECK_RANGE_BOUNDS(rmin, rmax) \ + do { \ + if ((rmin) > (rmax)) \ + ereport(ERROR, \ + errcode(ERRCODE_INVALID_PARAMETER_VALUE), \ + errmsg("lower bound must be less than or equal to upper bound")); \ + } while (0) + /* * initialize_prng() - * @@ -129,10 +142,7 @@ int4random(PG_FUNCTION_ARGS) int32 rmax = PG_GETARG_INT32(1); int32 result; - if (rmin > rmax) - ereport(ERROR, - errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("lower bound must be less than or equal to upper bound")); + CHECK_RANGE_BOUNDS(rmin, rmax); initialize_prng(); @@ -153,10 +163,7 @@ int8random(PG_FUNCTION_ARGS) int64 rmax = PG_GETARG_INT64(1); int64 result; - if (rmin > rmax) - ereport(ERROR, - errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("lower bound must be less than or equal to upper bound")); + CHECK_RANGE_BOUNDS(rmin, rmax); initialize_prng(); @@ -177,9 +184,90 @@ numeric_random(PG_FUNCTION_ARGS) Numeric rmax = PG_GETARG_NUMERIC(1); Numeric result; + /* Leave range bound checking to random_numeric() */ + initialize_prng(); result = random_numeric(&prng_state, rmin, rmax); PG_RETURN_NUMERIC(result); } + + +/* + * date_random() - + * + * Returns a random date chosen uniformly in the specified range. + */ +Datum +date_random(PG_FUNCTION_ARGS) +{ + int32 rmin = (int32) PG_GETARG_DATEADT(0); + int32 rmax = (int32) PG_GETARG_DATEADT(1); + DateADT result; + + CHECK_RANGE_BOUNDS(rmin, rmax); + + if (DATE_IS_NOBEGIN(rmin) || DATE_IS_NOEND(rmax)) + ereport(ERROR, + errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("lower and upper bounds must be finite")); + + initialize_prng(); + + result = (DateADT) pg_prng_int64_range(&prng_state, rmin, rmax); + + PG_RETURN_DATEADT(result); +} + +/* + * timestamp_random() - + * + * Returns a random timestamp chosen uniformly in the specified range. + */ +Datum +timestamp_random(PG_FUNCTION_ARGS) +{ + int64 rmin = (int64) PG_GETARG_TIMESTAMP(0); + int64 rmax = (int64) PG_GETARG_TIMESTAMP(1); + Timestamp result; + + CHECK_RANGE_BOUNDS(rmin, rmax); + + if (TIMESTAMP_IS_NOBEGIN(rmin) || TIMESTAMP_IS_NOEND(rmax)) + ereport(ERROR, + errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("lower and upper bounds must be finite")); + + initialize_prng(); + + result = (Timestamp) pg_prng_int64_range(&prng_state, rmin, rmax); + + PG_RETURN_TIMESTAMP(result); +} + +/* + * timestamptz_random() - + * + * Returns a random timestamptz chosen uniformly in the specified range. 
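+ *
+ * As with date_random() and timestamp_random(), both bounds must be finite;
+ * 'infinity' and '-infinity' are rejected rather than clamped.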
+ */ +Datum +timestamptz_random(PG_FUNCTION_ARGS) +{ + int64 rmin = (int64) PG_GETARG_TIMESTAMPTZ(0); + int64 rmax = (int64) PG_GETARG_TIMESTAMPTZ(1); + TimestampTz result; + + CHECK_RANGE_BOUNDS(rmin, rmax); + + if (TIMESTAMP_IS_NOBEGIN(rmin) || TIMESTAMP_IS_NOEND(rmax)) + ereport(ERROR, + errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("lower and upper bounds must be finite")); + + initialize_prng(); + + result = (TimestampTz) pg_prng_int64_range(&prng_state, rmin, rmax); + + PG_RETURN_TIMESTAMPTZ(result); +} diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c index 66cc0acf4a712..18e467bccd3a0 100644 --- a/src/backend/utils/adt/rangetypes.c +++ b/src/backend/utils/adt/rangetypes.c @@ -285,8 +285,7 @@ range_send(PG_FUNCTION_ARGS) if (RANGE_HAS_LBOUND(flags)) { - Datum bound = PointerGetDatum(SendFunctionCall(&cache->typioproc, - lower.val)); + bytea *bound = SendFunctionCall(&cache->typioproc, lower.val); uint32 bound_len = VARSIZE(bound) - VARHDRSZ; char *bound_data = VARDATA(bound); @@ -296,8 +295,7 @@ range_send(PG_FUNCTION_ARGS) if (RANGE_HAS_UBOUND(flags)) { - Datum bound = PointerGetDatum(SendFunctionCall(&cache->typioproc, - upper.val)); + bytea *bound = SendFunctionCall(&cache->typioproc, upper.val); uint32 bound_len = VARSIZE(bound) - VARHDRSZ; char *bound_data = VARDATA(bound); @@ -1077,8 +1075,8 @@ range_union_internal(TypeCacheEntry *typcache, RangeType *r1, RangeType *r2, return r1; if (strict && - !DatumGetBool(range_overlaps_internal(typcache, r1, r2)) && - !DatumGetBool(range_adjacent_internal(typcache, r1, r2))) + !range_overlaps_internal(typcache, r1, r2) && + !range_adjacent_internal(typcache, r1, r2)) ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), errmsg("result of range union would not be contiguous"))); @@ -1345,9 +1343,9 @@ range_fast_cmp(Datum a, Datum b, SortSupport ssup) cmp = range_cmp_bounds(typcache, &upper1, &upper2); } - if ((Datum) range_a != a) + if ((Pointer) range_a != DatumGetPointer(a)) pfree(range_a); - if ((Datum) range_b != b) + if ((Pointer) range_b != DatumGetPointer(b)) pfree(range_b); return cmp; @@ -1358,7 +1356,7 @@ range_fast_cmp(Datum a, Datum b, SortSupport ssup) Datum range_lt(PG_FUNCTION_ARGS) { - int cmp = range_cmp(fcinfo); + int cmp = DatumGetInt32(range_cmp(fcinfo)); PG_RETURN_BOOL(cmp < 0); } @@ -1366,7 +1364,7 @@ range_lt(PG_FUNCTION_ARGS) Datum range_le(PG_FUNCTION_ARGS) { - int cmp = range_cmp(fcinfo); + int cmp = DatumGetInt32(range_cmp(fcinfo)); PG_RETURN_BOOL(cmp <= 0); } @@ -1374,7 +1372,7 @@ range_le(PG_FUNCTION_ARGS) Datum range_ge(PG_FUNCTION_ARGS) { - int cmp = range_cmp(fcinfo); + int cmp = DatumGetInt32(range_cmp(fcinfo)); PG_RETURN_BOOL(cmp >= 0); } @@ -1382,7 +1380,7 @@ range_ge(PG_FUNCTION_ARGS) Datum range_gt(PG_FUNCTION_ARGS) { - int cmp = range_cmp(fcinfo); + int cmp = DatumGetInt32(range_cmp(fcinfo)); PG_RETURN_BOOL(cmp > 0); } @@ -1444,7 +1442,7 @@ hash_range(PG_FUNCTION_ARGS) upper_hash = 0; /* Merge hashes of flags and bounds */ - result = hash_uint32((uint32) flags); + result = hash_bytes_uint32((uint32) flags); result ^= lower_hash; result = pg_rotate_left32(result, 1); result ^= upper_hash; diff --git a/src/backend/utils/adt/rangetypes_spgist.c b/src/backend/utils/adt/rangetypes_spgist.c index 9b6d7061a1812..be51965488088 100644 --- a/src/backend/utils/adt/rangetypes_spgist.c +++ b/src/backend/utils/adt/rangetypes_spgist.c @@ -757,7 +757,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS) * because it's range */ previousCentroid = datumCopy(in->prefixDatum, false, 
-1); - out->traversalValues[out->nNodes] = (void *) previousCentroid; + out->traversalValues[out->nNodes] = DatumGetPointer(previousCentroid); } out->nodeNumbers[out->nNodes] = i - 1; out->nNodes++; diff --git a/src/backend/utils/adt/rangetypes_typanalyze.c b/src/backend/utils/adt/rangetypes_typanalyze.c index a18196d8a34a5..36e885af2dd17 100644 --- a/src/backend/utils/adt/rangetypes_typanalyze.c +++ b/src/backend/utils/adt/rangetypes_typanalyze.c @@ -397,7 +397,7 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, stats->numvalues[slot_idx] = num_hist; stats->statypid[slot_idx] = FLOAT8OID; stats->statyplen[slot_idx] = sizeof(float8); - stats->statypbyval[slot_idx] = FLOAT8PASSBYVAL; + stats->statypbyval[slot_idx] = true; stats->statypalign[slot_idx] = 'd'; /* Store the fraction of empty ranges */ diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c index b8bbe95e82eb8..af17a3421a02d 100644 --- a/src/backend/utils/adt/regproc.c +++ b/src/backend/utils/adt/regproc.c @@ -25,12 +25,12 @@ #include "catalog/namespace.h" #include "catalog/pg_class.h" #include "catalog/pg_collation.h" +#include "catalog/pg_database.h" #include "catalog/pg_operator.h" #include "catalog/pg_proc.h" #include "catalog/pg_ts_config.h" #include "catalog/pg_ts_dict.h" #include "catalog/pg_type.h" -#include "commands/dbcommands.h" #include "lib/stringinfo.h" #include "mb/pg_wchar.h" #include "miscadmin.h" diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c index fe5edc0027da3..9e5449f17d7c0 100644 --- a/src/backend/utils/adt/rowtypes.c +++ b/src/backend/utils/adt/rowtypes.c @@ -1529,9 +1529,9 @@ record_image_cmp(FunctionCallInfo fcinfo) if ((cmpresult == 0) && (len1 != len2)) cmpresult = (len1 < len2) ? -1 : 1; - if ((Pointer) arg1val != (Pointer) values1[i1]) + if ((Pointer) arg1val != DatumGetPointer(values1[i1])) pfree(arg1val); - if ((Pointer) arg2val != (Pointer) values2[i2]) + if ((Pointer) arg2val != DatumGetPointer(values2[i2])) pfree(arg2val); } else diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index 17fbfa9b41063..1c480cfaaf781 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -5288,8 +5288,8 @@ ReleaseDummy(HeapTuple tuple) * unique for this query. (Caution: this should be trusted for * statistical purposes only, since we do not check indimmediate nor * verify that the exact same definition of equality applies.) - * acl_ok: true if current user has permission to read the column(s) - * underlying the pg_statistic entry. This is consulted by + * acl_ok: true if current user has permission to read all table rows from + * the column(s) underlying the pg_statistic entry. This is consulted by * statistic_proc_security_check(). * * Caller is responsible for doing ReleaseVariableStats() before exiting. @@ -5408,7 +5408,6 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid, */ ListCell *ilist; ListCell *slist; - Oid userid; /* * The nullingrels bits within the expression could prevent us from @@ -5418,17 +5417,6 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid, if (bms_overlap(varnos, root->outer_join_rels)) node = remove_nulling_relids(node, root->outer_join_rels, NULL); - /* - * Determine the user ID to use for privilege checks: either - * onerel->userid if it's set (e.g., in case we're accessing the table - * via a view), or the current user otherwise. 
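
Several of the hunks above (rangetypes.c, rowtypes.c) tighten Datum/pointer conversions: a pass-by-reference Datum must be turned back into a pointer with DatumGetPointer() rather than a bare cast. A toy model of the "free only if detoasting made a copy" idiom those hunks preserve; ToyDatum and toy_detoast() are illustrative stand-ins, not PostgreSQL APIs.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef uintptr_t ToyDatum;     /* stand-in for Datum */

static ToyDatum
toy_pointer_get_datum(const void *p)
{
    return (ToyDatum) p;
}

static char *
toy_datum_get_pointer(ToyDatum d)
{
    return (char *) d;
}

/* Stand-in for detoasting: may return the input or a fresh copy. */
static char *
toy_detoast(ToyDatum d, int make_copy)
{
    char *src = toy_datum_get_pointer(d);

    return make_copy ? strdup(src) : src;
}

int
main(void)
{
    char     stored[] = "possibly-toasted value";
    ToyDatum d = toy_pointer_get_datum(stored);
    char    *val = toy_detoast(d, 1);

    /* Compare pointers via the conversion function, not by casting Datums */
    if (val != toy_datum_get_pointer(d))
        free(val);
    puts("freed only the detoasted copy");
    return 0;
}
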
- * - * If we drill down to child relations, we keep using the same userid: - * it's going to be the same anyway, due to how we set up the relation - * tree (q.v. build_simple_rel). - */ - userid = OidIsValid(onerel->userid) ? onerel->userid : GetUserId(); - foreach(ilist, onerel->indexlist) { IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist); @@ -5496,69 +5484,32 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid, if (HeapTupleIsValid(vardata->statsTuple)) { - /* Get index's table for permission check */ - RangeTblEntry *rte; - - rte = planner_rt_fetch(index->rel->relid, root); - Assert(rte->rtekind == RTE_RELATION); - /* + * Test if user has permission to access all + * rows from the index's table. + * * For simplicity, we insist on the whole * table being selectable, rather than trying * to identify which column(s) the index - * depends on. Also require all rows to be - * selectable --- there must be no - * securityQuals from security barrier views - * or RLS policies. + * depends on. + * + * Note that for an inheritance child, + * permissions are checked on the inheritance + * root parent, and whole-table select + * privilege on the parent doesn't quite + * guarantee that the user could read all + * columns of the child. But in practice it's + * unlikely that any interesting security + * violation could result from allowing access + * to the expression index's stats, so we + * allow it anyway. See similar code in + * examine_simple_variable() for additional + * comments. */ vardata->acl_ok = - rte->securityQuals == NIL && - (pg_class_aclcheck(rte->relid, userid, - ACL_SELECT) == ACLCHECK_OK); - - /* - * If the user doesn't have permissions to - * access an inheritance child relation, check - * the permissions of the table actually - * mentioned in the query, since most likely - * the user does have that permission. Note - * that whole-table select privilege on the - * parent doesn't quite guarantee that the - * user could read all columns of the child. - * But in practice it's unlikely that any - * interesting security violation could result - * from allowing access to the expression - * index's stats, so we allow it anyway. See - * similar code in examine_simple_variable() - * for additional comments. - */ - if (!vardata->acl_ok && - root->append_rel_array != NULL) - { - AppendRelInfo *appinfo; - Index varno = index->rel->relid; - - appinfo = root->append_rel_array[varno]; - while (appinfo && - planner_rt_fetch(appinfo->parent_relid, - root)->rtekind == RTE_RELATION) - { - varno = appinfo->parent_relid; - appinfo = root->append_rel_array[varno]; - } - if (varno != index->rel->relid) - { - /* Repeat access check on this rel */ - rte = planner_rt_fetch(varno, root); - Assert(rte->rtekind == RTE_RELATION); - - vardata->acl_ok = - rte->securityQuals == NIL && - (pg_class_aclcheck(rte->relid, - userid, - ACL_SELECT) == ACLCHECK_OK); - } - } + all_rows_selectable(root, + index->rel->relid, + NULL); } else { @@ -5628,58 +5579,26 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid, vardata->freefunc = ReleaseDummy; /* + * Test if user has permission to access all rows from the + * table. + * * For simplicity, we insist on the whole table being * selectable, rather than trying to identify which - * column(s) the statistics object depends on. Also - * require all rows to be selectable --- there must be no - * securityQuals from security barrier views or RLS - * policies. + * column(s) the statistics object depends on. 
+ * + * Note that for an inheritance child, permissions are + * checked on the inheritance root parent, and whole-table + * select privilege on the parent doesn't quite guarantee + * that the user could read all columns of the child. But + * in practice it's unlikely that any interesting security + * violation could result from allowing access to the + * expression stats, so we allow it anyway. See similar + * code in examine_simple_variable() for additional + * comments. */ - vardata->acl_ok = - rte->securityQuals == NIL && - (pg_class_aclcheck(rte->relid, userid, - ACL_SELECT) == ACLCHECK_OK); - - /* - * If the user doesn't have permissions to access an - * inheritance child relation, check the permissions of - * the table actually mentioned in the query, since most - * likely the user does have that permission. Note that - * whole-table select privilege on the parent doesn't - * quite guarantee that the user could read all columns of - * the child. But in practice it's unlikely that any - * interesting security violation could result from - * allowing access to the expression stats, so we allow it - * anyway. See similar code in examine_simple_variable() - * for additional comments. - */ - if (!vardata->acl_ok && - root->append_rel_array != NULL) - { - AppendRelInfo *appinfo; - Index varno = onerel->relid; - - appinfo = root->append_rel_array[varno]; - while (appinfo && - planner_rt_fetch(appinfo->parent_relid, - root)->rtekind == RTE_RELATION) - { - varno = appinfo->parent_relid; - appinfo = root->append_rel_array[varno]; - } - if (varno != onerel->relid) - { - /* Repeat access check on this rel */ - rte = planner_rt_fetch(varno, root); - Assert(rte->rtekind == RTE_RELATION); - - vardata->acl_ok = - rte->securityQuals == NIL && - (pg_class_aclcheck(rte->relid, - userid, - ACL_SELECT) == ACLCHECK_OK); - } - } + vardata->acl_ok = all_rows_selectable(root, + onerel->relid, + NULL); break; } @@ -5734,109 +5653,20 @@ examine_simple_variable(PlannerInfo *root, Var *var, if (HeapTupleIsValid(vardata->statsTuple)) { - RelOptInfo *onerel = find_base_rel_noerr(root, var->varno); - Oid userid; - /* - * Check if user has permission to read this column. We require - * all rows to be accessible, so there must be no securityQuals - * from security barrier views or RLS policies. + * Test if user has permission to read all rows from this column. * - * Normally the Var will have an associated RelOptInfo from which - * we can find out which userid to do the check as; but it might - * not if it's a RETURNING Var for an INSERT target relation. In - * that case use the RTEPermissionInfo associated with the RTE. + * This requires that the user has the appropriate SELECT + * privileges and that there are no securityQuals from security + * barrier views or RLS policies. If that's not the case, then we + * only permit leakproof functions to be passed pg_statistic data + * in vardata, otherwise the functions might reveal data that the + * user doesn't have permission to see --- see + * statistic_proc_security_check(). 
*/ - if (onerel) - userid = onerel->userid; - else - { - RTEPermissionInfo *perminfo; - - perminfo = getRTEPermissionInfo(root->parse->rteperminfos, rte); - userid = perminfo->checkAsUser; - } - if (!OidIsValid(userid)) - userid = GetUserId(); - vardata->acl_ok = - rte->securityQuals == NIL && - ((pg_class_aclcheck(rte->relid, userid, - ACL_SELECT) == ACLCHECK_OK) || - (pg_attribute_aclcheck(rte->relid, var->varattno, userid, - ACL_SELECT) == ACLCHECK_OK)); - - /* - * If the user doesn't have permissions to access an inheritance - * child relation or specifically this attribute, check the - * permissions of the table/column actually mentioned in the - * query, since most likely the user does have that permission - * (else the query will fail at runtime), and if the user can read - * the column there then he can get the values of the child table - * too. To do that, we must find out which of the root parent's - * attributes the child relation's attribute corresponds to. - */ - if (!vardata->acl_ok && var->varattno > 0 && - root->append_rel_array != NULL) - { - AppendRelInfo *appinfo; - Index varno = var->varno; - int varattno = var->varattno; - bool found = false; - - appinfo = root->append_rel_array[varno]; - - /* - * Partitions are mapped to their immediate parent, not the - * root parent, so must be ready to walk up multiple - * AppendRelInfos. But stop if we hit a parent that is not - * RTE_RELATION --- that's a flattened UNION ALL subquery, not - * an inheritance parent. - */ - while (appinfo && - planner_rt_fetch(appinfo->parent_relid, - root)->rtekind == RTE_RELATION) - { - int parent_varattno; - - found = false; - if (varattno <= 0 || varattno > appinfo->num_child_cols) - break; /* safety check */ - parent_varattno = appinfo->parent_colnos[varattno - 1]; - if (parent_varattno == 0) - break; /* Var is local to child */ - - varno = appinfo->parent_relid; - varattno = parent_varattno; - found = true; - - /* If the parent is itself a child, continue up. */ - appinfo = root->append_rel_array[varno]; - } - - /* - * In rare cases, the Var may be local to the child table, in - * which case, we've got to live with having no access to this - * column's stats. - */ - if (!found) - return; - - /* Repeat the access check on this parent rel & column */ - rte = planner_rt_fetch(varno, root); - Assert(rte->rtekind == RTE_RELATION); - - /* - * Fine to use the same userid as it's the same in all - * relations of a given inheritance tree. - */ - vardata->acl_ok = - rte->securityQuals == NIL && - ((pg_class_aclcheck(rte->relid, userid, - ACL_SELECT) == ACLCHECK_OK) || - (pg_attribute_aclcheck(rte->relid, varattno, userid, - ACL_SELECT) == ACLCHECK_OK)); - } + all_rows_selectable(root, var->varno, + bms_make_singleton(var->varattno - FirstLowInvalidHeapAttributeNumber)); } else { @@ -6033,6 +5863,214 @@ examine_simple_variable(PlannerInfo *root, Var *var, } } +/* + * all_rows_selectable + * Test whether the user has permission to select all rows from a given + * relation. + * + * Inputs: + * root: the planner info + * varno: the index of the relation (assumed to be an RTE_RELATION) + * varattnos: the attributes for which permission is required, or NULL if + * whole-table access is required + * + * Returns true if the user has the required select permissions, and there are + * no securityQuals from security barrier views or RLS policies. 
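
The bms_make_singleton() call above illustrates the varattnos convention documented here: attribute numbers can be zero (whole-row) or negative (system columns), so they are offset by FirstLowInvalidHeapAttributeNumber before going into a Bitmapset, which holds only non-negative members. A toy sketch with a plain 32-bit word standing in for Bitmapset; the -7 mirrors the value in PostgreSQL's sysattr.h but is illustrative here.

#include <stdio.h>

#define TOY_FIRST_LOW_INVALID_ATTNO (-7)  /* cf. FirstLowInvalidHeapAttributeNumber */

int
main(void)
{
    int          attnos[] = {-1, 0, 3}; /* system column, whole-row, user column */
    unsigned int bitmap = 0;            /* toy Bitmapset */

    for (int i = 0; i < 3; i++)
        bitmap |= 1u << (attnos[i] - TOY_FIRST_LOW_INVALID_ATTNO);

    /* recover the original attnos by undoing the offset */
    for (int bit = 0; bit < 32; bit++)
        if (bitmap & (1u << bit))
            printf("attno %d\n", bit + TOY_FIRST_LOW_INVALID_ATTNO);
    return 0;
}
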
+ * + * Note that if the relation is an inheritance child relation, securityQuals + * and access permissions are checked against the inheritance root parent (the + * relation actually mentioned in the query) --- see the comments in + * expand_single_inheritance_child() for an explanation of why it has to be + * done this way. + * + * If varattnos is non-NULL, its attribute numbers should be offset by + * FirstLowInvalidHeapAttributeNumber so that system attributes can be + * checked. If varattnos is NULL, only table-level SELECT privileges are + * checked, not any column-level privileges. + * + * Note: if the relation is accessed via a view, this function actually tests + * whether the view owner has permission to select from the relation. To + * ensure that the current user has permission, it is also necessary to check + * that the current user has permission to select from the view, which we do + * at planner-startup --- see subquery_planner(). + * + * This is exported so that other estimation functions can use it. + */ +bool +all_rows_selectable(PlannerInfo *root, Index varno, Bitmapset *varattnos) +{ + RelOptInfo *rel = find_base_rel_noerr(root, varno); + RangeTblEntry *rte = planner_rt_fetch(varno, root); + Oid userid; + int varattno; + + Assert(rte->rtekind == RTE_RELATION); + + /* + * Determine the user ID to use for privilege checks (either the current + * user or the view owner, if we're accessing the table via a view). + * + * Normally the relation will have an associated RelOptInfo from which we + * can find the userid, but it might not if it's a RETURNING Var for an + * INSERT target relation. In that case use the RTEPermissionInfo + * associated with the RTE. + * + * If we navigate up to a parent relation, we keep using the same userid, + * since it's the same in all relations of a given inheritance tree. + */ + if (rel) + userid = rel->userid; + else + { + RTEPermissionInfo *perminfo; + + perminfo = getRTEPermissionInfo(root->parse->rteperminfos, rte); + userid = perminfo->checkAsUser; + } + if (!OidIsValid(userid)) + userid = GetUserId(); + + /* + * Permissions and securityQuals must be checked on the table actually + * mentioned in the query, so if this is an inheritance child, navigate up + * to the inheritance root parent. If the user can read the whole table + * or the required columns there, then they can read from the child table + * too. For per-column checks, we must find out which of the root + * parent's attributes the child relation's attributes correspond to. + */ + if (root->append_rel_array != NULL) + { + AppendRelInfo *appinfo; + + appinfo = root->append_rel_array[varno]; + + /* + * Partitions are mapped to their immediate parent, not the root + * parent, so must be ready to walk up multiple AppendRelInfos. But + * stop if we hit a parent that is not RTE_RELATION --- that's a + * flattened UNION ALL subquery, not an inheritance parent. + */ + while (appinfo && + planner_rt_fetch(appinfo->parent_relid, + root)->rtekind == RTE_RELATION) + { + Bitmapset *parent_varattnos = NULL; + + /* + * For each child attribute, find the corresponding parent + * attribute. In rare cases, the attribute may be local to the + * child table, in which case, we've got to live with having no + * access to this column. 
+ */ + varattno = -1; + while ((varattno = bms_next_member(varattnos, varattno)) >= 0) + { + AttrNumber attno; + AttrNumber parent_attno; + + attno = varattno + FirstLowInvalidHeapAttributeNumber; + + if (attno == InvalidAttrNumber) + { + /* + * Whole-row reference, so must map each column of the + * child to the parent table. + */ + for (attno = 1; attno <= appinfo->num_child_cols; attno++) + { + parent_attno = appinfo->parent_colnos[attno - 1]; + if (parent_attno == 0) + return false; /* attr is local to child */ + parent_varattnos = + bms_add_member(parent_varattnos, + parent_attno - FirstLowInvalidHeapAttributeNumber); + } + } + else + { + if (attno < 0) + { + /* System attnos are the same in all tables */ + parent_attno = attno; + } + else + { + if (attno > appinfo->num_child_cols) + return false; /* safety check */ + parent_attno = appinfo->parent_colnos[attno - 1]; + if (parent_attno == 0) + return false; /* attr is local to child */ + } + parent_varattnos = + bms_add_member(parent_varattnos, + parent_attno - FirstLowInvalidHeapAttributeNumber); + } + } + + /* If the parent is itself a child, continue up */ + varno = appinfo->parent_relid; + varattnos = parent_varattnos; + appinfo = root->append_rel_array[varno]; + } + + /* Perform the access check on this parent rel */ + rte = planner_rt_fetch(varno, root); + Assert(rte->rtekind == RTE_RELATION); + } + + /* + * For all rows to be accessible, there must be no securityQuals from + * security barrier views or RLS policies. + */ + if (rte->securityQuals != NIL) + return false; + + /* + * Test for table-level SELECT privilege. + * + * If varattnos is non-NULL, this is sufficient to give access to all + * requested attributes, even for a child table, since we have verified + * that all required child columns have matching parent columns. + * + * If varattnos is NULL (whole-table access requested), this doesn't + * necessarily guarantee that the user can read all columns of a child + * table, but we allow it anyway (see comments in examine_variable()) and + * don't bother checking any column privileges. + */ + if (pg_class_aclcheck(rte->relid, userid, ACL_SELECT) == ACLCHECK_OK) + return true; + + if (varattnos == NULL) + return false; /* whole-table access requested */ + + /* + * Don't have table-level SELECT privilege, so check per-column + * privileges. + */ + varattno = -1; + while ((varattno = bms_next_member(varattnos, varattno)) >= 0) + { + AttrNumber attno = varattno + FirstLowInvalidHeapAttributeNumber; + + if (attno == InvalidAttrNumber) + { + /* Whole-row reference, so must have access to all columns */ + if (pg_attribute_aclcheck_all(rte->relid, userid, ACL_SELECT, + ACLMASK_ALL) != ACLCHECK_OK) + return false; + } + else + { + if (pg_attribute_aclcheck(rte->relid, attno, userid, + ACL_SELECT) != ACLCHECK_OK) + return false; + } + } + + /* If we reach here, have all required column privileges */ + return true; +} + /* * examine_indexcol_variable * Try to look up statistical data about an index column/expression. @@ -6121,15 +6159,17 @@ examine_indexcol_variable(PlannerInfo *root, IndexOptInfo *index, /* * Check whether it is permitted to call func_oid passing some of the - * pg_statistic data in vardata. We allow this either if the user has SELECT - * privileges on the table or column underlying the pg_statistic data or if - * the function is marked leakproof. + * pg_statistic data in vardata. 
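
The loop above maps each child attribute to the corresponding attribute of the inheritance parent via AppendRelInfo's parent_colnos array. A toy sketch of that translation step under the stated rules (system attnos are shared; a zero entry means the column is local to the child); the array contents and toy_* names are hypothetical.

#include <stdio.h>

#define TOY_NUM_CHILD_COLS 3

/* toy parent_colnos: entry i is the parent attno of child attno i+1 */
static const int toy_parent_colnos[TOY_NUM_CHILD_COLS] = {1, 0, 2};

/* Returns the parent attno, or 0 if the column cannot be mapped. */
static int
toy_map_attno(int child_attno)
{
    if (child_attno < 0)
        return child_attno;     /* system attnos are the same in all tables */
    if (child_attno < 1 || child_attno > TOY_NUM_CHILD_COLS)
        return 0;               /* out of range: treat as unmappable */
    return toy_parent_colnos[child_attno - 1];
}

int
main(void)
{
    for (int attno = 1; attno <= TOY_NUM_CHILD_COLS; attno++)
        printf("child attno %d -> parent attno %d\n",
               attno, toy_map_attno(attno));
    return 0;
}

A zero result is where the real code gives up ("attr is local to child") and reports that not all rows are provably selectable.
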
We allow this if either of the following + * conditions is met: (1) the user has SELECT privileges on the table or + * column underlying the pg_statistic data and there are no securityQuals from + * security barrier views or RLS policies, or (2) the function is marked + * leakproof. */ bool statistic_proc_security_check(VariableStatData *vardata, Oid func_oid) { if (vardata->acl_ok) - return true; + return true; /* have SELECT privs and no securityQuals */ if (!OidIsValid(func_oid)) return false; diff --git a/src/backend/utils/adt/tid.c b/src/backend/utils/adt/tid.c index 1b0df1117171a..39dab3e42df58 100644 --- a/src/backend/utils/adt/tid.c +++ b/src/backend/utils/adt/tid.c @@ -84,7 +84,7 @@ tidin(PG_FUNCTION_ARGS) /* * Cope with possibility that unsigned long is wider than BlockNumber, in * which case strtoul will not raise an error for some values that are out - * of the range of BlockNumber. (See similar code in oidin().) + * of the range of BlockNumber. (See similar code in uint32in_subr().) */ #if SIZEOF_LONG > 4 if (cvt != (unsigned long) blockNumber && diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c index 25cff56c3d07e..156a4830ffda6 100644 --- a/src/backend/utils/adt/timestamp.c +++ b/src/backend/utils/adt/timestamp.c @@ -2275,33 +2275,12 @@ timestamp_cmp(PG_FUNCTION_ARGS) PG_RETURN_INT32(timestamp_cmp_internal(dt1, dt2)); } -#if SIZEOF_DATUM < 8 -/* note: this is used for timestamptz also */ -static int -timestamp_fastcmp(Datum x, Datum y, SortSupport ssup) -{ - Timestamp a = DatumGetTimestamp(x); - Timestamp b = DatumGetTimestamp(y); - - return timestamp_cmp_internal(a, b); -} -#endif - Datum timestamp_sortsupport(PG_FUNCTION_ARGS) { SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0); -#if SIZEOF_DATUM >= 8 - - /* - * If this build has pass-by-value timestamps, then we can use a standard - * comparator function. 
- */ ssup->comparator = ssup_datum_signed_cmp; -#else - ssup->comparator = timestamp_fastcmp; -#endif PG_RETURN_VOID(); } @@ -4954,7 +4933,7 @@ timestamptz_trunc_internal(text *units, TimestampTz timestamp, pg_tz *tzp) case DTK_SECOND: case DTK_MILLISEC: case DTK_MICROSEC: - PG_RETURN_TIMESTAMPTZ(timestamp); + return timestamp; break; default: @@ -5650,11 +5629,11 @@ timestamp_part_common(PG_FUNCTION_ARGS, bool retnumeric) case DTK_JULIAN: if (retnumeric) - PG_RETURN_NUMERIC(numeric_add_opt_error(int64_to_numeric(date2j(tm->tm_year, tm->tm_mon, tm->tm_mday)), - numeric_div_opt_error(int64_to_numeric(((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec) * INT64CONST(1000000) + fsec), - int64_to_numeric(SECS_PER_DAY * INT64CONST(1000000)), - NULL), - NULL)); + PG_RETURN_NUMERIC(numeric_add_safe(int64_to_numeric(date2j(tm->tm_year, tm->tm_mon, tm->tm_mday)), + numeric_div_safe(int64_to_numeric(((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec) * INT64CONST(1000000) + fsec), + int64_to_numeric(SECS_PER_DAY * INT64CONST(1000000)), + NULL), + NULL)); else PG_RETURN_FLOAT8(date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) + ((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) + @@ -5706,11 +5685,11 @@ timestamp_part_common(PG_FUNCTION_ARGS, bool retnumeric) result = int64_div_fast_to_numeric(timestamp - epoch, 6); else { - result = numeric_div_opt_error(numeric_sub_opt_error(int64_to_numeric(timestamp), - int64_to_numeric(epoch), - NULL), - int64_to_numeric(1000000), - NULL); + result = numeric_div_safe(numeric_sub_safe(int64_to_numeric(timestamp), + int64_to_numeric(epoch), + NULL), + int64_to_numeric(1000000), + NULL); result = DatumGetNumeric(DirectFunctionCall2(numeric_round, NumericGetDatum(result), Int32GetDatum(6))); @@ -5924,11 +5903,11 @@ timestamptz_part_common(PG_FUNCTION_ARGS, bool retnumeric) case DTK_JULIAN: if (retnumeric) - PG_RETURN_NUMERIC(numeric_add_opt_error(int64_to_numeric(date2j(tm->tm_year, tm->tm_mon, tm->tm_mday)), - numeric_div_opt_error(int64_to_numeric(((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec) * INT64CONST(1000000) + fsec), - int64_to_numeric(SECS_PER_DAY * INT64CONST(1000000)), - NULL), - NULL)); + PG_RETURN_NUMERIC(numeric_add_safe(int64_to_numeric(date2j(tm->tm_year, tm->tm_mon, tm->tm_mday)), + numeric_div_safe(int64_to_numeric(((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec) * INT64CONST(1000000) + fsec), + int64_to_numeric(SECS_PER_DAY * INT64CONST(1000000)), + NULL), + NULL)); else PG_RETURN_FLOAT8(date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) + ((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) + @@ -5977,11 +5956,11 @@ timestamptz_part_common(PG_FUNCTION_ARGS, bool retnumeric) result = int64_div_fast_to_numeric(timestamp - epoch, 6); else { - result = numeric_div_opt_error(numeric_sub_opt_error(int64_to_numeric(timestamp), - int64_to_numeric(epoch), - NULL), - int64_to_numeric(1000000), - NULL); + result = numeric_div_safe(numeric_sub_safe(int64_to_numeric(timestamp), + int64_to_numeric(epoch), + NULL), + int64_to_numeric(1000000), + NULL); result = DatumGetNumeric(DirectFunctionCall2(numeric_round, NumericGetDatum(result), Int32GetDatum(6))); @@ -6268,9 +6247,9 @@ interval_part_common(PG_FUNCTION_ARGS, bool retnumeric) result = int64_div_fast_to_numeric(val, 6); else result = - numeric_add_opt_error(int64_div_fast_to_numeric(interval->time, 6), - int64_to_numeric(secs_from_day_month), - NULL); + 
numeric_add_safe(int64_div_fast_to_numeric(interval->time, 6), + int64_to_numeric(secs_from_day_month), + NULL); PG_RETURN_NUMERIC(result); } diff --git a/src/backend/utils/adt/tsvector_op.c b/src/backend/utils/adt/tsvector_op.c index 1fa1275ca63b2..0625da9532f6c 100644 --- a/src/backend/utils/adt/tsvector_op.c +++ b/src/backend/utils/adt/tsvector_op.c @@ -329,8 +329,8 @@ tsvector_setweight_by_filter(PG_FUNCTION_ARGS) if (nulls[i]) continue; - lex = VARDATA(dlexemes[i]); - lex_len = VARSIZE(dlexemes[i]) - VARHDRSZ; + lex = VARDATA(DatumGetPointer(dlexemes[i])); + lex_len = VARSIZE(DatumGetPointer(dlexemes[i])) - VARHDRSZ; lex_pos = tsvector_bsearch(tsout, lex, lex_len); if (lex_pos >= 0 && (j = POSDATALEN(tsout, entry + lex_pos)) != 0) @@ -443,10 +443,10 @@ compare_text_lexemes(const void *va, const void *vb) { Datum a = *((const Datum *) va); Datum b = *((const Datum *) vb); - char *alex = VARDATA_ANY(a); - int alex_len = VARSIZE_ANY_EXHDR(a); - char *blex = VARDATA_ANY(b); - int blex_len = VARSIZE_ANY_EXHDR(b); + char *alex = VARDATA_ANY(DatumGetPointer(a)); + int alex_len = VARSIZE_ANY_EXHDR(DatumGetPointer(a)); + char *blex = VARDATA_ANY(DatumGetPointer(b)); + int blex_len = VARSIZE_ANY_EXHDR(DatumGetPointer(b)); return tsCompareString(alex, alex_len, blex, blex_len, false); } @@ -605,8 +605,8 @@ tsvector_delete_arr(PG_FUNCTION_ARGS) if (nulls[i]) continue; - lex = VARDATA(dlexemes[i]); - lex_len = VARSIZE(dlexemes[i]) - VARHDRSZ; + lex = VARDATA(DatumGetPointer(dlexemes[i])); + lex_len = VARSIZE(DatumGetPointer(dlexemes[i])) - VARHDRSZ; lex_pos = tsvector_bsearch(tsin, lex, lex_len); if (lex_pos >= 0) @@ -770,7 +770,7 @@ array_to_tsvector(PG_FUNCTION_ARGS) (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("lexeme array may not contain nulls"))); - if (VARSIZE(dlexemes[i]) - VARHDRSZ == 0) + if (VARSIZE(DatumGetPointer(dlexemes[i])) - VARHDRSZ == 0) ereport(ERROR, (errcode(ERRCODE_ZERO_LENGTH_CHARACTER_STRING), errmsg("lexeme array may not contain empty strings"))); @@ -786,7 +786,7 @@ array_to_tsvector(PG_FUNCTION_ARGS) /* Calculate space needed for surviving lexemes. */ for (i = 0; i < nitems; i++) - datalen += VARSIZE(dlexemes[i]) - VARHDRSZ; + datalen += VARSIZE(DatumGetPointer(dlexemes[i])) - VARHDRSZ; tslen = CALCDATASIZE(nitems, datalen); /* Allocate and fill tsvector. 
*/ @@ -798,8 +798,8 @@ array_to_tsvector(PG_FUNCTION_ARGS) cur = STRPTR(tsout); for (i = 0; i < nitems; i++) { - char *lex = VARDATA(dlexemes[i]); - int lex_len = VARSIZE(dlexemes[i]) - VARHDRSZ; + char *lex = VARDATA(DatumGetPointer(dlexemes[i])); + int lex_len = VARSIZE(DatumGetPointer(dlexemes[i])) - VARHDRSZ; memcpy(cur, lex, lex_len); arrout[i].haspos = 0; diff --git a/src/backend/utils/adt/uuid.c b/src/backend/utils/adt/uuid.c index bce7309c1833a..e5f27ff892ba6 100644 --- a/src/backend/utils/adt/uuid.c +++ b/src/backend/utils/adt/uuid.c @@ -398,11 +398,7 @@ uuid_abbrev_convert(Datum original, SortSupport ssup) { uint32 tmp; -#if SIZEOF_DATUM == 8 - tmp = (uint32) res ^ (uint32) ((uint64) res >> 32); -#else /* SIZEOF_DATUM != 8 */ - tmp = (uint32) res; -#endif + tmp = DatumGetUInt32(res) ^ (uint32) (DatumGetUInt64(res) >> 32); addHyperLogLog(&uss->abbr_card, DatumGetUInt32(hash_uint32(tmp))); } @@ -752,7 +748,7 @@ uuid_extract_timestamp(PG_FUNCTION_ARGS) + (((uint64) uuid->data[0]) << 40); /* convert ms to us, then adjust */ - ts = (TimestampTz) (tms * NS_PER_US) - + ts = (TimestampTz) (tms * US_PER_MS) - (POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY * USECS_PER_SEC; PG_RETURN_TIMESTAMPTZ(ts); diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index ffae8c23abfaf..2c398cd9e5cb1 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -408,13 +408,12 @@ text_length(Datum str) { /* fastpath when max encoding length is one */ if (pg_database_encoding_max_length() == 1) - PG_RETURN_INT32(toast_raw_datum_size(str) - VARHDRSZ); + return (toast_raw_datum_size(str) - VARHDRSZ); else { text *t = DatumGetTextPP(str); - PG_RETURN_INT32(pg_mbstrlen_with_len(VARDATA_ANY(t), - VARSIZE_ANY_EXHDR(t))); + return (pg_mbstrlen_with_len(VARDATA_ANY(t), VARSIZE_ANY_EXHDR(t))); } } @@ -1672,14 +1671,13 @@ varstr_sortsupport(SortSupport ssup, Oid typid, Oid collid) * * Even apart from the risk of broken locales, it's possible that * there are platforms where the use of abbreviated keys should be - * disabled at compile time. Having only 4 byte datums could make - * worst-case performance drastically more likely, for example. - * Moreover, macOS's strxfrm() implementation is known to not - * effectively concentrate a significant amount of entropy from the - * original string in earlier transformed blobs. It's possible that - * other supported platforms are similarly encumbered. So, if we ever - * get past disabling this categorically, we may still want or need to - * disable it for particular platforms. + * disabled at compile time. For example, macOS's strxfrm() + * implementation is known to not effectively concentrate a + * significant amount of entropy from the original string in earlier + * transformed blobs. It's possible that other supported platforms + * are similarly encumbered. So, if we ever get past disabling this + * categorically, we may still want or need to disable it for + * particular platforms. 
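
The uuid.c hunk above (and the varlena.c hunk below) replace SIZEOF_DATUM conditionals with an unconditional xor-fold, since Datum is now always 64 bits wide: both halves of the abbreviated key are mixed into the 32-bit value fed to the HyperLogLog hash. A minimal sketch of the fold:

#include <stdint.h>
#include <stdio.h>

/* Fold a 64-bit abbreviated key to 32 bits, keeping entropy from both halves */
static uint32_t
fold64(uint64_t res)
{
    return (uint32_t) res ^ (uint32_t) (res >> 32);
}

int
main(void)
{
    printf("%08x\n", fold64(UINT64_C(0x1234567890abcdef)));
    return 0;
}
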
*/ if (!pg_strxfrm_enabled(locale)) abbreviate = false; @@ -2133,18 +2131,12 @@ varstr_abbrev_convert(Datum original, SortSupport ssup) addHyperLogLog(&sss->full_card, hash); /* Hash abbreviated key */ -#if SIZEOF_DATUM == 8 { - uint32 lohalf, - hihalf; + uint32 tmp; - lohalf = (uint32) res; - hihalf = (uint32) (res >> 32); - hash = DatumGetUInt32(hash_uint32(lohalf ^ hihalf)); + tmp = DatumGetUInt32(res) ^ (uint32) (DatumGetUInt64(res) >> 32); + hash = DatumGetUInt32(hash_uint32(tmp)); } -#else /* SIZEOF_DATUM != 8 */ - hash = DatumGetUInt32(hash_uint32((uint32) res)); -#endif addHyperLogLog(&sss->abbr_card, hash); diff --git a/src/backend/utils/adt/waitfuncs.c b/src/backend/utils/adt/waitfuncs.c index ddd0a57c0c597..f01cad72a0feb 100644 --- a/src/backend/utils/adt/waitfuncs.c +++ b/src/backend/utils/adt/waitfuncs.c @@ -73,7 +73,7 @@ pg_isolation_test_session_is_blocked(PG_FUNCTION_ARGS) * acquire heavyweight locks. */ blocking_pids_a = - DatumGetArrayTypeP(DirectFunctionCall1(pg_blocking_pids, blocked_pid)); + DatumGetArrayTypeP(DirectFunctionCall1(pg_blocking_pids, Int32GetDatum(blocked_pid))); Assert(ARR_ELEMTYPE(blocking_pids_a) == INT4OID); Assert(!array_contains_nulls(blocking_pids_a)); diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c index f7b731825fca0..7b7396cdf830c 100644 --- a/src/backend/utils/adt/xml.c +++ b/src/backend/utils/adt/xml.c @@ -84,7 +84,6 @@ #include "catalog/namespace.h" #include "catalog/pg_class.h" #include "catalog/pg_type.h" -#include "commands/dbcommands.h" #include "executor/spi.h" #include "executor/tablefunc.h" #include "fmgr.h" @@ -1769,7 +1768,7 @@ xml_doctype_in_content(const xmlChar *str) * xmloption_arg, but a DOCTYPE node in the input can force DOCUMENT mode). * * If parsed_nodes isn't NULL and we parse in CONTENT mode, the list - * of parsed nodes from the xmlParseInNodeContext call will be returned + * of parsed nodes from the xmlParseBalancedChunkMemory call will be returned * to *parsed_nodes. (It is caller's responsibility to free that.) * * Errors normally result in ereport(ERROR), but if escontext is an @@ -1795,6 +1794,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg, PgXmlErrorContext *xmlerrcxt; volatile xmlParserCtxtPtr ctxt = NULL; volatile xmlDocPtr doc = NULL; + volatile int save_keep_blanks = -1; /* * This step looks annoyingly redundant, but we must do it to have a @@ -1822,7 +1822,6 @@ xml_parse(text *data, XmlOptionType xmloption_arg, PG_TRY(); { bool parse_as_document = false; - int options; int res_code; size_t count = 0; xmlChar *version = NULL; @@ -1853,18 +1852,6 @@ xml_parse(text *data, XmlOptionType xmloption_arg, parse_as_document = true; } - /* - * Select parse options. - * - * Note that here we try to apply DTD defaults (XML_PARSE_DTDATTR) - * according to SQL/XML:2008 GR 10.16.7.d: 'Default values defined by - * internal DTD are applied'. As for external DTDs, we try to support - * them too (see SQL/XML:2008 GR 10.16.7.e), but that doesn't really - * happen because xmlPgEntityLoader prevents it. - */ - options = XML_PARSE_NOENT | XML_PARSE_DTDATTR - | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS); - /* initialize output parameters */ if (parsed_xmloptiontype != NULL) *parsed_xmloptiontype = parse_as_document ? 
XMLOPTION_DOCUMENT : @@ -1874,11 +1861,26 @@ xml_parse(text *data, XmlOptionType xmloption_arg, if (parse_as_document) { + int options; + + /* set up parser context used by xmlCtxtReadDoc */ ctxt = xmlNewParserCtxt(); if (ctxt == NULL || xmlerrcxt->err_occurred) xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY, "could not allocate parser context"); + /* + * Select parse options. + * + * Note that here we try to apply DTD defaults (XML_PARSE_DTDATTR) + * according to SQL/XML:2008 GR 10.16.7.d: 'Default values defined + * by internal DTD are applied'. As for external DTDs, we try to + * support them too (see SQL/XML:2008 GR 10.16.7.e), but that + * doesn't really happen because xmlPgEntityLoader prevents it. + */ + options = XML_PARSE_NOENT | XML_PARSE_DTDATTR + | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS); + doc = xmlCtxtReadDoc(ctxt, utf8string, NULL, /* no URL */ "UTF-8", @@ -1900,10 +1902,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg, } else { - xmlNodePtr root; - xmlNodePtr oldroot PG_USED_FOR_ASSERTS_ONLY; - - /* set up document with empty root node to be the context node */ + /* set up document that xmlParseBalancedChunkMemory will add to */ doc = xmlNewDoc(version); if (doc == NULL || xmlerrcxt->err_occurred) xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY, @@ -1916,43 +1915,22 @@ xml_parse(text *data, XmlOptionType xmloption_arg, "could not allocate XML document"); doc->standalone = standalone; - root = xmlNewNode(NULL, (const xmlChar *) "content-root"); - if (root == NULL || xmlerrcxt->err_occurred) - xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY, - "could not allocate xml node"); - - /* - * This attaches root to doc, so we need not free it separately; - * and there can't yet be any old root to free. - */ - oldroot = xmlDocSetRootElement(doc, root); - Assert(oldroot == NULL); + /* set parse options --- have to do this the ugly way */ + save_keep_blanks = xmlKeepBlanksDefault(preserve_whitespace ? 
1 : 0); /* allow empty content */ if (*(utf8string + count)) { - xmlNodePtr node_list = NULL; - xmlParserErrors res; - - res = xmlParseInNodeContext(root, - (char *) utf8string + count, - strlen((char *) utf8string + count), - options, - &node_list); - - if (res != XML_ERR_OK || xmlerrcxt->err_occurred) + res_code = xmlParseBalancedChunkMemory(doc, NULL, NULL, 0, + utf8string + count, + parsed_nodes); + if (res_code != 0 || xmlerrcxt->err_occurred) { - xmlFreeNodeList(node_list); xml_errsave(escontext, xmlerrcxt, ERRCODE_INVALID_XML_CONTENT, "invalid XML content"); goto fail; } - - if (parsed_nodes != NULL) - *parsed_nodes = node_list; - else - xmlFreeNodeList(node_list); } } @@ -1961,6 +1939,8 @@ xml_parse(text *data, XmlOptionType xmloption_arg, } PG_CATCH(); { + if (save_keep_blanks != -1) + xmlKeepBlanksDefault(save_keep_blanks); if (doc != NULL) xmlFreeDoc(doc); if (ctxt != NULL) @@ -1972,6 +1952,9 @@ xml_parse(text *data, XmlOptionType xmloption_arg, } PG_END_TRY(); + if (save_keep_blanks != -1) + xmlKeepBlanksDefault(save_keep_blanks); + if (ctxt != NULL) xmlFreeParserCtxt(ctxt); diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c index 5c8360c08b5f8..45d1e2be007ba 100644 --- a/src/backend/utils/cache/attoptcache.c +++ b/src/backend/utils/cache/attoptcache.c @@ -86,7 +86,7 @@ relatt_cache_syshash(const void *key, Size keysize) const AttoptCacheKey *ckey = key; Assert(keysize == sizeof(*ckey)); - return GetSysCacheHashValue2(ATTNUM, ckey->attrelid, ckey->attnum); + return GetSysCacheHashValue2(ATTNUM, ObjectIdGetDatum(ckey->attrelid), Int32GetDatum(ckey->attnum)); } /* diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c index d1b25214376ed..e2cd3feaf81d3 100644 --- a/src/backend/utils/cache/catcache.c +++ b/src/backend/utils/cache/catcache.c @@ -213,7 +213,7 @@ namehashfast(Datum datum) { char *key = NameStr(*DatumGetName(datum)); - return hash_any((unsigned char *) key, strlen(key)); + return hash_bytes((unsigned char *) key, strlen(key)); } static bool diff --git a/src/backend/utils/cache/evtcache.c b/src/backend/utils/cache/evtcache.c index ce596bf563856..b9d5a5998be50 100644 --- a/src/backend/utils/cache/evtcache.c +++ b/src/backend/utils/cache/evtcache.c @@ -78,7 +78,6 @@ BuildEventTriggerCache(void) { HASHCTL ctl; HTAB *cache; - MemoryContext oldcontext; Relation rel; Relation irel; SysScanDesc scan; @@ -110,9 +109,6 @@ BuildEventTriggerCache(void) (Datum) 0); } - /* Switch to correct memory context. */ - oldcontext = MemoryContextSwitchTo(EventTriggerCacheContext); - /* Prevent the memory context from being nuked while we're rebuilding. */ EventTriggerCacheState = ETCS_REBUILD_STARTED; @@ -145,6 +141,7 @@ BuildEventTriggerCache(void) bool evttags_isnull; EventTriggerCacheEntry *entry; bool found; + MemoryContext oldcontext; /* Get next tuple. */ tup = systable_getnext_ordered(scan, ForwardScanDirection); @@ -171,6 +168,9 @@ BuildEventTriggerCache(void) else continue; + /* Switch to correct memory context. */ + oldcontext = MemoryContextSwitchTo(EventTriggerCacheContext); + /* Allocate new cache item. */ item = palloc0(sizeof(EventTriggerCacheItem)); item->fnoid = form->evtfoid; @@ -188,6 +188,9 @@ BuildEventTriggerCache(void) entry->triggerlist = lappend(entry->triggerlist, item); else entry->triggerlist = list_make1(item); + + /* Restore previous memory context. */ + MemoryContextSwitchTo(oldcontext); } /* Done with pg_event_trigger scan. 
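
The evtcache hunk above narrows the MemoryContextSwitchTo() window: instead of running the whole scan in the cache's context, it switches only around the allocations that must outlive the rebuild and restores the caller's context each iteration. A toy arena model of that discipline; the arena type and toy_* functions are illustrative, not PostgreSQL's MemoryContext API.

#include <stdio.h>
#include <stdlib.h>

typedef struct ToyArena
{
    const char *name;
} ToyArena;

static ToyArena  toy_caller = {"caller"};
static ToyArena  toy_cache = {"cache"};
static ToyArena *toy_current = &toy_caller;

static void *
toy_alloc(size_t sz)
{
    printf("allocating %zu bytes in \"%s\" context\n", sz, toy_current->name);
    return malloc(sz);
}

int
main(void)
{
    for (int i = 0; i < 2; i++)         /* one iteration per cache item */
    {
        ToyArena *old = toy_current;    /* save */

        toy_current = &toy_cache;       /* switch only for the cache item */
        free(toy_alloc(64));
        toy_current = old;              /* restore before the next scan step */
    }
    return 0;
}

This keeps scan-local allocations out of the long-lived context, so they are released with the caller's context instead of accumulating in the cache.
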
*/ @@ -195,9 +198,6 @@ BuildEventTriggerCache(void) index_close(irel, AccessShareLock); relation_close(rel, AccessShareLock); - /* Restore previous memory context. */ - MemoryContextSwitchTo(oldcontext); - /* Install new cache. */ EventTriggerCache = cache; @@ -240,6 +240,8 @@ DecodeTextArrayToBitmapset(Datum array) } pfree(elems); + if ((Pointer) arr != DatumGetPointer(array)) + pfree(arr); return bms; } diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c index c460a72b75d90..fa7cd7e06a7ab 100644 --- a/src/backend/utils/cache/lsyscache.c +++ b/src/backend/utils/cache/lsyscache.c @@ -26,6 +26,7 @@ #include "catalog/pg_class.h" #include "catalog/pg_collation.h" #include "catalog/pg_constraint.h" +#include "catalog/pg_database.h" #include "catalog/pg_index.h" #include "catalog/pg_language.h" #include "catalog/pg_namespace.h" @@ -1247,6 +1248,32 @@ get_constraint_type(Oid conoid) return contype; } +/* ---------- DATABASE CACHE ---------- */ + +/* + * get_database_name - given a database OID, look up the name + * + * Returns a palloc'd string, or NULL if no such database. + */ +char * +get_database_name(Oid dbid) +{ + HeapTuple dbtuple; + char *result; + + dbtuple = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(dbid)); + if (HeapTupleIsValid(dbtuple)) + { + result = pstrdup(NameStr(((Form_pg_database) GETSTRUCT(dbtuple))->datname)); + ReleaseSysCache(dbtuple); + } + else + result = NULL; + + return result; +} + + /* ---------- LANGUAGE CACHE ---------- */ char * @@ -3817,7 +3844,7 @@ get_subscription_oid(const char *subname, bool missing_ok) Oid oid; oid = GetSysCacheOid2(SUBSCRIPTIONNAME, Anum_pg_subscription_oid, - MyDatabaseId, CStringGetDatum(subname)); + ObjectIdGetDatum(MyDatabaseId), CStringGetDatum(subname)); if (!OidIsValid(oid) && !missing_ok) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index f4d2b9458a5ea..6661d2c6b7391 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -463,8 +463,7 @@ CompleteCachedPlan(CachedPlanSource *plansource, /* * Save the final parameter types (or other parameter specification data) - * into the source_context, as well as our other parameters. Also save - * the result tuple descriptor. + * into the source_context, as well as our other parameters. */ MemoryContextSwitchTo(source_context); @@ -480,9 +479,25 @@ CompleteCachedPlan(CachedPlanSource *plansource, plansource->parserSetupArg = parserSetupArg; plansource->cursor_options = cursor_options; plansource->fixed_result = fixed_result; - plansource->resultDesc = PlanCacheComputeResultDesc(querytree_list); + /* + * Also save the result tuple descriptor. PlanCacheComputeResultDesc may + * leak some cruft; normally we just accept that to save a copy step, but + * in USE_VALGRIND mode be tidy by running it in the caller's context. 
+ */ +#ifdef USE_VALGRIND + MemoryContextSwitchTo(oldcxt); + plansource->resultDesc = PlanCacheComputeResultDesc(querytree_list); + if (plansource->resultDesc) + { + MemoryContextSwitchTo(source_context); + plansource->resultDesc = CreateTupleDescCopy(plansource->resultDesc); + MemoryContextSwitchTo(oldcxt); + } +#else + plansource->resultDesc = PlanCacheComputeResultDesc(querytree_list); MemoryContextSwitchTo(oldcxt); +#endif plansource->is_complete = true; plansource->is_valid = true; @@ -1390,7 +1405,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams, { PlannedStmt *pstmt = (PlannedStmt *) lfirst(lc); - pstmt->cached_plan_type = customplan ? PLAN_CACHE_CUSTOM : PLAN_CACHE_GENERIC; + pstmt->planOrigin = customplan ? PLAN_STMT_CACHE_CUSTOM : PLAN_STMT_CACHE_GENERIC; } return plan; diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 559ba9cdb2cde..6fe268a8eec1f 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -3184,7 +3184,7 @@ AssertPendingSyncs_RelationCache(void) if ((LockTagType) locallock->tag.lock.locktag_type != LOCKTAG_RELATION) continue; - relid = ObjectIdGetDatum(locallock->tag.lock.locktag_field2); + relid = locallock->tag.lock.locktag_field2; r = RelationIdGetRelation(relid); if (!RelationIsValid(r)) continue; @@ -6991,5 +6991,5 @@ ResOwnerReleaseRelation(Datum res) Assert(rel->rd_refcnt > 0); rel->rd_refcnt -= 1; - RelationCloseCleanup((Relation) res); + RelationCloseCleanup((Relation) DatumGetPointer(res)); } diff --git a/src/backend/utils/cache/relfilenumbermap.c b/src/backend/utils/cache/relfilenumbermap.c index 8a2f6f8c69318..0b6f9cf3fa191 100644 --- a/src/backend/utils/cache/relfilenumbermap.c +++ b/src/backend/utils/cache/relfilenumbermap.c @@ -130,6 +130,11 @@ InitializeRelfilenumberMap(void) * Map a relation's (tablespace, relfilenumber) to a relation's oid and cache * the result. * + * A temporary relation may share its relfilenumber with a permanent relation + * or temporary relations created in other backends. Being able to uniquely + * identify a temporary relation would require a backend's proc number, which + * we do not know about. Hence, this function ignores this case. + * * Returns InvalidOid if no relation matching the criteria could be found. 
*/ Oid @@ -208,6 +213,9 @@ RelidByRelfilenumber(Oid reltablespace, RelFileNumber relfilenumber) { Form_pg_class classform = (Form_pg_class) GETSTRUCT(ntp); + if (classform->relpersistence == RELPERSISTENCE_TEMP) + continue; + if (found) elog(ERROR, "unexpected duplicate for tablespace %u, relfilenumber %u", diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c index f944453a1d884..7828bdcba8f64 100644 --- a/src/backend/utils/cache/syscache.c +++ b/src/backend/utils/cache/syscache.c @@ -459,9 +459,9 @@ GetSysCacheOid(int cacheId, tuple = SearchSysCache(cacheId, key1, key2, key3, key4); if (!HeapTupleIsValid(tuple)) return InvalidOid; - result = heap_getattr(tuple, oidcol, - SysCache[cacheId]->cc_tupdesc, - &isNull); + result = DatumGetObjectId(heap_getattr(tuple, oidcol, + SysCache[cacheId]->cc_tupdesc, + &isNull)); Assert(!isNull); /* columns used as oids should never be NULL */ ReleaseSysCache(tuple); return result; diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c index 18cccd778fd8c..e8ae53238d07a 100644 --- a/src/backend/utils/cache/ts_cache.c +++ b/src/backend/utils/cache/ts_cache.c @@ -321,7 +321,9 @@ lookup_ts_dictionary_cache(Oid dictId) /* * Init method runs in dictionary's private memory context, and we - * make sure the options are stored there too + * make sure the options are stored there too. This typically + * results in a small amount of memory leakage, but it's not worth + * complicating the API for tmplinit functions to avoid it. */ oldcontext = MemoryContextSwitchTo(entry->dictCtx); diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c index f9aec38a11fb3..6a347698edffe 100644 --- a/src/backend/utils/cache/typcache.c +++ b/src/backend/utils/cache/typcache.c @@ -1171,9 +1171,6 @@ load_domaintype_info(TypeCacheEntry *typentry) elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin", NameStr(typTup->typname), NameStr(c->conname)); - /* Convert conbin to C string in caller context */ - constring = TextDatumGetCString(val); - /* Create the DomainConstraintCache object and context if needed */ if (dcc == NULL) { @@ -1189,9 +1186,8 @@ load_domaintype_info(TypeCacheEntry *typentry) dcc->dccRefCount = 0; } - /* Create node trees in DomainConstraintCache's context */ - oldcxt = MemoryContextSwitchTo(dcc->dccContext); - + /* Convert conbin to a node tree, still in caller's context */ + constring = TextDatumGetCString(val); check_expr = (Expr *) stringToNode(constring); /* @@ -1206,10 +1202,13 @@ load_domaintype_info(TypeCacheEntry *typentry) */ check_expr = expression_planner(check_expr); + /* Create only the minimally needed stuff in dccContext */ + oldcxt = MemoryContextSwitchTo(dcc->dccContext); + r = makeNode(DomainConstraintState); r->constrainttype = DOM_CONSTRAINT_CHECK; r->name = pstrdup(NameStr(c->conname)); - r->check_expr = check_expr; + r->check_expr = copyObject(check_expr); r->check_exprstate = NULL; MemoryContextSwitchTo(oldcxt); diff --git a/src/backend/utils/error/csvlog.c b/src/backend/utils/error/csvlog.c index fdac3c048e36a..c3159ed7d979b 100644 --- a/src/backend/utils/error/csvlog.c +++ b/src/backend/utils/error/csvlog.c @@ -120,7 +120,7 @@ write_csvlog(ErrorData *edata) appendStringInfoChar(&buf, ','); /* session id */ - appendStringInfo(&buf, INT64_HEX_FORMAT ".%x", MyStartTime, MyProcPid); + appendStringInfo(&buf, "%" PRIx64 ".%x", MyStartTime, MyProcPid); appendStringInfoChar(&buf, ','); /* Line number */ diff --git a/src/backend/utils/error/elog.c 
b/src/backend/utils/error/elog.c index 47af743990fe9..b7b9692f8c884 100644 --- a/src/backend/utils/error/elog.c +++ b/src/backend/utils/error/elog.c @@ -1128,12 +1128,15 @@ set_backtrace(ErrorData *edata, int num_skip) nframes = backtrace(buf, lengthof(buf)); strfrms = backtrace_symbols(buf, nframes); - if (strfrms == NULL) - return; - - for (int i = num_skip; i < nframes; i++) - appendStringInfo(&errtrace, "\n%s", strfrms[i]); - free(strfrms); + if (strfrms != NULL) + { + for (int i = num_skip; i < nframes; i++) + appendStringInfo(&errtrace, "\n%s", strfrms[i]); + free(strfrms); + } + else + appendStringInfoString(&errtrace, + "insufficient memory for backtrace generation"); } #else appendStringInfoString(&errtrace, @@ -2956,12 +2959,12 @@ log_status_format(StringInfo buf, const char *format, ErrorData *edata) { char strfbuf[128]; - snprintf(strfbuf, sizeof(strfbuf) - 1, INT64_HEX_FORMAT ".%x", + snprintf(strfbuf, sizeof(strfbuf) - 1, "%" PRIx64 ".%x", MyStartTime, MyProcPid); appendStringInfo(buf, "%*s", padding, strfbuf); } else - appendStringInfo(buf, INT64_HEX_FORMAT ".%x", MyStartTime, MyProcPid); + appendStringInfo(buf, "%" PRIx64 ".%x", MyStartTime, MyProcPid); break; case 'p': if (padding != 0) diff --git a/src/backend/utils/error/jsonlog.c b/src/backend/utils/error/jsonlog.c index 519eacf17f83c..2619f49904201 100644 --- a/src/backend/utils/error/jsonlog.c +++ b/src/backend/utils/error/jsonlog.c @@ -168,7 +168,7 @@ write_jsonlog(ErrorData *edata) } /* Session id */ - appendJSONKeyValueFmt(&buf, "session_id", true, INT64_HEX_FORMAT ".%x", + appendJSONKeyValueFmt(&buf, "session_id", true, "%" PRIx64 ".%x", MyStartTime, MyProcPid); /* Line number */ diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c index 4bb84ff70870f..1366521f471e2 100644 --- a/src/backend/utils/fmgr/dfmgr.c +++ b/src/backend/utils/fmgr/dfmgr.c @@ -100,12 +100,19 @@ load_external_function(const char *filename, const char *funcname, void *retval; /* - * If the value starts with "$libdir/", strip that. This is because many - * extensions have hardcoded '$libdir/foo' as their library name, which - * prevents using the path. + * For extensions with hardcoded '$libdir/' library names, we strip the + * prefix to allow the library search path to be used. This is done only + * for simple names (e.g., "$libdir/foo"), not for nested paths (e.g., + * "$libdir/foo/bar"). + * + * For nested paths, 'expand_dynamic_library_name' directly expands the + * '$libdir' macro, so we leave them untouched. */ if (strncmp(filename, "$libdir/", 8) == 0) - filename += 8; + { + if (first_dir_separator(filename + 8) == NULL) + filename += 8; + } /* Expand the possibly-abbreviated filename to an exact path name */ fullname = expand_dynamic_library_name(filename); diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c index 782291d999832..5543440a33e6c 100644 --- a/src/backend/utils/fmgr/fmgr.c +++ b/src/backend/utils/fmgr/fmgr.c @@ -1788,41 +1788,6 @@ OidSendFunctionCall(Oid functionId, Datum val) } -/*------------------------------------------------------------------------- - * Support routines for standard maybe-pass-by-reference datatypes - * - * int8 and float8 can be passed by value if Datum is wide enough. - * (For backwards-compatibility reasons, we allow pass-by-ref to be chosen - * at compile time even if pass-by-val is possible.) 
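
The elog.c, csvlog.c, and jsonlog.c hunks swap the homegrown INT64_HEX_FORMAT macro for C99's "%" PRIx64, which expands to the correct length modifier on every platform. A tiny sketch of the session-id format they produce (the values are made up):

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
    uint64_t start_time = 1735689600;   /* illustrative MyStartTime */
    int      pid = 12345;               /* illustrative MyProcPid */

    /* hex start time, '.', hex PID -- prints "67748580.3039" */
    printf("%" PRIx64 ".%x\n", start_time, pid);
    return 0;
}
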
- * - * Note: there is only one switch controlling the pass-by-value option for - * both int8 and float8; this is to avoid making things unduly complicated - * for the timestamp types, which might have either representation. - *------------------------------------------------------------------------- - */ - -#ifndef USE_FLOAT8_BYVAL /* controls int8 too */ - -Datum -Int64GetDatum(int64 X) -{ - int64 *retval = (int64 *) palloc(sizeof(int64)); - - *retval = X; - return PointerGetDatum(retval); -} - -Datum -Float8GetDatum(float8 X) -{ - float8 *retval = (float8 *) palloc(sizeof(float8)); - - *retval = X; - return PointerGetDatum(retval); -} -#endif /* USE_FLOAT8_BYVAL */ - - /*------------------------------------------------------------------------- * Support routines for toastable datatypes *------------------------------------------------------------------------- diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c index 42e9be274fc6a..ac94b9e93c6e3 100644 --- a/src/backend/utils/hash/dynahash.c +++ b/src/backend/utils/hash/dynahash.c @@ -22,10 +22,11 @@ * lookup key's hash value as a partition number --- this will work because * of the way calc_bucket() maps hash values to bucket numbers. * - * For hash tables in shared memory, the memory allocator function should - * match malloc's semantics of returning NULL on failure. For hash tables - * in local memory, we typically use palloc() which will throw error on - * failure. The code in this file has to cope with both cases. + * The memory allocator function should match malloc's semantics of returning + * NULL on failure. (This is essential for hash tables in shared memory. + * For hash tables in local memory, we used to use palloc() which will throw + * error on failure; but we no longer do, so it's untested whether this + * module will still cope with that behavior.) * * dynahash.c provides support for these types of lookup keys: * @@ -79,9 +80,8 @@ * are not implemented; otherwise functionality is identical. * * Compilation controls: - * HASH_DEBUG controls some informative traces, mainly for debugging. - * HASH_STATISTICS causes HashAccesses and HashCollisions to be maintained; - * when combined with HASH_DEBUG, these are displayed by hdestroy(). + * HASH_STATISTICS causes some usage statistics to be maintained, which can be + * logged by calling hash_stats(). * * Problems & fixes to ejp@ausmelb.oz. WARNING: relies on pre-processor * concatenation property, in probably unnecessary code 'optimization'. 
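
The fmgr.c hunk above removes the pass-by-reference fallbacks because a Datum is now always 8 bytes, so int64 and float8 travel by value: the bits are copied into the Datum instead of being palloc'd and pointed to. A sketch of the float8 bit-copy under that assumption (toy names; PostgreSQL's versions are the inline Float8GetDatum/DatumGetFloat8):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t ToyDatum;

static ToyDatum
toy_float8_get_datum(double x)
{
    ToyDatum d;

    memcpy(&d, &x, sizeof(d));  /* well-defined bit copy, no allocation */
    return d;
}

static double
toy_datum_get_float8(ToyDatum d)
{
    double x;

    memcpy(&x, &d, sizeof(x));
    return x;
}

int
main(void)
{
    printf("%g\n", toy_datum_get_float8(toy_float8_get_datum(3.25)));
    return 0;
}
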
@@ -98,10 +98,10 @@ #include "access/xact.h" #include "common/hashfn.h" +#include "lib/ilist.h" #include "port/pg_bitutils.h" #include "storage/shmem.h" #include "storage/spin.h" -#include "utils/dynahash.h" #include "utils/memutils.h" @@ -153,7 +153,7 @@ typedef HASHBUCKET *HASHSEGMENT; typedef struct { slock_t mutex; /* spinlock for this freelist */ - long nentries; /* number of entries in associated buckets */ + int64 nentries; /* number of entries in associated buckets */ HASHELEMENT *freeList; /* chain of free elements */ } FreeListData; @@ -181,8 +181,8 @@ struct HASHHDR /* These fields can change, but not in a partitioned table */ /* Also, dsize can't change in a shared table, even if unpartitioned */ - long dsize; /* directory size */ - long nsegs; /* number of allocated segments (<= dsize) */ + int64 dsize; /* directory size */ + int64 nsegs; /* number of allocated segments (<= dsize) */ uint32 max_bucket; /* ID of maximum bucket in use */ uint32 high_mask; /* mask to modulo into entire table */ uint32 low_mask; /* mask to modulo into lower half of table */ @@ -190,9 +190,9 @@ struct HASHHDR /* These fields are fixed at hashtable creation */ Size keysize; /* hash key length in bytes */ Size entrysize; /* total user element size in bytes */ - long num_partitions; /* # partitions (must be power of 2), or 0 */ - long max_dsize; /* 'dsize' limit if directory is fixed size */ - long ssize; /* segment size --- must be power of 2 */ + int64 num_partitions; /* # partitions (must be power of 2), or 0 */ + int64 max_dsize; /* 'dsize' limit if directory is fixed size */ + int64 ssize; /* segment size --- must be power of 2 */ int sshift; /* segment shift = log2(ssize) */ int nelem_alloc; /* number of entries to allocate at once */ bool isfixed; /* if true, don't enlarge */ @@ -203,8 +203,9 @@ struct HASHHDR * Count statistics here. NB: stats code doesn't bother with mutex, so * counts could be corrupted a bit in a partitioned table. */ - long accesses; - long collisions; + uint64 accesses; + uint64 collisions; + uint64 expansions; #endif }; @@ -234,8 +235,18 @@ struct HTAB /* We keep local copies of these fixed values to reduce contention */ Size keysize; /* hash key length in bytes */ - long ssize; /* segment size --- must be power of 2 */ + int64 ssize; /* segment size --- must be power of 2 */ int sshift; /* segment shift = log2(ssize) */ + + /* + * In a USE_VALGRIND build, non-shared hashtables keep an slist chain of + * all the element blocks they have allocated. This pacifies Valgrind, + * which would otherwise often claim that the element blocks are "possibly + * lost" for lack of any non-interior pointers to their starts. 
+ */ +#ifdef USE_VALGRIND + slist_head element_blocks; +#endif }; /* @@ -254,12 +265,6 @@ struct HTAB */ #define MOD(x,y) ((x) & ((y)-1)) -#ifdef HASH_STATISTICS -static long hash_accesses, - hash_collisions, - hash_expansions; -#endif - /* * Private function prototypes */ @@ -271,12 +276,13 @@ static bool expand_table(HTAB *hashp); static HASHBUCKET get_hash_entry(HTAB *hashp, int freelist_idx); static void hdefault(HTAB *hashp); static int choose_nelem_alloc(Size entrysize); -static bool init_htab(HTAB *hashp, long nelem); +static bool init_htab(HTAB *hashp, int64 nelem); pg_noreturn static void hash_corrupted(HTAB *hashp); static uint32 hash_initial_lookup(HTAB *hashp, uint32 hashvalue, HASHBUCKET **bucketptr); -static long next_pow2_long(long num); -static int next_pow2_int(long num); +static int my_log2(int64 num); +static int64 next_pow2_int64(int64 num); +static int next_pow2_int(int64 num); static void register_seq_scan(HTAB *hashp); static void deregister_seq_scan(HTAB *hashp); static bool has_seq_scans(HTAB *hashp); @@ -349,7 +355,7 @@ string_compare(const char *key1, const char *key2, Size keysize) * large nelem will penalize hash_seq_search speed without buying much. */ HTAB * -hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags) +hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags) { HTAB *hashp; HASHHDR *hctl; @@ -649,7 +655,7 @@ hdefault(HTAB *hashp) hctl->isfixed = false; /* can be enlarged */ #ifdef HASH_STATISTICS - hctl->accesses = hctl->collisions = 0; + hctl->accesses = hctl->collisions = hctl->expansions = 0; #endif } @@ -691,7 +697,7 @@ choose_nelem_alloc(Size entrysize) * arrays */ static bool -init_htab(HTAB *hashp, long nelem) +init_htab(HTAB *hashp, int64 nelem) { HASHHDR *hctl = hashp->hctl; HASHSEGMENT *segp; @@ -763,17 +769,6 @@ init_htab(HTAB *hashp, long nelem) /* Choose number of entries to allocate at a time */ hctl->nelem_alloc = choose_nelem_alloc(hctl->entrysize); -#ifdef HASH_DEBUG - fprintf(stderr, "init_htab:\n%s%p\n%s%ld\n%s%ld\n%s%d\n%s%ld\n%s%u\n%s%x\n%s%x\n%s%ld\n", - "TABLE POINTER ", hashp, - "DIRECTORY SIZE ", hctl->dsize, - "SEGMENT SIZE ", hctl->ssize, - "SEGMENT SHIFT ", hctl->sshift, - "MAX BUCKET ", hctl->max_bucket, - "HIGH MASK ", hctl->high_mask, - "LOW MASK ", hctl->low_mask, - "NSEGS ", hctl->nsegs); -#endif return true; } @@ -785,10 +780,10 @@ init_htab(HTAB *hashp, long nelem) * NB: assumes that all hash structure parameters have default values! */ Size -hash_estimate_size(long num_entries, Size entrysize) +hash_estimate_size(int64 num_entries, Size entrysize) { Size size; - long nBuckets, + int64 nBuckets, nSegments, nDirEntries, nElementAllocs, @@ -796,9 +791,9 @@ hash_estimate_size(long num_entries, Size entrysize) elementAllocCnt; /* estimate number of buckets wanted */ - nBuckets = next_pow2_long(num_entries); + nBuckets = next_pow2_int64(num_entries); /* # of segments needed for nBuckets */ - nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1); + nSegments = next_pow2_int64((nBuckets - 1) / DEF_SEGSIZE + 1); /* directory entries */ nDirEntries = DEF_DIRSIZE; while (nDirEntries < nSegments) @@ -831,17 +826,17 @@ hash_estimate_size(long num_entries, Size entrysize) * * XXX this had better agree with the behavior of init_htab()... 
*/ -long -hash_select_dirsize(long num_entries) +int64 +hash_select_dirsize(int64 num_entries) { - long nBuckets, + int64 nBuckets, nSegments, nDirEntries; /* estimate number of buckets wanted */ - nBuckets = next_pow2_long(num_entries); + nBuckets = next_pow2_int64(num_entries); /* # of segments needed for nBuckets */ - nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1); + nSegments = next_pow2_int64((nBuckets - 1) / DEF_SEGSIZE + 1); /* directory entries */ nDirEntries = DEF_DIRSIZE; while (nDirEntries < nSegments) @@ -876,7 +871,7 @@ hash_destroy(HTAB *hashp) /* so this hashtable must have its own context */ Assert(hashp->hcxt != NULL); - hash_stats("destroy", hashp); + hash_stats(__func__, hashp); /* * Free everything by destroying the hash table's memory context. @@ -886,19 +881,16 @@ hash_destroy(HTAB *hashp) } void -hash_stats(const char *where, HTAB *hashp) +hash_stats(const char *caller, HTAB *hashp) { #ifdef HASH_STATISTICS - fprintf(stderr, "%s: this HTAB -- accesses %ld collisions %ld\n", - where, hashp->hctl->accesses, hashp->hctl->collisions); - - fprintf(stderr, "hash_stats: entries %ld keysize %ld maxp %u segmentcount %ld\n", - hash_get_num_entries(hashp), (long) hashp->hctl->keysize, - hashp->hctl->max_bucket, hashp->hctl->nsegs); - fprintf(stderr, "%s: total accesses %ld total collisions %ld\n", - where, hash_accesses, hash_collisions); - fprintf(stderr, "hash_stats: total expansions %ld\n", - hash_expansions); + HASHHDR *hctl = hashp->hctl; + + elog(DEBUG4, + "hash_stats: Caller: %s Table Name: \"%s\" Accesses: " UINT64_FORMAT " Collisions: " UINT64_FORMAT " Expansions: " UINT64_FORMAT " Entries: " INT64_FORMAT " Key Size: %zu Max Bucket: %u Segment Count: " INT64_FORMAT, + caller != NULL ? caller : "(unknown)", hashp->tabname, hctl->accesses, + hctl->collisions, hctl->expansions, hash_get_num_entries(hashp), + hctl->keysize, hctl->max_bucket, hctl->nsegs); #endif } @@ -984,7 +976,6 @@ hash_search_with_hash_value(HTAB *hashp, HashCompareFunc match; #ifdef HASH_STATISTICS - hash_accesses++; hctl->accesses++; #endif @@ -1002,7 +993,7 @@ hash_search_with_hash_value(HTAB *hashp, * Can't split if running in partitioned mode, nor if frozen, nor if * table is the subject of any active hash_seq_search scans. 
*/ - if (hctl->freeList[0].nentries > (long) hctl->max_bucket && + if (hctl->freeList[0].nentries > (int64) hctl->max_bucket && !IS_PARTITIONED(hctl) && !hashp->frozen && !has_seq_scans(hashp)) (void) expand_table(hashp); @@ -1028,7 +1019,6 @@ hash_search_with_hash_value(HTAB *hashp, prevBucketPtr = &(currBucket->link); currBucket = *prevBucketPtr; #ifdef HASH_STATISTICS - hash_collisions++; hctl->collisions++; #endif } @@ -1162,7 +1152,8 @@ hash_update_hash_key(HTAB *hashp, HashCompareFunc match; #ifdef HASH_STATISTICS - hash_accesses++; + HASHHDR *hctl = hashp->hctl; + hctl->accesses++; #endif @@ -1216,7 +1207,6 @@ hash_update_hash_key(HTAB *hashp, prevBucketPtr = &(currBucket->link); currBucket = *prevBucketPtr; #ifdef HASH_STATISTICS - hash_collisions++; hctl->collisions++; #endif } @@ -1342,11 +1332,11 @@ get_hash_entry(HTAB *hashp, int freelist_idx) /* * hash_get_num_entries -- get the number of entries in a hashtable */ -long +int64 hash_get_num_entries(HTAB *hashp) { int i; - long sum = hashp->hctl->freeList[0].nentries; + int64 sum = hashp->hctl->freeList[0].nentries; /* * We currently don't bother with acquiring the mutexes; it's only @@ -1427,9 +1417,9 @@ hash_seq_search(HASH_SEQ_STATUS *status) HTAB *hashp; HASHHDR *hctl; uint32 max_bucket; - long ssize; - long segment_num; - long segment_ndx; + int64 ssize; + int64 segment_num; + int64 segment_ndx; HASHSEGMENT segp; uint32 curBucket; HASHELEMENT *curElem; @@ -1558,11 +1548,11 @@ expand_table(HTAB *hashp) HASHHDR *hctl = hashp->hctl; HASHSEGMENT old_seg, new_seg; - long old_bucket, + int64 old_bucket, new_bucket; - long new_segnum, + int64 new_segnum, new_segndx; - long old_segnum, + int64 old_segnum, old_segndx; HASHBUCKET *oldlink, *newlink; @@ -1572,7 +1562,7 @@ expand_table(HTAB *hashp) Assert(!IS_PARTITIONED(hctl)); #ifdef HASH_STATISTICS - hash_expansions++; + hctl->expansions++; #endif new_bucket = hctl->max_bucket + 1; @@ -1630,7 +1620,7 @@ expand_table(HTAB *hashp) currElement = nextElement) { nextElement = currElement->link; - if ((long) calc_bucket(hctl, currElement->hashvalue) == old_bucket) + if ((int64) calc_bucket(hctl, currElement->hashvalue) == old_bucket) { *oldlink = currElement; oldlink = &currElement->link; @@ -1654,9 +1644,9 @@ dir_realloc(HTAB *hashp) { HASHSEGMENT *p; HASHSEGMENT *old_p; - long new_dsize; - long old_dirsize; - long new_dirsize; + int64 new_dsize; + int64 old_dirsize; + int64 new_dirsize; if (hashp->hctl->max_dsize != NO_MAX_DSIZE) return false; @@ -1712,6 +1702,8 @@ element_alloc(HTAB *hashp, int nelem, int freelist_idx) { HASHHDR *hctl = hashp->hctl; Size elementSize; + Size requestSize; + char *allocedBlock; HASHELEMENT *firstElement; HASHELEMENT *tmpElement; HASHELEMENT *prevElement; @@ -1723,12 +1715,38 @@ element_alloc(HTAB *hashp, int nelem, int freelist_idx) /* Each element has a HASHELEMENT header plus user data. */ elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(hctl->entrysize); + requestSize = nelem * elementSize; + + /* Add space for slist_node list link if we need one. */ +#ifdef USE_VALGRIND + if (!hashp->isshared) + requestSize += MAXALIGN(sizeof(slist_node)); +#endif + + /* Allocate the memory. */ CurrentDynaHashCxt = hashp->hcxt; - firstElement = (HASHELEMENT *) hashp->alloc(nelem * elementSize); + allocedBlock = hashp->alloc(requestSize); - if (!firstElement) + if (!allocedBlock) return false; + /* + * If USE_VALGRIND, each allocated block of elements of a non-shared + * hashtable is chained into a list, so that Valgrind won't think it's + * been leaked. 
+ */
+#ifdef USE_VALGRIND
+	if (hashp->isshared)
+		firstElement = (HASHELEMENT *) allocedBlock;
+	else
+	{
+		slist_push_head(&hashp->element_blocks, (slist_node *) allocedBlock);
+		firstElement = (HASHELEMENT *) (allocedBlock + MAXALIGN(sizeof(slist_node)));
+	}
+#else
+	firstElement = (HASHELEMENT *) allocedBlock;
+#endif
+
 	/* prepare to link all the new entries into the freelist */
 	prevElement = NULL;
 	tmpElement = firstElement;
@@ -1762,8 +1780,8 @@ hash_initial_lookup(HTAB *hashp, uint32 hashvalue, HASHBUCKET **bucketptr)
 {
 	HASHHDR    *hctl = hashp->hctl;
 	HASHSEGMENT segp;
-	long		segment_num;
-	long		segment_ndx;
+	int64		segment_num;
+	int64		segment_ndx;
 	uint32		bucket;
 
 	bucket = calc_bucket(hctl, hashvalue);
@@ -1795,26 +1813,22 @@ hash_corrupted(HTAB *hashp)
 }
 
 /* calculate ceil(log base 2) of num */
-int
-my_log2(long num)
+static int
+my_log2(int64 num)
 {
 	/*
 	 * guard against too-large input, which would be invalid for
 	 * pg_ceil_log2_*()
 	 */
-	if (num > LONG_MAX / 2)
-		num = LONG_MAX / 2;
+	if (num > PG_INT64_MAX / 2)
+		num = PG_INT64_MAX / 2;
 
-#if SIZEOF_LONG < 8
-	return pg_ceil_log2_32(num);
-#else
 	return pg_ceil_log2_64(num);
-#endif
 }
 
-/* calculate first power of 2 >= num, bounded to what will fit in a long */
-static long
-next_pow2_long(long num)
+/* calculate first power of 2 >= num, bounded to what will fit in an int64 */
+static int64
+next_pow2_int64(int64 num)
 {
 	/* my_log2's internal range check is sufficient */
-	return 1L << my_log2(num);
+	return INT64CONST(1) << my_log2(num);
 }
 
 /* calculate first power of 2 >= num, bounded to what will fit in an int */
 static int
-next_pow2_int(long num)
+next_pow2_int(int64 num)
 {
 	if (num > INT_MAX / 2)
 		num = INT_MAX / 2;
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index 43b4dbccc3de6..545d1e90fbd41 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -1099,7 +1099,8 @@ EstimateClientConnectionInfoSpace(void)
  * Serialize MyClientConnectionInfo for use by parallel workers.
  */
 void
-SerializeClientConnectionInfo(Size maxsize, char *start_address)
+SerializeClientConnectionInfo(Size maxsize PG_USED_FOR_ASSERTS_ONLY,
+							  char *start_address)
 {
 	SerializedClientConnectionInfo serialized = {0};
 
@@ -1183,7 +1184,6 @@ UnlinkLockFiles(int status, Datum arg)
 		/* Should we complain if the unlink fails? */
 	}
 	/* Since we're about to exit, no need to reclaim storage */
-	lock_files = NIL;
 
 	/*
	 * Lock file removal should always be the last externally visible action
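A quick worked example of the arithmetic in the widened dynahash helpers above; the input values are illustrative, not taken from the patch:

/*
 * my_log2() returns ceil(log2(num)):
 *
 *     my_log2(1000) == 10
 *     my_log2(1024) == 10
 *     my_log2(1025) == 11
 *
 * so next_pow2_int64(1000) == 1024.  Clamping num to PG_INT64_MAX / 2
 * bounds the shift count at 62, and INT64CONST(1) << 62 still fits in a
 * signed int64.  A plain 1L constant would not be safe as the shifted
 * value: on LLP64 platforms such as 64-bit Windows, long is only 32 bits
 * wide.
 */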
diff --git a/src/backend/utils/misc/gen_guc_tables.pl b/src/backend/utils/misc/gen_guc_tables.pl
new file mode 100644
index 0000000000000..bc8233f2d3933
--- /dev/null
+++ b/src/backend/utils/misc/gen_guc_tables.pl
@@ -0,0 +1,131 @@
+#!/usr/bin/perl
+#----------------------------------------------------------------------
+#
+# Generate guc_tables.c from guc_parameters.dat.
+#
+# Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+# Portions Copyright (c) 1994, Regents of the University of California
+#
+# src/backend/utils/misc/gen_guc_tables.pl
+#
+#----------------------------------------------------------------------
+
+use strict;
+use warnings FATAL => 'all';
+
+use FindBin;
+use lib "$FindBin::RealBin/../../catalog";
+use Catalog;
+
+die "Usage: $0 INPUT_FILE OUTPUT_FILE\n" unless @ARGV == 2;
+my ($input_fname, $output_fname) = @ARGV;
+
+my $parse = Catalog::ParseData($input_fname);
+
+open my $ofh, '>', $output_fname
+  or die "$0: could not open \"$output_fname\": $!\n";
+
+print_boilerplate($ofh, $output_fname, 'GUC tables');
+foreach my $type (qw(bool int real string enum))
+{
+	print_one_table($ofh, $type);
+}
+
+close $ofh;
+
+
+# Adds double quotes and escapes as necessary for C strings.
+sub dquote
+{
+	my ($s) = @_;
+
+	return q{"} . $s =~ s/"/\\"/gr . q{"};
+}
+
+# Print GUC table for one type.
+sub print_one_table
+{
+	my ($ofh, $type) = @_;
+	my $Type = ucfirst $type;
+
+	print $ofh "\n\n";
+	print $ofh "struct config_${type} ConfigureNames${Type}[] =\n";
+	print $ofh "{\n";
+
+	foreach my $entry (@{$parse})
+	{
+		next if $entry->{type} ne $type;
+
+		print $ofh "#ifdef $entry->{ifdef}\n" if $entry->{ifdef};
+		print $ofh "\t{\n";
+		printf $ofh "\t\t{%s, %s, %s,\n",
+		  dquote($entry->{name}),
+		  $entry->{context},
+		  $entry->{group};
+		printf $ofh "\t\t\tgettext_noop(%s),\n", dquote($entry->{short_desc});
+		if ($entry->{long_desc})
+		{
+			printf $ofh "\t\t\tgettext_noop(%s)", dquote($entry->{long_desc});
+		}
+		else
+		{
+			print $ofh "\t\t\tNULL";
+		}
+		if ($entry->{flags})
+		{
+			print $ofh ",\n\t\t\t$entry->{flags}\n";
+		}
+		else
+		{
+			print $ofh "\n";
+		}
+		print $ofh "\t\t},\n";
+		print $ofh "\t\t&$entry->{variable},\n";
+		print $ofh "\t\t$entry->{boot_val},";
+		print $ofh " $entry->{min},"
+		  if $entry->{type} eq 'int' || $entry->{type} eq 'real';
+		print $ofh " $entry->{max},"
+		  if $entry->{type} eq 'int' || $entry->{type} eq 'real';
+		print $ofh " $entry->{options},"
+		  if $entry->{type} eq 'enum';
+		print $ofh "\n";
+		printf $ofh "\t\t%s, %s, %s\n",
+		  ($entry->{check_hook} || 'NULL'),
+		  ($entry->{assign_hook} || 'NULL'),
+		  ($entry->{show_hook} || 'NULL');
+		print $ofh "\t},\n";
+		print $ofh "#endif\n" if $entry->{ifdef};
+		print $ofh "\n";
+	}
+
+	print $ofh "\t/* End-of-list marker */\n";
+	print $ofh "\t{{0}}\n";
+	print $ofh "};\n";
+
+	return;
+}
+
+sub print_boilerplate
+{
+	my ($fh, $fname, $descr) = @_;
+	printf $fh <<EOM, $fname, $descr;
+/*-------------------------------------------------------------------------
+ *
+ * %s
+ *    %s
+ *
+ * Generated from guc_parameters.dat by gen_guc_tables.pl.  Do not edit!
+ *
+ *-------------------------------------------------------------------------
+ */
+EOM
+	return;
+}
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ ... @@ define_custom_variable(struct config_generic *variable)
 							   pHolder->gen.sourcefile, pHolder->gen.sourceline);
 
-	/*
-	 * Free up as much as we conveniently can of the placeholder structure.
-	 * (This neglects any stack items, so it's possible for some memory to be
-	 * leaked.  Since this can only happen once per session per variable, it
-	 * doesn't seem worth spending much code on.)
-	 */
-	set_string_field(pHolder, pHolder->variable, NULL);
-	set_string_field(pHolder, &pHolder->reset_val, NULL);
-
-	guc_free(pHolder);
+	/* Now we can free the no-longer-referenced placeholder variable */
+	free_placeholder(pHolder);
 }
 
 /*
@@ -5126,6 +5128,25 @@ reapply_stacked_values(struct config_generic *variable,
 	}
 }
 
+/*
+ * Free up a no-longer-referenced placeholder GUC variable.
+ *
+ * This neglects any stack items, so it's possible for some memory to be
+ * leaked.  Since this can only happen once per session per variable, it
+ * doesn't seem worth spending much code on.
+ */ +static void +free_placeholder(struct config_string *pHolder) +{ + /* Placeholders are always STRING type, so free their values */ + Assert(pHolder->gen.vartype == PGC_STRING); + set_string_field(pHolder, pHolder->variable, NULL); + set_string_field(pHolder, &pHolder->reset_val, NULL); + + guc_free(unconstify(char *, pHolder->gen.name)); + guc_free(pHolder); +} + /* * Functions for extensions to call to define their custom GUC variables. */ @@ -5286,9 +5307,7 @@ MarkGUCPrefixReserved(const char *className) /* * Check for existing placeholders. We must actually remove invalid - * placeholders, else future parallel worker startups will fail. (We - * don't bother trying to free associated memory, since this shouldn't - * happen often.) + * placeholders, else future parallel worker startups will fail. */ hash_seq_init(&status, guc_hashtab); while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL) @@ -5312,6 +5331,8 @@ MarkGUCPrefixReserved(const char *className) NULL); /* Remove it from any lists it's in, too */ RemoveGUCFromLists(var); + /* And free it */ + free_placeholder((struct config_string *) var); } } @@ -6711,6 +6732,7 @@ validate_option_array_item(const char *name, const char *value, { struct config_generic *gconf; + bool reset_custom; /* * There are three cases to consider: @@ -6729,16 +6751,21 @@ validate_option_array_item(const char *name, const char *value, * it's assumed to be fully validated.) * * name is not known and can't be created as a placeholder. Throw error, - * unless skipIfNoPermissions is true, in which case return false. + * unless skipIfNoPermissions or reset_custom is true. If reset_custom is + * true, this is a RESET or RESET ALL operation for an unknown custom GUC + * with a reserved prefix, in which case we want to fall through to the + * placeholder case described in the preceding paragraph (else there'd be + * no way for users to remove them). Otherwise, return false. */ - gconf = find_option(name, true, skipIfNoPermissions, ERROR); - if (!gconf) + reset_custom = (!value && valid_custom_variable_name(name)); + gconf = find_option(name, true, skipIfNoPermissions || reset_custom, ERROR); + if (!gconf && !reset_custom) { /* not known, failed to make a placeholder */ return false; } - if (gconf->flags & GUC_CUSTOM_PLACEHOLDER) + if (!gconf || gconf->flags & GUC_CUSTOM_PLACEHOLDER) { /* * We cannot do any meaningful check on the value, so only permissions diff --git a/src/backend/utils/misc/guc_parameters.dat b/src/backend/utils/misc/guc_parameters.dat new file mode 100644 index 0000000000000..0da01627cfec1 --- /dev/null +++ b/src/backend/utils/misc/guc_parameters.dat @@ -0,0 +1,3478 @@ +#---------------------------------------------------------------------- +# +# Contents of GUC tables. +# +# See src/backend/utils/misc/README for design notes. +# +# Portions Copyright (c) 2000-2025, PostgreSQL Global Development Group +# +# src/backend/utils/misc/guc_parameters.dat +# +#---------------------------------------------------------------------- + +[ + +# TO ADD AN OPTION: +# +# 1. Declare a global variable of type bool, int, double, or char* and +# make use of it. +# +# 2. Decide at what times it's safe to set the option. See guc.h for +# details. +# +# 3. Decide on a name, a default value, upper and lower bounds (if +# applicable), etc. +# +# 4. Add a record below. +# +# 5. Add it to src/backend/utils/misc/postgresql.conf.sample, if +# appropriate. +# +# 6. Don't forget to document the option (at least in config.sgml). +# +# 7. 
If it's a new GUC_LIST_QUOTE option, you must add it to +# variable_is_guc_list_quote() in src/bin/pg_dump/dumputils.c. + +{ name => 'enable_seqscan', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s use of sequential-scan plans.', + flags => 'GUC_EXPLAIN', + variable => 'enable_seqscan', + boot_val => 'true', +}, + +{ name => 'enable_indexscan', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s use of index-scan plans.', + flags => 'GUC_EXPLAIN', + variable => 'enable_indexscan', + boot_val => 'true', +}, + +{ name => 'enable_indexonlyscan', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s use of index-only-scan plans.', + flags => 'GUC_EXPLAIN', + variable => 'enable_indexonlyscan', + boot_val => 'true', +}, + +{ name => 'enable_bitmapscan', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s use of bitmap-scan plans.', + flags => 'GUC_EXPLAIN', + variable => 'enable_bitmapscan', + boot_val => 'true', +}, + +{ name => 'enable_tidscan', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s use of TID scan plans.', + flags => 'GUC_EXPLAIN', + variable => 'enable_tidscan', + boot_val => 'true', +}, + +{ name => 'enable_sort', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s use of explicit sort steps.', + flags => 'GUC_EXPLAIN', + variable => 'enable_sort', + boot_val => 'true', +}, + +{ name => 'enable_incremental_sort', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s use of incremental sort steps.', + flags => 'GUC_EXPLAIN', + variable => 'enable_incremental_sort', + boot_val => 'true', +}, + +{ name => 'enable_hashagg', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s use of hashed aggregation plans.', + flags => 'GUC_EXPLAIN', + variable => 'enable_hashagg', + boot_val => 'true', +}, + +{ name => 'enable_material', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s use of materialization.', + flags => 'GUC_EXPLAIN', + variable => 'enable_material', + boot_val => 'true', +}, + +{ name => 'enable_memoize', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s use of memoization.', + flags => 'GUC_EXPLAIN', + variable => 'enable_memoize', + boot_val => 'true', +}, + +{ name => 'enable_nestloop', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s use of nested-loop join plans.', + flags => 'GUC_EXPLAIN', + variable => 'enable_nestloop', + boot_val => 'true', +}, + +{ name => 'enable_mergejoin', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s use of merge join plans.', + flags => 'GUC_EXPLAIN', + variable => 'enable_mergejoin', + boot_val => 'true', +}, + +{ name => 'enable_hashjoin', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s use of hash join plans.', + flags => 'GUC_EXPLAIN', + variable => 'enable_hashjoin', + boot_val => 'true', +}, + +{ name => 'enable_gathermerge', 
type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s use of gather merge plans.', + flags => 'GUC_EXPLAIN', + variable => 'enable_gathermerge', + boot_val => 'true', +}, + +{ name => 'enable_partitionwise_join', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables partitionwise join.', + flags => 'GUC_EXPLAIN', + variable => 'enable_partitionwise_join', + boot_val => 'false', +}, + +{ name => 'enable_partitionwise_aggregate', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables partitionwise aggregation and grouping.', + flags => 'GUC_EXPLAIN', + variable => 'enable_partitionwise_aggregate', + boot_val => 'false', +}, + +{ name => 'enable_parallel_append', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s use of parallel append plans.', + flags => 'GUC_EXPLAIN', + variable => 'enable_parallel_append', + boot_val => 'true', +}, + +{ name => 'enable_parallel_hash', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s use of parallel hash plans.', + flags => 'GUC_EXPLAIN', + variable => 'enable_parallel_hash', + boot_val => 'true', +}, + +{ name => 'enable_partition_pruning', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables plan-time and execution-time partition pruning.', + long_desc => 'Allows the query planner and executor to compare partition bounds to conditions in the query to determine which partitions must be scanned.', + flags => 'GUC_EXPLAIN', + variable => 'enable_partition_pruning', + boot_val => 'true', +}, + +{ name => 'enable_presorted_aggregate', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s ability to produce plans that provide presorted input for ORDER BY / DISTINCT aggregate functions.', + long_desc => 'Allows the query planner to build plans that provide presorted input for aggregate functions with an ORDER BY / DISTINCT clause. 
When disabled, implicit sorts are always performed during execution.', + flags => 'GUC_EXPLAIN', + variable => 'enable_presorted_aggregate', + boot_val => 'true', +}, + +{ name => 'enable_async_append', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables the planner\'s use of async append plans.', + flags => 'GUC_EXPLAIN', + variable => 'enable_async_append', + boot_val => 'true', +}, + +{ name => 'enable_self_join_elimination', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables removal of unique self-joins.', + flags => 'GUC_EXPLAIN', + variable => 'enable_self_join_elimination', + boot_val => 'true', +}, + +{ name => 'enable_group_by_reordering', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables reordering of GROUP BY keys.', + flags => 'GUC_EXPLAIN', + variable => 'enable_group_by_reordering', + boot_val => 'true', +}, + +{ name => 'enable_distinct_reordering', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables reordering of DISTINCT keys.', + flags => 'GUC_EXPLAIN', + variable => 'enable_distinct_reordering', + boot_val => 'true', +}, + +{ name => 'geqo', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_GEQO', + short_desc => 'Enables genetic query optimization.', + long_desc => 'This algorithm attempts to do planning without exhaustive searching.', + flags => 'GUC_EXPLAIN', + variable => 'enable_geqo', + boot_val => 'true', +}, + +# Not for general use --- used by SET SESSION AUTHORIZATION and SET +# ROLE +{ name => 'is_superuser', type => 'bool', context => 'PGC_INTERNAL', group => 'UNGROUPED', + short_desc => 'Shows whether the current user is a superuser.', + flags => 'GUC_REPORT | GUC_NO_SHOW_ALL | GUC_NO_RESET_ALL | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_ALLOW_IN_PARALLEL', + variable => 'current_role_is_superuser', + boot_val => 'false', +}, + +# This setting itself cannot be set by ALTER SYSTEM to avoid an +# operator turning this setting off by using ALTER SYSTEM, without a +# way to turn it back on. 
+{ name => 'allow_alter_system', type => 'bool', context => 'PGC_SIGHUP', group => 'COMPAT_OPTIONS_OTHER', + short_desc => 'Allows running the ALTER SYSTEM command.', + long_desc => 'Can be set to off for environments where global configuration changes should be made using a different method.', + flags => 'GUC_DISALLOW_IN_AUTO_FILE', + variable => 'AllowAlterSystem', + boot_val => 'true', +}, + +{ name => 'bonjour', type => 'bool', context => 'PGC_POSTMASTER', group => 'CONN_AUTH_SETTINGS', + short_desc => 'Enables advertising the server via Bonjour.', + variable => 'enable_bonjour', + boot_val => 'false', + check_hook => 'check_bonjour', +}, + +{ name => 'track_commit_timestamp', type => 'bool', context => 'PGC_POSTMASTER', group => 'REPLICATION_SENDING', + short_desc => 'Collects transaction commit time.', + variable => 'track_commit_timestamp', + boot_val => 'false', +}, + +{ name => 'ssl', type => 'bool', context => 'PGC_SIGHUP', group => 'CONN_AUTH_SSL', + short_desc => 'Enables SSL connections.', + variable => 'EnableSSL', + boot_val => 'false', + check_hook => 'check_ssl', +}, + +{ name => 'ssl_passphrase_command_supports_reload', type => 'bool', context => 'PGC_SIGHUP', group => 'CONN_AUTH_SSL', + short_desc => 'Controls whether "ssl_passphrase_command" is called during server reload.', + variable => 'ssl_passphrase_command_supports_reload', + boot_val => 'false', +}, + +{ name => 'ssl_prefer_server_ciphers', type => 'bool', context => 'PGC_SIGHUP', group => 'CONN_AUTH_SSL', + short_desc => 'Give priority to server ciphersuite order.', + variable => 'SSLPreferServerCiphers', + boot_val => 'true', +}, + +{ name => 'fsync', type => 'bool', context => 'PGC_SIGHUP', group => 'WAL_SETTINGS', + short_desc => 'Forces synchronization of updates to disk.', + long_desc => 'The server will use the fsync() system call in several places to make sure that updates are physically written to disk. This ensures that a database cluster will recover to a consistent state after an operating system or hardware crash.', + variable => 'enableFsync', + boot_val => 'true', +}, + +{ name => 'ignore_checksum_failure', type => 'bool', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Continues processing after a checksum failure.', + long_desc => 'Detection of a checksum failure normally causes PostgreSQL to report an error, aborting the current transaction. Setting ignore_checksum_failure to true causes the system to ignore the failure (but still report a warning), and continue processing. This behavior could cause crashes or other serious problems. Only has an effect if checksums are enabled.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'ignore_checksum_failure', + boot_val => 'false', +}, + +{ name => 'zero_damaged_pages', type => 'bool', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Continues processing past damaged page headers.', + long_desc => 'Detection of a damaged page header normally causes PostgreSQL to report an error, aborting the current transaction. Setting "zero_damaged_pages" to true causes the system to instead report a warning, zero out the damaged page, and continue processing. 
This behavior will destroy data, namely all the rows on the damaged page.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'zero_damaged_pages', + boot_val => 'false', +}, + +{ name => 'ignore_invalid_pages', type => 'bool', context => 'PGC_POSTMASTER', group => 'DEVELOPER_OPTIONS', + short_desc => 'Continues recovery after an invalid pages failure.', + long_desc => 'Detection of WAL records having references to invalid pages during recovery causes PostgreSQL to raise a PANIC-level error, aborting the recovery. Setting "ignore_invalid_pages" to true causes the system to ignore invalid page references in WAL records (but still report a warning), and continue recovery. This behavior may cause crashes, data loss, propagate or hide corruption, or other serious problems. Only has an effect during recovery or in standby mode.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'ignore_invalid_pages', + boot_val => 'false', +}, + +{ name => 'full_page_writes', type => 'bool', context => 'PGC_SIGHUP', group => 'WAL_SETTINGS', + short_desc => 'Writes full pages to WAL when first modified after a checkpoint.', + long_desc => 'A page write in process during an operating system crash might be only partially written to disk. During recovery, the row changes stored in WAL are not enough to recover. This option writes pages when first modified after a checkpoint to WAL so full recovery is possible.', + variable => 'fullPageWrites', + boot_val => 'true', +}, + +{ name => 'wal_log_hints', type => 'bool', context => 'PGC_POSTMASTER', group => 'WAL_SETTINGS', + short_desc => 'Writes full pages to WAL when first modified after a checkpoint, even for a non-critical modification.', + variable => 'wal_log_hints', + boot_val => 'false', +}, + +{ name => 'wal_init_zero', type => 'bool', context => 'PGC_SUSET', group => 'WAL_SETTINGS', + short_desc => 'Writes zeroes to new WAL files before first use.', + variable => 'wal_init_zero', + boot_val => 'true', +}, + +{ name => 'wal_recycle', type => 'bool', context => 'PGC_SUSET', group => 'WAL_SETTINGS', + short_desc => 'Recycles WAL files by renaming them.', + variable => 'wal_recycle', + boot_val => 'true', +}, + +{ name => 'log_checkpoints', type => 'bool', context => 'PGC_SIGHUP', group => 'LOGGING_WHAT', + short_desc => 'Logs each checkpoint.', + variable => 'log_checkpoints', + boot_val => 'true', +}, + +{ name => 'trace_connection_negotiation', type => 'bool', context => 'PGC_POSTMASTER', group => 'DEVELOPER_OPTIONS', + short_desc => 'Logs details of pre-authentication connection handshake.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'Trace_connection_negotiation', + boot_val => 'false', +}, + +{ name => 'log_disconnections', type => 'bool', context => 'PGC_SU_BACKEND', group => 'LOGGING_WHAT', + short_desc => 'Logs end of a session, including duration.', + variable => 'Log_disconnections', + boot_val => 'false', +}, + +{ name => 'log_replication_commands', type => 'bool', context => 'PGC_SUSET', group => 'LOGGING_WHAT', + short_desc => 'Logs each replication command.', + variable => 'log_replication_commands', + boot_val => 'false', +}, + +{ name => 'debug_assertions', type => 'bool', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS', + short_desc => 'Shows whether the running server has assertion checks enabled.', + flags => 'GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE', + variable => 'assert_enabled', + boot_val => 'DEFAULT_ASSERT_ENABLED', +}, + +{ name => 'exit_on_error', type => 'bool', context => 'PGC_USERSET', group => 'ERROR_HANDLING_OPTIONS', + 
short_desc => 'Terminate session on any error.', + variable => 'ExitOnAnyError', + boot_val => 'false', +}, + +{ name => 'restart_after_crash', type => 'bool', context => 'PGC_SIGHUP', group => 'ERROR_HANDLING_OPTIONS', + short_desc => 'Reinitialize server after backend crash.', + variable => 'restart_after_crash', + boot_val => 'true', +}, + +{ name => 'remove_temp_files_after_crash', type => 'bool', context => 'PGC_SIGHUP', group => 'DEVELOPER_OPTIONS', + short_desc => 'Remove temporary files after backend crash.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'remove_temp_files_after_crash', + boot_val => 'true', +}, + +{ name => 'send_abort_for_crash', type => 'bool', context => 'PGC_SIGHUP', group => 'DEVELOPER_OPTIONS', + short_desc => 'Send SIGABRT not SIGQUIT to child processes after backend crash.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'send_abort_for_crash', + boot_val => 'false', +}, + +{ name => 'send_abort_for_kill', type => 'bool', context => 'PGC_SIGHUP', group => 'DEVELOPER_OPTIONS', + short_desc => 'Send SIGABRT not SIGKILL to stuck child processes.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'send_abort_for_kill', + boot_val => 'false', +}, + +{ name => 'log_duration', type => 'bool', context => 'PGC_SUSET', group => 'LOGGING_WHAT', + short_desc => 'Logs the duration of each completed SQL statement.', + variable => 'log_duration', + boot_val => 'false', +}, + +{ name => 'debug_copy_parse_plan_trees', type => 'bool', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Set this to force all parse and plan trees to be passed through copyObject(), to facilitate catching errors and omissions in copyObject().', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'Debug_copy_parse_plan_trees', + boot_val => 'DEFAULT_DEBUG_COPY_PARSE_PLAN_TREES', + ifdef => 'DEBUG_NODE_TESTS_ENABLED', +}, + +{ name => 'debug_write_read_parse_plan_trees', type => 'bool', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Set this to force all parse and plan trees to be passed through outfuncs.c/readfuncs.c, to facilitate catching errors and omissions in those modules.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'Debug_write_read_parse_plan_trees', + boot_val => 'DEFAULT_DEBUG_READ_WRITE_PARSE_PLAN_TREES', + ifdef => 'DEBUG_NODE_TESTS_ENABLED', +}, + +{ name => 'debug_raw_expression_coverage_test', type => 'bool', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Set this to force all raw parse trees for DML statements to be scanned by raw_expression_tree_walker(), to facilitate catching errors and omissions in that function.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'Debug_raw_expression_coverage_test', + boot_val => 'DEFAULT_DEBUG_RAW_EXPRESSION_COVERAGE_TEST', + ifdef => 'DEBUG_NODE_TESTS_ENABLED', +}, + +{ name => 'debug_print_raw_parse', type => 'bool', context => 'PGC_USERSET', group => 'LOGGING_WHAT', + short_desc => 'Logs each query\'s raw parse tree.', + variable => 'Debug_print_raw_parse', + boot_val => 'false', +}, + +{ name => 'debug_print_parse', type => 'bool', context => 'PGC_USERSET', group => 'LOGGING_WHAT', + short_desc => 'Logs each query\'s parse tree.', + variable => 'Debug_print_parse', + boot_val => 'false', +}, + +{ name => 'debug_print_rewritten', type => 'bool', context => 'PGC_USERSET', group => 'LOGGING_WHAT', + short_desc => 'Logs each query\'s rewritten parse tree.', + variable => 'Debug_print_rewritten', + boot_val => 'false', +}, + +{ name => 'debug_print_plan', type => 'bool', context => 
'PGC_USERSET', group => 'LOGGING_WHAT', + short_desc => 'Logs each query\'s execution plan.', + variable => 'Debug_print_plan', + boot_val => 'false', +}, + +{ name => 'debug_pretty_print', type => 'bool', context => 'PGC_USERSET', group => 'LOGGING_WHAT', + short_desc => 'Indents parse and plan tree displays.', + variable => 'Debug_pretty_print', + boot_val => 'true', +}, + +{ name => 'log_parser_stats', type => 'bool', context => 'PGC_SUSET', group => 'STATS_MONITORING', + short_desc => 'Writes parser performance statistics to the server log.', + variable => 'log_parser_stats', + boot_val => 'false', + check_hook => 'check_stage_log_stats', +}, + +{ name => 'log_planner_stats', type => 'bool', context => 'PGC_SUSET', group => 'STATS_MONITORING', + short_desc => 'Writes planner performance statistics to the server log.', + variable => 'log_planner_stats', + boot_val => 'false', + check_hook => 'check_stage_log_stats', +}, + +{ name => 'log_executor_stats', type => 'bool', context => 'PGC_SUSET', group => 'STATS_MONITORING', + short_desc => 'Writes executor performance statistics to the server log.', + variable => 'log_executor_stats', + boot_val => 'false', + check_hook => 'check_stage_log_stats', +}, + +{ name => 'log_statement_stats', type => 'bool', context => 'PGC_SUSET', group => 'STATS_MONITORING', + short_desc => 'Writes cumulative performance statistics to the server log.', + variable => 'log_statement_stats', + boot_val => 'false', + check_hook => 'check_log_stats', +}, + +{ name => 'log_btree_build_stats', type => 'bool', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Logs system resource usage statistics (memory and CPU) on various B-tree operations.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'log_btree_build_stats', + boot_val => 'false', + ifdef => 'BTREE_BUILD_STATS', +}, + +{ name => 'track_activities', type => 'bool', context => 'PGC_SUSET', group => 'STATS_CUMULATIVE', + short_desc => 'Collects information about executing commands.', + long_desc => 'Enables the collection of information on the currently executing command of each session, along with the time at which that command began execution.', + variable => 'pgstat_track_activities', + boot_val => 'true', +}, + +{ name => 'track_counts', type => 'bool', context => 'PGC_SUSET', group => 'STATS_CUMULATIVE', + short_desc => 'Collects statistics on database activity.', + variable => 'pgstat_track_counts', + boot_val => 'true', +}, + +{ name => 'track_cost_delay_timing', type => 'bool', context => 'PGC_SUSET', group => 'STATS_CUMULATIVE', + short_desc => 'Collects timing statistics for cost-based vacuum delay.', + variable => 'track_cost_delay_timing', + boot_val => 'false', +}, + +{ name => 'track_io_timing', type => 'bool', context => 'PGC_SUSET', group => 'STATS_CUMULATIVE', + short_desc => 'Collects timing statistics for database I/O activity.', + variable => 'track_io_timing', + boot_val => 'false', +}, + +{ name => 'track_wal_io_timing', type => 'bool', context => 'PGC_SUSET', group => 'STATS_CUMULATIVE', + short_desc => 'Collects timing statistics for WAL I/O activity.', + variable => 'track_wal_io_timing', + boot_val => 'false', +}, + +{ name => 'update_process_title', type => 'bool', context => 'PGC_SUSET', group => 'PROCESS_TITLE', + short_desc => 'Updates the process title to show the active SQL command.', + long_desc => 'Enables updating of the process title every time a new SQL command is received by the server.', + variable => 'update_process_title', + boot_val => 
'DEFAULT_UPDATE_PROCESS_TITLE', +}, + +{ name => 'autovacuum', type => 'bool', context => 'PGC_SIGHUP', group => 'VACUUM_AUTOVACUUM', + short_desc => 'Starts the autovacuum subprocess.', + variable => 'autovacuum_start_daemon', + boot_val => 'true', +}, + +{ name => 'trace_notify', type => 'bool', context => 'PGC_USERSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Generates debugging output for LISTEN and NOTIFY.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'Trace_notify', + boot_val => 'false', +}, + +{ name => 'trace_locks', type => 'bool', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Emits information about lock usage.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'Trace_locks', + boot_val => 'false', + ifdef => 'LOCK_DEBUG', +}, + +{ name => 'trace_userlocks', type => 'bool', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Emits information about user lock usage.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'Trace_userlocks', + boot_val => 'false', + ifdef => 'LOCK_DEBUG', +}, + +{ name => 'trace_lwlocks', type => 'bool', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Emits information about lightweight lock usage.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'Trace_lwlocks', + boot_val => 'false', + ifdef => 'LOCK_DEBUG', +}, + +{ name => 'debug_deadlocks', type => 'bool', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Dumps information about all current locks when a deadlock timeout occurs.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'Debug_deadlocks', + boot_val => 'false', + ifdef => 'LOCK_DEBUG', +}, + +{ name => 'log_lock_waits', type => 'bool', context => 'PGC_SUSET', group => 'LOGGING_WHAT', + short_desc => 'Logs long lock waits.', + variable => 'log_lock_waits', + boot_val => 'false', +}, + +{ name => 'log_lock_failures', type => 'bool', context => 'PGC_SUSET', group => 'LOGGING_WHAT', + short_desc => 'Logs lock failures.', + variable => 'log_lock_failures', + boot_val => 'false', +}, + +{ name => 'log_recovery_conflict_waits', type => 'bool', context => 'PGC_SIGHUP', group => 'LOGGING_WHAT', + short_desc => 'Logs standby recovery conflict waits.', + variable => 'log_recovery_conflict_waits', + boot_val => 'false', +}, + +{ name => 'log_hostname', type => 'bool', context => 'PGC_SIGHUP', group => 'LOGGING_WHAT', + short_desc => 'Logs the host name in the connection logs.', + long_desc => 'By default, connection logs only show the IP address of the connecting host. If you want them to show the host name you can turn this on, but depending on your host name resolution setup it might impose a non-negligible performance penalty.', + variable => 'log_hostname', + boot_val => 'false', +}, + +{ name => 'transform_null_equals', type => 'bool', context => 'PGC_USERSET', group => 'COMPAT_OPTIONS_OTHER', + short_desc => 'Treats "expr=NULL" as "expr IS NULL".', + long_desc => 'When turned on, expressions of the form expr = NULL (or NULL = expr) are treated as expr IS NULL, that is, they return true if expr evaluates to the null value, and false otherwise. 
The correct behavior of expr = NULL is to always return null (unknown).', + variable => 'Transform_null_equals', + boot_val => 'false', +}, + +{ name => 'default_transaction_read_only', type => 'bool', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Sets the default read-only status of new transactions.', + flags => 'GUC_REPORT', + variable => 'DefaultXactReadOnly', + boot_val => 'false', +}, + +{ name => 'transaction_read_only', type => 'bool', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Sets the current transaction\'s read-only status.', + flags => 'GUC_NO_RESET | GUC_NO_RESET_ALL | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE', + variable => 'XactReadOnly', + boot_val => 'false', + check_hook => 'check_transaction_read_only', +}, + +{ name => 'default_transaction_deferrable', type => 'bool', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Sets the default deferrable status of new transactions.', + variable => 'DefaultXactDeferrable', + boot_val => 'false', +}, + +{ name => 'transaction_deferrable', type => 'bool', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Whether to defer a read-only serializable transaction until it can be executed with no possible serialization failures.', + flags => 'GUC_NO_RESET | GUC_NO_RESET_ALL | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE', + variable => 'XactDeferrable', + boot_val => 'false', + check_hook => 'check_transaction_deferrable', +}, + +{ name => 'row_security', type => 'bool', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Enables row security.', + long_desc => 'When enabled, row security will be applied to all users.', + variable => 'row_security', + boot_val => 'true', +}, + +{ name => 'check_function_bodies', type => 'bool', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Check routine bodies during CREATE FUNCTION and CREATE PROCEDURE.', + variable => 'check_function_bodies', + boot_val => 'true', +}, + +{ name => 'array_nulls', type => 'bool', context => 'PGC_USERSET', group => 'COMPAT_OPTIONS_PREVIOUS', + short_desc => 'Enables input of NULL elements in arrays.', + long_desc => 'When turned on, unquoted NULL in an array input value means a null value; otherwise it is taken literally.', + variable => 'Array_nulls', + boot_val => 'true', +}, + +# WITH OIDS support, and consequently default_with_oids, was removed +# in PostgreSQL 12, but we tolerate the parameter being set to false +# to avoid unnecessarily breaking older dump files. 
+{ name => 'default_with_oids', type => 'bool', context => 'PGC_USERSET', group => 'COMPAT_OPTIONS_PREVIOUS', + short_desc => 'WITH OIDS is no longer supported; this can only be false.', + flags => 'GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE', + variable => 'default_with_oids', + boot_val => 'false', + check_hook => 'check_default_with_oids', +}, + +{ name => 'logging_collector', type => 'bool', context => 'PGC_POSTMASTER', group => 'LOGGING_WHERE', + short_desc => 'Start a subprocess to capture stderr, csvlog and/or jsonlog into log files.', + variable => 'Logging_collector', + boot_val => 'false', +}, + +{ name => 'log_truncate_on_rotation', type => 'bool', context => 'PGC_SIGHUP', group => 'LOGGING_WHERE', + short_desc => 'Truncate existing log files of same name during log rotation.', + variable => 'Log_truncate_on_rotation', + boot_val => 'false', +}, + +{ name => 'trace_sort', type => 'bool', context => 'PGC_USERSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Emit information about resource usage in sorting.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'trace_sort', + boot_val => 'false', +}, + +# this is undocumented because not exposed in a standard build +{ name => 'trace_syncscan', type => 'bool', context => 'PGC_USERSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Generate debugging output for synchronized scanning.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'trace_syncscan', + boot_val => 'false', + ifdef => 'TRACE_SYNCSCAN', +}, + +# this is undocumented because not exposed in a standard build +{ name => 'optimize_bounded_sort', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_METHOD', + short_desc => 'Enables bounded sorting using heap sort.', + flags => 'GUC_NOT_IN_SAMPLE | GUC_EXPLAIN', + variable => 'optimize_bounded_sort', + boot_val => 'true', + ifdef => 'DEBUG_BOUNDED_SORT', +}, + +{ name => 'wal_debug', type => 'bool', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Emit WAL-related debugging output.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'XLOG_DEBUG', + boot_val => 'false', + ifdef => 'WAL_DEBUG', +}, + +{ name => 'integer_datetimes', type => 'bool', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS', + short_desc => 'Shows whether datetimes are integer based.', + flags => 'GUC_REPORT | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE', + variable => 'integer_datetimes', + boot_val => 'true', +}, + +{ name => 'krb_caseins_users', type => 'bool', context => 'PGC_SIGHUP', group => 'CONN_AUTH_AUTH', + short_desc => 'Sets whether Kerberos and GSSAPI user names should be treated as case-insensitive.', + variable => 'pg_krb_caseins_users', + boot_val => 'false', +}, + +{ name => 'gss_accept_delegation', type => 'bool', context => 'PGC_SIGHUP', group => 'CONN_AUTH_AUTH', + short_desc => 'Sets whether GSSAPI delegation should be accepted from the client.', + variable => 'pg_gss_accept_delegation', + boot_val => 'false', +}, + +{ name => 'escape_string_warning', type => 'bool', context => 'PGC_USERSET', group => 'COMPAT_OPTIONS_PREVIOUS', + short_desc => 'Warn about backslash escapes in ordinary string literals.', + variable => 'escape_string_warning', + boot_val => 'true', +}, + +{ name => 'standard_conforming_strings', type => 'bool', context => 'PGC_USERSET', group => 'COMPAT_OPTIONS_PREVIOUS', + short_desc => 'Causes \'...\' strings to treat backslashes literally.', + flags => 'GUC_REPORT', + variable => 'standard_conforming_strings', + boot_val => 'true', +}, + +{ name => 'synchronize_seqscans', type => 'bool', context 
=> 'PGC_USERSET', group => 'COMPAT_OPTIONS_PREVIOUS', + short_desc => 'Enables synchronized sequential scans.', + variable => 'synchronize_seqscans', + boot_val => 'true', +}, + +{ name => 'recovery_target_inclusive', type => 'bool', context => 'PGC_POSTMASTER', group => 'WAL_RECOVERY_TARGET', + short_desc => 'Sets whether to include or exclude transaction with recovery target.', + variable => 'recoveryTargetInclusive', + boot_val => 'true', +}, + +{ name => 'summarize_wal', type => 'bool', context => 'PGC_SIGHUP', group => 'WAL_SUMMARIZATION', + short_desc => 'Starts the WAL summarizer process to enable incremental backup.', + variable => 'summarize_wal', + boot_val => 'false', +}, + +{ name => 'hot_standby', type => 'bool', context => 'PGC_POSTMASTER', group => 'REPLICATION_STANDBY', + short_desc => 'Allows connections and queries during recovery.', + variable => 'EnableHotStandby', + boot_val => 'true', +}, + +{ name => 'hot_standby_feedback', type => 'bool', context => 'PGC_SIGHUP', group => 'REPLICATION_STANDBY', + short_desc => 'Allows feedback from a hot standby to the primary that will avoid query conflicts.', + variable => 'hot_standby_feedback', + boot_val => 'false', +}, + +{ name => 'in_hot_standby', type => 'bool', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS', + short_desc => 'Shows whether hot standby is currently active.', + flags => 'GUC_REPORT | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE', + variable => 'in_hot_standby_guc', + boot_val => 'false', + show_hook => 'show_in_hot_standby', +}, + +{ name => 'allow_system_table_mods', type => 'bool', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Allows modifications of the structure of system tables.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'allowSystemTableMods', + boot_val => 'false', +}, + +{ name => 'ignore_system_indexes', type => 'bool', context => 'PGC_BACKEND', group => 'DEVELOPER_OPTIONS', + short_desc => 'Disables reading from system indexes.', + long_desc => 'It does not prevent updating the indexes, so it is safe to use. 
The worst consequence is slowness.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'IgnoreSystemIndexes', + boot_val => 'false', +}, + +{ name => 'allow_in_place_tablespaces', type => 'bool', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Allows tablespaces directly inside pg_tblspc, for testing.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'allow_in_place_tablespaces', + boot_val => 'false', +}, + +{ name => 'lo_compat_privileges', type => 'bool', context => 'PGC_SUSET', group => 'COMPAT_OPTIONS_PREVIOUS', + short_desc => 'Enables backward compatibility mode for privilege checks on large objects.', + long_desc => 'Skips privilege checks when reading or modifying large objects, for compatibility with PostgreSQL releases prior to 9.0.', + variable => 'lo_compat_privileges', + boot_val => 'false', +}, + +{ name => 'quote_all_identifiers', type => 'bool', context => 'PGC_USERSET', group => 'COMPAT_OPTIONS_PREVIOUS', + short_desc => 'When generating SQL fragments, quote all identifiers.', + variable => 'quote_all_identifiers', + boot_val => 'false', +}, + +{ name => 'data_checksums', type => 'bool', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS', + short_desc => 'Shows whether data checksums are turned on for this cluster.', + flags => 'GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_RUNTIME_COMPUTED', + variable => 'data_checksums', + boot_val => 'false', +}, + +{ name => 'syslog_sequence_numbers', type => 'bool', context => 'PGC_SIGHUP', group => 'LOGGING_WHERE', + short_desc => 'Add sequence number to syslog messages to avoid duplicate suppression.', + variable => 'syslog_sequence_numbers', + boot_val => 'true', +}, + +{ name => 'syslog_split_messages', type => 'bool', context => 'PGC_SIGHUP', group => 'LOGGING_WHERE', + short_desc => 'Split messages sent to syslog by lines and to fit into 1024 bytes.', + variable => 'syslog_split_messages', + boot_val => 'true', +}, + +{ name => 'parallel_leader_participation', type => 'bool', context => 'PGC_USERSET', group => 'RESOURCES_WORKER_PROCESSES', + short_desc => 'Controls whether Gather and Gather Merge also run subplans.', + long_desc => 'Should gather nodes also run subplans or just gather tuples?', + flags => 'GUC_EXPLAIN', + variable => 'parallel_leader_participation', + boot_val => 'true', +}, + +{ name => 'jit', type => 'bool', context => 'PGC_USERSET', group => 'QUERY_TUNING_OTHER', + short_desc => 'Allow JIT compilation.', + flags => 'GUC_EXPLAIN', + variable => 'jit_enabled', + boot_val => 'true', +}, + +# This is not guaranteed to be available, but given it's a developer +# oriented option, it doesn't seem worth adding code checking +# availability. 
+{ name => 'jit_debugging_support', type => 'bool', context => 'PGC_SU_BACKEND', group => 'DEVELOPER_OPTIONS',
+  short_desc => 'Register JIT-compiled functions with debugger.',
+  flags => 'GUC_NOT_IN_SAMPLE',
+  variable => 'jit_debugging_support',
+  boot_val => 'false',
+},
+
+{ name => 'jit_dump_bitcode', type => 'bool', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS',
+  short_desc => 'Write out LLVM bitcode to facilitate JIT debugging.',
+  flags => 'GUC_NOT_IN_SAMPLE',
+  variable => 'jit_dump_bitcode',
+  boot_val => 'false',
+},
+
+{ name => 'jit_expressions', type => 'bool', context => 'PGC_USERSET', group => 'DEVELOPER_OPTIONS',
+  short_desc => 'Allow JIT compilation of expressions.',
+  flags => 'GUC_NOT_IN_SAMPLE',
+  variable => 'jit_expressions',
+  boot_val => 'true',
+},
+
+# This is not guaranteed to be available, but given it's a developer
+# oriented option, it doesn't seem worth adding code checking
+# availability.
+{ name => 'jit_profiling_support', type => 'bool', context => 'PGC_SU_BACKEND', group => 'DEVELOPER_OPTIONS',
+  short_desc => 'Register JIT-compiled functions with perf profiler.',
+  flags => 'GUC_NOT_IN_SAMPLE',
+  variable => 'jit_profiling_support',
+  boot_val => 'false',
+},
+
+{ name => 'jit_tuple_deforming', type => 'bool', context => 'PGC_USERSET', group => 'DEVELOPER_OPTIONS',
+  short_desc => 'Allow JIT compilation of tuple deforming.',
+  flags => 'GUC_NOT_IN_SAMPLE',
+  variable => 'jit_tuple_deforming',
+  boot_val => 'true',
+},
+
+{ name => 'data_sync_retry', type => 'bool', context => 'PGC_POSTMASTER', group => 'ERROR_HANDLING_OPTIONS',
+  short_desc => 'Whether to continue running after a failure to sync data files.',
+  variable => 'data_sync_retry',
+  boot_val => 'false',
+},
+
+{ name => 'wal_receiver_create_temp_slot', type => 'bool', context => 'PGC_SIGHUP', group => 'REPLICATION_STANDBY',
+  short_desc => 'Sets whether a WAL receiver should create a temporary replication slot if no permanent slot is configured.',
+  variable => 'wal_receiver_create_temp_slot',
+  boot_val => 'false',
+},
+
+{ name => 'event_triggers', type => 'bool', context => 'PGC_SUSET', group => 'CLIENT_CONN_STATEMENT',
+  short_desc => 'Enables event triggers.',
+  long_desc => 'When enabled, event triggers will fire for all applicable statements.',
+  variable => 'event_triggers',
+  boot_val => 'true',
+},
+
+{ name => 'sync_replication_slots', type => 'bool', context => 'PGC_SIGHUP', group => 'REPLICATION_STANDBY',
+  short_desc => 'Enables a physical standby to synchronize logical failover replication slots from the primary server.',
+  variable => 'sync_replication_slots',
+  boot_val => 'false',
+},
+
+{ name => 'md5_password_warnings', type => 'bool', context => 'PGC_USERSET', group => 'CONN_AUTH_AUTH',
+  short_desc => 'Enables deprecation warnings for MD5 passwords.',
+  variable => 'md5_password_warnings',
+  boot_val => 'true',
+},
+
+{ name => 'vacuum_truncate', type => 'bool', context => 'PGC_USERSET', group => 'VACUUM_DEFAULT',
+  short_desc => 'Enables vacuum to truncate empty pages at the end of the table.',
+  variable => 'vacuum_truncate',
+  boot_val => 'true',
+},
+
+{ name => 'archive_timeout', type => 'int', context => 'PGC_SIGHUP', group => 'WAL_ARCHIVING',
+  short_desc => 'Sets the amount of time to wait before forcing a switch to the next WAL file.',
+  long_desc => '0 disables the timeout.',
+  flags => 'GUC_UNIT_S',
+  variable => 'XLogArchiveTimeout',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX / 2',
+},
+
+{ name => 'post_auth_delay', type => 'int', context => 'PGC_BACKEND', group => 'DEVELOPER_OPTIONS',
+  short_desc => 'Sets the amount of time to wait after authentication on connection startup.',
+  long_desc => 'This allows attaching a debugger to the process.',
+  flags => 'GUC_NOT_IN_SAMPLE | GUC_UNIT_S',
+  variable => 'PostAuthDelay',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX / 1000000',
+},
+
+{ name => 'default_statistics_target', type => 'int', context => 'PGC_USERSET', group => 'QUERY_TUNING_OTHER',
+  short_desc => 'Sets the default statistics target.',
+  long_desc => 'This applies to table columns that have not had a column-specific target set via ALTER TABLE SET STATISTICS.',
+  variable => 'default_statistics_target',
+  boot_val => '100',
+  min => '1',
+  max => 'MAX_STATISTICS_TARGET',
+},
+
+{ name => 'from_collapse_limit', type => 'int', context => 'PGC_USERSET', group => 'QUERY_TUNING_OTHER',
+  short_desc => 'Sets the FROM-list size beyond which subqueries are not collapsed.',
+  long_desc => 'The planner will merge subqueries into upper queries if the resulting FROM list would have no more than this many items.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'from_collapse_limit',
+  boot_val => '8',
+  min => '1',
+  max => 'INT_MAX',
+},
+
+{ name => 'join_collapse_limit', type => 'int', context => 'PGC_USERSET', group => 'QUERY_TUNING_OTHER',
+  short_desc => 'Sets the FROM-list size beyond which JOIN constructs are not flattened.',
+  long_desc => 'The planner will flatten explicit JOIN constructs into lists of FROM items whenever a list of no more than this many items would result.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'join_collapse_limit',
+  boot_val => '8',
+  min => '1',
+  max => 'INT_MAX',
+},
+
+{ name => 'geqo_threshold', type => 'int', context => 'PGC_USERSET', group => 'QUERY_TUNING_GEQO',
+  short_desc => 'Sets the threshold of FROM items beyond which GEQO is used.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'geqo_threshold',
+  boot_val => '12',
+  min => '2',
+  max => 'INT_MAX',
+},
+
+{ name => 'geqo_effort', type => 'int', context => 'PGC_USERSET', group => 'QUERY_TUNING_GEQO',
+  short_desc => 'GEQO: effort is used to set the default for other GEQO parameters.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'Geqo_effort',
+  boot_val => 'DEFAULT_GEQO_EFFORT',
+  min => 'MIN_GEQO_EFFORT',
+  max => 'MAX_GEQO_EFFORT',
+},
+
+{ name => 'geqo_pool_size', type => 'int', context => 'PGC_USERSET', group => 'QUERY_TUNING_GEQO',
+  short_desc => 'GEQO: number of individuals in the population.',
+  long_desc => '0 means use a suitable default value.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'Geqo_pool_size',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+{ name => 'geqo_generations', type => 'int', context => 'PGC_USERSET', group => 'QUERY_TUNING_GEQO',
+  short_desc => 'GEQO: number of iterations of the algorithm.',
+  long_desc => '0 means use a suitable default value.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'Geqo_generations',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+# This is PGC_SUSET to prevent hiding from log_lock_waits.
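+# (When log_lock_waits is enabled, a lock wait is logged once it lasts
+# longer than deadlock_timeout, so an unprivileged user who could raise
+# this setting could keep their lock waits out of the server log.)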
+{ name => 'deadlock_timeout', type => 'int', context => 'PGC_SUSET', group => 'LOCK_MANAGEMENT',
+  short_desc => 'Sets the time to wait on a lock before checking for deadlock.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'DeadlockTimeout',
+  boot_val => '1000',
+  min => '1',
+  max => 'INT_MAX',
+},
+
+{ name => 'max_standby_archive_delay', type => 'int', context => 'PGC_SIGHUP', group => 'REPLICATION_STANDBY',
+  short_desc => 'Sets the maximum delay before canceling queries when a hot standby server is processing archived WAL data.',
+  long_desc => '-1 means wait forever.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'max_standby_archive_delay',
+  boot_val => '30 * 1000',
+  min => '-1',
+  max => 'INT_MAX',
+},
+
+{ name => 'max_standby_streaming_delay', type => 'int', context => 'PGC_SIGHUP', group => 'REPLICATION_STANDBY',
+  short_desc => 'Sets the maximum delay before canceling queries when a hot standby server is processing streamed WAL data.',
+  long_desc => '-1 means wait forever.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'max_standby_streaming_delay',
+  boot_val => '30 * 1000',
+  min => '-1',
+  max => 'INT_MAX',
+},
+
+{ name => 'recovery_min_apply_delay', type => 'int', context => 'PGC_SIGHUP', group => 'REPLICATION_STANDBY',
+  short_desc => 'Sets the minimum delay for applying changes during recovery.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'recovery_min_apply_delay',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+{ name => 'wal_receiver_status_interval', type => 'int', context => 'PGC_SIGHUP', group => 'REPLICATION_STANDBY',
+  short_desc => 'Sets the maximum interval between WAL receiver status reports to the sending server.',
+  flags => 'GUC_UNIT_S',
+  variable => 'wal_receiver_status_interval',
+  boot_val => '10',
+  min => '0',
+  max => 'INT_MAX / 1000',
+},
+
+{ name => 'wal_receiver_timeout', type => 'int', context => 'PGC_SIGHUP', group => 'REPLICATION_STANDBY',
+  short_desc => 'Sets the maximum wait time to receive data from the sending server.',
+  long_desc => '0 disables the timeout.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'wal_receiver_timeout',
+  boot_val => '60 * 1000',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+{ name => 'max_connections', type => 'int', context => 'PGC_POSTMASTER', group => 'CONN_AUTH_SETTINGS',
+  short_desc => 'Sets the maximum number of concurrent connections.',
+  variable => 'MaxConnections',
+  boot_val => '100',
+  min => '1',
+  max => 'MAX_BACKENDS',
+},
+
+# see max_connections
+{ name => 'superuser_reserved_connections', type => 'int', context => 'PGC_POSTMASTER', group => 'CONN_AUTH_SETTINGS',
+  short_desc => 'Sets the number of connection slots reserved for superusers.',
+  variable => 'SuperuserReservedConnections',
+  boot_val => '3',
+  min => '0',
+  max => 'MAX_BACKENDS',
+},
+
+{ name => 'reserved_connections', type => 'int', context => 'PGC_POSTMASTER', group => 'CONN_AUTH_SETTINGS',
+  short_desc => 'Sets the number of connection slots reserved for roles with privileges of pg_use_reserved_connections.',
+  variable => 'ReservedConnections',
+  boot_val => '0',
+  min => '0',
+  max => 'MAX_BACKENDS',
+},
+
+{ name => 'min_dynamic_shared_memory', type => 'int', context => 'PGC_POSTMASTER', group => 'RESOURCES_MEM',
+  short_desc => 'Amount of dynamic shared memory reserved at startup.',
+  flags => 'GUC_UNIT_MB',
+  variable => 'min_dynamic_shared_memory',
+  boot_val => '0',
+  min => '0',
+  max => '(int) Min((size_t) INT_MAX, SIZE_MAX / (1024 * 1024))',
+},
+
+# We sometimes multiply the number of shared buffers by two without
+# checking for overflow, so we mustn't allow more than INT_MAX / 2.
+{ name => 'shared_buffers', type => 'int', context => 'PGC_POSTMASTER', group => 'RESOURCES_MEM',
+  short_desc => 'Sets the number of shared memory buffers used by the server.',
+  flags => 'GUC_UNIT_BLOCKS',
+  variable => 'NBuffers',
+  boot_val => '16384',
+  min => '16',
+  max => 'INT_MAX / 2',
+},
+
+{ name => 'vacuum_buffer_usage_limit', type => 'int', context => 'PGC_USERSET', group => 'RESOURCES_MEM',
+  short_desc => 'Sets the buffer pool size for VACUUM, ANALYZE, and autovacuum.',
+  flags => 'GUC_UNIT_KB',
+  variable => 'VacuumBufferUsageLimit',
+  boot_val => '2048',
+  min => '0',
+  max => 'MAX_BAS_VAC_RING_SIZE_KB',
+  check_hook => 'check_vacuum_buffer_usage_limit',
+},
+
+{ name => 'shared_memory_size', type => 'int', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS',
+  short_desc => 'Shows the size of the server\'s main shared memory area (rounded up to the nearest MB).',
+  flags => 'GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_UNIT_MB | GUC_RUNTIME_COMPUTED',
+  variable => 'shared_memory_size_mb',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+{ name => 'shared_memory_size_in_huge_pages', type => 'int', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS',
+  short_desc => 'Shows the number of huge pages needed for the main shared memory area.',
+  long_desc => '-1 means huge pages are not supported.',
+  flags => 'GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_RUNTIME_COMPUTED',
+  variable => 'shared_memory_size_in_huge_pages',
+  boot_val => '-1',
+  min => '-1',
+  max => 'INT_MAX',
+},
+
+{ name => 'num_os_semaphores', type => 'int', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS',
+  short_desc => 'Shows the number of semaphores required for the server.',
+  flags => 'GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_RUNTIME_COMPUTED',
+  variable => 'num_os_semaphores',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+{ name => 'commit_timestamp_buffers', type => 'int', context => 'PGC_POSTMASTER', group => 'RESOURCES_MEM',
+  short_desc => 'Sets the size of the dedicated buffer pool used for the commit timestamp cache.',
+  long_desc => '0 means use a fraction of "shared_buffers".',
+  flags => 'GUC_UNIT_BLOCKS',
+  variable => 'commit_timestamp_buffers',
+  boot_val => '0',
+  min => '0',
+  max => 'SLRU_MAX_ALLOWED_BUFFERS',
+  check_hook => 'check_commit_ts_buffers',
+},
+
+{ name => 'multixact_member_buffers', type => 'int', context => 'PGC_POSTMASTER', group => 'RESOURCES_MEM',
+  short_desc => 'Sets the size of the dedicated buffer pool used for the MultiXact member cache.',
+  flags => 'GUC_UNIT_BLOCKS',
+  variable => 'multixact_member_buffers',
+  boot_val => '32',
+  min => '16',
+  max => 'SLRU_MAX_ALLOWED_BUFFERS',
+  check_hook => 'check_multixact_member_buffers',
+},
+
+{ name => 'multixact_offset_buffers', type => 'int', context => 'PGC_POSTMASTER', group => 'RESOURCES_MEM',
+  short_desc => 'Sets the size of the dedicated buffer pool used for the MultiXact offset cache.',
+  flags => 'GUC_UNIT_BLOCKS',
+  variable => 'multixact_offset_buffers',
+  boot_val => '16',
+  min => '16',
+  max => 'SLRU_MAX_ALLOWED_BUFFERS',
+  check_hook => 'check_multixact_offset_buffers',
+},
+
+{ name => 'notify_buffers', type => 'int', context => 'PGC_POSTMASTER', group => 'RESOURCES_MEM',
+  short_desc => 'Sets the size of the dedicated buffer pool used for the LISTEN/NOTIFY message cache.',
+  flags => 'GUC_UNIT_BLOCKS',
+  variable => 'notify_buffers',
+  boot_val => '16',
+  min => '16',
+  max => 'SLRU_MAX_ALLOWED_BUFFERS',
+  check_hook => 'check_notify_buffers',
+},
+
+{ name => 'serializable_buffers', type => 'int', context => 'PGC_POSTMASTER', group => 'RESOURCES_MEM',
+  short_desc => 'Sets the size of the dedicated buffer pool used for the serializable transaction cache.',
+  flags => 'GUC_UNIT_BLOCKS',
+  variable => 'serializable_buffers',
+  boot_val => '32',
+  min => '16',
+  max => 'SLRU_MAX_ALLOWED_BUFFERS',
+  check_hook => 'check_serial_buffers',
+},
+
+{ name => 'subtransaction_buffers', type => 'int', context => 'PGC_POSTMASTER', group => 'RESOURCES_MEM',
+  short_desc => 'Sets the size of the dedicated buffer pool used for the subtransaction cache.',
+  long_desc => '0 means use a fraction of "shared_buffers".',
+  flags => 'GUC_UNIT_BLOCKS',
+  variable => 'subtransaction_buffers',
+  boot_val => '0',
+  min => '0',
+  max => 'SLRU_MAX_ALLOWED_BUFFERS',
+  check_hook => 'check_subtrans_buffers',
+},
+
+{ name => 'transaction_buffers', type => 'int', context => 'PGC_POSTMASTER', group => 'RESOURCES_MEM',
+  short_desc => 'Sets the size of the dedicated buffer pool used for the transaction status cache.',
+  long_desc => '0 means use a fraction of "shared_buffers".',
+  flags => 'GUC_UNIT_BLOCKS',
+  variable => 'transaction_buffers',
+  boot_val => '0',
+  min => '0',
+  max => 'SLRU_MAX_ALLOWED_BUFFERS',
+  check_hook => 'check_transaction_buffers',
+},
+
+{ name => 'temp_buffers', type => 'int', context => 'PGC_USERSET', group => 'RESOURCES_MEM',
+  short_desc => 'Sets the maximum number of temporary buffers used by each session.',
+  flags => 'GUC_UNIT_BLOCKS | GUC_EXPLAIN',
+  variable => 'num_temp_buffers',
+  boot_val => '1024',
+  min => '100',
+  max => 'INT_MAX / 2',
+  check_hook => 'check_temp_buffers',
+},
+
+{ name => 'port', type => 'int', context => 'PGC_POSTMASTER', group => 'CONN_AUTH_SETTINGS',
+  short_desc => 'Sets the TCP port the server listens on.',
+  variable => 'PostPortNumber',
+  boot_val => 'DEF_PGPORT',
+  min => '1',
+  max => '65535',
+},
+
+{ name => 'unix_socket_permissions', type => 'int', context => 'PGC_POSTMASTER', group => 'CONN_AUTH_SETTINGS',
+  short_desc => 'Sets the access permissions of the Unix-domain socket.',
+  long_desc => 'Unix-domain sockets use the usual Unix file system permission set. The parameter value is expected to be a numeric mode specification in the form accepted by the chmod and umask system calls. (To use the customary octal format the number must start with a 0 (zero).)',
+  variable => 'Unix_socket_permissions',
+  boot_val => '0777',
+  min => '0000',
+  max => '0777',
+  show_hook => 'show_unix_socket_permissions',
+},
+
+{ name => 'log_file_mode', type => 'int', context => 'PGC_SIGHUP', group => 'LOGGING_WHERE',
+  short_desc => 'Sets the file permissions for log files.',
+  long_desc => 'The parameter value is expected to be a numeric mode specification in the form accepted by the chmod and umask system calls. (To use the customary octal format the number must start with a 0 (zero).)',
+  variable => 'Log_file_mode',
+  boot_val => '0600',
+  min => '0000',
+  max => '0777',
+  show_hook => 'show_log_file_mode',
+},
+
+{ name => 'data_directory_mode', type => 'int', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS',
+  short_desc => 'Shows the mode of the data directory.',
+  long_desc => 'The parameter value is a numeric mode specification in the form accepted by the chmod and umask system calls. (To use the customary octal format the number must start with a 0 (zero).)',
+  flags => 'GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_RUNTIME_COMPUTED',
+  variable => 'data_directory_mode',
+  boot_val => '0700',
+  min => '0000',
+  max => '0777',
+  show_hook => 'show_data_directory_mode',
+},
+
+{ name => 'work_mem', type => 'int', context => 'PGC_USERSET', group => 'RESOURCES_MEM',
+  short_desc => 'Sets the maximum memory to be used for query workspaces.',
+  long_desc => 'This much memory can be used by each internal sort operation and hash table before switching to temporary disk files.',
+  flags => 'GUC_UNIT_KB | GUC_EXPLAIN',
+  variable => 'work_mem',
+  boot_val => '4096',
+  min => '64',
+  max => 'MAX_KILOBYTES',
+},
+
+# Dynamic shared memory has a higher overhead than local memory
+# contexts, so when testing low-memory scenarios that could use shared
+# memory, the recommended minimum is 1MB.
+{ name => 'maintenance_work_mem', type => 'int', context => 'PGC_USERSET', group => 'RESOURCES_MEM',
+  short_desc => 'Sets the maximum memory to be used for maintenance operations.',
+  long_desc => 'This includes operations such as VACUUM and CREATE INDEX.',
+  flags => 'GUC_UNIT_KB',
+  variable => 'maintenance_work_mem',
+  boot_val => '65536',
+  min => '64',
+  max => 'MAX_KILOBYTES',
+},
+
+{ name => 'logical_decoding_work_mem', type => 'int', context => 'PGC_USERSET', group => 'RESOURCES_MEM',
+  short_desc => 'Sets the maximum memory to be used for logical decoding.',
+  long_desc => 'This much memory can be used by each internal reorder buffer before spilling to disk.',
+  flags => 'GUC_UNIT_KB',
+  variable => 'logical_decoding_work_mem',
+  boot_val => '65536',
+  min => '64',
+  max => 'MAX_KILOBYTES',
+},
+
+# We use the hopefully-safely-small value of 100kB as the compiled-in
+# default for max_stack_depth. InitializeGUCOptions will increase it
+# if possible, depending on the actual platform-specific stack limit.
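+# (check_max_stack_depth then rejects settings that would exceed the
+# platform's actual stack rlimit, less a safety margin.)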
+{ name => 'max_stack_depth', type => 'int', context => 'PGC_SUSET', group => 'RESOURCES_MEM',
+  short_desc => 'Sets the maximum stack depth, in kilobytes.',
+  flags => 'GUC_UNIT_KB',
+  variable => 'max_stack_depth',
+  boot_val => '100',
+  min => '100',
+  max => 'MAX_KILOBYTES',
+  check_hook => 'check_max_stack_depth',
+  assign_hook => 'assign_max_stack_depth',
+},
+
+{ name => 'temp_file_limit', type => 'int', context => 'PGC_SUSET', group => 'RESOURCES_DISK',
+  short_desc => 'Limits the total size of all temporary files used by each process.',
+  long_desc => '-1 means no limit.',
+  flags => 'GUC_UNIT_KB',
+  variable => 'temp_file_limit',
+  boot_val => '-1',
+  min => '-1',
+  max => 'INT_MAX',
+},
+
+{ name => 'vacuum_cost_page_hit', type => 'int', context => 'PGC_USERSET', group => 'VACUUM_COST_DELAY',
+  short_desc => 'Vacuum cost for a page found in the buffer cache.',
+  variable => 'VacuumCostPageHit',
+  boot_val => '1',
+  min => '0',
+  max => '10000',
+},
+
+{ name => 'vacuum_cost_page_miss', type => 'int', context => 'PGC_USERSET', group => 'VACUUM_COST_DELAY',
+  short_desc => 'Vacuum cost for a page not found in the buffer cache.',
+  variable => 'VacuumCostPageMiss',
+  boot_val => '2',
+  min => '0',
+  max => '10000',
+},
+
+{ name => 'vacuum_cost_page_dirty', type => 'int', context => 'PGC_USERSET', group => 'VACUUM_COST_DELAY',
+  short_desc => 'Vacuum cost for a page dirtied by vacuum.',
+  variable => 'VacuumCostPageDirty',
+  boot_val => '20',
+  min => '0',
+  max => '10000',
+},
+
+{ name => 'vacuum_cost_limit', type => 'int', context => 'PGC_USERSET', group => 'VACUUM_COST_DELAY',
+  short_desc => 'Vacuum cost amount available before napping.',
+  variable => 'VacuumCostLimit',
+  boot_val => '200',
+  min => '1',
+  max => '10000',
+},
+
+{ name => 'autovacuum_vacuum_cost_limit', type => 'int', context => 'PGC_SIGHUP', group => 'VACUUM_AUTOVACUUM',
+  short_desc => 'Vacuum cost amount available before napping, for autovacuum.',
+  long_desc => '-1 means use "vacuum_cost_limit".',
+  variable => 'autovacuum_vac_cost_limit',
+  boot_val => '-1',
+  min => '-1',
+  max => '10000',
+},
+
+{ name => 'max_files_per_process', type => 'int', context => 'PGC_POSTMASTER', group => 'RESOURCES_KERNEL',
+  short_desc => 'Sets the maximum number of files each server process is allowed to open simultaneously.',
+  variable => 'max_files_per_process',
+  boot_val => '1000',
+  min => '64',
+  max => 'INT_MAX',
+},
+
+# See also CheckRequiredParameterValues() if this parameter changes
+{ name => 'max_prepared_transactions', type => 'int', context => 'PGC_POSTMASTER', group => 'RESOURCES_MEM',
+  short_desc => 'Sets the maximum number of simultaneously prepared transactions.',
+  variable => 'max_prepared_xacts',
+  boot_val => '0',
+  min => '0',
+  max => 'MAX_BACKENDS',
+},
+
+{ name => 'trace_lock_oidmin', type => 'int', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS',
+  short_desc => 'Sets the minimum OID of tables for tracking locks.',
+  long_desc => 'Is used to avoid output on system tables.',
+  flags => 'GUC_NOT_IN_SAMPLE',
+  variable => 'Trace_lock_oidmin',
+  boot_val => 'FirstNormalObjectId',
+  min => '0',
+  max => 'INT_MAX',
+  ifdef => 'LOCK_DEBUG',
+},
+
+{ name => 'trace_lock_table', type => 'int', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS',
+  short_desc => 'Sets the OID of the table with unconditionally lock tracing.',
+  flags => 'GUC_NOT_IN_SAMPLE',
+  variable => 'Trace_lock_table',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+  ifdef => 'LOCK_DEBUG',
+},
+
+{ name => 'statement_timeout', type => 'int', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT',
+  short_desc => 'Sets the maximum allowed duration of any statement.',
+  long_desc => '0 disables the timeout.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'StatementTimeout',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+{ name => 'lock_timeout', type => 'int', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT',
+  short_desc => 'Sets the maximum allowed duration of any wait for a lock.',
+  long_desc => '0 disables the timeout.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'LockTimeout',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+{ name => 'idle_in_transaction_session_timeout', type => 'int', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT',
+  short_desc => 'Sets the maximum allowed idle time between queries, when in a transaction.',
+  long_desc => '0 disables the timeout.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'IdleInTransactionSessionTimeout',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+{ name => 'transaction_timeout', type => 'int', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT',
+  short_desc => 'Sets the maximum allowed duration of any transaction within a session (not a prepared transaction).',
+  long_desc => '0 disables the timeout.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'TransactionTimeout',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+  assign_hook => 'assign_transaction_timeout',
+},
+
+{ name => 'idle_session_timeout', type => 'int', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT',
+  short_desc => 'Sets the maximum allowed idle time between queries, when not in a transaction.',
+  long_desc => '0 disables the timeout.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'IdleSessionTimeout',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+{ name => 'vacuum_freeze_min_age', type => 'int', context => 'PGC_USERSET', group => 'VACUUM_FREEZING',
+  short_desc => 'Minimum age at which VACUUM should freeze a table row.',
+  variable => 'vacuum_freeze_min_age',
+  boot_val => '50000000',
+  min => '0',
+  max => '1000000000',
+},
+
+{ name => 'vacuum_freeze_table_age', type => 'int', context => 'PGC_USERSET', group => 'VACUUM_FREEZING',
+  short_desc => 'Age at which VACUUM should scan whole table to freeze tuples.',
+  variable => 'vacuum_freeze_table_age',
+  boot_val => '150000000',
+  min => '0',
+  max => '2000000000',
+},
+
+{ name => 'vacuum_multixact_freeze_min_age', type => 'int', context => 'PGC_USERSET', group => 'VACUUM_FREEZING',
+  short_desc => 'Minimum age at which VACUUM should freeze a MultiXactId in a table row.',
+  variable => 'vacuum_multixact_freeze_min_age',
+  boot_val => '5000000',
+  min => '0',
+  max => '1000000000',
+},
+
+{ name => 'vacuum_multixact_freeze_table_age', type => 'int', context => 'PGC_USERSET', group => 'VACUUM_FREEZING',
+  short_desc => 'Multixact age at which VACUUM should scan whole table to freeze tuples.',
+  variable => 'vacuum_multixact_freeze_table_age',
+  boot_val => '150000000',
+  min => '0',
+  max => '2000000000',
+},
+
+{ name => 'vacuum_failsafe_age', type => 'int', context => 'PGC_USERSET', group => 'VACUUM_FREEZING',
+  short_desc => 'Age at which VACUUM should trigger failsafe to avoid a wraparound outage.',
+  variable => 'vacuum_failsafe_age',
+  boot_val => '1600000000',
+  min => '0',
+  max => '2100000000',
+},
+
+{ name => 'vacuum_multixact_failsafe_age', type => 'int', context => 'PGC_USERSET', group => 'VACUUM_FREEZING',
+  short_desc => 'Multixact age at which VACUUM should trigger failsafe to avoid a wraparound outage.',
+  variable => 'vacuum_multixact_failsafe_age',
+  boot_val => '1600000000',
+  min => '0',
+  max => '2100000000',
+},
+
+# See also CheckRequiredParameterValues() if this parameter changes
+{ name => 'max_locks_per_transaction', type => 'int', context => 'PGC_POSTMASTER', group => 'LOCK_MANAGEMENT',
+  short_desc => 'Sets the maximum number of locks per transaction.',
+  long_desc => 'The shared lock table is sized on the assumption that at most "max_locks_per_transaction" objects per server process or prepared transaction will need to be locked at any one time.',
+  variable => 'max_locks_per_xact',
+  boot_val => '64',
+  min => '10',
+  max => 'INT_MAX',
+},
+
+{ name => 'max_pred_locks_per_transaction', type => 'int', context => 'PGC_POSTMASTER', group => 'LOCK_MANAGEMENT',
+  short_desc => 'Sets the maximum number of predicate locks per transaction.',
+  long_desc => 'The shared predicate lock table is sized on the assumption that at most "max_pred_locks_per_transaction" objects per server process or prepared transaction will need to be locked at any one time.',
+  variable => 'max_predicate_locks_per_xact',
+  boot_val => '64',
+  min => '10',
+  max => 'INT_MAX',
+},
+
+{ name => 'max_pred_locks_per_relation', type => 'int', context => 'PGC_SIGHUP', group => 'LOCK_MANAGEMENT',
+  short_desc => 'Sets the maximum number of predicate-locked pages and tuples per relation.',
+  long_desc => 'If more than this total of pages and tuples in the same relation are locked by a connection, those locks are replaced by a relation-level lock.',
+  variable => 'max_predicate_locks_per_relation',
+  boot_val => '-2',
+  min => 'INT_MIN',
+  max => 'INT_MAX',
+},
+
+{ name => 'max_pred_locks_per_page', type => 'int', context => 'PGC_SIGHUP', group => 'LOCK_MANAGEMENT',
+  short_desc => 'Sets the maximum number of predicate-locked tuples per page.',
+  long_desc => 'If more than this number of tuples on the same page are locked by a connection, those locks are replaced by a page-level lock.',
+  variable => 'max_predicate_locks_per_page',
+  boot_val => '2',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+{ name => 'authentication_timeout', type => 'int', context => 'PGC_SIGHUP', group => 'CONN_AUTH_AUTH',
+  short_desc => 'Sets the maximum allowed time to complete client authentication.',
+  flags => 'GUC_UNIT_S',
+  variable => 'AuthenticationTimeout',
+  boot_val => '60',
+  min => '1',
+  max => '600',
+},
+
+# Not for general use
+{ name => 'pre_auth_delay', type => 'int', context => 'PGC_SIGHUP', group => 'DEVELOPER_OPTIONS',
+  short_desc => 'Sets the amount of time to wait before authentication on connection startup.',
+  long_desc => 'This allows attaching a debugger to the process.',
+  flags => 'GUC_NOT_IN_SAMPLE | GUC_UNIT_S',
+  variable => 'PreAuthDelay',
+  boot_val => '0',
+  min => '0',
+  max => '60',
+},
+
+{ name => 'max_notify_queue_pages', type => 'int', context => 'PGC_POSTMASTER', group => 'RESOURCES_DISK',
+  short_desc => 'Sets the maximum number of allocated pages for NOTIFY / LISTEN queue.',
+  variable => 'max_notify_queue_pages',
+  boot_val => '1048576',
+  min => '64',
+  max => 'INT_MAX',
+},
+
+{ name => 'wal_decode_buffer_size', type => 'int', context => 'PGC_POSTMASTER', group => 'WAL_RECOVERY',
+  short_desc => 'Buffer size for reading ahead in the WAL during recovery.',
+  long_desc => 'Maximum distance to read ahead in the WAL to prefetch referenced data blocks.',
+  flags => 'GUC_UNIT_BYTE',
+  variable => 'wal_decode_buffer_size',
+  boot_val => '512 * 1024',
+  min => '64 * 1024',
+  max => 'MaxAllocSize',
+},
+
+{ name => 'wal_keep_size', type => 'int', context => 'PGC_SIGHUP', group => 'REPLICATION_SENDING',
+  short_desc => 'Sets the size of WAL files held for standby servers.',
+  flags => 'GUC_UNIT_MB',
+  variable => 'wal_keep_size_mb',
+  boot_val => '0',
+  min => '0',
+  max => 'MAX_KILOBYTES',
+},
+
+{ name => 'min_wal_size', type => 'int', context => 'PGC_SIGHUP', group => 'WAL_CHECKPOINTS',
+  short_desc => 'Sets the minimum size to shrink the WAL to.',
+  flags => 'GUC_UNIT_MB',
+  variable => 'min_wal_size_mb',
+  boot_val => 'DEFAULT_MIN_WAL_SEGS * (DEFAULT_XLOG_SEG_SIZE / (1024 * 1024))',
+  min => '2',
+  max => 'MAX_KILOBYTES',
+},
+
+{ name => 'max_wal_size', type => 'int', context => 'PGC_SIGHUP', group => 'WAL_CHECKPOINTS',
+  short_desc => 'Sets the WAL size that triggers a checkpoint.',
+  flags => 'GUC_UNIT_MB',
+  variable => 'max_wal_size_mb',
+  boot_val => 'DEFAULT_MAX_WAL_SEGS * (DEFAULT_XLOG_SEG_SIZE / (1024 * 1024))',
+  min => '2',
+  max => 'MAX_KILOBYTES',
+  assign_hook => 'assign_max_wal_size',
+},
+
+{ name => 'checkpoint_timeout', type => 'int', context => 'PGC_SIGHUP', group => 'WAL_CHECKPOINTS',
+  short_desc => 'Sets the maximum time between automatic WAL checkpoints.',
+  flags => 'GUC_UNIT_S',
+  variable => 'CheckPointTimeout',
+  boot_val => '300',
+  min => '30',
+  max => '86400',
+},
+
+{ name => 'checkpoint_warning', type => 'int', context => 'PGC_SIGHUP', group => 'WAL_CHECKPOINTS',
+  short_desc => 'Sets the maximum time before warning if checkpoints triggered by WAL volume happen too frequently.',
+  long_desc => 'Write a message to the server log if checkpoints caused by the filling of WAL segment files happen more frequently than this amount of time. 0 disables the warning.',
+  flags => 'GUC_UNIT_S',
+  variable => 'CheckPointWarning',
+  boot_val => '30',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+{ name => 'checkpoint_flush_after', type => 'int', context => 'PGC_SIGHUP', group => 'WAL_CHECKPOINTS',
+  short_desc => 'Number of pages after which previously performed writes are flushed to disk.',
+  long_desc => '0 disables forced writeback.',
+  flags => 'GUC_UNIT_BLOCKS',
+  variable => 'checkpoint_flush_after',
+  boot_val => 'DEFAULT_CHECKPOINT_FLUSH_AFTER',
+  min => '0',
+  max => 'WRITEBACK_MAX_PENDING_FLUSHES',
+},
+
+{ name => 'wal_buffers', type => 'int', context => 'PGC_POSTMASTER', group => 'WAL_SETTINGS',
+  short_desc => 'Sets the number of disk-page buffers in shared memory for WAL.',
+  long_desc => '-1 means use a fraction of "shared_buffers".',
+  flags => 'GUC_UNIT_XBLOCKS',
+  variable => 'XLOGbuffers',
+  boot_val => '-1',
+  min => '-1',
+  max => '(INT_MAX / XLOG_BLCKSZ)',
+  check_hook => 'check_wal_buffers',
+},
+
+{ name => 'wal_writer_delay', type => 'int', context => 'PGC_SIGHUP', group => 'WAL_SETTINGS',
+  short_desc => 'Time between WAL flushes performed in the WAL writer.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'WalWriterDelay',
+  boot_val => '200',
+  min => '1',
+  max => '10000',
+},
+
+{ name => 'wal_writer_flush_after', type => 'int', context => 'PGC_SIGHUP', group => 'WAL_SETTINGS',
+  short_desc => 'Amount of WAL written out by WAL writer that triggers a flush.',
+  flags => 'GUC_UNIT_XBLOCKS',
+  variable => 'WalWriterFlushAfter',
+  boot_val => 'DEFAULT_WAL_WRITER_FLUSH_AFTER',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+{ name => 'wal_skip_threshold', type => 'int', context => 'PGC_USERSET', group => 'WAL_SETTINGS',
+  short_desc => 'Minimum size of new file to fsync instead of writing WAL.',
+  flags => 'GUC_UNIT_KB',
+  variable => 'wal_skip_threshold',
+  boot_val => '2048',
+  min => '0',
+  max => 'MAX_KILOBYTES',
+},
+
+{ name => 'max_wal_senders', type => 'int', context => 'PGC_POSTMASTER', group => 'REPLICATION_SENDING',
+  short_desc => 'Sets the maximum number of simultaneously running WAL sender processes.',
+  variable => 'max_wal_senders',
+  boot_val => '10',
+  min => '0',
+  max => 'MAX_BACKENDS',
+},
+
+# see max_wal_senders
+{ name => 'max_replication_slots', type => 'int', context => 'PGC_POSTMASTER', group => 'REPLICATION_SENDING',
+  short_desc => 'Sets the maximum number of simultaneously defined replication slots.',
+  variable => 'max_replication_slots',
+  boot_val => '10',
+  min => '0',
+  max => 'MAX_BACKENDS /* XXX? */',
+},
+
+{ name => 'max_slot_wal_keep_size', type => 'int', context => 'PGC_SIGHUP', group => 'REPLICATION_SENDING',
+  short_desc => 'Sets the maximum WAL size that can be reserved by replication slots.',
+  long_desc => 'Replication slots will be marked as failed, and segments released for deletion or recycling, if this much space is occupied by WAL on disk. -1 means no maximum.',
+  flags => 'GUC_UNIT_MB',
+  variable => 'max_slot_wal_keep_size_mb',
+  boot_val => '-1',
+  min => '-1',
+  max => 'MAX_KILOBYTES',
+},
+
+{ name => 'wal_sender_timeout', type => 'int', context => 'PGC_USERSET', group => 'REPLICATION_SENDING',
+  short_desc => 'Sets the maximum time to wait for WAL replication.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'wal_sender_timeout',
+  boot_val => '60 * 1000',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+{ name => 'idle_replication_slot_timeout', type => 'int', context => 'PGC_SIGHUP', group => 'REPLICATION_SENDING',
+  short_desc => 'Sets the duration a replication slot can remain idle before it is invalidated.',
+  flags => 'GUC_UNIT_S',
+  variable => 'idle_replication_slot_timeout_secs',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+# we have no microseconds designation, so can't supply units here
+{ name => 'commit_delay', type => 'int', context => 'PGC_SUSET', group => 'WAL_SETTINGS',
+  short_desc => 'Sets the delay in microseconds between transaction commit and flushing WAL to disk.',
+  variable => 'CommitDelay',
+  boot_val => '0',
+  min => '0',
+  max => '100000',
+},
+
+{ name => 'commit_siblings', type => 'int', context => 'PGC_USERSET', group => 'WAL_SETTINGS',
+  short_desc => 'Sets the minimum number of concurrent open transactions required before performing "commit_delay".',
+  variable => 'CommitSiblings',
+  boot_val => '5',
+  min => '0',
+  max => '1000',
+},
+
+{ name => 'extra_float_digits', type => 'int', context => 'PGC_USERSET', group => 'CLIENT_CONN_LOCALE',
+  short_desc => 'Sets the number of digits displayed for floating-point values.',
+  long_desc => 'This affects real, double precision, and geometric data types. A zero or negative parameter value is added to the standard number of digits (FLT_DIG or DBL_DIG as appropriate). Any value greater than zero selects precise output mode.',
+  variable => 'extra_float_digits',
+  boot_val => '1',
+  min => '-15',
+  max => '3',
+},
+
+{ name => 'log_min_duration_sample', type => 'int', context => 'PGC_SUSET', group => 'LOGGING_WHEN',
+  short_desc => 'Sets the minimum execution time above which a sample of statements will be logged. Sampling is determined by "log_statement_sample_rate".',
+  long_desc => '-1 disables sampling. 0 means sample all statements.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'log_min_duration_sample',
+  boot_val => '-1',
+  min => '-1',
+  max => 'INT_MAX',
+},
+
+{ name => 'log_min_duration_statement', type => 'int', context => 'PGC_SUSET', group => 'LOGGING_WHEN',
+  short_desc => 'Sets the minimum execution time above which all statements will be logged.',
+  long_desc => '-1 disables logging statement durations. 0 means log all statement durations.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'log_min_duration_statement',
+  boot_val => '-1',
+  min => '-1',
+  max => 'INT_MAX',
+},
+
+{ name => 'log_autovacuum_min_duration', type => 'int', context => 'PGC_SIGHUP', group => 'LOGGING_WHAT',
+  short_desc => 'Sets the minimum execution time above which autovacuum actions will be logged.',
+  long_desc => '-1 disables logging autovacuum actions. 0 means log all autovacuum actions.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'Log_autovacuum_min_duration',
+  boot_val => '600000',
+  min => '-1',
+  max => 'INT_MAX',
+},
+
+{ name => 'log_parameter_max_length', type => 'int', context => 'PGC_SUSET', group => 'LOGGING_WHAT',
+  short_desc => 'Sets the maximum length in bytes of data logged for bind parameter values when logging statements.',
+  long_desc => '-1 means log values in full.',
+  flags => 'GUC_UNIT_BYTE',
+  variable => 'log_parameter_max_length',
+  boot_val => '-1',
+  min => '-1',
+  max => 'INT_MAX / 2',
+},
+
+{ name => 'log_parameter_max_length_on_error', type => 'int', context => 'PGC_USERSET', group => 'LOGGING_WHAT',
+  short_desc => 'Sets the maximum length in bytes of data logged for bind parameter values when logging statements, on error.',
+  long_desc => '-1 means log values in full.',
+  flags => 'GUC_UNIT_BYTE',
+  variable => 'log_parameter_max_length_on_error',
+  boot_val => '0',
+  min => '-1',
+  max => 'INT_MAX / 2',
+},
+
+{ name => 'bgwriter_delay', type => 'int', context => 'PGC_SIGHUP', group => 'RESOURCES_BGWRITER',
+  short_desc => 'Background writer sleep time between rounds.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'BgWriterDelay',
+  boot_val => '200',
+  min => '10',
+  max => '10000',
+},
+
+# Same upper limit as shared_buffers
+{ name => 'bgwriter_lru_maxpages', type => 'int', context => 'PGC_SIGHUP', group => 'RESOURCES_BGWRITER',
+  short_desc => 'Background writer maximum number of LRU pages to flush per round.',
+  long_desc => '0 disables background writing.',
+  variable => 'bgwriter_lru_maxpages',
+  boot_val => '100',
+  min => '0',
+  max => 'INT_MAX / 2',
+},
+
+{ name => 'bgwriter_flush_after', type => 'int', context => 'PGC_SIGHUP', group => 'RESOURCES_BGWRITER',
+  short_desc => 'Number of pages after which previously performed writes are flushed to disk.',
+  long_desc => '0 disables forced writeback.',
+  flags => 'GUC_UNIT_BLOCKS',
+  variable => 'bgwriter_flush_after',
+  boot_val => 'DEFAULT_BGWRITER_FLUSH_AFTER',
+  min => '0',
+  max => 'WRITEBACK_MAX_PENDING_FLUSHES',
+},
+
+{ name => 'effective_io_concurrency', type => 'int', context => 'PGC_USERSET', group => 'RESOURCES_IO',
+  short_desc => 'Number of simultaneous requests that can be handled efficiently by the disk subsystem.',
+  long_desc => '0 disables simultaneous requests.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'effective_io_concurrency',
+  boot_val => 'DEFAULT_EFFECTIVE_IO_CONCURRENCY',
+  min => '0',
+  max => 'MAX_IO_CONCURRENCY',
+},
+
+{ name => 'maintenance_io_concurrency', type => 'int', context => 'PGC_USERSET', group => 'RESOURCES_IO',
+  short_desc => 'A variant of "effective_io_concurrency" that is used for maintenance work.',
+  long_desc => '0 disables simultaneous requests.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'maintenance_io_concurrency',
+  boot_val => 'DEFAULT_MAINTENANCE_IO_CONCURRENCY',
+  min => '0',
+  max => 'MAX_IO_CONCURRENCY',
+  assign_hook => 'assign_maintenance_io_concurrency',
+},
+
+{ name => 'io_max_combine_limit', type => 'int', context => 'PGC_POSTMASTER', group => 'RESOURCES_IO',
+  short_desc => 'Server-wide limit that clamps io_combine_limit.',
+  flags => 'GUC_UNIT_BLOCKS',
+  variable => 'io_max_combine_limit',
+  boot_val => 'DEFAULT_IO_COMBINE_LIMIT',
+  min => '1',
+  max => 'MAX_IO_COMBINE_LIMIT',
+  assign_hook => 'assign_io_max_combine_limit',
+},
+
+{ name => 'io_combine_limit', type => 'int', context => 'PGC_USERSET', group => 'RESOURCES_IO',
+  short_desc => 'Limit on the size of data reads and writes.',
+  flags => 'GUC_UNIT_BLOCKS',
+  variable => 'io_combine_limit_guc',
+  boot_val => 'DEFAULT_IO_COMBINE_LIMIT',
+  min => '1',
+  max => 'MAX_IO_COMBINE_LIMIT',
+  assign_hook => 'assign_io_combine_limit',
+},
+
+{ name => 'io_max_concurrency', type => 'int', context => 'PGC_POSTMASTER', group => 'RESOURCES_IO',
+  short_desc => 'Max number of IOs that one process can execute simultaneously.',
+  variable => 'io_max_concurrency',
+  boot_val => '-1',
+  min => '-1',
+  max => '1024',
+  check_hook => 'check_io_max_concurrency',
+},
+
+{ name => 'io_workers', type => 'int', context => 'PGC_SIGHUP', group => 'RESOURCES_IO',
+  short_desc => 'Number of IO worker processes, for io_method=worker.',
+  variable => 'io_workers',
+  boot_val => '3',
+  min => '1',
+  max => 'MAX_IO_WORKERS',
+},
+
+{ name => 'backend_flush_after', type => 'int', context => 'PGC_USERSET', group => 'RESOURCES_IO',
+  short_desc => 'Number of pages after which previously performed writes are flushed to disk.',
+  long_desc => '0 disables forced writeback.',
+  flags => 'GUC_UNIT_BLOCKS',
+  variable => 'backend_flush_after',
+  boot_val => 'DEFAULT_BACKEND_FLUSH_AFTER',
+  min => '0',
+  max => 'WRITEBACK_MAX_PENDING_FLUSHES',
+},
+
+{ name => 'max_worker_processes', type => 'int', context => 'PGC_POSTMASTER', group => 'RESOURCES_WORKER_PROCESSES',
+  short_desc => 'Maximum number of concurrent worker processes.',
+  variable => 'max_worker_processes',
+  boot_val => '8',
+  min => '0',
+  max => 'MAX_BACKENDS',
+},
+
+{ name => 'max_logical_replication_workers', type => 'int', context => 'PGC_POSTMASTER', group => 'REPLICATION_SUBSCRIBERS',
+  short_desc => 'Maximum number of logical replication worker processes.',
+  variable => 'max_logical_replication_workers',
+  boot_val => '4',
+  min => '0',
+  max => 'MAX_BACKENDS',
+},
+
+{ name => 'max_sync_workers_per_subscription', type => 'int', context => 'PGC_SIGHUP', group => 'REPLICATION_SUBSCRIBERS',
+  short_desc => 'Maximum number of table synchronization workers per subscription.',
+  variable => 'max_sync_workers_per_subscription',
+  boot_val => '2',
+  min => '0',
+  max => 'MAX_BACKENDS',
+},
+
+{ name => 'max_parallel_apply_workers_per_subscription', type => 'int', context => 'PGC_SIGHUP', group => 'REPLICATION_SUBSCRIBERS',
+  short_desc => 'Maximum number of parallel apply workers per subscription.',
+  variable => 'max_parallel_apply_workers_per_subscription',
+  boot_val => '2',
+  min => '0',
+  max => 'MAX_PARALLEL_WORKER_LIMIT',
+},
+
+{ name => 'max_active_replication_origins', type => 'int', context => 'PGC_POSTMASTER', group => 'REPLICATION_SUBSCRIBERS',
+  short_desc => 'Sets the maximum number of active replication origins.',
+  variable => 'max_active_replication_origins',
+  boot_val => '10',
+  min => '0',
+  max => 'MAX_BACKENDS',
+},
+
+{ name => 'log_rotation_age', type => 'int', context => 'PGC_SIGHUP', group => 'LOGGING_WHERE',
+  short_desc => 'Sets the amount of time to wait before forcing log file rotation.',
+  long_desc => '0 disables time-based creation of new log files.',
+  flags => 'GUC_UNIT_MIN',
+  variable => 'Log_RotationAge',
+  boot_val => 'HOURS_PER_DAY * MINS_PER_HOUR',
+  min => '0',
+  max => 'INT_MAX / SECS_PER_MINUTE',
+},
+
+{ name => 'log_rotation_size', type => 'int', context => 'PGC_SIGHUP', group => 'LOGGING_WHERE',
+  short_desc => 'Sets the maximum size a log file can reach before being rotated.',
+  long_desc => '0 disables size-based creation of new log files.',
+  flags => 'GUC_UNIT_KB',
+  variable => 'Log_RotationSize',
+  boot_val => '10 * 1024',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+{ name => 'max_function_args', type => 'int', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS',
+  short_desc => 'Shows the maximum number of function arguments.',
+  flags => 'GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE',
+  variable => 'max_function_args',
+  boot_val => 'FUNC_MAX_ARGS',
+  min => 'FUNC_MAX_ARGS',
+  max => 'FUNC_MAX_ARGS',
+},
+
+{ name => 'max_index_keys', type => 'int', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS',
+  short_desc => 'Shows the maximum number of index keys.',
+  flags => 'GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE',
+  variable => 'max_index_keys',
+  boot_val => 'INDEX_MAX_KEYS',
+  min => 'INDEX_MAX_KEYS',
+  max => 'INDEX_MAX_KEYS',
+},
+
+{ name => 'max_identifier_length', type => 'int', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS',
+  short_desc => 'Shows the maximum identifier length.',
+  flags => 'GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE',
+  variable => 'max_identifier_length',
+  boot_val => 'NAMEDATALEN - 1',
+  min => 'NAMEDATALEN - 1',
+  max => 'NAMEDATALEN - 1',
+},
+
+{ name => 'block_size', type => 'int', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS',
+  short_desc => 'Shows the size of a disk block.',
+  flags => 'GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE',
+  variable => 'block_size',
+  boot_val => 'BLCKSZ',
+  min => 'BLCKSZ',
+  max => 'BLCKSZ',
+},
+
+{ name => 'segment_size', type => 'int', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS',
+  short_desc => 'Shows the number of pages per disk file.',
+  flags => 'GUC_UNIT_BLOCKS | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE',
+  variable => 'segment_size',
+  boot_val => 'RELSEG_SIZE',
+  min => 'RELSEG_SIZE',
+  max => 'RELSEG_SIZE',
+},
+
+{ name => 'wal_block_size', type => 'int', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS',
+  short_desc => 'Shows the block size in the write ahead log.',
+  flags => 'GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE',
+  variable => 'wal_block_size',
+  boot_val => 'XLOG_BLCKSZ',
+  min => 'XLOG_BLCKSZ',
+  max => 'XLOG_BLCKSZ',
+},
+
+{ name => 'wal_retrieve_retry_interval', type => 'int', context => 'PGC_SIGHUP', group => 'REPLICATION_STANDBY',
+  short_desc => 'Sets the time to wait before retrying to retrieve WAL after a failed attempt.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'wal_retrieve_retry_interval',
+  boot_val => '5000',
+  min => '1',
+  max => 'INT_MAX',
+},
+
+{ name => 'wal_segment_size', type => 'int', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS',
+  short_desc => 'Shows the size of write ahead log segments.',
+  flags => 'GUC_UNIT_BYTE | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_RUNTIME_COMPUTED',
+  variable => 'wal_segment_size',
+  boot_val => 'DEFAULT_XLOG_SEG_SIZE',
+  min => 'WalSegMinSize',
+  max => 'WalSegMaxSize',
+  check_hook => 'check_wal_segment_size',
+},
+
+{ name => 'wal_summary_keep_time', type => 'int', context => 'PGC_SIGHUP', group => 'WAL_SUMMARIZATION',
+  short_desc => 'Time for which WAL summary files should be kept.',
+  long_desc => '0 disables automatic summary file deletion.',
+  flags => 'GUC_UNIT_MIN',
+  variable => 'wal_summary_keep_time',
+  boot_val => '10 * HOURS_PER_DAY * MINS_PER_HOUR /* 10 days */',
+  min => '0',
+  max => 'INT_MAX / SECS_PER_MINUTE',
+},
+
+{ name => 'autovacuum_naptime', type => 'int', context => 'PGC_SIGHUP', group => 'VACUUM_AUTOVACUUM',
+  short_desc => 'Time to sleep between autovacuum runs.',
+  flags => 'GUC_UNIT_S',
+  variable => 'autovacuum_naptime',
+  boot_val => '60',
+  min => '1',
+  max => 'INT_MAX / 1000',
+},
+
+{ name => 'autovacuum_vacuum_threshold', type => 'int', context => 'PGC_SIGHUP', group => 'VACUUM_AUTOVACUUM',
+  short_desc => 'Minimum number of tuple updates or deletes prior to vacuum.',
+  variable => 'autovacuum_vac_thresh',
+  boot_val => '50',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+{ name => 'autovacuum_vacuum_max_threshold', type => 'int', context => 'PGC_SIGHUP', group => 'VACUUM_AUTOVACUUM',
+  short_desc => 'Maximum number of tuple updates or deletes prior to vacuum.',
+  long_desc => '-1 disables the maximum threshold.',
+  variable => 'autovacuum_vac_max_thresh',
+  boot_val => '100000000',
+  min => '-1',
+  max => 'INT_MAX',
+},
+
+{ name => 'autovacuum_vacuum_insert_threshold', type => 'int', context => 'PGC_SIGHUP', group => 'VACUUM_AUTOVACUUM',
+  short_desc => 'Minimum number of tuple inserts prior to vacuum.',
+  long_desc => '-1 disables insert vacuums.',
+  variable => 'autovacuum_vac_ins_thresh',
+  boot_val => '1000',
+  min => '-1',
+  max => 'INT_MAX',
+},
+
+{ name => 'autovacuum_analyze_threshold', type => 'int', context => 'PGC_SIGHUP', group => 'VACUUM_AUTOVACUUM',
+  short_desc => 'Minimum number of tuple inserts, updates, or deletes prior to analyze.',
+  variable => 'autovacuum_anl_thresh',
+  boot_val => '50',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+# see varsup.c for why this is PGC_POSTMASTER not PGC_SIGHUP
+# see vacuum_failsafe_age if you change the upper-limit value.
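+# (The 2000000000 upper limit keeps the cutoff safely below the 2^31
+# horizon of circular 32-bit transaction ID comparisons.)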
+{ name => 'autovacuum_freeze_max_age', type => 'int', context => 'PGC_POSTMASTER', group => 'VACUUM_AUTOVACUUM',
+  short_desc => 'Age at which to autovacuum a table to prevent transaction ID wraparound.',
+  variable => 'autovacuum_freeze_max_age',
+  boot_val => '200000000',
+  min => '100000',
+  max => '2000000000',
+},
+
+# see multixact.c for why this is PGC_POSTMASTER not PGC_SIGHUP
+{ name => 'autovacuum_multixact_freeze_max_age', type => 'int', context => 'PGC_POSTMASTER', group => 'VACUUM_AUTOVACUUM',
+  short_desc => 'Multixact age at which to autovacuum a table to prevent multixact wraparound.',
+  variable => 'autovacuum_multixact_freeze_max_age',
+  boot_val => '400000000',
+  min => '10000',
+  max => '2000000000',
+},
+
+# see max_connections
+{ name => 'autovacuum_worker_slots', type => 'int', context => 'PGC_POSTMASTER', group => 'VACUUM_AUTOVACUUM',
+  short_desc => 'Sets the number of backend slots to allocate for autovacuum workers.',
+  variable => 'autovacuum_worker_slots',
+  boot_val => '16',
+  min => '1',
+  max => 'MAX_BACKENDS',
+},
+
+{ name => 'autovacuum_max_workers', type => 'int', context => 'PGC_SIGHUP', group => 'VACUUM_AUTOVACUUM',
+  short_desc => 'Sets the maximum number of simultaneously running autovacuum worker processes.',
+  variable => 'autovacuum_max_workers',
+  boot_val => '3',
+  min => '1',
+  max => 'MAX_BACKENDS',
+},
+
+{ name => 'max_parallel_maintenance_workers', type => 'int', context => 'PGC_USERSET', group => 'RESOURCES_WORKER_PROCESSES',
+  short_desc => 'Sets the maximum number of parallel processes per maintenance operation.',
+  variable => 'max_parallel_maintenance_workers',
+  boot_val => '2',
+  min => '0',
+  max => 'MAX_PARALLEL_WORKER_LIMIT',
+},
+
+{ name => 'max_parallel_workers_per_gather', type => 'int', context => 'PGC_USERSET', group => 'RESOURCES_WORKER_PROCESSES',
+  short_desc => 'Sets the maximum number of parallel processes per executor node.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'max_parallel_workers_per_gather',
+  boot_val => '2',
+  min => '0',
+  max => 'MAX_PARALLEL_WORKER_LIMIT',
+},
+
+{ name => 'max_parallel_workers', type => 'int', context => 'PGC_USERSET', group => 'RESOURCES_WORKER_PROCESSES',
+  short_desc => 'Sets the maximum number of parallel workers that can be active at one time.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'max_parallel_workers',
+  boot_val => '8',
+  min => '0',
+  max => 'MAX_PARALLEL_WORKER_LIMIT',
+},
+
+{ name => 'autovacuum_work_mem', type => 'int', context => 'PGC_SIGHUP', group => 'RESOURCES_MEM',
+  short_desc => 'Sets the maximum memory to be used by each autovacuum worker process.',
+  long_desc => '-1 means use "maintenance_work_mem".',
+  flags => 'GUC_UNIT_KB',
+  variable => 'autovacuum_work_mem',
+  boot_val => '-1',
+  min => '-1',
+  max => 'MAX_KILOBYTES',
+  check_hook => 'check_autovacuum_work_mem',
+},
+
+{ name => 'tcp_keepalives_idle', type => 'int', context => 'PGC_USERSET', group => 'CONN_AUTH_TCP',
+  short_desc => 'Time between issuing TCP keepalives.',
+  long_desc => '0 means use the system default.',
+  flags => 'GUC_UNIT_S',
+  variable => 'tcp_keepalives_idle',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+  assign_hook => 'assign_tcp_keepalives_idle',
+  show_hook => 'show_tcp_keepalives_idle',
+},
+
+{ name => 'tcp_keepalives_interval', type => 'int', context => 'PGC_USERSET', group => 'CONN_AUTH_TCP',
+  short_desc => 'Time between TCP keepalive retransmits.',
+  long_desc => '0 means use the system default.',
+  flags => 'GUC_UNIT_S',
+  variable => 'tcp_keepalives_interval',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+  assign_hook => 'assign_tcp_keepalives_interval',
+  show_hook => 'show_tcp_keepalives_interval',
+},
+
+{ name => 'ssl_renegotiation_limit', type => 'int', context => 'PGC_USERSET', group => 'COMPAT_OPTIONS_PREVIOUS',
+  short_desc => 'SSL renegotiation is no longer supported; this can only be 0.',
+  flags => 'GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE',
+  variable => 'ssl_renegotiation_limit',
+  boot_val => '0',
+  min => '0',
+  max => '0',
+},
+
+{ name => 'tcp_keepalives_count', type => 'int', context => 'PGC_USERSET', group => 'CONN_AUTH_TCP',
+  short_desc => 'Maximum number of TCP keepalive retransmits.',
+  long_desc => 'Number of consecutive keepalive retransmits that can be lost before a connection is considered dead. 0 means use the system default.',
+  variable => 'tcp_keepalives_count',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+  assign_hook => 'assign_tcp_keepalives_count',
+  show_hook => 'show_tcp_keepalives_count',
+},
+
+{ name => 'gin_fuzzy_search_limit', type => 'int', context => 'PGC_USERSET', group => 'CLIENT_CONN_OTHER',
+  short_desc => 'Sets the maximum allowed result for exact search by GIN.',
+  long_desc => '0 means no limit.',
+  variable => 'GinFuzzySearchLimit',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+{ name => 'effective_cache_size', type => 'int', context => 'PGC_USERSET', group => 'QUERY_TUNING_COST',
+  short_desc => 'Sets the planner\'s assumption about the total size of the data caches.',
+  long_desc => 'That is, the total size of the caches (kernel cache and shared buffers) used for PostgreSQL data files. This is measured in disk pages, which are normally 8 kB each.',
+  flags => 'GUC_UNIT_BLOCKS | GUC_EXPLAIN',
+  variable => 'effective_cache_size',
+  boot_val => 'DEFAULT_EFFECTIVE_CACHE_SIZE',
+  min => '1',
+  max => 'INT_MAX',
+},
+
+{ name => 'min_parallel_table_scan_size', type => 'int', context => 'PGC_USERSET', group => 'QUERY_TUNING_COST',
+  short_desc => 'Sets the minimum amount of table data for a parallel scan.',
+  long_desc => 'If the planner estimates that it will read a number of table pages too small to reach this limit, a parallel scan will not be considered.',
+  flags => 'GUC_UNIT_BLOCKS | GUC_EXPLAIN',
+  variable => 'min_parallel_table_scan_size',
+  boot_val => '(8 * 1024 * 1024) / BLCKSZ',
+  min => '0',
+  max => 'INT_MAX / 3',
+},
+
+{ name => 'min_parallel_index_scan_size', type => 'int', context => 'PGC_USERSET', group => 'QUERY_TUNING_COST',
+  short_desc => 'Sets the minimum amount of index data for a parallel scan.',
+  long_desc => 'If the planner estimates that it will read a number of index pages too small to reach this limit, a parallel scan will not be considered.',
+  flags => 'GUC_UNIT_BLOCKS | GUC_EXPLAIN',
+  variable => 'min_parallel_index_scan_size',
+  boot_val => '(512 * 1024) / BLCKSZ',
+  min => '0',
+  max => 'INT_MAX / 3',
+},
+
+# Can't be set in postgresql.conf
+{ name => 'server_version_num', type => 'int', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS',
+  short_desc => 'Shows the server version as an integer.',
+  flags => 'GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE',
+  variable => 'server_version_num',
+  boot_val => 'PG_VERSION_NUM',
+  min => 'PG_VERSION_NUM',
+  max => 'PG_VERSION_NUM',
+},
+
+{ name => 'log_temp_files', type => 'int', context => 'PGC_SUSET', group => 'LOGGING_WHAT',
+  short_desc => 'Log the use of temporary files larger than this number of kilobytes.',
+  long_desc => '-1 disables logging temporary files. 0 means log all temporary files.',
+  flags => 'GUC_UNIT_KB',
+  variable => 'log_temp_files',
+  boot_val => '-1',
+  min => '-1',
+  max => 'INT_MAX',
+},
+
+{ name => 'track_activity_query_size', type => 'int', context => 'PGC_POSTMASTER', group => 'STATS_CUMULATIVE',
+  short_desc => 'Sets the size reserved for pg_stat_activity.query, in bytes.',
+  flags => 'GUC_UNIT_BYTE',
+  variable => 'pgstat_track_activity_query_size',
+  boot_val => '1024',
+  min => '100',
+  max => '1048576',
+},
+
+{ name => 'gin_pending_list_limit', type => 'int', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT',
+  short_desc => 'Sets the maximum size of the pending list for GIN index.',
+  flags => 'GUC_UNIT_KB',
+  variable => 'gin_pending_list_limit',
+  boot_val => '4096',
+  min => '64',
+  max => 'MAX_KILOBYTES',
+},
+
+{ name => 'tcp_user_timeout', type => 'int', context => 'PGC_USERSET', group => 'CONN_AUTH_TCP',
+  short_desc => 'TCP user timeout.',
+  long_desc => '0 means use the system default.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'tcp_user_timeout',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+  assign_hook => 'assign_tcp_user_timeout',
+  show_hook => 'show_tcp_user_timeout',
+},
+
+{ name => 'huge_page_size', type => 'int', context => 'PGC_POSTMASTER', group => 'RESOURCES_MEM',
+  short_desc => 'The size of huge page that should be requested.',
+  long_desc => '0 means use the system default.',
+  flags => 'GUC_UNIT_KB',
+  variable => 'huge_page_size',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+  check_hook => 'check_huge_page_size',
+},
+
+{ name => 'debug_discard_caches', type => 'int', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS',
+  short_desc => 'Aggressively flush system caches for debugging purposes.',
+  long_desc => '0 means use normal caching behavior.',
+  flags => 'GUC_NOT_IN_SAMPLE',
+  variable => 'debug_discard_caches',
+  boot_val => 'DEFAULT_DEBUG_DISCARD_CACHES',
+  min => 'MIN_DEBUG_DISCARD_CACHES',
+  max => 'MAX_DEBUG_DISCARD_CACHES',
+},
+
+{ name => 'client_connection_check_interval', type => 'int', context => 'PGC_USERSET', group => 'CONN_AUTH_TCP',
+  short_desc => 'Sets the time interval between checks for disconnection while running queries.',
+  long_desc => '0 disables connection checks.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'client_connection_check_interval',
+  boot_val => '0',
+  min => '0',
+  max => 'INT_MAX',
+  check_hook => 'check_client_connection_check_interval',
+},
+
+{ name => 'log_startup_progress_interval', type => 'int', context => 'PGC_SIGHUP', group => 'LOGGING_WHEN',
+  short_desc => 'Time between progress updates for long-running startup operations.',
+  long_desc => '0 disables progress updates.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'log_startup_progress_interval',
+  boot_val => '10000',
+  min => '0',
+  max => 'INT_MAX',
+},
+
+{ name => 'scram_iterations', type => 'int', context => 'PGC_USERSET', group => 'CONN_AUTH_AUTH',
+  short_desc => 'Sets the iteration count for SCRAM secret generation.',
+  flags => 'GUC_REPORT',
+  variable => 'scram_sha_256_iterations',
+  boot_val => 'SCRAM_SHA_256_DEFAULT_ITERATIONS',
+  min => '1',
+  max => 'INT_MAX',
+},
+
+
+{ name => 'seq_page_cost', type => 'real', context => 'PGC_USERSET', group => 'QUERY_TUNING_COST',
+  short_desc => 'Sets the planner\'s estimate of the cost of a sequentially fetched disk page.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'seq_page_cost',
+  boot_val => 'DEFAULT_SEQ_PAGE_COST',
+  min => '0',
+  max => 'DBL_MAX',
+},
+
+{ name => 'random_page_cost', type => 'real', context => 'PGC_USERSET', group => 'QUERY_TUNING_COST',
+  short_desc => 'Sets the planner\'s estimate of the cost of a nonsequentially fetched disk page.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'random_page_cost',
+  boot_val => 'DEFAULT_RANDOM_PAGE_COST',
+  min => '0',
+  max => 'DBL_MAX',
+},
+
+{ name => 'cpu_tuple_cost', type => 'real', context => 'PGC_USERSET', group => 'QUERY_TUNING_COST',
+  short_desc => 'Sets the planner\'s estimate of the cost of processing each tuple (row).',
+  flags => 'GUC_EXPLAIN',
+  variable => 'cpu_tuple_cost',
+  boot_val => 'DEFAULT_CPU_TUPLE_COST',
+  min => '0',
+  max => 'DBL_MAX',
+},
+
+{ name => 'cpu_index_tuple_cost', type => 'real', context => 'PGC_USERSET', group => 'QUERY_TUNING_COST',
+  short_desc => 'Sets the planner\'s estimate of the cost of processing each index entry during an index scan.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'cpu_index_tuple_cost',
+  boot_val => 'DEFAULT_CPU_INDEX_TUPLE_COST',
+  min => '0',
+  max => 'DBL_MAX',
+},
+
+{ name => 'cpu_operator_cost', type => 'real', context => 'PGC_USERSET', group => 'QUERY_TUNING_COST',
+  short_desc => 'Sets the planner\'s estimate of the cost of processing each operator or function call.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'cpu_operator_cost',
+  boot_val => 'DEFAULT_CPU_OPERATOR_COST',
+  min => '0',
+  max => 'DBL_MAX',
+},
+
+{ name => 'parallel_tuple_cost', type => 'real', context => 'PGC_USERSET', group => 'QUERY_TUNING_COST',
+  short_desc => 'Sets the planner\'s estimate of the cost of passing each tuple (row) from worker to leader backend.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'parallel_tuple_cost',
+  boot_val => 'DEFAULT_PARALLEL_TUPLE_COST',
+  min => '0',
+  max => 'DBL_MAX',
+},
+
+{ name => 'parallel_setup_cost', type => 'real', context => 'PGC_USERSET', group => 'QUERY_TUNING_COST',
+  short_desc => 'Sets the planner\'s estimate of the cost of starting up worker processes for parallel query.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'parallel_setup_cost',
+  boot_val => 'DEFAULT_PARALLEL_SETUP_COST',
+  min => '0',
+  max => 'DBL_MAX',
+},
+
+{ name => 'jit_above_cost', type => 'real', context => 'PGC_USERSET', group => 'QUERY_TUNING_COST',
+  short_desc => 'Perform JIT compilation if query is more expensive.',
+  long_desc => '-1 disables JIT compilation.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'jit_above_cost',
+  boot_val => '100000',
+  min => '-1',
+  max => 'DBL_MAX',
+},
+
+{ name => 'jit_optimize_above_cost', type => 'real', context => 'PGC_USERSET', group => 'QUERY_TUNING_COST',
+  short_desc => 'Optimize JIT-compiled functions if query is more expensive.',
+  long_desc => '-1 disables optimization.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'jit_optimize_above_cost',
+  boot_val => '500000',
+  min => '-1',
+  max => 'DBL_MAX',
+},
+
+{ name => 'jit_inline_above_cost', type => 'real', context => 'PGC_USERSET', group => 'QUERY_TUNING_COST',
+  short_desc => 'Perform JIT inlining if query is more expensive.',
+  long_desc => '-1 disables inlining.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'jit_inline_above_cost',
+  boot_val => '500000',
+  min => '-1',
+  max => 'DBL_MAX',
+},
+
+{ name => 'cursor_tuple_fraction', type => 'real', context => 'PGC_USERSET', group => 'QUERY_TUNING_OTHER',
+  short_desc => 'Sets the planner\'s estimate of the fraction of a cursor\'s rows that will be retrieved.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'cursor_tuple_fraction',
+  boot_val => 'DEFAULT_CURSOR_TUPLE_FRACTION',
+  min => '0.0',
+  max => '1.0',
+},
+
+{ name => 'recursive_worktable_factor', type => 'real', context => 'PGC_USERSET', group => 'QUERY_TUNING_OTHER',
+  short_desc => 'Sets the planner\'s estimate of the average size of a recursive query\'s working table.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'recursive_worktable_factor',
+  boot_val => 'DEFAULT_RECURSIVE_WORKTABLE_FACTOR',
+  min => '0.001',
+  max => '1000000.0',
+},
+
+{ name => 'geqo_selection_bias', type => 'real', context => 'PGC_USERSET', group => 'QUERY_TUNING_GEQO',
+  short_desc => 'GEQO: selective pressure within the population.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'Geqo_selection_bias',
+  boot_val => 'DEFAULT_GEQO_SELECTION_BIAS',
+  min => 'MIN_GEQO_SELECTION_BIAS',
+  max => 'MAX_GEQO_SELECTION_BIAS',
+},
+
+{ name => 'geqo_seed', type => 'real', context => 'PGC_USERSET', group => 'QUERY_TUNING_GEQO',
+  short_desc => 'GEQO: seed for random path selection.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'Geqo_seed',
+  boot_val => '0.0',
+  min => '0.0',
+  max => '1.0',
+},
+
+{ name => 'hash_mem_multiplier', type => 'real', context => 'PGC_USERSET', group => 'RESOURCES_MEM',
+  short_desc => 'Multiple of "work_mem" to use for hash tables.',
+  flags => 'GUC_EXPLAIN',
+  variable => 'hash_mem_multiplier',
+  boot_val => '2.0',
+  min => '1.0',
+  max => '1000.0',
+},
+
+{ name => 'bgwriter_lru_multiplier', type => 'real', context => 'PGC_SIGHUP', group => 'RESOURCES_BGWRITER',
+  short_desc => 'Multiple of the average buffer usage to free per round.',
+  variable => 'bgwriter_lru_multiplier',
+  boot_val => '2.0',
+  min => '0.0',
+  max => '10.0',
+},
+
+{ name => 'seed', type => 'real', context => 'PGC_USERSET', group => 'UNGROUPED',
+  short_desc => 'Sets the seed for random-number generation.',
+  flags => 'GUC_NO_SHOW_ALL | GUC_NO_RESET | GUC_NO_RESET_ALL | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE',
+  variable => 'phony_random_seed',
+  boot_val => '0.0',
+  min => '-1.0',
+  max => '1.0',
+  check_hook => 'check_random_seed',
+  assign_hook => 'assign_random_seed',
+  show_hook => 'show_random_seed',
+},
+
+{ name => 'vacuum_cost_delay', type => 'real', context => 'PGC_USERSET', group => 'VACUUM_COST_DELAY',
+  short_desc => 'Vacuum cost delay in milliseconds.',
+  flags => 'GUC_UNIT_MS',
+  variable => 'VacuumCostDelay',
+  boot_val => '0',
+  min => '0',
+  max => '100',
+},
+
+{ name => 'autovacuum_vacuum_cost_delay', type => 'real', context => 'PGC_SIGHUP', group => 'VACUUM_AUTOVACUUM',
+  short_desc => 'Vacuum cost delay in milliseconds, for autovacuum.',
+  long_desc => '-1 means use "vacuum_cost_delay".',
+  flags => 'GUC_UNIT_MS',
+  variable => 'autovacuum_vac_cost_delay',
+  boot_val => '2',
+  min => '-1',
+  max => '100',
+},
+
+{ name => 'autovacuum_vacuum_scale_factor', type => 'real', context => 'PGC_SIGHUP', group => 'VACUUM_AUTOVACUUM',
+  short_desc => 'Number of tuple updates or deletes prior to vacuum as a fraction of reltuples.',
+  variable => 'autovacuum_vac_scale',
+  boot_val => '0.2',
+  min => '0.0',
+  max => '100.0',
+},
+
+{ name => 'autovacuum_vacuum_insert_scale_factor', type => 'real', context => 'PGC_SIGHUP', group => 'VACUUM_AUTOVACUUM',
+  short_desc => 'Number of tuple inserts prior to vacuum as a fraction of reltuples.',
+  variable => 'autovacuum_vac_ins_scale',
+  boot_val => '0.2',
+  min => '0.0',
+  max => '100.0',
+},
+
+{ name => 'autovacuum_analyze_scale_factor', type => 'real', context => 'PGC_SIGHUP', group => 'VACUUM_AUTOVACUUM',
+  short_desc => 'Number of tuple inserts, updates, or deletes prior to analyze as a fraction of reltuples.',
+  variable => 'autovacuum_anl_scale',
+  boot_val => '0.1',
+  min =>
'0.0', + max => '100.0', +}, + +{ name => 'checkpoint_completion_target', type => 'real', context => 'PGC_SIGHUP', group => 'WAL_CHECKPOINTS', + short_desc => 'Time spent flushing dirty buffers during checkpoint, as fraction of checkpoint interval.', + variable => 'CheckPointCompletionTarget', + boot_val => '0.9', + min => '0.0', + max => '1.0', + assign_hook => 'assign_checkpoint_completion_target', +}, + +{ name => 'log_statement_sample_rate', type => 'real', context => 'PGC_SUSET', group => 'LOGGING_WHEN', + short_desc => 'Fraction of statements exceeding "log_min_duration_sample" to be logged.', + long_desc => 'Use a value between 0.0 (never log) and 1.0 (always log).', + variable => 'log_statement_sample_rate', + boot_val => '1.0', + min => '0.0', + max => '1.0', +}, + +{ name => 'log_transaction_sample_rate', type => 'real', context => 'PGC_SUSET', group => 'LOGGING_WHEN', + short_desc => 'Sets the fraction of transactions from which to log all statements.', + long_desc => 'Use a value between 0.0 (never log) and 1.0 (log all statements for all transactions).', + variable => 'log_xact_sample_rate', + boot_val => '0.0', + min => '0.0', + max => '1.0', +}, + +{ name => 'vacuum_max_eager_freeze_failure_rate', type => 'real', context => 'PGC_USERSET', group => 'VACUUM_FREEZING', + short_desc => 'Fraction of pages in a relation vacuum can scan and fail to freeze before disabling eager scanning.', + long_desc => 'A value of 0.0 disables eager scanning and a value of 1.0 will eagerly scan up to 100 percent of the all-visible pages in the relation. If vacuum successfully freezes these pages, the cap is lower than 100 percent, because the goal is to amortize page freezing across multiple vacuums.', + variable => 'vacuum_max_eager_freeze_failure_rate', + boot_val => '0.03', + min => '0.0', + max => '1.0', +}, + + +{ name => 'archive_command', type => 'string', context => 'PGC_SIGHUP', group => 'WAL_ARCHIVING', + short_desc => 'Sets the shell command that will be called to archive a WAL file.', + long_desc => 'An empty string means use "archive_library".', + variable => 'XLogArchiveCommand', + boot_val => '""', + show_hook => 'show_archive_command', +}, + +{ name => 'archive_library', type => 'string', context => 'PGC_SIGHUP', group => 'WAL_ARCHIVING', + short_desc => 'Sets the library that will be called to archive a WAL file.', + long_desc => 'An empty string means use "archive_command".', + variable => 'XLogArchiveLibrary', + boot_val => '""', +}, + +{ name => 'restore_command', type => 'string', context => 'PGC_SIGHUP', group => 'WAL_ARCHIVE_RECOVERY', + short_desc => 'Sets the shell command that will be called to retrieve an archived WAL file.', + variable => 'recoveryRestoreCommand', + boot_val => '""', +}, + +{ name => 'archive_cleanup_command', type => 'string', context => 'PGC_SIGHUP', group => 'WAL_ARCHIVE_RECOVERY', + short_desc => 'Sets the shell command that will be executed at every restart point.', + variable => 'archiveCleanupCommand', + boot_val => '""', +}, + +{ name => 'recovery_end_command', type => 'string', context => 'PGC_SIGHUP', group => 'WAL_ARCHIVE_RECOVERY', + short_desc => 'Sets the shell command that will be executed once at the end of recovery.', + variable => 'recoveryEndCommand', + boot_val => '""', +}, + +{ name => 'recovery_target_timeline', type => 'string', context => 'PGC_POSTMASTER', group => 'WAL_RECOVERY_TARGET', + short_desc => 'Specifies the timeline to recover into.', + variable => 'recovery_target_timeline_string', + boot_val => '"latest"', + 
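The checkpoint_completion_target record just above is a convenient place to show how these data entries line up with the hand-maintained C initializers this patch deletes from guc_tables.c further down. As a sketch, modeled on the removed hand-written entries (generic fields first, then &variable, boot_val, min, max, and the check/assign/show hooks, NULL where the record names none), the generator would presumably emit a struct config_real entry along these lines:

    {
        {"checkpoint_completion_target", PGC_SIGHUP, WAL_CHECKPOINTS,
            gettext_noop("Time spent flushing dirty buffers during checkpoint, as fraction of checkpoint interval."),
            NULL
        },
        &CheckPointCompletionTarget,
        0.9, 0.0, 1.0,                              /* boot_val, min, max */
        NULL, assign_checkpoint_completion_target, NULL
    },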
check_hook => 'check_recovery_target_timeline', + assign_hook => 'assign_recovery_target_timeline', +}, + +{ name => 'recovery_target', type => 'string', context => 'PGC_POSTMASTER', group => 'WAL_RECOVERY_TARGET', + short_desc => 'Set to "immediate" to end recovery as soon as a consistent state is reached.', + variable => 'recovery_target_string', + boot_val => '""', + check_hook => 'check_recovery_target', + assign_hook => 'assign_recovery_target', +}, + +{ name => 'recovery_target_xid', type => 'string', context => 'PGC_POSTMASTER', group => 'WAL_RECOVERY_TARGET', + short_desc => 'Sets the transaction ID up to which recovery will proceed.', + variable => 'recovery_target_xid_string', + boot_val => '""', + check_hook => 'check_recovery_target_xid', + assign_hook => 'assign_recovery_target_xid', +}, + +{ name => 'recovery_target_time', type => 'string', context => 'PGC_POSTMASTER', group => 'WAL_RECOVERY_TARGET', + short_desc => 'Sets the time stamp up to which recovery will proceed.', + variable => 'recovery_target_time_string', + boot_val => '""', + check_hook => 'check_recovery_target_time', + assign_hook => 'assign_recovery_target_time', +}, + +{ name => 'recovery_target_name', type => 'string', context => 'PGC_POSTMASTER', group => 'WAL_RECOVERY_TARGET', + short_desc => 'Sets the named restore point up to which recovery will proceed.', + variable => 'recovery_target_name_string', + boot_val => '""', + check_hook => 'check_recovery_target_name', + assign_hook => 'assign_recovery_target_name', +}, + +{ name => 'recovery_target_lsn', type => 'string', context => 'PGC_POSTMASTER', group => 'WAL_RECOVERY_TARGET', + short_desc => 'Sets the LSN of the write-ahead log location up to which recovery will proceed.', + variable => 'recovery_target_lsn_string', + boot_val => '""', + check_hook => 'check_recovery_target_lsn', + assign_hook => 'assign_recovery_target_lsn', +}, + +{ name => 'primary_conninfo', type => 'string', context => 'PGC_SIGHUP', group => 'REPLICATION_STANDBY', + short_desc => 'Sets the connection string to be used to connect to the sending server.', + flags => 'GUC_SUPERUSER_ONLY', + variable => 'PrimaryConnInfo', + boot_val => '""', +}, + +{ name => 'primary_slot_name', type => 'string', context => 'PGC_SIGHUP', group => 'REPLICATION_STANDBY', + short_desc => 'Sets the name of the replication slot to use on the sending server.', + variable => 'PrimarySlotName', + boot_val => '""', + check_hook => 'check_primary_slot_name', +}, + +{ name => 'client_encoding', type => 'string', context => 'PGC_USERSET', group => 'CLIENT_CONN_LOCALE', + short_desc => 'Sets the client\'s character set encoding.', + flags => 'GUC_IS_NAME | GUC_REPORT', + variable => 'client_encoding_string', + boot_val => '"SQL_ASCII"', + check_hook => 'check_client_encoding', + assign_hook => 'assign_client_encoding', +}, + +{ name => 'log_line_prefix', type => 'string', context => 'PGC_SIGHUP', group => 'LOGGING_WHAT', + short_desc => 'Controls information prefixed to each log line.', + long_desc => 'An empty string means no prefix.', + variable => 'Log_line_prefix', + boot_val => '"%m [%p] "', +}, + +{ name => 'log_timezone', type => 'string', context => 'PGC_SIGHUP', group => 'LOGGING_WHAT', + short_desc => 'Sets the time zone to use in log messages.', + variable => 'log_timezone_string', + boot_val => '"GMT"', + check_hook => 'check_log_timezone', + assign_hook => 'assign_log_timezone', + show_hook => 'show_log_timezone', +}, + +{ name => 'DateStyle', type => 'string', context => 'PGC_USERSET', group => 
'CLIENT_CONN_LOCALE', + short_desc => 'Sets the display format for date and time values.', + long_desc => 'Also controls interpretation of ambiguous date inputs.', + flags => 'GUC_LIST_INPUT | GUC_REPORT', + variable => 'datestyle_string', + boot_val => '"ISO, MDY"', + check_hook => 'check_datestyle', + assign_hook => 'assign_datestyle', +}, + +{ name => 'default_table_access_method', type => 'string', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Sets the default table access method for new tables.', + flags => 'GUC_IS_NAME', + variable => 'default_table_access_method', + boot_val => 'DEFAULT_TABLE_ACCESS_METHOD', + check_hook => 'check_default_table_access_method', +}, + +{ name => 'default_tablespace', type => 'string', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Sets the default tablespace to create tables and indexes in.', + long_desc => 'An empty string means use the database\'s default tablespace.', + flags => 'GUC_IS_NAME', + variable => 'default_tablespace', + boot_val => '""', + check_hook => 'check_default_tablespace', +}, + +{ name => 'temp_tablespaces', type => 'string', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Sets the tablespace(s) to use for temporary tables and sort files.', + long_desc => 'An empty string means use the database\'s default tablespace.', + flags => 'GUC_LIST_INPUT | GUC_LIST_QUOTE', + variable => 'temp_tablespaces', + boot_val => '""', + check_hook => 'check_temp_tablespaces', + assign_hook => 'assign_temp_tablespaces', +}, + +{ name => 'createrole_self_grant', type => 'string', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Sets whether a CREATEROLE user automatically grants the role to themselves, and with which options.', + long_desc => 'An empty string disables automatic self grants.', + flags => 'GUC_LIST_INPUT', + variable => 'createrole_self_grant', + boot_val => '""', + check_hook => 'check_createrole_self_grant', + assign_hook => 'assign_createrole_self_grant', +}, + +{ name => 'dynamic_library_path', type => 'string', context => 'PGC_SUSET', group => 'CLIENT_CONN_OTHER', + short_desc => 'Sets the path for dynamically loadable modules.', + long_desc => 'If a dynamically loadable module needs to be opened and the specified name does not have a directory component (i.e., the name does not contain a slash), the system will search this path for the specified file.', + flags => 'GUC_SUPERUSER_ONLY', + variable => 'Dynamic_library_path', + boot_val => '"$libdir"', +}, + +{ name => 'extension_control_path', type => 'string', context => 'PGC_SUSET', group => 'CLIENT_CONN_OTHER', + short_desc => 'Sets the path for extension control files.', + long_desc => 'The remaining extension script and secondary control files are then loaded from the same directory where the primary control file was found.', + flags => 'GUC_SUPERUSER_ONLY', + variable => 'Extension_control_path', + boot_val => '"$system"', +}, + +{ name => 'krb_server_keyfile', type => 'string', context => 'PGC_SIGHUP', group => 'CONN_AUTH_AUTH', + short_desc => 'Sets the location of the Kerberos server key file.', + flags => 'GUC_SUPERUSER_ONLY', + variable => 'pg_krb_server_keyfile', + boot_val => 'PG_KRB_SRVTAB', +}, + +{ name => 'bonjour_name', type => 'string', context => 'PGC_POSTMASTER', group => 'CONN_AUTH_SETTINGS', + short_desc => 'Sets the Bonjour service name.', + long_desc => 'An empty string means use the computer name.', + variable => 'bonjour_name', + 
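Several of the string records above name check_hook/assign_hook pairs (check_datestyle, check_default_tablespace, and so on). Those hooks follow the long-standing GUC hook signatures from guc.h; the hook below is a hypothetical illustration of the shape only, not one of the hooks referenced in this patch:

    /*
     * A string-GUC check hook receives the candidate value and may reject
     * it by returning false; GUC_check_errdetail() supplies the detail
     * message reported to the user.
     */
    static bool
    check_example_setting(char **newval, void **extra, GucSource source)
    {
        /* *newval can be NULL for string GUCs; accept that here */
        if (*newval && strchr(*newval, '\n') != NULL)
        {
            GUC_check_errdetail("Value must not contain newline characters.");
            return false;
        }
        return true;            /* accept *newval unchanged */
    }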
boot_val => '""', +}, + +{ name => 'lc_messages', type => 'string', context => 'PGC_SUSET', group => 'CLIENT_CONN_LOCALE', + short_desc => 'Sets the language in which messages are displayed.', + long_desc => 'An empty string means use the operating system setting.', + variable => 'locale_messages', + boot_val => '""', + check_hook => 'check_locale_messages', + assign_hook => 'assign_locale_messages', +}, + +{ name => 'lc_monetary', type => 'string', context => 'PGC_USERSET', group => 'CLIENT_CONN_LOCALE', + short_desc => 'Sets the locale for formatting monetary amounts.', + long_desc => 'An empty string means use the operating system setting.', + variable => 'locale_monetary', + boot_val => '"C"', + check_hook => 'check_locale_monetary', + assign_hook => 'assign_locale_monetary', +}, + +{ name => 'lc_numeric', type => 'string', context => 'PGC_USERSET', group => 'CLIENT_CONN_LOCALE', + short_desc => 'Sets the locale for formatting numbers.', + long_desc => 'An empty string means use the operating system setting.', + variable => 'locale_numeric', + boot_val => '"C"', + check_hook => 'check_locale_numeric', + assign_hook => 'assign_locale_numeric', +}, + +{ name => 'lc_time', type => 'string', context => 'PGC_USERSET', group => 'CLIENT_CONN_LOCALE', + short_desc => 'Sets the locale for formatting date and time values.', + long_desc => 'An empty string means use the operating system setting.', + variable => 'locale_time', + boot_val => '"C"', + check_hook => 'check_locale_time', + assign_hook => 'assign_locale_time', +}, + +{ name => 'session_preload_libraries', type => 'string', context => 'PGC_SUSET', group => 'CLIENT_CONN_PRELOAD', + short_desc => 'Lists shared libraries to preload into each backend.', + flags => 'GUC_LIST_INPUT | GUC_LIST_QUOTE | GUC_SUPERUSER_ONLY', + variable => 'session_preload_libraries_string', + boot_val => '""', +}, + +{ name => 'shared_preload_libraries', type => 'string', context => 'PGC_POSTMASTER', group => 'CLIENT_CONN_PRELOAD', + short_desc => 'Lists shared libraries to preload into server.', + flags => 'GUC_LIST_INPUT | GUC_LIST_QUOTE | GUC_SUPERUSER_ONLY', + variable => 'shared_preload_libraries_string', + boot_val => '""', +}, + +{ name => 'local_preload_libraries', type => 'string', context => 'PGC_USERSET', group => 'CLIENT_CONN_PRELOAD', + short_desc => 'Lists unprivileged shared libraries to preload into each backend.', + flags => 'GUC_LIST_INPUT | GUC_LIST_QUOTE', + variable => 'local_preload_libraries_string', + boot_val => '""', +}, + +{ name => 'search_path', type => 'string', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Sets the schema search order for names that are not schema-qualified.', + flags => 'GUC_LIST_INPUT | GUC_LIST_QUOTE | GUC_EXPLAIN | GUC_REPORT', + variable => 'namespace_search_path', + boot_val => '"\"$user\", public"', + check_hook => 'check_search_path', + assign_hook => 'assign_search_path', +}, + +# Can't be set in postgresql.conf +{ name => 'server_encoding', type => 'string', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS', + short_desc => 'Shows the server (database) character set encoding.', + flags => 'GUC_IS_NAME | GUC_REPORT | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE', + variable => 'server_encoding_string', + boot_val => '"SQL_ASCII"', +}, + +# Can't be set in postgresql.conf +{ name => 'server_version', type => 'string', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS', + short_desc => 'Shows the server version.', + flags => 'GUC_REPORT | GUC_NOT_IN_SAMPLE | 
GUC_DISALLOW_IN_FILE', + variable => 'server_version_string', + boot_val => 'PG_VERSION', +}, + +# Not for general use --- used by SET ROLE +{ name => 'role', type => 'string', context => 'PGC_USERSET', group => 'UNGROUPED', + short_desc => 'Sets the current role.', + flags => 'GUC_IS_NAME | GUC_NO_SHOW_ALL | GUC_NO_RESET_ALL | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_NOT_WHILE_SEC_REST', + variable => 'role_string', + boot_val => '"none"', + check_hook => 'check_role', + assign_hook => 'assign_role', + show_hook => 'show_role', +}, + +# Not for general use --- used by SET SESSION AUTHORIZATION +{ name => 'session_authorization', type => 'string', context => 'PGC_USERSET', group => 'UNGROUPED', + short_desc => 'Sets the session user name.', + flags => 'GUC_IS_NAME | GUC_REPORT | GUC_NO_SHOW_ALL | GUC_NO_RESET_ALL | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_NOT_WHILE_SEC_REST', + variable => 'session_authorization_string', + boot_val => 'NULL', + check_hook => 'check_session_authorization', + assign_hook => 'assign_session_authorization', +}, + +{ name => 'log_destination', type => 'string', context => 'PGC_SIGHUP', group => 'LOGGING_WHERE', + short_desc => 'Sets the destination for server log output.', + long_desc => 'Valid values are combinations of "stderr", "syslog", "csvlog", "jsonlog", and "eventlog", depending on the platform.', + flags => 'GUC_LIST_INPUT', + variable => 'Log_destination_string', + boot_val => '"stderr"', + check_hook => 'check_log_destination', + assign_hook => 'assign_log_destination', +}, + +{ name => 'log_directory', type => 'string', context => 'PGC_SIGHUP', group => 'LOGGING_WHERE', + short_desc => 'Sets the destination directory for log files.', + long_desc => 'Can be specified as relative to the data directory or as absolute path.', + flags => 'GUC_SUPERUSER_ONLY', + variable => 'Log_directory', + boot_val => '"log"', + check_hook => 'check_canonical_path', +}, + +{ name => 'log_filename', type => 'string', context => 'PGC_SIGHUP', group => 'LOGGING_WHERE', + short_desc => 'Sets the file name pattern for log files.', + flags => 'GUC_SUPERUSER_ONLY', + variable => 'Log_filename', + boot_val => '"postgresql-%Y-%m-%d_%H%M%S.log"', +}, + +{ name => 'syslog_ident', type => 'string', context => 'PGC_SIGHUP', group => 'LOGGING_WHERE', + short_desc => 'Sets the program name used to identify PostgreSQL messages in syslog.', + variable => 'syslog_ident_str', + boot_val => '"postgres"', + assign_hook => 'assign_syslog_ident', +}, + +{ name => 'event_source', type => 'string', context => 'PGC_POSTMASTER', group => 'LOGGING_WHERE', + short_desc => 'Sets the application name used to identify PostgreSQL messages in the event log.', + variable => 'event_source', + boot_val => 'DEFAULT_EVENT_SOURCE', +}, + +{ name => 'TimeZone', type => 'string', context => 'PGC_USERSET', group => 'CLIENT_CONN_LOCALE', + short_desc => 'Sets the time zone for displaying and interpreting time stamps.', + flags => 'GUC_REPORT', + variable => 'timezone_string', + boot_val => '"GMT"', + check_hook => 'check_timezone', + assign_hook => 'assign_timezone', + show_hook => 'show_timezone', +}, + +{ name => 'timezone_abbreviations', type => 'string', context => 'PGC_USERSET', group => 'CLIENT_CONN_LOCALE', + short_desc => 'Selects a file of time zone abbreviations.', + variable => 'timezone_abbreviations_string', + boot_val => 'NULL', + check_hook => 'check_timezone_abbreviations', + assign_hook => 'assign_timezone_abbreviations', +}, + +{ name => 'unix_socket_group', type => 'string', 
context => 'PGC_POSTMASTER', group => 'CONN_AUTH_SETTINGS', + short_desc => 'Sets the owning group of the Unix-domain socket.', + long_desc => 'The owning user of the socket is always the user that starts the server. An empty string means use the user\'s default group.', + variable => 'Unix_socket_group', + boot_val => '""', +}, + +{ name => 'unix_socket_directories', type => 'string', context => 'PGC_POSTMASTER', group => 'CONN_AUTH_SETTINGS', + short_desc => 'Sets the directories where Unix-domain sockets will be created.', + flags => 'GUC_LIST_INPUT | GUC_LIST_QUOTE | GUC_SUPERUSER_ONLY', + variable => 'Unix_socket_directories', + boot_val => 'DEFAULT_PGSOCKET_DIR', +}, + +{ name => 'listen_addresses', type => 'string', context => 'PGC_POSTMASTER', group => 'CONN_AUTH_SETTINGS', + short_desc => 'Sets the host name or IP address(es) to listen to.', + flags => 'GUC_LIST_INPUT', + variable => 'ListenAddresses', + boot_val => '"localhost"', +}, + +# Can't be set by ALTER SYSTEM as it can lead to recursive definition +# of data_directory. +{ name => 'data_directory', type => 'string', context => 'PGC_POSTMASTER', group => 'FILE_LOCATIONS', + short_desc => 'Sets the server\'s data directory.', + flags => 'GUC_SUPERUSER_ONLY | GUC_DISALLOW_IN_AUTO_FILE', + variable => 'data_directory', + boot_val => 'NULL', +}, + +{ name => 'config_file', type => 'string', context => 'PGC_POSTMASTER', group => 'FILE_LOCATIONS', + short_desc => 'Sets the server\'s main configuration file.', + flags => 'GUC_DISALLOW_IN_FILE | GUC_SUPERUSER_ONLY', + variable => 'ConfigFileName', + boot_val => 'NULL', +}, + +{ name => 'hba_file', type => 'string', context => 'PGC_POSTMASTER', group => 'FILE_LOCATIONS', + short_desc => 'Sets the server\'s "hba" configuration file.', + flags => 'GUC_SUPERUSER_ONLY', + variable => 'HbaFileName', + boot_val => 'NULL', +}, + +{ name => 'ident_file', type => 'string', context => 'PGC_POSTMASTER', group => 'FILE_LOCATIONS', + short_desc => 'Sets the server\'s "ident" configuration file.', + flags => 'GUC_SUPERUSER_ONLY', + variable => 'IdentFileName', + boot_val => 'NULL', +}, + +{ name => 'external_pid_file', type => 'string', context => 'PGC_POSTMASTER', group => 'FILE_LOCATIONS', + short_desc => 'Writes the postmaster PID to the specified file.', + flags => 'GUC_SUPERUSER_ONLY', + variable => 'external_pid_file', + boot_val => 'NULL', + check_hook => 'check_canonical_path', +}, + +{ name => 'ssl_library', type => 'string', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS', + short_desc => 'Shows the name of the SSL library.', + flags => 'GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE', + variable => 'ssl_library', + boot_val => 'SSL_LIBRARY', +}, + +{ name => 'ssl_cert_file', type => 'string', context => 'PGC_SIGHUP', group => 'CONN_AUTH_SSL', + short_desc => 'Location of the SSL server certificate file.', + variable => 'ssl_cert_file', + boot_val => '"server.crt"', +}, + +{ name => 'ssl_key_file', type => 'string', context => 'PGC_SIGHUP', group => 'CONN_AUTH_SSL', + short_desc => 'Location of the SSL server private key file.', + variable => 'ssl_key_file', + boot_val => '"server.key"', +}, + +{ name => 'ssl_ca_file', type => 'string', context => 'PGC_SIGHUP', group => 'CONN_AUTH_SSL', + short_desc => 'Location of the SSL certificate authority file.', + variable => 'ssl_ca_file', + boot_val => '""', +}, + +{ name => 'ssl_crl_file', type => 'string', context => 'PGC_SIGHUP', group => 'CONN_AUTH_SSL', + short_desc => 'Location of the SSL certificate revocation list file.', + variable => 
'ssl_crl_file', + boot_val => '""', +}, + +{ name => 'ssl_crl_dir', type => 'string', context => 'PGC_SIGHUP', group => 'CONN_AUTH_SSL', + short_desc => 'Location of the SSL certificate revocation list directory.', + variable => 'ssl_crl_dir', + boot_val => '""', +}, + +{ name => 'synchronous_standby_names', type => 'string', context => 'PGC_SIGHUP', group => 'REPLICATION_PRIMARY', + short_desc => 'Number of synchronous standbys and list of names of potential synchronous ones.', + flags => 'GUC_LIST_INPUT', + variable => 'SyncRepStandbyNames', + boot_val => '""', + check_hook => 'check_synchronous_standby_names', + assign_hook => 'assign_synchronous_standby_names', +}, + +{ name => 'default_text_search_config', type => 'string', context => 'PGC_USERSET', group => 'CLIENT_CONN_LOCALE', + short_desc => 'Sets default text search configuration.', + variable => 'TSCurrentConfig', + boot_val => '"pg_catalog.simple"', + check_hook => 'check_default_text_search_config', + assign_hook => 'assign_default_text_search_config', +}, + +{ name => 'ssl_tls13_ciphers', type => 'string', context => 'PGC_SIGHUP', group => 'CONN_AUTH_SSL', + short_desc => 'Sets the list of allowed TLSv1.3 cipher suites.', + long_desc => 'An empty string means use the default cipher suites.', + flags => 'GUC_SUPERUSER_ONLY', + variable => 'SSLCipherSuites', + boot_val => '""', +}, + +{ name => 'ssl_ciphers', type => 'string', context => 'PGC_SIGHUP', group => 'CONN_AUTH_SSL', + short_desc => 'Sets the list of allowed TLSv1.2 (and lower) ciphers.', + flags => 'GUC_SUPERUSER_ONLY', + variable => 'SSLCipherList', + boot_val => 'DEFAULT_SSL_CIPHERS', +}, + +{ name => 'ssl_groups', type => 'string', context => 'PGC_SIGHUP', group => 'CONN_AUTH_SSL', + short_desc => 'Sets the group(s) to use for Diffie-Hellman key exchange.', + long_desc => 'Multiple groups can be specified using a colon-separated list.', + flags => 'GUC_SUPERUSER_ONLY', + variable => 'SSLECDHCurve', + boot_val => 'DEFAULT_SSL_GROUPS', +}, + +{ name => 'ssl_dh_params_file', type => 'string', context => 'PGC_SIGHUP', group => 'CONN_AUTH_SSL', + short_desc => 'Location of the SSL DH parameters file.', + long_desc => 'An empty string means use compiled-in default parameters.', + flags => 'GUC_SUPERUSER_ONLY', + variable => 'ssl_dh_params_file', + boot_val => '""', +}, + +{ name => 'ssl_passphrase_command', type => 'string', context => 'PGC_SIGHUP', group => 'CONN_AUTH_SSL', + short_desc => 'Command to obtain passphrases for SSL.', + long_desc => 'An empty string means use the built-in prompting mechanism.', + flags => 'GUC_SUPERUSER_ONLY', + variable => 'ssl_passphrase_command', + boot_val => '""', +}, + +{ name => 'application_name', type => 'string', context => 'PGC_USERSET', group => 'LOGGING_WHAT', + short_desc => 'Sets the application name to be reported in statistics and logs.', + flags => 'GUC_IS_NAME | GUC_REPORT | GUC_NOT_IN_SAMPLE', + variable => 'application_name', + boot_val => '""', + check_hook => 'check_application_name', + assign_hook => 'assign_application_name', +}, + +{ name => 'cluster_name', type => 'string', context => 'PGC_POSTMASTER', group => 'PROCESS_TITLE', + short_desc => 'Sets the name of the cluster, which is included in the process title.', + flags => 'GUC_IS_NAME', + variable => 'cluster_name', + boot_val => '""', + check_hook => 'check_cluster_name', +}, + +{ name => 'wal_consistency_checking', type => 'string', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Sets the WAL resource managers for which WAL 
consistency checks are done.', + long_desc => 'Full-page images will be logged for all data blocks and cross-checked against the results of WAL replay.', + flags => 'GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE', + variable => 'wal_consistency_checking_string', + boot_val => '""', + check_hook => 'check_wal_consistency_checking', + assign_hook => 'assign_wal_consistency_checking', +}, + +{ name => 'jit_provider', type => 'string', context => 'PGC_POSTMASTER', group => 'CLIENT_CONN_PRELOAD', + short_desc => 'JIT provider to use.', + flags => 'GUC_SUPERUSER_ONLY', + variable => 'jit_provider', + boot_val => '"llvmjit"', +}, + +{ name => 'backtrace_functions', type => 'string', context => 'PGC_SUSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Log backtrace for errors in these functions.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'backtrace_functions', + boot_val => '""', + check_hook => 'check_backtrace_functions', + assign_hook => 'assign_backtrace_functions', +}, + +{ name => 'debug_io_direct', type => 'string', context => 'PGC_POSTMASTER', group => 'DEVELOPER_OPTIONS', + short_desc => 'Use direct I/O for file access.', + long_desc => 'An empty string disables direct I/O.', + flags => 'GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE', + variable => 'debug_io_direct_string', + boot_val => '""', + check_hook => 'check_debug_io_direct', + assign_hook => 'assign_debug_io_direct', +}, + +{ name => 'synchronized_standby_slots', type => 'string', context => 'PGC_SIGHUP', group => 'REPLICATION_PRIMARY', + short_desc => 'Lists streaming replication standby server replication slot names that logical WAL sender processes will wait for.', + long_desc => 'Logical WAL sender processes will send decoded changes to output plugins only after the specified replication slots have confirmed receiving WAL.', + flags => 'GUC_LIST_INPUT', + variable => 'synchronized_standby_slots', + boot_val => '""', + check_hook => 'check_synchronized_standby_slots', + assign_hook => 'assign_synchronized_standby_slots', +}, + +{ name => 'restrict_nonsystem_relation_kind', type => 'string', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Prohibits access to non-system relations of specified kinds.', + flags => 'GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE', + variable => 'restrict_nonsystem_relation_kind_string', + boot_val => '""', + check_hook => 'check_restrict_nonsystem_relation_kind', + assign_hook => 'assign_restrict_nonsystem_relation_kind', +}, + +{ name => 'oauth_validator_libraries', type => 'string', context => 'PGC_SIGHUP', group => 'CONN_AUTH_AUTH', + short_desc => 'Lists libraries that may be called to validate OAuth v2 bearer tokens.', + flags => 'GUC_LIST_INPUT | GUC_LIST_QUOTE | GUC_SUPERUSER_ONLY', + variable => 'oauth_validator_libraries_string', + boot_val => '""', +}, + +{ name => 'log_connections', type => 'string', context => 'PGC_SU_BACKEND', group => 'LOGGING_WHAT', + short_desc => 'Logs specified aspects of connection establishment and setup.', + flags => 'GUC_LIST_INPUT', + variable => 'log_connections_string', + boot_val => '""', + check_hook => 'check_log_connections', + assign_hook => 'assign_log_connections', +}, + +{ name => 'backslash_quote', type => 'enum', context => 'PGC_USERSET', group => 'COMPAT_OPTIONS_PREVIOUS', + short_desc => 'Sets whether "\\\\\'" is allowed in string literals.', + variable => 'backslash_quote', + boot_val => 'BACKSLASH_QUOTE_SAFE_ENCODING', + options => 'backslash_quote_options', +}, + +{ name => 'bytea_output', type => 'enum', context => 'PGC_USERSET', group => 
'CLIENT_CONN_STATEMENT', + short_desc => 'Sets the output format for bytea.', + variable => 'bytea_output', + boot_val => 'BYTEA_OUTPUT_HEX', + options => 'bytea_output_options', +}, + +{ name => 'client_min_messages', type => 'enum', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Sets the message levels that are sent to the client.', + long_desc => 'Each level includes all the levels that follow it. The later the level, the fewer messages are sent.', + variable => 'client_min_messages', + boot_val => 'NOTICE', + options => 'client_message_level_options', +}, + +{ name => 'compute_query_id', type => 'enum', context => 'PGC_SUSET', group => 'STATS_MONITORING', + short_desc => 'Enables in-core computation of query identifiers.', + variable => 'compute_query_id', + boot_val => 'COMPUTE_QUERY_ID_AUTO', + options => 'compute_query_id_options', +}, + +{ name => 'constraint_exclusion', type => 'enum', context => 'PGC_USERSET', group => 'QUERY_TUNING_OTHER', + short_desc => 'Enables the planner to use constraints to optimize queries.', + long_desc => 'Table scans will be skipped if their constraints guarantee that no rows match the query.', + flags => 'GUC_EXPLAIN', + variable => 'constraint_exclusion', + boot_val => 'CONSTRAINT_EXCLUSION_PARTITION', + options => 'constraint_exclusion_options', +}, + +{ name => 'default_toast_compression', type => 'enum', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Sets the default compression method for compressible values.', + variable => 'default_toast_compression', + boot_val => 'TOAST_PGLZ_COMPRESSION', + options => 'default_toast_compression_options', +}, + +{ name => 'default_transaction_isolation', type => 'enum', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Sets the transaction isolation level of each new transaction.', + variable => 'DefaultXactIsoLevel', + boot_val => 'XACT_READ_COMMITTED', + options => 'isolation_level_options', +}, + +{ name => 'transaction_isolation', type => 'enum', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Sets the current transaction\'s isolation level.', + flags => 'GUC_NO_RESET | GUC_NO_RESET_ALL | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE', + variable => 'XactIsoLevel', + boot_val => 'XACT_READ_COMMITTED', + options => 'isolation_level_options', + check_hook => 'check_transaction_isolation', +}, + +{ name => 'IntervalStyle', type => 'enum', context => 'PGC_USERSET', group => 'CLIENT_CONN_LOCALE', + short_desc => 'Sets the display format for interval values.', + flags => 'GUC_REPORT', + variable => 'IntervalStyle', + boot_val => 'INTSTYLE_POSTGRES', + options => 'intervalstyle_options', +}, + +{ name => 'icu_validation_level', type => 'enum', context => 'PGC_USERSET', group => 'CLIENT_CONN_LOCALE', + short_desc => 'Log level for reporting invalid ICU locale strings.', + variable => 'icu_validation_level', + boot_val => 'WARNING', + options => 'icu_validation_level_options', +}, + +{ name => 'log_error_verbosity', type => 'enum', context => 'PGC_SUSET', group => 'LOGGING_WHAT', + short_desc => 'Sets the verbosity of logged messages.', + variable => 'Log_error_verbosity', + boot_val => 'PGERROR_DEFAULT', + options => 'log_error_verbosity_options', +}, + +{ name => 'log_min_messages', type => 'enum', context => 'PGC_SUSET', group => 'LOGGING_WHEN', + short_desc => 'Sets the message levels that are logged.', + long_desc => 'Each level includes all the levels that follow it. 
The later the level, the fewer messages are sent.', + variable => 'log_min_messages', + boot_val => 'WARNING', + options => 'server_message_level_options', +}, + +{ name => 'log_min_error_statement', type => 'enum', context => 'PGC_SUSET', group => 'LOGGING_WHEN', + short_desc => 'Causes all statements generating an error at or above this level to be logged.', + long_desc => 'Each level includes all the levels that follow it. The later the level, the fewer messages are sent.', + variable => 'log_min_error_statement', + boot_val => 'ERROR', + options => 'server_message_level_options', +}, + +{ name => 'log_statement', type => 'enum', context => 'PGC_SUSET', group => 'LOGGING_WHAT', + short_desc => 'Sets the type of statements logged.', + variable => 'log_statement', + boot_val => 'LOGSTMT_NONE', + options => 'log_statement_options', +}, + +{ name => 'syslog_facility', type => 'enum', context => 'PGC_SIGHUP', group => 'LOGGING_WHERE', + short_desc => 'Sets the syslog "facility" to be used when syslog is enabled.', + variable => 'syslog_facility', + boot_val => 'DEFAULT_SYSLOG_FACILITY', + options => 'syslog_facility_options', + assign_hook => 'assign_syslog_facility', +}, + +{ name => 'session_replication_role', type => 'enum', context => 'PGC_SUSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Sets the session\'s behavior for triggers and rewrite rules.', + variable => 'SessionReplicationRole', + boot_val => 'SESSION_REPLICATION_ROLE_ORIGIN', + options => 'session_replication_role_options', + assign_hook => 'assign_session_replication_role', +}, + +{ name => 'synchronous_commit', type => 'enum', context => 'PGC_USERSET', group => 'WAL_SETTINGS', + short_desc => 'Sets the current transaction\'s synchronization level.', + variable => 'synchronous_commit', + boot_val => 'SYNCHRONOUS_COMMIT_ON', + options => 'synchronous_commit_options', + assign_hook => 'assign_synchronous_commit', +}, + +{ name => 'archive_mode', type => 'enum', context => 'PGC_POSTMASTER', group => 'WAL_ARCHIVING', + short_desc => 'Allows archiving of WAL files using "archive_command".', + variable => 'XLogArchiveMode', + boot_val => 'ARCHIVE_MODE_OFF', + options => 'archive_mode_options', +}, + +{ name => 'recovery_target_action', type => 'enum', context => 'PGC_POSTMASTER', group => 'WAL_RECOVERY_TARGET', + short_desc => 'Sets the action to perform upon reaching the recovery target.', + variable => 'recoveryTargetAction', + boot_val => 'RECOVERY_TARGET_ACTION_PAUSE', + options => 'recovery_target_action_options', +}, + +{ name => 'track_functions', type => 'enum', context => 'PGC_SUSET', group => 'STATS_CUMULATIVE', + short_desc => 'Collects function-level statistics on database activity.', + variable => 'pgstat_track_functions', + boot_val => 'TRACK_FUNC_OFF', + options => 'track_function_options', +}, + +{ name => 'stats_fetch_consistency', type => 'enum', context => 'PGC_USERSET', group => 'STATS_CUMULATIVE', + short_desc => 'Sets the consistency of accesses to statistics data.', + variable => 'pgstat_fetch_consistency', + boot_val => 'PGSTAT_FETCH_CONSISTENCY_CACHE', + options => 'stats_fetch_consistency', + assign_hook => 'assign_stats_fetch_consistency', +}, + +{ name => 'wal_compression', type => 'enum', context => 'PGC_SUSET', group => 'WAL_SETTINGS', + short_desc => 'Compresses full-page writes written in the WAL file with the specified method.', + variable => 'wal_compression', + boot_val => 'WAL_COMPRESSION_NONE', + options => 'wal_compression_options', +}, + +{ name => 'wal_level', type => 'enum', context =>
'PGC_POSTMASTER', group => 'WAL_SETTINGS', + short_desc => 'Sets the level of information written to the WAL.', + variable => 'wal_level', + boot_val => 'WAL_LEVEL_REPLICA', + options => 'wal_level_options', +}, + +{ name => 'dynamic_shared_memory_type', type => 'enum', context => 'PGC_POSTMASTER', group => 'RESOURCES_MEM', + short_desc => 'Selects the dynamic shared memory implementation used.', + variable => 'dynamic_shared_memory_type', + boot_val => 'DEFAULT_DYNAMIC_SHARED_MEMORY_TYPE', + options => 'dynamic_shared_memory_options', +}, + +{ name => 'shared_memory_type', type => 'enum', context => 'PGC_POSTMASTER', group => 'RESOURCES_MEM', + short_desc => 'Selects the shared memory implementation used for the main shared memory region.', + variable => 'shared_memory_type', + boot_val => 'DEFAULT_SHARED_MEMORY_TYPE', + options => 'shared_memory_options', +}, + +{ name => 'file_copy_method', type => 'enum', context => 'PGC_USERSET', group => 'RESOURCES_DISK', + short_desc => 'Selects the file copy method.', + variable => 'file_copy_method', + boot_val => 'FILE_COPY_METHOD_COPY', + options => 'file_copy_method_options', +}, + +{ name => 'wal_sync_method', type => 'enum', context => 'PGC_SIGHUP', group => 'WAL_SETTINGS', + short_desc => 'Selects the method used for forcing WAL updates to disk.', + variable => 'wal_sync_method', + boot_val => 'DEFAULT_WAL_SYNC_METHOD', + options => 'wal_sync_method_options', + assign_hook => 'assign_wal_sync_method', +}, + +{ name => 'xmlbinary', type => 'enum', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Sets how binary values are to be encoded in XML.', + variable => 'xmlbinary', + boot_val => 'XMLBINARY_BASE64', + options => 'xmlbinary_options', +}, + +{ name => 'xmloption', type => 'enum', context => 'PGC_USERSET', group => 'CLIENT_CONN_STATEMENT', + short_desc => 'Sets whether XML data in implicit parsing and serialization operations is to be considered as documents or content fragments.', + variable => 'xmloption', + boot_val => 'XMLOPTION_CONTENT', + options => 'xmloption_options', +}, + +{ name => 'huge_pages', type => 'enum', context => 'PGC_POSTMASTER', group => 'RESOURCES_MEM', + short_desc => 'Use of huge pages on Linux or Windows.', + variable => 'huge_pages', + boot_val => 'HUGE_PAGES_TRY', + options => 'huge_pages_options', +}, + +{ name => 'huge_pages_status', type => 'enum', context => 'PGC_INTERNAL', group => 'PRESET_OPTIONS', + short_desc => 'Indicates the status of huge pages.', + flags => 'GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE', + variable => 'huge_pages_status', + boot_val => 'HUGE_PAGES_UNKNOWN', + options => 'huge_pages_status_options', +}, + +{ name => 'recovery_prefetch', type => 'enum', context => 'PGC_SIGHUP', group => 'WAL_RECOVERY', + short_desc => 'Prefetch referenced blocks during recovery.', + long_desc => 'Look ahead in the WAL to find references to uncached data.', + variable => 'recovery_prefetch', + boot_val => 'RECOVERY_PREFETCH_TRY', + options => 'recovery_prefetch_options', + check_hook => 'check_recovery_prefetch', + assign_hook => 'assign_recovery_prefetch', +}, + +{ name => 'debug_parallel_query', type => 'enum', context => 'PGC_USERSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Forces the planner\'s use of parallel query nodes.', + long_desc => 'This can be useful for testing the parallel query infrastructure by forcing the planner to generate plans that contain nodes that perform tuple communication between workers and the main process.', + flags => 'GUC_NOT_IN_SAMPLE |
GUC_EXPLAIN', + variable => 'debug_parallel_query', + boot_val => 'DEBUG_PARALLEL_OFF', + options => 'debug_parallel_query_options', +}, + +{ name => 'password_encryption', type => 'enum', context => 'PGC_USERSET', group => 'CONN_AUTH_AUTH', + short_desc => 'Chooses the algorithm for encrypting passwords.', + variable => 'Password_encryption', + boot_val => 'PASSWORD_TYPE_SCRAM_SHA_256', + options => 'password_encryption_options', +}, + +{ name => 'plan_cache_mode', type => 'enum', context => 'PGC_USERSET', group => 'QUERY_TUNING_OTHER', + short_desc => 'Controls the planner\'s selection of a custom or generic plan.', + long_desc => 'Prepared statements can have custom and generic plans, and the planner will attempt to choose which is better. This can be set to override the default behavior.', + flags => 'GUC_EXPLAIN', + variable => 'plan_cache_mode', + boot_val => 'PLAN_CACHE_MODE_AUTO', + options => 'plan_cache_mode_options', +}, + +{ name => 'ssl_min_protocol_version', type => 'enum', context => 'PGC_SIGHUP', group => 'CONN_AUTH_SSL', + short_desc => 'Sets the minimum SSL/TLS protocol version to use.', + flags => 'GUC_SUPERUSER_ONLY', + variable => 'ssl_min_protocol_version', + boot_val => 'PG_TLS1_2_VERSION', + options => 'ssl_protocol_versions_info + 1', # don't allow PG_TLS_ANY +}, + +{ name => 'ssl_max_protocol_version', type => 'enum', context => 'PGC_SIGHUP', group => 'CONN_AUTH_SSL', + short_desc => 'Sets the maximum SSL/TLS protocol version to use.', + flags => 'GUC_SUPERUSER_ONLY', + variable => 'ssl_max_protocol_version', + boot_val => 'PG_TLS_ANY', + options => 'ssl_protocol_versions_info', +}, + +{ name => 'recovery_init_sync_method', type => 'enum', context => 'PGC_SIGHUP', group => 'ERROR_HANDLING_OPTIONS', + short_desc => 'Sets the method for synchronizing the data directory before crash recovery.', + variable => 'recovery_init_sync_method', + boot_val => 'DATA_DIR_SYNC_METHOD_FSYNC', + options => 'recovery_init_sync_method_options', +}, + +{ name => 'debug_logical_replication_streaming', type => 'enum', context => 'PGC_USERSET', group => 'DEVELOPER_OPTIONS', + short_desc => 'Forces immediate streaming or serialization of changes in large transactions.', + long_desc => 'On the publisher, it allows streaming or serializing each change in logical decoding.
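The "ssl_protocol_versions_info + 1" notation above deserves a note: enum options arrays are NULL-terminated lists of struct config_enum_entry, so offsetting the pointer by one hides the array's first element. The sketch below is based on the existing array in guc_tables.c (entry values quoted from memory, so treat it as illustrative):

    /*
     * {name, val, hidden} triples, terminated by a NULL name.
     * ssl_max_protocol_version points at the whole array, so the empty
     * string ("any version", PG_TLS_ANY) is accepted;
     * ssl_min_protocol_version points at element 1 and therefore never
     * accepts PG_TLS_ANY.
     */
    const struct config_enum_entry ssl_protocol_versions_info[] = {
        {"", PG_TLS_ANY, false},
        {"tlsv1", PG_TLS1_VERSION, false},
        {"tlsv1.1", PG_TLS1_1_VERSION, false},
        {"tlsv1.2", PG_TLS1_2_VERSION, false},
        {"tlsv1.3", PG_TLS1_3_VERSION, false},
        {NULL, 0, false}
    };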
On the subscriber, it allows serialization of all changes to files and notifies the parallel apply workers to read and apply them at the end of the transaction.', + flags => 'GUC_NOT_IN_SAMPLE', + variable => 'debug_logical_replication_streaming', + boot_val => 'DEBUG_LOGICAL_REP_STREAMING_BUFFERED', + options => 'debug_logical_replication_streaming_options', +}, + +{ name => 'io_method', type => 'enum', context => 'PGC_POSTMASTER', group => 'RESOURCES_IO', + short_desc => 'Selects the method for executing asynchronous I/O.', + variable => 'io_method', + boot_val => 'DEFAULT_IO_METHOD', + options => 'io_method_options', + assign_hook => 'assign_io_method', +}, + +] diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c index d14b1678e7fec..00c8376cf4ded 100644 --- a/src/backend/utils/misc/guc_tables.c +++ b/src/backend/utils/misc/guc_tables.c @@ -507,6 +507,7 @@ bool AllowAlterSystem = true; bool log_duration = false; bool Debug_print_plan = false; bool Debug_print_parse = false; +bool Debug_print_raw_parse = false; bool Debug_print_rewritten = false; bool Debug_pretty_print = true; @@ -760,4666 +761,4 @@ StaticAssertDecl(lengthof(config_type_names) == (PGC_ENUM + 1), "array length mismatch"); -/* - * Contents of GUC tables - * - * See src/backend/utils/misc/README for design notes. - * - * TO ADD AN OPTION: - * - * 1. Declare a global variable of type bool, int, double, or char* - * and make use of it. - * - * 2. Decide at what times it's safe to set the option. See guc.h for - * details. - * - * 3. Decide on a name, a default value, upper and lower bounds (if - * applicable), etc. - * - * 4. Add a record below. - * - * 5. Add it to src/backend/utils/misc/postgresql.conf.sample, if - * appropriate. - * - * 6. Don't forget to document the option (at least in config.sgml). - * - * 7. If it's a new GUC_LIST_QUOTE option, you must add it to - * variable_is_guc_list_quote() in src/bin/pg_dump/dumputils.c. 
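Step 1 of the checklist above (declare a global variable and make use of it) is visible in this very patch: the guc_tables.c hunk earlier adds the bool Debug_print_raw_parse. A sketch of how such a flag is consumed, modeled on how the sibling Debug_print_parse flags are used in postgres.c (the exact call site here is an assumption):

    /* global added by this patch (guc_tables.c) */
    bool        Debug_print_raw_parse = false;

    /* ...and, in the parser entry point, dump the raw tree when enabled */
    if (Debug_print_raw_parse)
        elog_node_display(LOG, "raw parse tree", raw_parsetree_list,
                          Debug_pretty_print);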
- */ - -struct config_bool ConfigureNamesBool[] = -{ - { - {"enable_seqscan", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's use of sequential-scan plans."), - NULL, - GUC_EXPLAIN - }, - &enable_seqscan, - true, - NULL, NULL, NULL - }, - { - {"enable_indexscan", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's use of index-scan plans."), - NULL, - GUC_EXPLAIN - }, - &enable_indexscan, - true, - NULL, NULL, NULL - }, - { - {"enable_indexonlyscan", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's use of index-only-scan plans."), - NULL, - GUC_EXPLAIN - }, - &enable_indexonlyscan, - true, - NULL, NULL, NULL - }, - { - {"enable_bitmapscan", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's use of bitmap-scan plans."), - NULL, - GUC_EXPLAIN - }, - &enable_bitmapscan, - true, - NULL, NULL, NULL - }, - { - {"enable_tidscan", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's use of TID scan plans."), - NULL, - GUC_EXPLAIN - }, - &enable_tidscan, - true, - NULL, NULL, NULL - }, - { - {"enable_sort", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's use of explicit sort steps."), - NULL, - GUC_EXPLAIN - }, - &enable_sort, - true, - NULL, NULL, NULL - }, - { - {"enable_incremental_sort", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's use of incremental sort steps."), - NULL, - GUC_EXPLAIN - }, - &enable_incremental_sort, - true, - NULL, NULL, NULL - }, - { - {"enable_hashagg", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's use of hashed aggregation plans."), - NULL, - GUC_EXPLAIN - }, - &enable_hashagg, - true, - NULL, NULL, NULL - }, - { - {"enable_material", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's use of materialization."), - NULL, - GUC_EXPLAIN - }, - &enable_material, - true, - NULL, NULL, NULL - }, - { - {"enable_memoize", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's use of memoization."), - NULL, - GUC_EXPLAIN - }, - &enable_memoize, - true, - NULL, NULL, NULL - }, - { - {"enable_nestloop", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's use of nested-loop join plans."), - NULL, - GUC_EXPLAIN - }, - &enable_nestloop, - true, - NULL, NULL, NULL - }, - { - {"enable_mergejoin", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's use of merge join plans."), - NULL, - GUC_EXPLAIN - }, - &enable_mergejoin, - true, - NULL, NULL, NULL - }, - { - {"enable_hashjoin", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's use of hash join plans."), - NULL, - GUC_EXPLAIN - }, - &enable_hashjoin, - true, - NULL, NULL, NULL - }, - { - {"enable_gathermerge", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's use of gather merge plans."), - NULL, - GUC_EXPLAIN - }, - &enable_gathermerge, - true, - NULL, NULL, NULL - }, - { - {"enable_partitionwise_join", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables partitionwise join."), - NULL, - GUC_EXPLAIN - }, - &enable_partitionwise_join, - false, - NULL, NULL, NULL - }, - { - {"enable_partitionwise_aggregate", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables partitionwise aggregation and grouping."), - NULL, - GUC_EXPLAIN - }, - &enable_partitionwise_aggregate, - false, - NULL, NULL, NULL - }, - { - {"enable_parallel_append", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's use of parallel append 
plans."), - NULL, - GUC_EXPLAIN - }, - &enable_parallel_append, - true, - NULL, NULL, NULL - }, - { - {"enable_parallel_hash", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's use of parallel hash plans."), - NULL, - GUC_EXPLAIN - }, - &enable_parallel_hash, - true, - NULL, NULL, NULL - }, - { - {"enable_partition_pruning", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables plan-time and execution-time partition pruning."), - gettext_noop("Allows the query planner and executor to compare partition " - "bounds to conditions in the query to determine which " - "partitions must be scanned."), - GUC_EXPLAIN - }, - &enable_partition_pruning, - true, - NULL, NULL, NULL - }, - { - {"enable_presorted_aggregate", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's ability to produce plans that " - "provide presorted input for ORDER BY / DISTINCT aggregate " - "functions."), - gettext_noop("Allows the query planner to build plans that provide " - "presorted input for aggregate functions with an ORDER BY / " - "DISTINCT clause. When disabled, implicit sorts are always " - "performed during execution."), - GUC_EXPLAIN - }, - &enable_presorted_aggregate, - true, - NULL, NULL, NULL - }, - { - {"enable_async_append", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables the planner's use of async append plans."), - NULL, - GUC_EXPLAIN - }, - &enable_async_append, - true, - NULL, NULL, NULL - }, - { - {"enable_self_join_elimination", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables removal of unique self-joins."), - NULL, - GUC_EXPLAIN - }, - &enable_self_join_elimination, - true, - NULL, NULL, NULL - }, - { - {"enable_group_by_reordering", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables reordering of GROUP BY keys."), - NULL, - GUC_EXPLAIN - }, - &enable_group_by_reordering, - true, - NULL, NULL, NULL - }, - { - {"enable_distinct_reordering", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables reordering of DISTINCT keys."), - NULL, - GUC_EXPLAIN - }, - &enable_distinct_reordering, - true, - NULL, NULL, NULL - }, - { - {"geqo", PGC_USERSET, QUERY_TUNING_GEQO, - gettext_noop("Enables genetic query optimization."), - gettext_noop("This algorithm attempts to do planning without " - "exhaustive searching."), - GUC_EXPLAIN - }, - &enable_geqo, - true, - NULL, NULL, NULL - }, - { - /* - * Not for general use --- used by SET SESSION AUTHORIZATION and SET - * ROLE - */ - {"is_superuser", PGC_INTERNAL, UNGROUPED, - gettext_noop("Shows whether the current user is a superuser."), - NULL, - GUC_REPORT | GUC_NO_SHOW_ALL | GUC_NO_RESET_ALL | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_ALLOW_IN_PARALLEL - }, - ¤t_role_is_superuser, - false, - NULL, NULL, NULL - }, - { - /* - * This setting itself cannot be set by ALTER SYSTEM to avoid an - * operator turning this setting off by using ALTER SYSTEM, without a - * way to turn it back on. 
- */ - {"allow_alter_system", PGC_SIGHUP, COMPAT_OPTIONS_OTHER, - gettext_noop("Allows running the ALTER SYSTEM command."), - gettext_noop("Can be set to off for environments where global configuration " - "changes should be made using a different method."), - GUC_DISALLOW_IN_AUTO_FILE - }, - &AllowAlterSystem, - true, - NULL, NULL, NULL - }, - { - {"bonjour", PGC_POSTMASTER, CONN_AUTH_SETTINGS, - gettext_noop("Enables advertising the server via Bonjour."), - NULL - }, - &enable_bonjour, - false, - check_bonjour, NULL, NULL - }, - { - {"track_commit_timestamp", PGC_POSTMASTER, REPLICATION_SENDING, - gettext_noop("Collects transaction commit time."), - NULL - }, - &track_commit_timestamp, - false, - NULL, NULL, NULL - }, - { - {"ssl", PGC_SIGHUP, CONN_AUTH_SSL, - gettext_noop("Enables SSL connections."), - NULL - }, - &EnableSSL, - false, - check_ssl, NULL, NULL - }, - { - {"ssl_passphrase_command_supports_reload", PGC_SIGHUP, CONN_AUTH_SSL, - gettext_noop("Controls whether \"ssl_passphrase_command\" is called during server reload."), - NULL - }, - &ssl_passphrase_command_supports_reload, - false, - NULL, NULL, NULL - }, - { - {"ssl_prefer_server_ciphers", PGC_SIGHUP, CONN_AUTH_SSL, - gettext_noop("Give priority to server ciphersuite order."), - NULL - }, - &SSLPreferServerCiphers, - true, - NULL, NULL, NULL - }, - { - {"fsync", PGC_SIGHUP, WAL_SETTINGS, - gettext_noop("Forces synchronization of updates to disk."), - gettext_noop("The server will use the fsync() system call in several places to make " - "sure that updates are physically written to disk. This ensures " - "that a database cluster will recover to a consistent state after " - "an operating system or hardware crash.") - }, - &enableFsync, - true, - NULL, NULL, NULL - }, - { - {"ignore_checksum_failure", PGC_SUSET, DEVELOPER_OPTIONS, - gettext_noop("Continues processing after a checksum failure."), - gettext_noop("Detection of a checksum failure normally causes PostgreSQL to " - "report an error, aborting the current transaction. Setting " - "ignore_checksum_failure to true causes the system to ignore the failure " - "(but still report a warning), and continue processing. This " - "behavior could cause crashes or other serious problems. Only " - "has an effect if checksums are enabled."), - GUC_NOT_IN_SAMPLE - }, - &ignore_checksum_failure, - false, - NULL, NULL, NULL - }, - { - {"zero_damaged_pages", PGC_SUSET, DEVELOPER_OPTIONS, - gettext_noop("Continues processing past damaged page headers."), - gettext_noop("Detection of a damaged page header normally causes PostgreSQL to " - "report an error, aborting the current transaction. Setting " - "\"zero_damaged_pages\" to true causes the system to instead report a " - "warning, zero out the damaged page, and continue processing. This " - "behavior will destroy data, namely all the rows on the damaged page."), - GUC_NOT_IN_SAMPLE - }, - &zero_damaged_pages, - false, - NULL, NULL, NULL - }, - { - {"ignore_invalid_pages", PGC_POSTMASTER, DEVELOPER_OPTIONS, - gettext_noop("Continues recovery after an invalid pages failure."), - gettext_noop("Detection of WAL records having references to " - "invalid pages during recovery causes PostgreSQL to " - "raise a PANIC-level error, aborting the recovery. " - "Setting \"ignore_invalid_pages\" to true causes " - "the system to ignore invalid page references " - "in WAL records (but still report a warning), " - "and continue recovery. This behavior may cause " - "crashes, data loss, propagate or hide corruption, " - "or other serious problems. 
Only has an effect " - "during recovery or in standby mode."), - GUC_NOT_IN_SAMPLE - }, - &ignore_invalid_pages, - false, - NULL, NULL, NULL - }, - { - {"full_page_writes", PGC_SIGHUP, WAL_SETTINGS, - gettext_noop("Writes full pages to WAL when first modified after a checkpoint."), - gettext_noop("A page write in process during an operating system crash might be " - "only partially written to disk. During recovery, the row changes " - "stored in WAL are not enough to recover. This option writes " - "pages when first modified after a checkpoint to WAL so full recovery " - "is possible.") - }, - &fullPageWrites, - true, - NULL, NULL, NULL - }, - - { - {"wal_log_hints", PGC_POSTMASTER, WAL_SETTINGS, - gettext_noop("Writes full pages to WAL when first modified after a checkpoint, even for a non-critical modification."), - NULL - }, - &wal_log_hints, - false, - NULL, NULL, NULL - }, - - { - {"wal_init_zero", PGC_SUSET, WAL_SETTINGS, - gettext_noop("Writes zeroes to new WAL files before first use."), - NULL - }, - &wal_init_zero, - true, - NULL, NULL, NULL - }, - - { - {"wal_recycle", PGC_SUSET, WAL_SETTINGS, - gettext_noop("Recycles WAL files by renaming them."), - NULL - }, - &wal_recycle, - true, - NULL, NULL, NULL - }, - - { - {"log_checkpoints", PGC_SIGHUP, LOGGING_WHAT, - gettext_noop("Logs each checkpoint."), - NULL - }, - &log_checkpoints, - true, - NULL, NULL, NULL - }, - { - {"trace_connection_negotiation", PGC_POSTMASTER, DEVELOPER_OPTIONS, - gettext_noop("Logs details of pre-authentication connection handshake."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &Trace_connection_negotiation, - false, - NULL, NULL, NULL - }, - { - {"log_disconnections", PGC_SU_BACKEND, LOGGING_WHAT, - gettext_noop("Logs end of a session, including duration."), - NULL - }, - &Log_disconnections, - false, - NULL, NULL, NULL - }, - { - {"log_replication_commands", PGC_SUSET, LOGGING_WHAT, - gettext_noop("Logs each replication command."), - NULL - }, - &log_replication_commands, - false, - NULL, NULL, NULL - }, - { - {"debug_assertions", PGC_INTERNAL, PRESET_OPTIONS, - gettext_noop("Shows whether the running server has assertion checks enabled."), - NULL, - GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE - }, - &assert_enabled, - DEFAULT_ASSERT_ENABLED, - NULL, NULL, NULL - }, - - { - {"exit_on_error", PGC_USERSET, ERROR_HANDLING_OPTIONS, - gettext_noop("Terminate session on any error."), - NULL - }, - &ExitOnAnyError, - false, - NULL, NULL, NULL - }, - { - {"restart_after_crash", PGC_SIGHUP, ERROR_HANDLING_OPTIONS, - gettext_noop("Reinitialize server after backend crash."), - NULL - }, - &restart_after_crash, - true, - NULL, NULL, NULL - }, - { - {"remove_temp_files_after_crash", PGC_SIGHUP, DEVELOPER_OPTIONS, - gettext_noop("Remove temporary files after backend crash."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &remove_temp_files_after_crash, - true, - NULL, NULL, NULL - }, - { - {"send_abort_for_crash", PGC_SIGHUP, DEVELOPER_OPTIONS, - gettext_noop("Send SIGABRT not SIGQUIT to child processes after backend crash."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &send_abort_for_crash, - false, - NULL, NULL, NULL - }, - { - {"send_abort_for_kill", PGC_SIGHUP, DEVELOPER_OPTIONS, - gettext_noop("Send SIGABRT not SIGKILL to stuck child processes."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &send_abort_for_kill, - false, - NULL, NULL, NULL - }, - - { - {"log_duration", PGC_SUSET, LOGGING_WHAT, - gettext_noop("Logs the duration of each completed SQL statement."), - NULL - }, - &log_duration, - false, - NULL, NULL, NULL - }, -#ifdef 
DEBUG_NODE_TESTS_ENABLED - { - {"debug_copy_parse_plan_trees", PGC_SUSET, DEVELOPER_OPTIONS, - gettext_noop("Set this to force all parse and plan trees to be passed through " - "copyObject(), to facilitate catching errors and omissions in " - "copyObject()."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &Debug_copy_parse_plan_trees, -/* support for legacy compile-time setting */ -#ifdef COPY_PARSE_PLAN_TREES - true, -#else - false, -#endif - NULL, NULL, NULL - }, - { - {"debug_write_read_parse_plan_trees", PGC_SUSET, DEVELOPER_OPTIONS, - gettext_noop("Set this to force all parse and plan trees to be passed through " - "outfuncs.c/readfuncs.c, to facilitate catching errors and omissions in " - "those modules."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &Debug_write_read_parse_plan_trees, -/* support for legacy compile-time setting */ -#ifdef WRITE_READ_PARSE_PLAN_TREES - true, -#else - false, -#endif - NULL, NULL, NULL - }, - { - {"debug_raw_expression_coverage_test", PGC_SUSET, DEVELOPER_OPTIONS, - gettext_noop("Set this to force all raw parse trees for DML statements to be scanned " - "by raw_expression_tree_walker(), to facilitate catching errors and " - "omissions in that function."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &Debug_raw_expression_coverage_test, -/* support for legacy compile-time setting */ -#ifdef RAW_EXPRESSION_COVERAGE_TEST - true, -#else - false, -#endif - NULL, NULL, NULL - }, -#endif /* DEBUG_NODE_TESTS_ENABLED */ - { - {"debug_print_parse", PGC_USERSET, LOGGING_WHAT, - gettext_noop("Logs each query's parse tree."), - NULL - }, - &Debug_print_parse, - false, - NULL, NULL, NULL - }, - { - {"debug_print_rewritten", PGC_USERSET, LOGGING_WHAT, - gettext_noop("Logs each query's rewritten parse tree."), - NULL - }, - &Debug_print_rewritten, - false, - NULL, NULL, NULL - }, - { - {"debug_print_plan", PGC_USERSET, LOGGING_WHAT, - gettext_noop("Logs each query's execution plan."), - NULL - }, - &Debug_print_plan, - false, - NULL, NULL, NULL - }, - { - {"debug_pretty_print", PGC_USERSET, LOGGING_WHAT, - gettext_noop("Indents parse and plan tree displays."), - NULL - }, - &Debug_pretty_print, - true, - NULL, NULL, NULL - }, - { - {"log_parser_stats", PGC_SUSET, STATS_MONITORING, - gettext_noop("Writes parser performance statistics to the server log."), - NULL - }, - &log_parser_stats, - false, - check_stage_log_stats, NULL, NULL - }, - { - {"log_planner_stats", PGC_SUSET, STATS_MONITORING, - gettext_noop("Writes planner performance statistics to the server log."), - NULL - }, - &log_planner_stats, - false, - check_stage_log_stats, NULL, NULL - }, - { - {"log_executor_stats", PGC_SUSET, STATS_MONITORING, - gettext_noop("Writes executor performance statistics to the server log."), - NULL - }, - &log_executor_stats, - false, - check_stage_log_stats, NULL, NULL - }, - { - {"log_statement_stats", PGC_SUSET, STATS_MONITORING, - gettext_noop("Writes cumulative performance statistics to the server log."), - NULL - }, - &log_statement_stats, - false, - check_log_stats, NULL, NULL - }, -#ifdef BTREE_BUILD_STATS - { - {"log_btree_build_stats", PGC_SUSET, DEVELOPER_OPTIONS, - gettext_noop("Logs system resource usage statistics (memory and CPU) on various B-tree operations."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &log_btree_build_stats, - false, - NULL, NULL, NULL - }, -#endif - - { - {"track_activities", PGC_SUSET, STATS_CUMULATIVE, - gettext_noop("Collects information about executing commands."), - gettext_noop("Enables the collection of information on the currently " - "executing command of each session, 
along with " - "the time at which that command began execution.") - }, - &pgstat_track_activities, - true, - NULL, NULL, NULL - }, - { - {"track_counts", PGC_SUSET, STATS_CUMULATIVE, - gettext_noop("Collects statistics on database activity."), - NULL - }, - &pgstat_track_counts, - true, - NULL, NULL, NULL - }, - { - {"track_cost_delay_timing", PGC_SUSET, STATS_CUMULATIVE, - gettext_noop("Collects timing statistics for cost-based vacuum delay."), - NULL - }, - &track_cost_delay_timing, - false, - NULL, NULL, NULL - }, - { - {"track_io_timing", PGC_SUSET, STATS_CUMULATIVE, - gettext_noop("Collects timing statistics for database I/O activity."), - NULL - }, - &track_io_timing, - false, - NULL, NULL, NULL - }, - { - {"track_wal_io_timing", PGC_SUSET, STATS_CUMULATIVE, - gettext_noop("Collects timing statistics for WAL I/O activity."), - NULL - }, - &track_wal_io_timing, - false, - NULL, NULL, NULL - }, - - { - {"update_process_title", PGC_SUSET, PROCESS_TITLE, - gettext_noop("Updates the process title to show the active SQL command."), - gettext_noop("Enables updating of the process title every time a new SQL command is received by the server.") - }, - &update_process_title, - DEFAULT_UPDATE_PROCESS_TITLE, - NULL, NULL, NULL - }, - - { - {"autovacuum", PGC_SIGHUP, VACUUM_AUTOVACUUM, - gettext_noop("Starts the autovacuum subprocess."), - NULL - }, - &autovacuum_start_daemon, - true, - NULL, NULL, NULL - }, - - { - {"trace_notify", PGC_USERSET, DEVELOPER_OPTIONS, - gettext_noop("Generates debugging output for LISTEN and NOTIFY."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &Trace_notify, - false, - NULL, NULL, NULL - }, - -#ifdef LOCK_DEBUG - { - {"trace_locks", PGC_SUSET, DEVELOPER_OPTIONS, - gettext_noop("Emits information about lock usage."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &Trace_locks, - false, - NULL, NULL, NULL - }, - { - {"trace_userlocks", PGC_SUSET, DEVELOPER_OPTIONS, - gettext_noop("Emits information about user lock usage."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &Trace_userlocks, - false, - NULL, NULL, NULL - }, - { - {"trace_lwlocks", PGC_SUSET, DEVELOPER_OPTIONS, - gettext_noop("Emits information about lightweight lock usage."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &Trace_lwlocks, - false, - NULL, NULL, NULL - }, - { - {"debug_deadlocks", PGC_SUSET, DEVELOPER_OPTIONS, - gettext_noop("Dumps information about all current locks when a deadlock timeout occurs."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &Debug_deadlocks, - false, - NULL, NULL, NULL - }, -#endif - - { - {"log_lock_waits", PGC_SUSET, LOGGING_WHAT, - gettext_noop("Logs long lock waits."), - NULL - }, - &log_lock_waits, - false, - NULL, NULL, NULL - }, - { - {"log_lock_failures", PGC_SUSET, LOGGING_WHAT, - gettext_noop("Logs lock failures."), - NULL - }, - &log_lock_failures, - false, - NULL, NULL, NULL - }, - { - {"log_recovery_conflict_waits", PGC_SIGHUP, LOGGING_WHAT, - gettext_noop("Logs standby recovery conflict waits."), - NULL - }, - &log_recovery_conflict_waits, - false, - NULL, NULL, NULL - }, - { - {"log_hostname", PGC_SIGHUP, LOGGING_WHAT, - gettext_noop("Logs the host name in the connection logs."), - gettext_noop("By default, connection logs only show the IP address " - "of the connecting host. 
If you want them to show the host name you " - "can turn this on, but depending on your host name resolution " - "setup it might impose a non-negligible performance penalty.") - }, - &log_hostname, - false, - NULL, NULL, NULL - }, - { - {"transform_null_equals", PGC_USERSET, COMPAT_OPTIONS_OTHER, - gettext_noop("Treats \"expr=NULL\" as \"expr IS NULL\"."), - gettext_noop("When turned on, expressions of the form expr = NULL " - "(or NULL = expr) are treated as expr IS NULL, that is, they " - "return true if expr evaluates to the null value, and false " - "otherwise. The correct behavior of expr = NULL is to always " - "return null (unknown).") - }, - &Transform_null_equals, - false, - NULL, NULL, NULL - }, - { - {"default_transaction_read_only", PGC_USERSET, CLIENT_CONN_STATEMENT, - gettext_noop("Sets the default read-only status of new transactions."), - NULL, - GUC_REPORT - }, - &DefaultXactReadOnly, - false, - NULL, NULL, NULL - }, - { - {"transaction_read_only", PGC_USERSET, CLIENT_CONN_STATEMENT, - gettext_noop("Sets the current transaction's read-only status."), - NULL, - GUC_NO_RESET | GUC_NO_RESET_ALL | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE - }, - &XactReadOnly, - false, - check_transaction_read_only, NULL, NULL - }, - { - {"default_transaction_deferrable", PGC_USERSET, CLIENT_CONN_STATEMENT, - gettext_noop("Sets the default deferrable status of new transactions."), - NULL - }, - &DefaultXactDeferrable, - false, - NULL, NULL, NULL - }, - { - {"transaction_deferrable", PGC_USERSET, CLIENT_CONN_STATEMENT, - gettext_noop("Whether to defer a read-only serializable transaction until it can be executed with no possible serialization failures."), - NULL, - GUC_NO_RESET | GUC_NO_RESET_ALL | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE - }, - &XactDeferrable, - false, - check_transaction_deferrable, NULL, NULL - }, - { - {"row_security", PGC_USERSET, CLIENT_CONN_STATEMENT, - gettext_noop("Enables row security."), - gettext_noop("When enabled, row security will be applied to all users.") - }, - &row_security, - true, - NULL, NULL, NULL - }, - { - {"check_function_bodies", PGC_USERSET, CLIENT_CONN_STATEMENT, - gettext_noop("Check routine bodies during CREATE FUNCTION and CREATE PROCEDURE."), - NULL - }, - &check_function_bodies, - true, - NULL, NULL, NULL - }, - { - {"array_nulls", PGC_USERSET, COMPAT_OPTIONS_PREVIOUS, - gettext_noop("Enables input of NULL elements in arrays."), - gettext_noop("When turned on, unquoted NULL in an array input " - "value means a null value; " - "otherwise it is taken literally.") - }, - &Array_nulls, - true, - NULL, NULL, NULL - }, - - /* - * WITH OIDS support, and consequently default_with_oids, was removed in - * PostgreSQL 12, but we tolerate the parameter being set to false to - * avoid unnecessarily breaking older dump files. 
- */ - { - {"default_with_oids", PGC_USERSET, COMPAT_OPTIONS_PREVIOUS, - gettext_noop("WITH OIDS is no longer supported; this can only be false."), - NULL, - GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE - }, - &default_with_oids, - false, - check_default_with_oids, NULL, NULL - }, - { - {"logging_collector", PGC_POSTMASTER, LOGGING_WHERE, - gettext_noop("Start a subprocess to capture stderr, csvlog and/or jsonlog into log files."), - NULL - }, - &Logging_collector, - false, - NULL, NULL, NULL - }, - { - {"log_truncate_on_rotation", PGC_SIGHUP, LOGGING_WHERE, - gettext_noop("Truncate existing log files of same name during log rotation."), - NULL - }, - &Log_truncate_on_rotation, - false, - NULL, NULL, NULL - }, - - { - {"trace_sort", PGC_USERSET, DEVELOPER_OPTIONS, - gettext_noop("Emit information about resource usage in sorting."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &trace_sort, - false, - NULL, NULL, NULL - }, - -#ifdef TRACE_SYNCSCAN - /* this is undocumented because not exposed in a standard build */ - { - {"trace_syncscan", PGC_USERSET, DEVELOPER_OPTIONS, - gettext_noop("Generate debugging output for synchronized scanning."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &trace_syncscan, - false, - NULL, NULL, NULL - }, -#endif - -#ifdef DEBUG_BOUNDED_SORT - /* this is undocumented because not exposed in a standard build */ - { - { - "optimize_bounded_sort", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables bounded sorting using heap sort."), - NULL, - GUC_NOT_IN_SAMPLE | GUC_EXPLAIN - }, - &optimize_bounded_sort, - true, - NULL, NULL, NULL - }, -#endif - -#ifdef WAL_DEBUG - { - {"wal_debug", PGC_SUSET, DEVELOPER_OPTIONS, - gettext_noop("Emit WAL-related debugging output."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &XLOG_DEBUG, - false, - NULL, NULL, NULL - }, -#endif - - { - {"integer_datetimes", PGC_INTERNAL, PRESET_OPTIONS, - gettext_noop("Shows whether datetimes are integer based."), - NULL, - GUC_REPORT | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE - }, - &integer_datetimes, - true, - NULL, NULL, NULL - }, - - { - {"krb_caseins_users", PGC_SIGHUP, CONN_AUTH_AUTH, - gettext_noop("Sets whether Kerberos and GSSAPI user names should be treated as case-insensitive."), - NULL - }, - &pg_krb_caseins_users, - false, - NULL, NULL, NULL - }, - - { - {"gss_accept_delegation", PGC_SIGHUP, CONN_AUTH_AUTH, - gettext_noop("Sets whether GSSAPI delegation should be accepted from the client."), - NULL - }, - &pg_gss_accept_delegation, - false, - NULL, NULL, NULL - }, - - { - {"escape_string_warning", PGC_USERSET, COMPAT_OPTIONS_PREVIOUS, - gettext_noop("Warn about backslash escapes in ordinary string literals."), - NULL - }, - &escape_string_warning, - true, - NULL, NULL, NULL - }, - - { - {"standard_conforming_strings", PGC_USERSET, COMPAT_OPTIONS_PREVIOUS, - gettext_noop("Causes '...' 
strings to treat backslashes literally."), - NULL, - GUC_REPORT - }, - &standard_conforming_strings, - true, - NULL, NULL, NULL - }, - - { - {"synchronize_seqscans", PGC_USERSET, COMPAT_OPTIONS_PREVIOUS, - gettext_noop("Enables synchronized sequential scans."), - NULL - }, - &synchronize_seqscans, - true, - NULL, NULL, NULL - }, - - { - {"recovery_target_inclusive", PGC_POSTMASTER, WAL_RECOVERY_TARGET, - gettext_noop("Sets whether to include or exclude transaction with recovery target."), - NULL - }, - &recoveryTargetInclusive, - true, - NULL, NULL, NULL - }, - - { - {"summarize_wal", PGC_SIGHUP, WAL_SUMMARIZATION, - gettext_noop("Starts the WAL summarizer process to enable incremental backup."), - NULL - }, - &summarize_wal, - false, - NULL, NULL, NULL - }, - - { - {"hot_standby", PGC_POSTMASTER, REPLICATION_STANDBY, - gettext_noop("Allows connections and queries during recovery."), - NULL - }, - &EnableHotStandby, - true, - NULL, NULL, NULL - }, - - { - {"hot_standby_feedback", PGC_SIGHUP, REPLICATION_STANDBY, - gettext_noop("Allows feedback from a hot standby to the primary that will avoid query conflicts."), - NULL - }, - &hot_standby_feedback, - false, - NULL, NULL, NULL - }, - - { - {"in_hot_standby", PGC_INTERNAL, PRESET_OPTIONS, - gettext_noop("Shows whether hot standby is currently active."), - NULL, - GUC_REPORT | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE - }, - &in_hot_standby_guc, - false, - NULL, NULL, show_in_hot_standby - }, - - { - {"allow_system_table_mods", PGC_SUSET, DEVELOPER_OPTIONS, - gettext_noop("Allows modifications of the structure of system tables."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &allowSystemTableMods, - false, - NULL, NULL, NULL - }, - - { - {"ignore_system_indexes", PGC_BACKEND, DEVELOPER_OPTIONS, - gettext_noop("Disables reading from system indexes."), - gettext_noop("It does not prevent updating the indexes, so it is safe " - "to use. 
The worst consequence is slowness."), - GUC_NOT_IN_SAMPLE - }, - &IgnoreSystemIndexes, - false, - NULL, NULL, NULL - }, - - { - {"allow_in_place_tablespaces", PGC_SUSET, DEVELOPER_OPTIONS, - gettext_noop("Allows tablespaces directly inside pg_tblspc, for testing."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &allow_in_place_tablespaces, - false, - NULL, NULL, NULL - }, - - { - {"lo_compat_privileges", PGC_SUSET, COMPAT_OPTIONS_PREVIOUS, - gettext_noop("Enables backward compatibility mode for privilege checks on large objects."), - gettext_noop("Skips privilege checks when reading or modifying large objects, " - "for compatibility with PostgreSQL releases prior to 9.0.") - }, - &lo_compat_privileges, - false, - NULL, NULL, NULL - }, - - { - {"quote_all_identifiers", PGC_USERSET, COMPAT_OPTIONS_PREVIOUS, - gettext_noop("When generating SQL fragments, quote all identifiers."), - NULL, - }, - &quote_all_identifiers, - false, - NULL, NULL, NULL - }, - - { - {"data_checksums", PGC_INTERNAL, PRESET_OPTIONS, - gettext_noop("Shows whether data checksums are turned on for this cluster."), - NULL, - GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_RUNTIME_COMPUTED - }, - &data_checksums, - false, - NULL, NULL, NULL - }, - - { - {"syslog_sequence_numbers", PGC_SIGHUP, LOGGING_WHERE, - gettext_noop("Add sequence number to syslog messages to avoid duplicate suppression."), - NULL - }, - &syslog_sequence_numbers, - true, - NULL, NULL, NULL - }, - - { - {"syslog_split_messages", PGC_SIGHUP, LOGGING_WHERE, - gettext_noop("Split messages sent to syslog by lines and to fit into 1024 bytes."), - NULL - }, - &syslog_split_messages, - true, - NULL, NULL, NULL - }, - - { - {"parallel_leader_participation", PGC_USERSET, RESOURCES_WORKER_PROCESSES, - gettext_noop("Controls whether Gather and Gather Merge also run subplans."), - gettext_noop("Should gather nodes also run subplans or just gather tuples?"), - GUC_EXPLAIN - }, - &parallel_leader_participation, - true, - NULL, NULL, NULL - }, - - { - {"jit", PGC_USERSET, QUERY_TUNING_OTHER, - gettext_noop("Allow JIT compilation."), - NULL, - GUC_EXPLAIN - }, - &jit_enabled, - true, - NULL, NULL, NULL - }, - - { - {"jit_debugging_support", PGC_SU_BACKEND, DEVELOPER_OPTIONS, - gettext_noop("Register JIT-compiled functions with debugger."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &jit_debugging_support, - false, - - /* - * This is not guaranteed to be available, but given it's a developer - * oriented option, it doesn't seem worth adding code checking - * availability. - */ - NULL, NULL, NULL - }, - - { - {"jit_dump_bitcode", PGC_SUSET, DEVELOPER_OPTIONS, - gettext_noop("Write out LLVM bitcode to facilitate JIT debugging."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &jit_dump_bitcode, - false, - NULL, NULL, NULL - }, - - { - {"jit_expressions", PGC_USERSET, DEVELOPER_OPTIONS, - gettext_noop("Allow JIT compilation of expressions."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &jit_expressions, - true, - NULL, NULL, NULL - }, - - { - {"jit_profiling_support", PGC_SU_BACKEND, DEVELOPER_OPTIONS, - gettext_noop("Register JIT-compiled functions with perf profiler."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &jit_profiling_support, - false, - - /* - * This is not guaranteed to be available, but given it's a developer - * oriented option, it doesn't seem worth adding code checking - * availability. 
- */ - NULL, NULL, NULL - }, - - { - {"jit_tuple_deforming", PGC_USERSET, DEVELOPER_OPTIONS, - gettext_noop("Allow JIT compilation of tuple deforming."), - NULL, - GUC_NOT_IN_SAMPLE - }, - &jit_tuple_deforming, - true, - NULL, NULL, NULL - }, - - { - {"data_sync_retry", PGC_POSTMASTER, ERROR_HANDLING_OPTIONS, - gettext_noop("Whether to continue running after a failure to sync data files."), - }, - &data_sync_retry, - false, - NULL, NULL, NULL - }, - - { - {"wal_receiver_create_temp_slot", PGC_SIGHUP, REPLICATION_STANDBY, - gettext_noop("Sets whether a WAL receiver should create a temporary replication slot if no permanent slot is configured."), - }, - &wal_receiver_create_temp_slot, - false, - NULL, NULL, NULL - }, - - { - {"event_triggers", PGC_SUSET, CLIENT_CONN_STATEMENT, - gettext_noop("Enables event triggers."), - gettext_noop("When enabled, event triggers will fire for all applicable statements."), - }, - &event_triggers, - true, - NULL, NULL, NULL - }, - - { - {"sync_replication_slots", PGC_SIGHUP, REPLICATION_STANDBY, - gettext_noop("Enables a physical standby to synchronize logical failover replication slots from the primary server."), - }, - &sync_replication_slots, - false, - NULL, NULL, NULL - }, - - { - {"md5_password_warnings", PGC_USERSET, CONN_AUTH_AUTH, - gettext_noop("Enables deprecation warnings for MD5 passwords."), - }, - &md5_password_warnings, - true, - NULL, NULL, NULL - }, - - { - {"vacuum_truncate", PGC_USERSET, VACUUM_DEFAULT, - gettext_noop("Enables vacuum to truncate empty pages at the end of the table."), - }, - &vacuum_truncate, - true, - NULL, NULL, NULL - }, - - /* End-of-list marker */ - { - {NULL, 0, 0, NULL, NULL}, NULL, false, NULL, NULL, NULL - } -}; - - -struct config_int ConfigureNamesInt[] = -{ - { - {"archive_timeout", PGC_SIGHUP, WAL_ARCHIVING, - gettext_noop("Sets the amount of time to wait before forcing a " - "switch to the next WAL file."), - gettext_noop("0 disables the timeout."), - GUC_UNIT_S - }, - &XLogArchiveTimeout, - 0, 0, INT_MAX / 2, - NULL, NULL, NULL - }, - { - {"post_auth_delay", PGC_BACKEND, DEVELOPER_OPTIONS, - gettext_noop("Sets the amount of time to wait after " - "authentication on connection startup."), - gettext_noop("This allows attaching a debugger to the process."), - GUC_NOT_IN_SAMPLE | GUC_UNIT_S - }, - &PostAuthDelay, - 0, 0, INT_MAX / 1000000, - NULL, NULL, NULL - }, - { - {"default_statistics_target", PGC_USERSET, QUERY_TUNING_OTHER, - gettext_noop("Sets the default statistics target."), - gettext_noop("This applies to table columns that have not had a " - "column-specific target set via ALTER TABLE SET STATISTICS.") - }, - &default_statistics_target, - 100, 1, MAX_STATISTICS_TARGET, - NULL, NULL, NULL - }, - { - {"from_collapse_limit", PGC_USERSET, QUERY_TUNING_OTHER, - gettext_noop("Sets the FROM-list size beyond which subqueries " - "are not collapsed."), - gettext_noop("The planner will merge subqueries into upper " - "queries if the resulting FROM list would have no more than " - "this many items."), - GUC_EXPLAIN - }, - &from_collapse_limit, - 8, 1, INT_MAX, - NULL, NULL, NULL - }, - { - {"join_collapse_limit", PGC_USERSET, QUERY_TUNING_OTHER, - gettext_noop("Sets the FROM-list size beyond which JOIN " - "constructs are not flattened."), - gettext_noop("The planner will flatten explicit JOIN " - "constructs into lists of FROM items whenever a " - "list of no more than this many items would result."), - GUC_EXPLAIN - }, - &join_collapse_limit, - 8, 1, INT_MAX, - NULL, NULL, NULL - }, - { - {"geqo_threshold", 
PGC_USERSET, QUERY_TUNING_GEQO, - gettext_noop("Sets the threshold of FROM items beyond which GEQO is used."), - NULL, - GUC_EXPLAIN - }, - &geqo_threshold, - 12, 2, INT_MAX, - NULL, NULL, NULL - }, - { - {"geqo_effort", PGC_USERSET, QUERY_TUNING_GEQO, - gettext_noop("GEQO: effort is used to set the default for other GEQO parameters."), - NULL, - GUC_EXPLAIN - }, - &Geqo_effort, - DEFAULT_GEQO_EFFORT, MIN_GEQO_EFFORT, MAX_GEQO_EFFORT, - NULL, NULL, NULL - }, - { - {"geqo_pool_size", PGC_USERSET, QUERY_TUNING_GEQO, - gettext_noop("GEQO: number of individuals in the population."), - gettext_noop("0 means use a suitable default value."), - GUC_EXPLAIN - }, - &Geqo_pool_size, - 0, 0, INT_MAX, - NULL, NULL, NULL - }, - { - {"geqo_generations", PGC_USERSET, QUERY_TUNING_GEQO, - gettext_noop("GEQO: number of iterations of the algorithm."), - gettext_noop("0 means use a suitable default value."), - GUC_EXPLAIN - }, - &Geqo_generations, - 0, 0, INT_MAX, - NULL, NULL, NULL - }, - - { - /* This is PGC_SUSET to prevent hiding from log_lock_waits. */ - {"deadlock_timeout", PGC_SUSET, LOCK_MANAGEMENT, - gettext_noop("Sets the time to wait on a lock before checking for deadlock."), - NULL, - GUC_UNIT_MS - }, - &DeadlockTimeout, - 1000, 1, INT_MAX, - NULL, NULL, NULL - }, - - { - {"max_standby_archive_delay", PGC_SIGHUP, REPLICATION_STANDBY, - gettext_noop("Sets the maximum delay before canceling queries when a hot standby server is processing archived WAL data."), - gettext_noop("-1 means wait forever."), - GUC_UNIT_MS - }, - &max_standby_archive_delay, - 30 * 1000, -1, INT_MAX, - NULL, NULL, NULL - }, - - { - {"max_standby_streaming_delay", PGC_SIGHUP, REPLICATION_STANDBY, - gettext_noop("Sets the maximum delay before canceling queries when a hot standby server is processing streamed WAL data."), - gettext_noop("-1 means wait forever."), - GUC_UNIT_MS - }, - &max_standby_streaming_delay, - 30 * 1000, -1, INT_MAX, - NULL, NULL, NULL - }, - - { - {"recovery_min_apply_delay", PGC_SIGHUP, REPLICATION_STANDBY, - gettext_noop("Sets the minimum delay for applying changes during recovery."), - NULL, - GUC_UNIT_MS - }, - &recovery_min_apply_delay, - 0, 0, INT_MAX, - NULL, NULL, NULL - }, - - { - {"wal_receiver_status_interval", PGC_SIGHUP, REPLICATION_STANDBY, - gettext_noop("Sets the maximum interval between WAL receiver status reports to the sending server."), - NULL, - GUC_UNIT_S - }, - &wal_receiver_status_interval, - 10, 0, INT_MAX / 1000, - NULL, NULL, NULL - }, - - { - {"wal_receiver_timeout", PGC_SIGHUP, REPLICATION_STANDBY, - gettext_noop("Sets the maximum wait time to receive data from the sending server."), - gettext_noop("0 disables the timeout."), - GUC_UNIT_MS - }, - &wal_receiver_timeout, - 60 * 1000, 0, INT_MAX, - NULL, NULL, NULL - }, - - { - {"max_connections", PGC_POSTMASTER, CONN_AUTH_SETTINGS, - gettext_noop("Sets the maximum number of concurrent connections."), - NULL - }, - &MaxConnections, - 100, 1, MAX_BACKENDS, - NULL, NULL, NULL - }, - - { - /* see max_connections */ - {"superuser_reserved_connections", PGC_POSTMASTER, CONN_AUTH_SETTINGS, - gettext_noop("Sets the number of connection slots reserved for superusers."), - NULL - }, - &SuperuserReservedConnections, - 3, 0, MAX_BACKENDS, - NULL, NULL, NULL - }, - - { - {"reserved_connections", PGC_POSTMASTER, CONN_AUTH_SETTINGS, - gettext_noop("Sets the number of connection slots reserved for roles " - "with privileges of pg_use_reserved_connections."), - NULL - }, - &ReservedConnections, - 0, 0, MAX_BACKENDS, - NULL, NULL, NULL - }, - - { 
- {"min_dynamic_shared_memory", PGC_POSTMASTER, RESOURCES_MEM, - gettext_noop("Amount of dynamic shared memory reserved at startup."), - NULL, - GUC_UNIT_MB - }, - &min_dynamic_shared_memory, - 0, 0, (int) Min((size_t) INT_MAX, SIZE_MAX / (1024 * 1024)), - NULL, NULL, NULL - }, - - /* - * We sometimes multiply the number of shared buffers by two without - * checking for overflow, so we mustn't allow more than INT_MAX / 2. - */ - { - {"shared_buffers", PGC_POSTMASTER, RESOURCES_MEM, - gettext_noop("Sets the number of shared memory buffers used by the server."), - NULL, - GUC_UNIT_BLOCKS - }, - &NBuffers, - 16384, 16, INT_MAX / 2, - NULL, NULL, NULL - }, - - { - {"vacuum_buffer_usage_limit", PGC_USERSET, RESOURCES_MEM, - gettext_noop("Sets the buffer pool size for VACUUM, ANALYZE, and autovacuum."), - NULL, - GUC_UNIT_KB - }, - &VacuumBufferUsageLimit, - 2048, 0, MAX_BAS_VAC_RING_SIZE_KB, - check_vacuum_buffer_usage_limit, NULL, NULL - }, - - { - {"shared_memory_size", PGC_INTERNAL, PRESET_OPTIONS, - gettext_noop("Shows the size of the server's main shared memory area (rounded up to the nearest MB)."), - NULL, - GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_UNIT_MB | GUC_RUNTIME_COMPUTED - }, - &shared_memory_size_mb, - 0, 0, INT_MAX, - NULL, NULL, NULL - }, - - { - {"shared_memory_size_in_huge_pages", PGC_INTERNAL, PRESET_OPTIONS, - gettext_noop("Shows the number of huge pages needed for the main shared memory area."), - gettext_noop("-1 means huge pages are not supported."), - GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_RUNTIME_COMPUTED - }, - &shared_memory_size_in_huge_pages, - -1, -1, INT_MAX, - NULL, NULL, NULL - }, - - { - {"num_os_semaphores", PGC_INTERNAL, PRESET_OPTIONS, - gettext_noop("Shows the number of semaphores required for the server."), - NULL, - GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_RUNTIME_COMPUTED - }, - &num_os_semaphores, - 0, 0, INT_MAX, - NULL, NULL, NULL - }, - - { - {"commit_timestamp_buffers", PGC_POSTMASTER, RESOURCES_MEM, - gettext_noop("Sets the size of the dedicated buffer pool used for the commit timestamp cache."), - gettext_noop("0 means use a fraction of \"shared_buffers\"."), - GUC_UNIT_BLOCKS - }, - &commit_timestamp_buffers, - 0, 0, SLRU_MAX_ALLOWED_BUFFERS, - check_commit_ts_buffers, NULL, NULL - }, - - { - {"multixact_member_buffers", PGC_POSTMASTER, RESOURCES_MEM, - gettext_noop("Sets the size of the dedicated buffer pool used for the MultiXact member cache."), - NULL, - GUC_UNIT_BLOCKS - }, - &multixact_member_buffers, - 32, 16, SLRU_MAX_ALLOWED_BUFFERS, - check_multixact_member_buffers, NULL, NULL - }, - - { - {"multixact_offset_buffers", PGC_POSTMASTER, RESOURCES_MEM, - gettext_noop("Sets the size of the dedicated buffer pool used for the MultiXact offset cache."), - NULL, - GUC_UNIT_BLOCKS - }, - &multixact_offset_buffers, - 16, 16, SLRU_MAX_ALLOWED_BUFFERS, - check_multixact_offset_buffers, NULL, NULL - }, - - { - {"notify_buffers", PGC_POSTMASTER, RESOURCES_MEM, - gettext_noop("Sets the size of the dedicated buffer pool used for the LISTEN/NOTIFY message cache."), - NULL, - GUC_UNIT_BLOCKS - }, - ¬ify_buffers, - 16, 16, SLRU_MAX_ALLOWED_BUFFERS, - check_notify_buffers, NULL, NULL - }, - - { - {"serializable_buffers", PGC_POSTMASTER, RESOURCES_MEM, - gettext_noop("Sets the size of the dedicated buffer pool used for the serializable transaction cache."), - NULL, - GUC_UNIT_BLOCKS - }, - &serializable_buffers, - 32, 16, SLRU_MAX_ALLOWED_BUFFERS, - check_serial_buffers, NULL, NULL - }, - - { - {"subtransaction_buffers", PGC_POSTMASTER, 
RESOURCES_MEM, - gettext_noop("Sets the size of the dedicated buffer pool used for the subtransaction cache."), - gettext_noop("0 means use a fraction of \"shared_buffers\"."), - GUC_UNIT_BLOCKS - }, - &subtransaction_buffers, - 0, 0, SLRU_MAX_ALLOWED_BUFFERS, - check_subtrans_buffers, NULL, NULL - }, - - { - {"transaction_buffers", PGC_POSTMASTER, RESOURCES_MEM, - gettext_noop("Sets the size of the dedicated buffer pool used for the transaction status cache."), - gettext_noop("0 means use a fraction of \"shared_buffers\"."), - GUC_UNIT_BLOCKS - }, - &transaction_buffers, - 0, 0, SLRU_MAX_ALLOWED_BUFFERS, - check_transaction_buffers, NULL, NULL - }, - - { - {"temp_buffers", PGC_USERSET, RESOURCES_MEM, - gettext_noop("Sets the maximum number of temporary buffers used by each session."), - NULL, - GUC_UNIT_BLOCKS | GUC_EXPLAIN - }, - &num_temp_buffers, - 1024, 100, INT_MAX / 2, - check_temp_buffers, NULL, NULL - }, - - { - {"port", PGC_POSTMASTER, CONN_AUTH_SETTINGS, - gettext_noop("Sets the TCP port the server listens on."), - NULL - }, - &PostPortNumber, - DEF_PGPORT, 1, 65535, - NULL, NULL, NULL - }, - - { - {"unix_socket_permissions", PGC_POSTMASTER, CONN_AUTH_SETTINGS, - gettext_noop("Sets the access permissions of the Unix-domain socket."), - gettext_noop("Unix-domain sockets use the usual Unix file system " - "permission set. The parameter value is expected " - "to be a numeric mode specification in the form " - "accepted by the chmod and umask system calls. " - "(To use the customary octal format the number must " - "start with a 0 (zero).)") - }, - &Unix_socket_permissions, - 0777, 0000, 0777, - NULL, NULL, show_unix_socket_permissions - }, - - { - {"log_file_mode", PGC_SIGHUP, LOGGING_WHERE, - gettext_noop("Sets the file permissions for log files."), - gettext_noop("The parameter value is expected " - "to be a numeric mode specification in the form " - "accepted by the chmod and umask system calls. " - "(To use the customary octal format the number must " - "start with a 0 (zero).)") - }, - &Log_file_mode, - 0600, 0000, 0777, - NULL, NULL, show_log_file_mode - }, - - - { - {"data_directory_mode", PGC_INTERNAL, PRESET_OPTIONS, - gettext_noop("Shows the mode of the data directory."), - gettext_noop("The parameter value is a numeric mode specification " - "in the form accepted by the chmod and umask system " - "calls. (To use the customary octal format the number " - "must start with a 0 (zero).)"), - GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_RUNTIME_COMPUTED - }, - &data_directory_mode, - 0700, 0000, 0777, - NULL, NULL, show_data_directory_mode - }, - - { - {"work_mem", PGC_USERSET, RESOURCES_MEM, - gettext_noop("Sets the maximum memory to be used for query workspaces."), - gettext_noop("This much memory can be used by each internal " - "sort operation and hash table before switching to " - "temporary disk files."), - GUC_UNIT_KB | GUC_EXPLAIN - }, - &work_mem, - 4096, 64, MAX_KILOBYTES, - NULL, NULL, NULL - }, - - /* - * Dynamic shared memory has a higher overhead than local memory contexts, - * so when testing low-memory scenarios that could use shared memory, the - * recommended minimum is 1MB. 
- */ - { - {"maintenance_work_mem", PGC_USERSET, RESOURCES_MEM, - gettext_noop("Sets the maximum memory to be used for maintenance operations."), - gettext_noop("This includes operations such as VACUUM and CREATE INDEX."), - GUC_UNIT_KB - }, - &maintenance_work_mem, - 65536, 64, MAX_KILOBYTES, - NULL, NULL, NULL - }, - - { - {"logical_decoding_work_mem", PGC_USERSET, RESOURCES_MEM, - gettext_noop("Sets the maximum memory to be used for logical decoding."), - gettext_noop("This much memory can be used by each internal " - "reorder buffer before spilling to disk."), - GUC_UNIT_KB - }, - &logical_decoding_work_mem, - 65536, 64, MAX_KILOBYTES, - NULL, NULL, NULL - }, - - /* - * We use the hopefully-safely-small value of 100kB as the compiled-in - * default for max_stack_depth. InitializeGUCOptions will increase it if - * possible, depending on the actual platform-specific stack limit. - */ - { - {"max_stack_depth", PGC_SUSET, RESOURCES_MEM, - gettext_noop("Sets the maximum stack depth, in kilobytes."), - NULL, - GUC_UNIT_KB - }, - &max_stack_depth, - 100, 100, MAX_KILOBYTES, - check_max_stack_depth, assign_max_stack_depth, NULL - }, - - { - {"temp_file_limit", PGC_SUSET, RESOURCES_DISK, - gettext_noop("Limits the total size of all temporary files used by each process."), - gettext_noop("-1 means no limit."), - GUC_UNIT_KB - }, - &temp_file_limit, - -1, -1, INT_MAX, - NULL, NULL, NULL - }, - - { - {"vacuum_cost_page_hit", PGC_USERSET, VACUUM_COST_DELAY, - gettext_noop("Vacuum cost for a page found in the buffer cache."), - NULL - }, - &VacuumCostPageHit, - 1, 0, 10000, - NULL, NULL, NULL - }, - - { - {"vacuum_cost_page_miss", PGC_USERSET, VACUUM_COST_DELAY, - gettext_noop("Vacuum cost for a page not found in the buffer cache."), - NULL - }, - &VacuumCostPageMiss, - 2, 0, 10000, - NULL, NULL, NULL - }, - - { - {"vacuum_cost_page_dirty", PGC_USERSET, VACUUM_COST_DELAY, - gettext_noop("Vacuum cost for a page dirtied by vacuum."), - NULL - }, - &VacuumCostPageDirty, - 20, 0, 10000, - NULL, NULL, NULL - }, - - { - {"vacuum_cost_limit", PGC_USERSET, VACUUM_COST_DELAY, - gettext_noop("Vacuum cost amount available before napping."), - NULL - }, - &VacuumCostLimit, - 200, 1, 10000, - NULL, NULL, NULL - }, - - { - {"autovacuum_vacuum_cost_limit", PGC_SIGHUP, VACUUM_AUTOVACUUM, - gettext_noop("Vacuum cost amount available before napping, for autovacuum."), - gettext_noop("-1 means use \"vacuum_cost_limit\".") - }, - &autovacuum_vac_cost_limit, - -1, -1, 10000, - NULL, NULL, NULL - }, - - { - {"max_files_per_process", PGC_POSTMASTER, RESOURCES_KERNEL, - gettext_noop("Sets the maximum number of files each server process is allowed to open simultaneously."), - NULL - }, - &max_files_per_process, - 1000, 64, INT_MAX, - NULL, NULL, NULL - }, - - /* - * See also CheckRequiredParameterValues() if this parameter changes - */ - { - {"max_prepared_transactions", PGC_POSTMASTER, RESOURCES_MEM, - gettext_noop("Sets the maximum number of simultaneously prepared transactions."), - NULL - }, - &max_prepared_xacts, - 0, 0, MAX_BACKENDS, - NULL, NULL, NULL - }, - -#ifdef LOCK_DEBUG - { - {"trace_lock_oidmin", PGC_SUSET, DEVELOPER_OPTIONS, - gettext_noop("Sets the minimum OID of tables for tracking locks."), - gettext_noop("Is used to avoid output on system tables."), - GUC_NOT_IN_SAMPLE - }, - &Trace_lock_oidmin, - FirstNormalObjectId, 0, INT_MAX, - NULL, NULL, NULL - }, - { - {"trace_lock_table", PGC_SUSET, DEVELOPER_OPTIONS, - gettext_noop("Sets the OID of the table with unconditionally lock tracing."), - NULL, - 
GUC_NOT_IN_SAMPLE - }, - &Trace_lock_table, - 0, 0, INT_MAX, - NULL, NULL, NULL - }, -#endif - - { - {"statement_timeout", PGC_USERSET, CLIENT_CONN_STATEMENT, - gettext_noop("Sets the maximum allowed duration of any statement."), - gettext_noop("0 disables the timeout."), - GUC_UNIT_MS - }, - &StatementTimeout, - 0, 0, INT_MAX, - NULL, NULL, NULL - }, - - { - {"lock_timeout", PGC_USERSET, CLIENT_CONN_STATEMENT, - gettext_noop("Sets the maximum allowed duration of any wait for a lock."), - gettext_noop("0 disables the timeout."), - GUC_UNIT_MS - }, - &LockTimeout, - 0, 0, INT_MAX, - NULL, NULL, NULL - }, - - { - {"idle_in_transaction_session_timeout", PGC_USERSET, CLIENT_CONN_STATEMENT, - gettext_noop("Sets the maximum allowed idle time between queries, when in a transaction."), - gettext_noop("0 disables the timeout."), - GUC_UNIT_MS - }, - &IdleInTransactionSessionTimeout, - 0, 0, INT_MAX, - NULL, NULL, NULL - }, - - { - {"transaction_timeout", PGC_USERSET, CLIENT_CONN_STATEMENT, - gettext_noop("Sets the maximum allowed duration of any transaction within a session (not a prepared transaction)."), - gettext_noop("0 disables the timeout."), - GUC_UNIT_MS - }, - &TransactionTimeout, - 0, 0, INT_MAX, - NULL, assign_transaction_timeout, NULL - }, - - { - {"idle_session_timeout", PGC_USERSET, CLIENT_CONN_STATEMENT, - gettext_noop("Sets the maximum allowed idle time between queries, when not in a transaction."), - gettext_noop("0 disables the timeout."), - GUC_UNIT_MS - }, - &IdleSessionTimeout, - 0, 0, INT_MAX, - NULL, NULL, NULL - }, - - { - {"vacuum_freeze_min_age", PGC_USERSET, VACUUM_FREEZING, - gettext_noop("Minimum age at which VACUUM should freeze a table row."), - NULL - }, - &vacuum_freeze_min_age, - 50000000, 0, 1000000000, - NULL, NULL, NULL - }, - - { - {"vacuum_freeze_table_age", PGC_USERSET, VACUUM_FREEZING, - gettext_noop("Age at which VACUUM should scan whole table to freeze tuples."), - NULL - }, - &vacuum_freeze_table_age, - 150000000, 0, 2000000000, - NULL, NULL, NULL - }, - - { - {"vacuum_multixact_freeze_min_age", PGC_USERSET, VACUUM_FREEZING, - gettext_noop("Minimum age at which VACUUM should freeze a MultiXactId in a table row."), - NULL - }, - &vacuum_multixact_freeze_min_age, - 5000000, 0, 1000000000, - NULL, NULL, NULL - }, - - { - {"vacuum_multixact_freeze_table_age", PGC_USERSET, VACUUM_FREEZING, - gettext_noop("Multixact age at which VACUUM should scan whole table to freeze tuples."), - NULL - }, - &vacuum_multixact_freeze_table_age, - 150000000, 0, 2000000000, - NULL, NULL, NULL - }, - - { - {"vacuum_failsafe_age", PGC_USERSET, VACUUM_FREEZING, - gettext_noop("Age at which VACUUM should trigger failsafe to avoid a wraparound outage."), - NULL - }, - &vacuum_failsafe_age, - 1600000000, 0, 2100000000, - NULL, NULL, NULL - }, - { - {"vacuum_multixact_failsafe_age", PGC_USERSET, VACUUM_FREEZING, - gettext_noop("Multixact age at which VACUUM should trigger failsafe to avoid a wraparound outage."), - NULL - }, - &vacuum_multixact_failsafe_age, - 1600000000, 0, 2100000000, - NULL, NULL, NULL - }, - - /* - * See also CheckRequiredParameterValues() if this parameter changes - */ - { - {"max_locks_per_transaction", PGC_POSTMASTER, LOCK_MANAGEMENT, - gettext_noop("Sets the maximum number of locks per transaction."), - gettext_noop("The shared lock table is sized on the assumption that at most " - "\"max_locks_per_transaction\" objects per server process or prepared " - "transaction will need to be locked at any one time.") - }, - &max_locks_per_xact, - 64, 10, INT_MAX, - 
NULL, NULL, NULL - }, - - { - {"max_pred_locks_per_transaction", PGC_POSTMASTER, LOCK_MANAGEMENT, - gettext_noop("Sets the maximum number of predicate locks per transaction."), - gettext_noop("The shared predicate lock table is sized on the assumption that " - "at most \"max_pred_locks_per_transaction\" objects per server process " - "or prepared transaction will need to be locked at any one time.") - }, - &max_predicate_locks_per_xact, - 64, 10, INT_MAX, - NULL, NULL, NULL - }, - - { - {"max_pred_locks_per_relation", PGC_SIGHUP, LOCK_MANAGEMENT, - gettext_noop("Sets the maximum number of predicate-locked pages and tuples per relation."), - gettext_noop("If more than this total of pages and tuples in the same relation are locked " - "by a connection, those locks are replaced by a relation-level lock.") - }, - &max_predicate_locks_per_relation, - -2, INT_MIN, INT_MAX, - NULL, NULL, NULL - }, - - { - {"max_pred_locks_per_page", PGC_SIGHUP, LOCK_MANAGEMENT, - gettext_noop("Sets the maximum number of predicate-locked tuples per page."), - gettext_noop("If more than this number of tuples on the same page are locked " - "by a connection, those locks are replaced by a page-level lock.") - }, - &max_predicate_locks_per_page, - 2, 0, INT_MAX, - NULL, NULL, NULL - }, - - { - {"authentication_timeout", PGC_SIGHUP, CONN_AUTH_AUTH, - gettext_noop("Sets the maximum allowed time to complete client authentication."), - NULL, - GUC_UNIT_S - }, - &AuthenticationTimeout, - 60, 1, 600, - NULL, NULL, NULL - }, - - { - /* Not for general use */ - {"pre_auth_delay", PGC_SIGHUP, DEVELOPER_OPTIONS, - gettext_noop("Sets the amount of time to wait before " - "authentication on connection startup."), - gettext_noop("This allows attaching a debugger to the process."), - GUC_NOT_IN_SAMPLE | GUC_UNIT_S - }, - &PreAuthDelay, - 0, 0, 60, - NULL, NULL, NULL - }, - - { - {"max_notify_queue_pages", PGC_POSTMASTER, RESOURCES_DISK, - gettext_noop("Sets the maximum number of allocated pages for NOTIFY / LISTEN queue."), - NULL, - }, - &max_notify_queue_pages, - 1048576, 64, INT_MAX, - NULL, NULL, NULL - }, - - { - {"wal_decode_buffer_size", PGC_POSTMASTER, WAL_RECOVERY, - gettext_noop("Buffer size for reading ahead in the WAL during recovery."), - gettext_noop("Maximum distance to read ahead in the WAL to prefetch referenced data blocks."), - GUC_UNIT_BYTE - }, - &wal_decode_buffer_size, - 512 * 1024, 64 * 1024, MaxAllocSize, - NULL, NULL, NULL - }, - - { - {"wal_keep_size", PGC_SIGHUP, REPLICATION_SENDING, - gettext_noop("Sets the size of WAL files held for standby servers."), - NULL, - GUC_UNIT_MB - }, - &wal_keep_size_mb, - 0, 0, MAX_KILOBYTES, - NULL, NULL, NULL - }, - - { - {"min_wal_size", PGC_SIGHUP, WAL_CHECKPOINTS, - gettext_noop("Sets the minimum size to shrink the WAL to."), - NULL, - GUC_UNIT_MB - }, - &min_wal_size_mb, - DEFAULT_MIN_WAL_SEGS * (DEFAULT_XLOG_SEG_SIZE / (1024 * 1024)), - 2, MAX_KILOBYTES, - NULL, NULL, NULL - }, - - { - {"max_wal_size", PGC_SIGHUP, WAL_CHECKPOINTS, - gettext_noop("Sets the WAL size that triggers a checkpoint."), - NULL, - GUC_UNIT_MB - }, - &max_wal_size_mb, - DEFAULT_MAX_WAL_SEGS * (DEFAULT_XLOG_SEG_SIZE / (1024 * 1024)), - 2, MAX_KILOBYTES, - NULL, assign_max_wal_size, NULL - }, - - { - {"checkpoint_timeout", PGC_SIGHUP, WAL_CHECKPOINTS, - gettext_noop("Sets the maximum time between automatic WAL checkpoints."), - NULL, - GUC_UNIT_S - }, - &CheckPointTimeout, - 300, 30, 86400, - NULL, NULL, NULL - }, - - { - {"checkpoint_warning", PGC_SIGHUP, WAL_CHECKPOINTS, - gettext_noop("Sets 
the maximum time before warning if checkpoints " - "triggered by WAL volume happen too frequently."), - gettext_noop("Write a message to the server log if checkpoints " - "caused by the filling of WAL segment files happen more " - "frequently than this amount of time. " - "0 disables the warning."), - GUC_UNIT_S - }, - &CheckPointWarning, - 30, 0, INT_MAX, - NULL, NULL, NULL - }, - - { - {"checkpoint_flush_after", PGC_SIGHUP, WAL_CHECKPOINTS, - gettext_noop("Number of pages after which previously performed writes are flushed to disk."), - gettext_noop("0 disables forced writeback."), - GUC_UNIT_BLOCKS - }, - &checkpoint_flush_after, - DEFAULT_CHECKPOINT_FLUSH_AFTER, 0, WRITEBACK_MAX_PENDING_FLUSHES, - NULL, NULL, NULL - }, - - { - {"wal_buffers", PGC_POSTMASTER, WAL_SETTINGS, - gettext_noop("Sets the number of disk-page buffers in shared memory for WAL."), - gettext_noop("-1 means use a fraction of \"shared_buffers\"."), - GUC_UNIT_XBLOCKS - }, - &XLOGbuffers, - -1, -1, (INT_MAX / XLOG_BLCKSZ), - check_wal_buffers, NULL, NULL - }, - - { - {"wal_writer_delay", PGC_SIGHUP, WAL_SETTINGS, - gettext_noop("Time between WAL flushes performed in the WAL writer."), - NULL, - GUC_UNIT_MS - }, - &WalWriterDelay, - 200, 1, 10000, - NULL, NULL, NULL - }, - - { - {"wal_writer_flush_after", PGC_SIGHUP, WAL_SETTINGS, - gettext_noop("Amount of WAL written out by WAL writer that triggers a flush."), - NULL, - GUC_UNIT_XBLOCKS - }, - &WalWriterFlushAfter, - DEFAULT_WAL_WRITER_FLUSH_AFTER, 0, INT_MAX, - NULL, NULL, NULL - }, - - { - {"wal_skip_threshold", PGC_USERSET, WAL_SETTINGS, - gettext_noop("Minimum size of new file to fsync instead of writing WAL."), - NULL, - GUC_UNIT_KB - }, - &wal_skip_threshold, - 2048, 0, MAX_KILOBYTES, - NULL, NULL, NULL - }, - - { - {"max_wal_senders", PGC_POSTMASTER, REPLICATION_SENDING, - gettext_noop("Sets the maximum number of simultaneously running WAL sender processes."), - NULL - }, - &max_wal_senders, - 10, 0, MAX_BACKENDS, - NULL, NULL, NULL - }, - - { - /* see max_wal_senders */ - {"max_replication_slots", PGC_POSTMASTER, REPLICATION_SENDING, - gettext_noop("Sets the maximum number of simultaneously defined replication slots."), - NULL - }, - &max_replication_slots, - 10, 0, MAX_BACKENDS /* XXX? */ , - NULL, NULL, NULL - }, - - { - {"max_slot_wal_keep_size", PGC_SIGHUP, REPLICATION_SENDING, - gettext_noop("Sets the maximum WAL size that can be reserved by replication slots."), - gettext_noop("Replication slots will be marked as failed, and segments released " - "for deletion or recycling, if this much space is occupied by WAL on disk. 
" - "-1 means no maximum."), - GUC_UNIT_MB - }, - &max_slot_wal_keep_size_mb, - -1, -1, MAX_KILOBYTES, - NULL, NULL, NULL - }, - - { - {"wal_sender_timeout", PGC_USERSET, REPLICATION_SENDING, - gettext_noop("Sets the maximum time to wait for WAL replication."), - NULL, - GUC_UNIT_MS - }, - &wal_sender_timeout, - 60 * 1000, 0, INT_MAX, - NULL, NULL, NULL - }, - - { - {"idle_replication_slot_timeout", PGC_SIGHUP, REPLICATION_SENDING, - gettext_noop("Sets the duration a replication slot can remain idle before " - "it is invalidated."), - NULL, - GUC_UNIT_S - }, - &idle_replication_slot_timeout_secs, - 0, 0, INT_MAX, - NULL, NULL, NULL - }, - - { - {"commit_delay", PGC_SUSET, WAL_SETTINGS, - gettext_noop("Sets the delay in microseconds between transaction commit and " - "flushing WAL to disk."), - NULL - /* we have no microseconds designation, so can't supply units here */ - }, - &CommitDelay, - 0, 0, 100000, - NULL, NULL, NULL - }, - - { - {"commit_siblings", PGC_USERSET, WAL_SETTINGS, - gettext_noop("Sets the minimum number of concurrent open transactions " - "required before performing \"commit_delay\"."), - NULL - }, - &CommitSiblings, - 5, 0, 1000, - NULL, NULL, NULL - }, - - { - {"extra_float_digits", PGC_USERSET, CLIENT_CONN_LOCALE, - gettext_noop("Sets the number of digits displayed for floating-point values."), - gettext_noop("This affects real, double precision, and geometric data types. " - "A zero or negative parameter value is added to the standard " - "number of digits (FLT_DIG or DBL_DIG as appropriate). " - "Any value greater than zero selects precise output mode.") - }, - &extra_float_digits, - 1, -15, 3, - NULL, NULL, NULL - }, - - { - {"log_min_duration_sample", PGC_SUSET, LOGGING_WHEN, - gettext_noop("Sets the minimum execution time above which " - "a sample of statements will be logged." - " Sampling is determined by \"log_statement_sample_rate\"."), - gettext_noop("-1 disables sampling. 0 means sample all statements."), - GUC_UNIT_MS - }, - &log_min_duration_sample, - -1, -1, INT_MAX, - NULL, NULL, NULL - }, - - { - {"log_min_duration_statement", PGC_SUSET, LOGGING_WHEN, - gettext_noop("Sets the minimum execution time above which " - "all statements will be logged."), - gettext_noop("-1 disables logging statement durations. 0 means log all statement durations."), - GUC_UNIT_MS - }, - &log_min_duration_statement, - -1, -1, INT_MAX, - NULL, NULL, NULL - }, - - { - {"log_autovacuum_min_duration", PGC_SIGHUP, LOGGING_WHAT, - gettext_noop("Sets the minimum execution time above which " - "autovacuum actions will be logged."), - gettext_noop("-1 disables logging autovacuum actions. 
0 means log all autovacuum actions."), - GUC_UNIT_MS - }, - &Log_autovacuum_min_duration, - 600000, -1, INT_MAX, - NULL, NULL, NULL - }, - - { - {"log_parameter_max_length", PGC_SUSET, LOGGING_WHAT, - gettext_noop("Sets the maximum length in bytes of data logged for bind " - "parameter values when logging statements."), - gettext_noop("-1 means log values in full."), - GUC_UNIT_BYTE - }, - &log_parameter_max_length, - -1, -1, INT_MAX / 2, - NULL, NULL, NULL - }, - - { - {"log_parameter_max_length_on_error", PGC_USERSET, LOGGING_WHAT, - gettext_noop("Sets the maximum length in bytes of data logged for bind " - "parameter values when logging statements, on error."), - gettext_noop("-1 means log values in full."), - GUC_UNIT_BYTE - }, - &log_parameter_max_length_on_error, - 0, -1, INT_MAX / 2, - NULL, NULL, NULL - }, - - { - {"bgwriter_delay", PGC_SIGHUP, RESOURCES_BGWRITER, - gettext_noop("Background writer sleep time between rounds."), - NULL, - GUC_UNIT_MS - }, - &BgWriterDelay, - 200, 10, 10000, - NULL, NULL, NULL - }, - - { - {"bgwriter_lru_maxpages", PGC_SIGHUP, RESOURCES_BGWRITER, - gettext_noop("Background writer maximum number of LRU pages to flush per round."), - gettext_noop("0 disables background writing.") - }, - &bgwriter_lru_maxpages, - 100, 0, INT_MAX / 2, /* Same upper limit as shared_buffers */ - NULL, NULL, NULL - }, - - { - {"bgwriter_flush_after", PGC_SIGHUP, RESOURCES_BGWRITER, - gettext_noop("Number of pages after which previously performed writes are flushed to disk."), - gettext_noop("0 disables forced writeback."), - GUC_UNIT_BLOCKS - }, - &bgwriter_flush_after, - DEFAULT_BGWRITER_FLUSH_AFTER, 0, WRITEBACK_MAX_PENDING_FLUSHES, - NULL, NULL, NULL - }, - - { - {"effective_io_concurrency", - PGC_USERSET, - RESOURCES_IO, - gettext_noop("Number of simultaneous requests that can be handled efficiently by the disk subsystem."), - gettext_noop("0 disables simultaneous requests."), - GUC_EXPLAIN - }, - &effective_io_concurrency, - DEFAULT_EFFECTIVE_IO_CONCURRENCY, - 0, MAX_IO_CONCURRENCY, - NULL, NULL, NULL - }, - - { - {"maintenance_io_concurrency", - PGC_USERSET, - RESOURCES_IO, - gettext_noop("A variant of \"effective_io_concurrency\" that is used for maintenance work."), - gettext_noop("0 disables simultaneous requests."), - GUC_EXPLAIN - }, - &maintenance_io_concurrency, - DEFAULT_MAINTENANCE_IO_CONCURRENCY, - 0, MAX_IO_CONCURRENCY, - NULL, assign_maintenance_io_concurrency, - NULL - }, - - { - {"io_max_combine_limit", - PGC_POSTMASTER, - RESOURCES_IO, - gettext_noop("Server-wide limit that clamps io_combine_limit."), - NULL, - GUC_UNIT_BLOCKS - }, - &io_max_combine_limit, - DEFAULT_IO_COMBINE_LIMIT, - 1, MAX_IO_COMBINE_LIMIT, - NULL, assign_io_max_combine_limit, NULL - }, - - { - {"io_combine_limit", - PGC_USERSET, - RESOURCES_IO, - gettext_noop("Limit on the size of data reads and writes."), - NULL, - GUC_UNIT_BLOCKS - }, - &io_combine_limit_guc, - DEFAULT_IO_COMBINE_LIMIT, - 1, MAX_IO_COMBINE_LIMIT, - NULL, assign_io_combine_limit, NULL - }, - - { - {"io_max_concurrency", - PGC_POSTMASTER, - RESOURCES_IO, - gettext_noop("Max number of IOs that one process can execute simultaneously."), - NULL, - }, - &io_max_concurrency, - -1, -1, 1024, - check_io_max_concurrency, NULL, NULL - }, - - { - {"io_workers", - PGC_SIGHUP, - RESOURCES_IO, - gettext_noop("Number of IO worker processes, for io_method=worker."), - NULL, - }, - &io_workers, - 3, 1, MAX_IO_WORKERS, - NULL, NULL, NULL - }, - - { - {"backend_flush_after", PGC_USERSET, RESOURCES_IO, - gettext_noop("Number of pages 
after which previously performed writes are flushed to disk."),
-			gettext_noop("0 disables forced writeback."),
-			GUC_UNIT_BLOCKS
-		},
-		&backend_flush_after,
-		DEFAULT_BACKEND_FLUSH_AFTER, 0, WRITEBACK_MAX_PENDING_FLUSHES,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"max_worker_processes",
-			PGC_POSTMASTER,
-			RESOURCES_WORKER_PROCESSES,
-			gettext_noop("Maximum number of concurrent worker processes."),
-			NULL,
-		},
-		&max_worker_processes,
-		8, 0, MAX_BACKENDS,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"max_logical_replication_workers",
-			PGC_POSTMASTER,
-			REPLICATION_SUBSCRIBERS,
-			gettext_noop("Maximum number of logical replication worker processes."),
-			NULL,
-		},
-		&max_logical_replication_workers,
-		4, 0, MAX_BACKENDS,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"max_sync_workers_per_subscription",
-			PGC_SIGHUP,
-			REPLICATION_SUBSCRIBERS,
-			gettext_noop("Maximum number of table synchronization workers per subscription."),
-			NULL,
-		},
-		&max_sync_workers_per_subscription,
-		2, 0, MAX_BACKENDS,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"max_parallel_apply_workers_per_subscription",
-			PGC_SIGHUP,
-			REPLICATION_SUBSCRIBERS,
-			gettext_noop("Maximum number of parallel apply workers per subscription."),
-			NULL,
-		},
-		&max_parallel_apply_workers_per_subscription,
-		2, 0, MAX_PARALLEL_WORKER_LIMIT,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"max_active_replication_origins",
-			PGC_POSTMASTER,
-			REPLICATION_SUBSCRIBERS,
-			gettext_noop("Sets the maximum number of active replication origins."),
-			NULL
-		},
-		&max_active_replication_origins,
-		10, 0, MAX_BACKENDS,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"log_rotation_age", PGC_SIGHUP, LOGGING_WHERE,
-			gettext_noop("Sets the amount of time to wait before forcing "
-						 "log file rotation."),
-			gettext_noop("0 disables time-based creation of new log files."),
-			GUC_UNIT_MIN
-		},
-		&Log_RotationAge,
-		HOURS_PER_DAY * MINS_PER_HOUR, 0, INT_MAX / SECS_PER_MINUTE,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"log_rotation_size", PGC_SIGHUP, LOGGING_WHERE,
-			gettext_noop("Sets the maximum size a log file can reach before "
-						 "being rotated."),
-			gettext_noop("0 disables size-based creation of new log files."),
-			GUC_UNIT_KB
-		},
-		&Log_RotationSize,
-		10 * 1024, 0, INT_MAX,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"max_function_args", PGC_INTERNAL, PRESET_OPTIONS,
-			gettext_noop("Shows the maximum number of function arguments."),
-			NULL,
-			GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE
-		},
-		&max_function_args,
-		FUNC_MAX_ARGS, FUNC_MAX_ARGS, FUNC_MAX_ARGS,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"max_index_keys", PGC_INTERNAL, PRESET_OPTIONS,
-			gettext_noop("Shows the maximum number of index keys."),
-			NULL,
-			GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE
-		},
-		&max_index_keys,
-		INDEX_MAX_KEYS, INDEX_MAX_KEYS, INDEX_MAX_KEYS,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"max_identifier_length", PGC_INTERNAL, PRESET_OPTIONS,
-			gettext_noop("Shows the maximum identifier length."),
-			NULL,
-			GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE
-		},
-		&max_identifier_length,
-		NAMEDATALEN - 1, NAMEDATALEN - 1, NAMEDATALEN - 1,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"block_size", PGC_INTERNAL, PRESET_OPTIONS,
-			gettext_noop("Shows the size of a disk block."),
-			NULL,
-			GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE
-		},
-		&block_size,
-		BLCKSZ, BLCKSZ, BLCKSZ,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"segment_size", PGC_INTERNAL, PRESET_OPTIONS,
-			gettext_noop("Shows the number of pages per disk file."),
-			NULL,
-			GUC_UNIT_BLOCKS | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE
-		},
-		&segment_size,
-		RELSEG_SIZE, RELSEG_SIZE, RELSEG_SIZE,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"wal_block_size", PGC_INTERNAL, PRESET_OPTIONS,
-			gettext_noop("Shows the block size in the write ahead log."),
-			NULL,
-			GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE
-		},
-		&wal_block_size,
-		XLOG_BLCKSZ, XLOG_BLCKSZ, XLOG_BLCKSZ,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"wal_retrieve_retry_interval", PGC_SIGHUP, REPLICATION_STANDBY,
-			gettext_noop("Sets the time to wait before retrying to retrieve WAL "
-						 "after a failed attempt."),
-			NULL,
-			GUC_UNIT_MS
-		},
-		&wal_retrieve_retry_interval,
-		5000, 1, INT_MAX,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"wal_segment_size", PGC_INTERNAL, PRESET_OPTIONS,
-			gettext_noop("Shows the size of write ahead log segments."),
-			NULL,
-			GUC_UNIT_BYTE | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_RUNTIME_COMPUTED
-		},
-		&wal_segment_size,
-		DEFAULT_XLOG_SEG_SIZE,
-		WalSegMinSize,
-		WalSegMaxSize,
-		check_wal_segment_size, NULL, NULL
-	},
-
-	{
-		{"wal_summary_keep_time", PGC_SIGHUP, WAL_SUMMARIZATION,
-			gettext_noop("Time for which WAL summary files should be kept."),
-			gettext_noop("0 disables automatic summary file deletion."),
-			GUC_UNIT_MIN,
-		},
-		&wal_summary_keep_time,
-		10 * HOURS_PER_DAY * MINS_PER_HOUR, /* 10 days */
-		0,
-		INT_MAX / SECS_PER_MINUTE,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"autovacuum_naptime", PGC_SIGHUP, VACUUM_AUTOVACUUM,
-			gettext_noop("Time to sleep between autovacuum runs."),
-			NULL,
-			GUC_UNIT_S
-		},
-		&autovacuum_naptime,
-		60, 1, INT_MAX / 1000,
-		NULL, NULL, NULL
-	},
-	{
-		{"autovacuum_vacuum_threshold", PGC_SIGHUP, VACUUM_AUTOVACUUM,
-			gettext_noop("Minimum number of tuple updates or deletes prior to vacuum."),
-			NULL
-		},
-		&autovacuum_vac_thresh,
-		50, 0, INT_MAX,
-		NULL, NULL, NULL
-	},
-	{
-		{"autovacuum_vacuum_max_threshold", PGC_SIGHUP, VACUUM_AUTOVACUUM,
-			gettext_noop("Maximum number of tuple updates or deletes prior to vacuum."),
-			gettext_noop("-1 disables the maximum threshold.")
-		},
-		&autovacuum_vac_max_thresh,
-		100000000, -1, INT_MAX,
-		NULL, NULL, NULL
-	},
-	{
-		{"autovacuum_vacuum_insert_threshold", PGC_SIGHUP, VACUUM_AUTOVACUUM,
-			gettext_noop("Minimum number of tuple inserts prior to vacuum."),
-			gettext_noop("-1 disables insert vacuums.")
-		},
-		&autovacuum_vac_ins_thresh,
-		1000, -1, INT_MAX,
-		NULL, NULL, NULL
-	},
-	{
-		{"autovacuum_analyze_threshold", PGC_SIGHUP, VACUUM_AUTOVACUUM,
-			gettext_noop("Minimum number of tuple inserts, updates, or deletes prior to analyze."),
-			NULL
-		},
-		&autovacuum_anl_thresh,
-		50, 0, INT_MAX,
-		NULL, NULL, NULL
-	},
-	{
-		/* see varsup.c for why this is PGC_POSTMASTER not PGC_SIGHUP */
-		{"autovacuum_freeze_max_age", PGC_POSTMASTER, VACUUM_AUTOVACUUM,
-			gettext_noop("Age at which to autovacuum a table to prevent transaction ID wraparound."),
-			NULL
-		},
-		&autovacuum_freeze_max_age,
-
-		/* see vacuum_failsafe_age if you change the upper-limit value. */
-		200000000, 100000, 2000000000,
-		NULL, NULL, NULL
-	},
-	{
-		/* see multixact.c for why this is PGC_POSTMASTER not PGC_SIGHUP */
-		{"autovacuum_multixact_freeze_max_age", PGC_POSTMASTER, VACUUM_AUTOVACUUM,
-			gettext_noop("Multixact age at which to autovacuum a table to prevent multixact wraparound."),
-			NULL
-		},
-		&autovacuum_multixact_freeze_max_age,
-		400000000, 10000, 2000000000,
-		NULL, NULL, NULL
-	},
-	{
-		/* see max_connections */
-		{"autovacuum_worker_slots", PGC_POSTMASTER, VACUUM_AUTOVACUUM,
-			gettext_noop("Sets the number of backend slots to allocate for autovacuum workers."),
-			NULL
-		},
-		&autovacuum_worker_slots,
-		16, 1, MAX_BACKENDS,
-		NULL, NULL, NULL
-	},
-	{
-		{"autovacuum_max_workers", PGC_SIGHUP, VACUUM_AUTOVACUUM,
-			gettext_noop("Sets the maximum number of simultaneously running autovacuum worker processes."),
-			NULL
-		},
-		&autovacuum_max_workers,
-		3, 1, MAX_BACKENDS,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"max_parallel_maintenance_workers", PGC_USERSET, RESOURCES_WORKER_PROCESSES,
-			gettext_noop("Sets the maximum number of parallel processes per maintenance operation."),
-			NULL
-		},
-		&max_parallel_maintenance_workers,
-		2, 0, MAX_PARALLEL_WORKER_LIMIT,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"max_parallel_workers_per_gather", PGC_USERSET, RESOURCES_WORKER_PROCESSES,
-			gettext_noop("Sets the maximum number of parallel processes per executor node."),
-			NULL,
-			GUC_EXPLAIN
-		},
-		&max_parallel_workers_per_gather,
-		2, 0, MAX_PARALLEL_WORKER_LIMIT,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"max_parallel_workers", PGC_USERSET, RESOURCES_WORKER_PROCESSES,
-			gettext_noop("Sets the maximum number of parallel workers that can be active at one time."),
-			NULL,
-			GUC_EXPLAIN
-		},
-		&max_parallel_workers,
-		8, 0, MAX_PARALLEL_WORKER_LIMIT,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"autovacuum_work_mem", PGC_SIGHUP, RESOURCES_MEM,
-			gettext_noop("Sets the maximum memory to be used by each autovacuum worker process."),
-			gettext_noop("-1 means use \"maintenance_work_mem\"."),
-			GUC_UNIT_KB
-		},
-		&autovacuum_work_mem,
-		-1, -1, MAX_KILOBYTES,
-		check_autovacuum_work_mem, NULL, NULL
-	},
-
-	{
-		{"tcp_keepalives_idle", PGC_USERSET, CONN_AUTH_TCP,
-			gettext_noop("Time between issuing TCP keepalives."),
-			gettext_noop("0 means use the system default."),
-			GUC_UNIT_S
-		},
-		&tcp_keepalives_idle,
-		0, 0, INT_MAX,
-		NULL, assign_tcp_keepalives_idle, show_tcp_keepalives_idle
-	},
-
-	{
-		{"tcp_keepalives_interval", PGC_USERSET, CONN_AUTH_TCP,
-			gettext_noop("Time between TCP keepalive retransmits."),
-			gettext_noop("0 means use the system default."),
-			GUC_UNIT_S
-		},
-		&tcp_keepalives_interval,
-		0, 0, INT_MAX,
-		NULL, assign_tcp_keepalives_interval, show_tcp_keepalives_interval
-	},
-
-	{
-		{"ssl_renegotiation_limit", PGC_USERSET, COMPAT_OPTIONS_PREVIOUS,
-			gettext_noop("SSL renegotiation is no longer supported; this can only be 0."),
-			NULL,
-			GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE,
-		},
-		&ssl_renegotiation_limit,
-		0, 0, 0,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"tcp_keepalives_count", PGC_USERSET, CONN_AUTH_TCP,
-			gettext_noop("Maximum number of TCP keepalive retransmits."),
-			gettext_noop("Number of consecutive keepalive retransmits that can be "
-						 "lost before a connection is considered dead. "
-						 "0 means use the system default."),
-		},
-		&tcp_keepalives_count,
-		0, 0, INT_MAX,
-		NULL, assign_tcp_keepalives_count, show_tcp_keepalives_count
-	},
-
-	{
-		{"gin_fuzzy_search_limit", PGC_USERSET, CLIENT_CONN_OTHER,
-			gettext_noop("Sets the maximum allowed result for exact search by GIN."),
-			gettext_noop("0 means no limit."),
-		},
-		&GinFuzzySearchLimit,
-		0, 0, INT_MAX,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"effective_cache_size", PGC_USERSET, QUERY_TUNING_COST,
-			gettext_noop("Sets the planner's assumption about the total size of the data caches."),
-			gettext_noop("That is, the total size of the caches (kernel cache and shared buffers) used for PostgreSQL data files. "
-						 "This is measured in disk pages, which are normally 8 kB each."),
-			GUC_UNIT_BLOCKS | GUC_EXPLAIN,
-		},
-		&effective_cache_size,
-		DEFAULT_EFFECTIVE_CACHE_SIZE, 1, INT_MAX,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"min_parallel_table_scan_size", PGC_USERSET, QUERY_TUNING_COST,
-			gettext_noop("Sets the minimum amount of table data for a parallel scan."),
-			gettext_noop("If the planner estimates that it will read a number of table pages too small to reach this limit, a parallel scan will not be considered."),
-			GUC_UNIT_BLOCKS | GUC_EXPLAIN,
-		},
-		&min_parallel_table_scan_size,
-		(8 * 1024 * 1024) / BLCKSZ, 0, INT_MAX / 3,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"min_parallel_index_scan_size", PGC_USERSET, QUERY_TUNING_COST,
-			gettext_noop("Sets the minimum amount of index data for a parallel scan."),
-			gettext_noop("If the planner estimates that it will read a number of index pages too small to reach this limit, a parallel scan will not be considered."),
-			GUC_UNIT_BLOCKS | GUC_EXPLAIN,
-		},
-		&min_parallel_index_scan_size,
-		(512 * 1024) / BLCKSZ, 0, INT_MAX / 3,
-		NULL, NULL, NULL
-	},
-
-	{
-		/* Can't be set in postgresql.conf */
-		{"server_version_num", PGC_INTERNAL, PRESET_OPTIONS,
-			gettext_noop("Shows the server version as an integer."),
-			NULL,
-			GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE
-		},
-		&server_version_num,
-		PG_VERSION_NUM, PG_VERSION_NUM, PG_VERSION_NUM,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"log_temp_files", PGC_SUSET, LOGGING_WHAT,
-			gettext_noop("Log the use of temporary files larger than this number of kilobytes."),
-			gettext_noop("-1 disables logging temporary files. "
-						 "0 means log all temporary files."),
-			GUC_UNIT_KB
-		},
-		&log_temp_files,
-		-1, -1, INT_MAX,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"track_activity_query_size", PGC_POSTMASTER, STATS_CUMULATIVE,
-			gettext_noop("Sets the size reserved for pg_stat_activity.query, in bytes."),
-			NULL,
-			GUC_UNIT_BYTE
-		},
-		&pgstat_track_activity_query_size,
-		1024, 100, 1048576,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"gin_pending_list_limit", PGC_USERSET, CLIENT_CONN_STATEMENT,
-			gettext_noop("Sets the maximum size of the pending list for GIN index."),
-			NULL,
-			GUC_UNIT_KB
-		},
-		&gin_pending_list_limit,
-		4096, 64, MAX_KILOBYTES,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"tcp_user_timeout", PGC_USERSET, CONN_AUTH_TCP,
-			gettext_noop("TCP user timeout."),
-			gettext_noop("0 means use the system default."),
-			GUC_UNIT_MS
-		},
-		&tcp_user_timeout,
-		0, 0, INT_MAX,
-		NULL, assign_tcp_user_timeout, show_tcp_user_timeout
-	},
-
-	{
-		{"huge_page_size", PGC_POSTMASTER, RESOURCES_MEM,
-			gettext_noop("The size of huge page that should be requested."),
-			gettext_noop("0 means use the system default."),
-			GUC_UNIT_KB
-		},
-		&huge_page_size,
-		0, 0, INT_MAX,
-		check_huge_page_size, NULL, NULL
-	},
-
-	{
-		{"debug_discard_caches", PGC_SUSET, DEVELOPER_OPTIONS,
-			gettext_noop("Aggressively flush system caches for debugging purposes."),
-			gettext_noop("0 means use normal caching behavior."),
-			GUC_NOT_IN_SAMPLE
-		},
-		&debug_discard_caches,
-#ifdef DISCARD_CACHES_ENABLED
-		/* Set default based on older compile-time-only cache clobber macros */
-#if defined(CLOBBER_CACHE_RECURSIVELY)
-		3,
-#elif defined(CLOBBER_CACHE_ALWAYS)
-		1,
-#else
-		0,
-#endif
-		0, 5,
-#else							/* not DISCARD_CACHES_ENABLED */
-		0, 0, 0,
-#endif							/* not DISCARD_CACHES_ENABLED */
-		NULL, NULL, NULL
-	},
-
-	{
-		{"client_connection_check_interval", PGC_USERSET, CONN_AUTH_TCP,
-			gettext_noop("Sets the time interval between checks for disconnection while running queries."),
-			gettext_noop("0 disables connection checks."),
-			GUC_UNIT_MS
-		},
-		&client_connection_check_interval,
-		0, 0, INT_MAX,
-		check_client_connection_check_interval, NULL, NULL
-	},
-
-	{
-		{"log_startup_progress_interval", PGC_SIGHUP, LOGGING_WHEN,
-			gettext_noop("Time between progress updates for "
-						 "long-running startup operations."),
-			gettext_noop("0 disables progress updates."),
-			GUC_UNIT_MS,
-		},
-		&log_startup_progress_interval,
-		10000, 0, INT_MAX,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"scram_iterations", PGC_USERSET, CONN_AUTH_AUTH,
-			gettext_noop("Sets the iteration count for SCRAM secret generation."),
-			NULL,
-			GUC_REPORT
-		},
-		&scram_sha_256_iterations,
-		SCRAM_SHA_256_DEFAULT_ITERATIONS, 1, INT_MAX,
-		NULL, NULL, NULL
-	},
-
-	/* End-of-list marker */
-	{
-		{NULL, 0, 0, NULL, NULL}, NULL, 0, 0, 0, NULL, NULL, NULL
-	}
-};
-
-
-struct config_real ConfigureNamesReal[] =
-{
-	{
-		{"seq_page_cost", PGC_USERSET, QUERY_TUNING_COST,
-			gettext_noop("Sets the planner's estimate of the cost of a "
-						 "sequentially fetched disk page."),
-			NULL,
-			GUC_EXPLAIN
-		},
-		&seq_page_cost,
-		DEFAULT_SEQ_PAGE_COST, 0, DBL_MAX,
-		NULL, NULL, NULL
-	},
-	{
-		{"random_page_cost", PGC_USERSET, QUERY_TUNING_COST,
-			gettext_noop("Sets the planner's estimate of the cost of a "
-						 "nonsequentially fetched disk page."),
-			NULL,
-			GUC_EXPLAIN
-		},
-		&random_page_cost,
-		DEFAULT_RANDOM_PAGE_COST, 0, DBL_MAX,
-		NULL, NULL, NULL
-	},
-	{
-		{"cpu_tuple_cost", PGC_USERSET, QUERY_TUNING_COST,
-			gettext_noop("Sets the planner's estimate of the cost of "
-						 "processing each tuple (row)."),
-			NULL,
-			GUC_EXPLAIN
-		},
-		&cpu_tuple_cost,
-		DEFAULT_CPU_TUPLE_COST, 0, DBL_MAX,
-		NULL, NULL, NULL
-	},
-	{
-		{"cpu_index_tuple_cost", PGC_USERSET, QUERY_TUNING_COST,
-			gettext_noop("Sets the planner's estimate of the cost of "
-						 "processing each index entry during an index scan."),
-			NULL,
-			GUC_EXPLAIN
-		},
-		&cpu_index_tuple_cost,
-		DEFAULT_CPU_INDEX_TUPLE_COST, 0, DBL_MAX,
-		NULL, NULL, NULL
-	},
-	{
-		{"cpu_operator_cost", PGC_USERSET, QUERY_TUNING_COST,
-			gettext_noop("Sets the planner's estimate of the cost of "
-						 "processing each operator or function call."),
-			NULL,
-			GUC_EXPLAIN
-		},
-		&cpu_operator_cost,
-		DEFAULT_CPU_OPERATOR_COST, 0, DBL_MAX,
-		NULL, NULL, NULL
-	},
-	{
-		{"parallel_tuple_cost", PGC_USERSET, QUERY_TUNING_COST,
-			gettext_noop("Sets the planner's estimate of the cost of "
-						 "passing each tuple (row) from worker to leader backend."),
-			NULL,
-			GUC_EXPLAIN
-		},
-		&parallel_tuple_cost,
-		DEFAULT_PARALLEL_TUPLE_COST, 0, DBL_MAX,
-		NULL, NULL, NULL
-	},
-	{
-		{"parallel_setup_cost", PGC_USERSET, QUERY_TUNING_COST,
-			gettext_noop("Sets the planner's estimate of the cost of "
-						 "starting up worker processes for parallel query."),
-			NULL,
-			GUC_EXPLAIN
-		},
-		&parallel_setup_cost,
-		DEFAULT_PARALLEL_SETUP_COST, 0, DBL_MAX,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"jit_above_cost", PGC_USERSET, QUERY_TUNING_COST,
-			gettext_noop("Perform JIT compilation if query is more expensive."),
-			gettext_noop("-1 disables JIT compilation."),
-			GUC_EXPLAIN
-		},
-		&jit_above_cost,
-		100000, -1, DBL_MAX,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"jit_optimize_above_cost", PGC_USERSET, QUERY_TUNING_COST,
-			gettext_noop("Optimize JIT-compiled functions if query is more expensive."),
-			gettext_noop("-1 disables optimization."),
-			GUC_EXPLAIN
-		},
-		&jit_optimize_above_cost,
-		500000, -1, DBL_MAX,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"jit_inline_above_cost", PGC_USERSET, QUERY_TUNING_COST,
-			gettext_noop("Perform JIT inlining if query is more expensive."),
-			gettext_noop("-1 disables inlining."),
-			GUC_EXPLAIN
-		},
-		&jit_inline_above_cost,
-		500000, -1, DBL_MAX,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"cursor_tuple_fraction", PGC_USERSET, QUERY_TUNING_OTHER,
-			gettext_noop("Sets the planner's estimate of the fraction of "
-						 "a cursor's rows that will be retrieved."),
-			NULL,
-			GUC_EXPLAIN
-		},
-		&cursor_tuple_fraction,
-		DEFAULT_CURSOR_TUPLE_FRACTION, 0.0, 1.0,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"recursive_worktable_factor", PGC_USERSET, QUERY_TUNING_OTHER,
-			gettext_noop("Sets the planner's estimate of the average size "
-						 "of a recursive query's working table."),
-			NULL,
-			GUC_EXPLAIN
-		},
-		&recursive_worktable_factor,
-		DEFAULT_RECURSIVE_WORKTABLE_FACTOR, 0.001, 1000000.0,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"geqo_selection_bias", PGC_USERSET, QUERY_TUNING_GEQO,
-			gettext_noop("GEQO: selective pressure within the population."),
-			NULL,
-			GUC_EXPLAIN
-		},
-		&Geqo_selection_bias,
-		DEFAULT_GEQO_SELECTION_BIAS,
-		MIN_GEQO_SELECTION_BIAS, MAX_GEQO_SELECTION_BIAS,
-		NULL, NULL, NULL
-	},
-	{
-		{"geqo_seed", PGC_USERSET, QUERY_TUNING_GEQO,
-			gettext_noop("GEQO: seed for random path selection."),
-			NULL,
-			GUC_EXPLAIN
-		},
-		&Geqo_seed,
-		0.0, 0.0, 1.0,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"hash_mem_multiplier", PGC_USERSET, RESOURCES_MEM,
-			gettext_noop("Multiple of \"work_mem\" to use for hash tables."),
-			NULL,
-			GUC_EXPLAIN
-		},
-		&hash_mem_multiplier,
-		2.0, 1.0, 1000.0,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"bgwriter_lru_multiplier", PGC_SIGHUP, RESOURCES_BGWRITER,
-			gettext_noop("Multiple of the average buffer usage to free per round."),
-			NULL
-		},
-		&bgwriter_lru_multiplier,
-		2.0, 0.0, 10.0,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"seed", PGC_USERSET, UNGROUPED,
-			gettext_noop("Sets the seed for random-number generation."),
-			NULL,
-			GUC_NO_SHOW_ALL | GUC_NO_RESET | GUC_NO_RESET_ALL | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE
-		},
-		&phony_random_seed,
-		0.0, -1.0, 1.0,
-		check_random_seed, assign_random_seed, show_random_seed
-	},
-
-	{
-		{"vacuum_cost_delay", PGC_USERSET, VACUUM_COST_DELAY,
-			gettext_noop("Vacuum cost delay in milliseconds."),
-			NULL,
-			GUC_UNIT_MS
-		},
-		&VacuumCostDelay,
-		0, 0, 100,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"autovacuum_vacuum_cost_delay", PGC_SIGHUP, VACUUM_AUTOVACUUM,
-			gettext_noop("Vacuum cost delay in milliseconds, for autovacuum."),
-			gettext_noop("-1 means use \"vacuum_cost_delay\"."),
-			GUC_UNIT_MS
-		},
-		&autovacuum_vac_cost_delay,
-		2, -1, 100,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"autovacuum_vacuum_scale_factor", PGC_SIGHUP, VACUUM_AUTOVACUUM,
-			gettext_noop("Number of tuple updates or deletes prior to vacuum as a fraction of reltuples."),
-			NULL
-		},
-		&autovacuum_vac_scale,
-		0.2, 0.0, 100.0,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"autovacuum_vacuum_insert_scale_factor", PGC_SIGHUP, VACUUM_AUTOVACUUM,
-			gettext_noop("Number of tuple inserts prior to vacuum as a fraction of reltuples."),
-			NULL
-		},
-		&autovacuum_vac_ins_scale,
-		0.2, 0.0, 100.0,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"autovacuum_analyze_scale_factor", PGC_SIGHUP, VACUUM_AUTOVACUUM,
-			gettext_noop("Number of tuple inserts, updates, or deletes prior to analyze as a fraction of reltuples."),
-			NULL
-		},
-		&autovacuum_anl_scale,
-		0.1, 0.0, 100.0,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"checkpoint_completion_target", PGC_SIGHUP, WAL_CHECKPOINTS,
-			gettext_noop("Time spent flushing dirty buffers during checkpoint, as fraction of checkpoint interval."),
-			NULL
-		},
-		&CheckPointCompletionTarget,
-		0.9, 0.0, 1.0,
-		NULL, assign_checkpoint_completion_target, NULL
-	},
-
-	{
-		{"log_statement_sample_rate", PGC_SUSET, LOGGING_WHEN,
-			gettext_noop("Fraction of statements exceeding \"log_min_duration_sample\" to be logged."),
-			gettext_noop("Use a value between 0.0 (never log) and 1.0 (always log).")
-		},
-		&log_statement_sample_rate,
-		1.0, 0.0, 1.0,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"log_transaction_sample_rate", PGC_SUSET, LOGGING_WHEN,
-			gettext_noop("Sets the fraction of transactions from which to log all statements."),
-			gettext_noop("Use a value between 0.0 (never log) and 1.0 (log all "
-						 "statements for all transactions).")
-		},
-		&log_xact_sample_rate,
-		0.0, 0.0, 1.0,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"vacuum_max_eager_freeze_failure_rate", PGC_USERSET, VACUUM_FREEZING,
-			gettext_noop("Fraction of pages in a relation vacuum can scan and fail to freeze before disabling eager scanning."),
-			gettext_noop("A value of 0.0 disables eager scanning and a value of 1.0 will eagerly scan up to 100 percent of the all-visible pages in the relation. "
-						 "If vacuum successfully freezes these pages, the cap is lower than 100 percent, because the goal is to amortize page freezing across multiple vacuums.")
-		},
-		&vacuum_max_eager_freeze_failure_rate,
-		0.03, 0.0, 1.0,
-		NULL, NULL, NULL
-	},
-
-	/* End-of-list marker */
-	{
-		{NULL, 0, 0, NULL, NULL}, NULL, 0.0, 0.0, 0.0, NULL, NULL, NULL
-	}
-};
-
-
-struct config_string ConfigureNamesString[] =
-{
-	{
-		{"archive_command", PGC_SIGHUP, WAL_ARCHIVING,
-			gettext_noop("Sets the shell command that will be called to archive a WAL file."),
-			gettext_noop("An empty string means use \"archive_library\".")
-		},
-		&XLogArchiveCommand,
-		"",
-		NULL, NULL, show_archive_command
-	},
-
-	{
-		{"archive_library", PGC_SIGHUP, WAL_ARCHIVING,
-			gettext_noop("Sets the library that will be called to archive a WAL file."),
-			gettext_noop("An empty string means use \"archive_command\".")
-		},
-		&XLogArchiveLibrary,
-		"",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"restore_command", PGC_SIGHUP, WAL_ARCHIVE_RECOVERY,
-			gettext_noop("Sets the shell command that will be called to retrieve an archived WAL file."),
-			NULL
-		},
-		&recoveryRestoreCommand,
-		"",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"archive_cleanup_command", PGC_SIGHUP, WAL_ARCHIVE_RECOVERY,
-			gettext_noop("Sets the shell command that will be executed at every restart point."),
-			NULL
-		},
-		&archiveCleanupCommand,
-		"",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"recovery_end_command", PGC_SIGHUP, WAL_ARCHIVE_RECOVERY,
-			gettext_noop("Sets the shell command that will be executed once at the end of recovery."),
-			NULL
-		},
-		&recoveryEndCommand,
-		"",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"recovery_target_timeline", PGC_POSTMASTER, WAL_RECOVERY_TARGET,
-			gettext_noop("Specifies the timeline to recover into."),
-			NULL
-		},
-		&recovery_target_timeline_string,
-		"latest",
-		check_recovery_target_timeline, assign_recovery_target_timeline, NULL
-	},
-
-	{
-		{"recovery_target", PGC_POSTMASTER, WAL_RECOVERY_TARGET,
-			gettext_noop("Set to \"immediate\" to end recovery as soon as a consistent state is reached."),
-			NULL
-		},
-		&recovery_target_string,
-		"",
-		check_recovery_target, assign_recovery_target, NULL
-	},
-	{
-		{"recovery_target_xid", PGC_POSTMASTER, WAL_RECOVERY_TARGET,
-			gettext_noop("Sets the transaction ID up to which recovery will proceed."),
-			NULL
-		},
-		&recovery_target_xid_string,
-		"",
-		check_recovery_target_xid, assign_recovery_target_xid, NULL
-	},
-	{
-		{"recovery_target_time", PGC_POSTMASTER, WAL_RECOVERY_TARGET,
-			gettext_noop("Sets the time stamp up to which recovery will proceed."),
-			NULL
-		},
-		&recovery_target_time_string,
-		"",
-		check_recovery_target_time, assign_recovery_target_time, NULL
-	},
-	{
-		{"recovery_target_name", PGC_POSTMASTER, WAL_RECOVERY_TARGET,
-			gettext_noop("Sets the named restore point up to which recovery will proceed."),
-			NULL
-		},
-		&recovery_target_name_string,
-		"",
-		check_recovery_target_name, assign_recovery_target_name, NULL
-	},
-	{
-		{"recovery_target_lsn", PGC_POSTMASTER, WAL_RECOVERY_TARGET,
-			gettext_noop("Sets the LSN of the write-ahead log location up to which recovery will proceed."),
-			NULL
-		},
-		&recovery_target_lsn_string,
-		"",
-		check_recovery_target_lsn, assign_recovery_target_lsn, NULL
-	},
-
-	{
-		{"primary_conninfo", PGC_SIGHUP, REPLICATION_STANDBY,
-			gettext_noop("Sets the connection string to be used to connect to the sending server."),
-			NULL,
-			GUC_SUPERUSER_ONLY
-		},
-		&PrimaryConnInfo,
-		"",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"primary_slot_name", PGC_SIGHUP, REPLICATION_STANDBY,
-			gettext_noop("Sets the name of the replication slot to use on the sending server."),
-			NULL
-		},
-		&PrimarySlotName,
-		"",
-		check_primary_slot_name, NULL, NULL
-	},
-
-	{
-		{"client_encoding", PGC_USERSET, CLIENT_CONN_LOCALE,
-			gettext_noop("Sets the client's character set encoding."),
-			NULL,
-			GUC_IS_NAME | GUC_REPORT
-		},
-		&client_encoding_string,
-		"SQL_ASCII",
-		check_client_encoding, assign_client_encoding, NULL
-	},
-
-	{
-		{"log_line_prefix", PGC_SIGHUP, LOGGING_WHAT,
-			gettext_noop("Controls information prefixed to each log line."),
-			gettext_noop("An empty string means no prefix.")
-		},
-		&Log_line_prefix,
-		"%m [%p] ",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"log_timezone", PGC_SIGHUP, LOGGING_WHAT,
-			gettext_noop("Sets the time zone to use in log messages."),
-			NULL
-		},
-		&log_timezone_string,
-		"GMT",
-		check_log_timezone, assign_log_timezone, show_log_timezone
-	},
-
-	{
-		{"DateStyle", PGC_USERSET, CLIENT_CONN_LOCALE,
-			gettext_noop("Sets the display format for date and time values."),
-			gettext_noop("Also controls interpretation of ambiguous "
-						 "date inputs."),
-			GUC_LIST_INPUT | GUC_REPORT
-		},
-		&datestyle_string,
-		"ISO, MDY",
-		check_datestyle, assign_datestyle, NULL
-	},
-
-	{
-		{"default_table_access_method", PGC_USERSET, CLIENT_CONN_STATEMENT,
-			gettext_noop("Sets the default table access method for new tables."),
-			NULL,
-			GUC_IS_NAME
-		},
-		&default_table_access_method,
-		DEFAULT_TABLE_ACCESS_METHOD,
-		check_default_table_access_method, NULL, NULL
-	},
-
-	{
-		{"default_tablespace", PGC_USERSET, CLIENT_CONN_STATEMENT,
-			gettext_noop("Sets the default tablespace to create tables and indexes in."),
-			gettext_noop("An empty string means use the database's default tablespace."),
-			GUC_IS_NAME
-		},
-		&default_tablespace,
-		"",
-		check_default_tablespace, NULL, NULL
-	},
-
-	{
-		{"temp_tablespaces", PGC_USERSET, CLIENT_CONN_STATEMENT,
-			gettext_noop("Sets the tablespace(s) to use for temporary tables and sort files."),
-			gettext_noop("An empty string means use the database's default tablespace."),
-			GUC_LIST_INPUT | GUC_LIST_QUOTE
-		},
-		&temp_tablespaces,
-		"",
-		check_temp_tablespaces, assign_temp_tablespaces, NULL
-	},
-
-	{
-		{"createrole_self_grant", PGC_USERSET, CLIENT_CONN_STATEMENT,
-			gettext_noop("Sets whether a CREATEROLE user automatically grants "
-						 "the role to themselves, and with which options."),
-			gettext_noop("An empty string disables automatic self grants."),
-			GUC_LIST_INPUT
-		},
-		&createrole_self_grant,
-		"",
-		check_createrole_self_grant, assign_createrole_self_grant, NULL
-	},
-
-	{
-		{"dynamic_library_path", PGC_SUSET, CLIENT_CONN_OTHER,
-			gettext_noop("Sets the path for dynamically loadable modules."),
-			gettext_noop("If a dynamically loadable module needs to be opened and "
-						 "the specified name does not have a directory component (i.e., the "
-						 "name does not contain a slash), the system will search this path for "
-						 "the specified file."),
-			GUC_SUPERUSER_ONLY
-		},
-		&Dynamic_library_path,
-		"$libdir",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"extension_control_path", PGC_SUSET, CLIENT_CONN_OTHER,
-			gettext_noop("Sets the path for extension control files."),
-			gettext_noop("The remaining extension script and secondary control files are then loaded "
-						 "from the same directory where the primary control file was found."),
-			GUC_SUPERUSER_ONLY
-		},
-		&Extension_control_path,
-		"$system",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"krb_server_keyfile", PGC_SIGHUP, CONN_AUTH_AUTH,
-			gettext_noop("Sets the location of the Kerberos server key file."),
-			NULL,
-			GUC_SUPERUSER_ONLY
-		},
-		&pg_krb_server_keyfile,
-		PG_KRB_SRVTAB,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"bonjour_name", PGC_POSTMASTER, CONN_AUTH_SETTINGS,
-			gettext_noop("Sets the Bonjour service name."),
-			gettext_noop("An empty string means use the computer name.")
-		},
-		&bonjour_name,
-		"",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"lc_messages", PGC_SUSET, CLIENT_CONN_LOCALE,
-			gettext_noop("Sets the language in which messages are displayed."),
-			gettext_noop("An empty string means use the operating system setting.")
-		},
-		&locale_messages,
-		"",
-		check_locale_messages, assign_locale_messages, NULL
-	},
-
-	{
-		{"lc_monetary", PGC_USERSET, CLIENT_CONN_LOCALE,
-			gettext_noop("Sets the locale for formatting monetary amounts."),
-			gettext_noop("An empty string means use the operating system setting.")
-		},
-		&locale_monetary,
-		"C",
-		check_locale_monetary, assign_locale_monetary, NULL
-	},
-
-	{
-		{"lc_numeric", PGC_USERSET, CLIENT_CONN_LOCALE,
-			gettext_noop("Sets the locale for formatting numbers."),
-			gettext_noop("An empty string means use the operating system setting.")
-		},
-		&locale_numeric,
-		"C",
-		check_locale_numeric, assign_locale_numeric, NULL
-	},
-
-	{
-		{"lc_time", PGC_USERSET, CLIENT_CONN_LOCALE,
-			gettext_noop("Sets the locale for formatting date and time values."),
-			gettext_noop("An empty string means use the operating system setting.")
-		},
-		&locale_time,
-		"C",
-		check_locale_time, assign_locale_time, NULL
-	},
-
-	{
-		{"session_preload_libraries", PGC_SUSET, CLIENT_CONN_PRELOAD,
-			gettext_noop("Lists shared libraries to preload into each backend."),
-			NULL,
-			GUC_LIST_INPUT | GUC_LIST_QUOTE | GUC_SUPERUSER_ONLY
-		},
-		&session_preload_libraries_string,
-		"",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"shared_preload_libraries", PGC_POSTMASTER, CLIENT_CONN_PRELOAD,
-			gettext_noop("Lists shared libraries to preload into server."),
-			NULL,
-			GUC_LIST_INPUT | GUC_LIST_QUOTE | GUC_SUPERUSER_ONLY
-		},
-		&shared_preload_libraries_string,
-		"",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"local_preload_libraries", PGC_USERSET, CLIENT_CONN_PRELOAD,
-			gettext_noop("Lists unprivileged shared libraries to preload into each backend."),
-			NULL,
-			GUC_LIST_INPUT | GUC_LIST_QUOTE
-		},
-		&local_preload_libraries_string,
-		"",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"search_path", PGC_USERSET, CLIENT_CONN_STATEMENT,
-			gettext_noop("Sets the schema search order for names that are not schema-qualified."),
-			NULL,
-			GUC_LIST_INPUT | GUC_LIST_QUOTE | GUC_EXPLAIN | GUC_REPORT
-		},
-		&namespace_search_path,
-		"\"$user\", public",
-		check_search_path, assign_search_path, NULL
-	},
-
-	{
-		/* Can't be set in postgresql.conf */
-		{"server_encoding", PGC_INTERNAL, PRESET_OPTIONS,
-			gettext_noop("Shows the server (database) character set encoding."),
-			NULL,
-			GUC_IS_NAME | GUC_REPORT | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE
-		},
-		&server_encoding_string,
-		"SQL_ASCII",
-		NULL, NULL, NULL
-	},
-
-	{
-		/* Can't be set in postgresql.conf */
-		{"server_version", PGC_INTERNAL, PRESET_OPTIONS,
-			gettext_noop("Shows the server version."),
-			NULL,
-			GUC_REPORT | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE
-		},
-		&server_version_string,
-		PG_VERSION,
-		NULL, NULL, NULL
-	},
-
-	{
-		/* Not for general use --- used by SET ROLE */
-		{"role", PGC_USERSET, UNGROUPED,
-			gettext_noop("Sets the current role."),
-			NULL,
-			GUC_IS_NAME | GUC_NO_SHOW_ALL | GUC_NO_RESET_ALL | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_NOT_WHILE_SEC_REST
-		},
-		&role_string,
-		"none",
-		check_role, assign_role, show_role
-	},
-
-	{
-		/* Not for general use --- used by SET SESSION AUTHORIZATION */
-		{"session_authorization", PGC_USERSET, UNGROUPED,
-			gettext_noop("Sets the session user name."),
-			NULL,
-			GUC_IS_NAME | GUC_REPORT | GUC_NO_SHOW_ALL | GUC_NO_RESET_ALL | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE | GUC_NOT_WHILE_SEC_REST
-		},
-		&session_authorization_string,
-		NULL,
-		check_session_authorization, assign_session_authorization, NULL
-	},
-
-	{
-		{"log_destination", PGC_SIGHUP, LOGGING_WHERE,
-			gettext_noop("Sets the destination for server log output."),
-			gettext_noop("Valid values are combinations of \"stderr\", "
-						 "\"syslog\", \"csvlog\", \"jsonlog\", and \"eventlog\", "
-						 "depending on the platform."),
-			GUC_LIST_INPUT
-		},
-		&Log_destination_string,
-		"stderr",
-		check_log_destination, assign_log_destination, NULL
-	},
-	{
-		{"log_directory", PGC_SIGHUP, LOGGING_WHERE,
-			gettext_noop("Sets the destination directory for log files."),
-			gettext_noop("Can be specified as relative to the data directory "
-						 "or as absolute path."),
-			GUC_SUPERUSER_ONLY
-		},
-		&Log_directory,
-		"log",
-		check_canonical_path, NULL, NULL
-	},
-	{
-		{"log_filename", PGC_SIGHUP, LOGGING_WHERE,
-			gettext_noop("Sets the file name pattern for log files."),
-			NULL,
-			GUC_SUPERUSER_ONLY
-		},
-		&Log_filename,
-		"postgresql-%Y-%m-%d_%H%M%S.log",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"syslog_ident", PGC_SIGHUP, LOGGING_WHERE,
-			gettext_noop("Sets the program name used to identify PostgreSQL "
-						 "messages in syslog."),
-			NULL
-		},
-		&syslog_ident_str,
-		"postgres",
-		NULL, assign_syslog_ident, NULL
-	},
-
-	{
-		{"event_source", PGC_POSTMASTER, LOGGING_WHERE,
-			gettext_noop("Sets the application name used to identify "
-						 "PostgreSQL messages in the event log."),
-			NULL
-		},
-		&event_source,
-		DEFAULT_EVENT_SOURCE,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"TimeZone", PGC_USERSET, CLIENT_CONN_LOCALE,
-			gettext_noop("Sets the time zone for displaying and interpreting time stamps."),
-			NULL,
-			GUC_REPORT
-		},
-		&timezone_string,
-		"GMT",
-		check_timezone, assign_timezone, show_timezone
-	},
-	{
-		{"timezone_abbreviations", PGC_USERSET, CLIENT_CONN_LOCALE,
-			gettext_noop("Selects a file of time zone abbreviations."),
-			NULL
-		},
-		&timezone_abbreviations_string,
-		NULL,
-		check_timezone_abbreviations, assign_timezone_abbreviations, NULL
-	},
-
-	{
-		{"unix_socket_group", PGC_POSTMASTER, CONN_AUTH_SETTINGS,
-			gettext_noop("Sets the owning group of the Unix-domain socket."),
-			gettext_noop("The owning user of the socket is always the user that starts the server. "
-						 "An empty string means use the user's default group.")
-		},
-		&Unix_socket_group,
-		"",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"unix_socket_directories", PGC_POSTMASTER, CONN_AUTH_SETTINGS,
-			gettext_noop("Sets the directories where Unix-domain sockets will be created."),
-			NULL,
-			GUC_LIST_INPUT | GUC_LIST_QUOTE | GUC_SUPERUSER_ONLY
-		},
-		&Unix_socket_directories,
-		DEFAULT_PGSOCKET_DIR,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"listen_addresses", PGC_POSTMASTER, CONN_AUTH_SETTINGS,
-			gettext_noop("Sets the host name or IP address(es) to listen to."),
-			NULL,
-			GUC_LIST_INPUT
-		},
-		&ListenAddresses,
-		"localhost",
-		NULL, NULL, NULL
-	},
-
-	{
-		/*
-		 * Can't be set by ALTER SYSTEM as it can lead to recursive definition
-		 * of data_directory.
-		 */
-		{"data_directory", PGC_POSTMASTER, FILE_LOCATIONS,
-			gettext_noop("Sets the server's data directory."),
-			NULL,
-			GUC_SUPERUSER_ONLY | GUC_DISALLOW_IN_AUTO_FILE
-		},
-		&data_directory,
-		NULL,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"config_file", PGC_POSTMASTER, FILE_LOCATIONS,
-			gettext_noop("Sets the server's main configuration file."),
-			NULL,
-			GUC_DISALLOW_IN_FILE | GUC_SUPERUSER_ONLY
-		},
-		&ConfigFileName,
-		NULL,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"hba_file", PGC_POSTMASTER, FILE_LOCATIONS,
-			gettext_noop("Sets the server's \"hba\" configuration file."),
-			NULL,
-			GUC_SUPERUSER_ONLY
-		},
-		&HbaFileName,
-		NULL,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"ident_file", PGC_POSTMASTER, FILE_LOCATIONS,
-			gettext_noop("Sets the server's \"ident\" configuration file."),
-			NULL,
-			GUC_SUPERUSER_ONLY
-		},
-		&IdentFileName,
-		NULL,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"external_pid_file", PGC_POSTMASTER, FILE_LOCATIONS,
-			gettext_noop("Writes the postmaster PID to the specified file."),
-			NULL,
-			GUC_SUPERUSER_ONLY
-		},
-		&external_pid_file,
-		NULL,
-		check_canonical_path, NULL, NULL
-	},
-
-	{
-		{"ssl_library", PGC_INTERNAL, PRESET_OPTIONS,
-			gettext_noop("Shows the name of the SSL library."),
-			NULL,
-			GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE
-		},
-		&ssl_library,
-#ifdef USE_SSL
-		"OpenSSL",
-#else
-		"",
-#endif
-		NULL, NULL, NULL
-	},
-
-	{
-		{"ssl_cert_file", PGC_SIGHUP, CONN_AUTH_SSL,
-			gettext_noop("Location of the SSL server certificate file."),
-			NULL
-		},
-		&ssl_cert_file,
-		"server.crt",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"ssl_key_file", PGC_SIGHUP, CONN_AUTH_SSL,
-			gettext_noop("Location of the SSL server private key file."),
-			NULL
-		},
-		&ssl_key_file,
-		"server.key",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"ssl_ca_file", PGC_SIGHUP, CONN_AUTH_SSL,
-			gettext_noop("Location of the SSL certificate authority file."),
-			NULL
-		},
-		&ssl_ca_file,
-		"",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"ssl_crl_file", PGC_SIGHUP, CONN_AUTH_SSL,
-			gettext_noop("Location of the SSL certificate revocation list file."),
-			NULL
-		},
-		&ssl_crl_file,
-		"",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"ssl_crl_dir", PGC_SIGHUP, CONN_AUTH_SSL,
-			gettext_noop("Location of the SSL certificate revocation list directory."),
-			NULL
-		},
-		&ssl_crl_dir,
-		"",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"synchronous_standby_names", PGC_SIGHUP, REPLICATION_PRIMARY,
-			gettext_noop("Number of synchronous standbys and list of names of potential synchronous ones."),
-			NULL,
-			GUC_LIST_INPUT
-		},
-		&SyncRepStandbyNames,
-		"",
-		check_synchronous_standby_names, assign_synchronous_standby_names, NULL
-	},
-
-	{
-		{"default_text_search_config", PGC_USERSET, CLIENT_CONN_LOCALE,
-			gettext_noop("Sets default text search configuration."),
-			NULL
-		},
-		&TSCurrentConfig,
-		"pg_catalog.simple",
-		check_default_text_search_config, assign_default_text_search_config, NULL
-	},
-
-	{
-		{"ssl_tls13_ciphers", PGC_SIGHUP, CONN_AUTH_SSL,
-			gettext_noop("Sets the list of allowed TLSv1.3 cipher suites."),
-			gettext_noop("An empty string means use the default cipher suites."),
-			GUC_SUPERUSER_ONLY
-		},
-		&SSLCipherSuites,
-		"",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"ssl_ciphers", PGC_SIGHUP, CONN_AUTH_SSL,
-			gettext_noop("Sets the list of allowed TLSv1.2 (and lower) ciphers."),
-			NULL,
-			GUC_SUPERUSER_ONLY
-		},
-		&SSLCipherList,
-#ifdef USE_OPENSSL
-		"HIGH:MEDIUM:+3DES:!aNULL",
-#else
-		"none",
-#endif
-		NULL, NULL, NULL
-	},
-
-	{
-		{"ssl_groups", PGC_SIGHUP, CONN_AUTH_SSL,
-			gettext_noop("Sets the group(s) to use for Diffie-Hellman key exchange."),
-			gettext_noop("Multiple groups can be specified using a colon-separated list."),
-			GUC_SUPERUSER_ONLY
-		},
-		&SSLECDHCurve,
-#ifdef USE_SSL
-		"X25519:prime256v1",
-#else
-		"none",
-#endif
-		NULL, NULL, NULL
-	},
-
-	{
-		{"ssl_dh_params_file", PGC_SIGHUP, CONN_AUTH_SSL,
-			gettext_noop("Location of the SSL DH parameters file."),
-			gettext_noop("An empty string means use compiled-in default parameters."),
-			GUC_SUPERUSER_ONLY
-		},
-		&ssl_dh_params_file,
-		"",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"ssl_passphrase_command", PGC_SIGHUP, CONN_AUTH_SSL,
-			gettext_noop("Command to obtain passphrases for SSL."),
-			gettext_noop("An empty string means use the built-in prompting mechanism."),
-			GUC_SUPERUSER_ONLY
-		},
-		&ssl_passphrase_command,
-		"",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"application_name", PGC_USERSET, LOGGING_WHAT,
-			gettext_noop("Sets the application name to be reported in statistics and logs."),
-			NULL,
-			GUC_IS_NAME | GUC_REPORT | GUC_NOT_IN_SAMPLE
-		},
-		&application_name,
-		"",
-		check_application_name, assign_application_name, NULL
-	},
-
-	{
-		{"cluster_name", PGC_POSTMASTER, PROCESS_TITLE,
-			gettext_noop("Sets the name of the cluster, which is included in the process title."),
-			NULL,
-			GUC_IS_NAME
-		},
-		&cluster_name,
-		"",
-		check_cluster_name, NULL, NULL
-	},
-
-	{
-		{"wal_consistency_checking", PGC_SUSET, DEVELOPER_OPTIONS,
-			gettext_noop("Sets the WAL resource managers for which WAL consistency checks are done."),
-			gettext_noop("Full-page images will be logged for all data blocks and cross-checked against the results of WAL replay."),
-			GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE
-		},
-		&wal_consistency_checking_string,
-		"",
-		check_wal_consistency_checking, assign_wal_consistency_checking, NULL
-	},
-
-	{
-		{"jit_provider", PGC_POSTMASTER, CLIENT_CONN_PRELOAD,
-			gettext_noop("JIT provider to use."),
-			NULL,
-			GUC_SUPERUSER_ONLY
-		},
-		&jit_provider,
-		"llvmjit",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"backtrace_functions", PGC_SUSET, DEVELOPER_OPTIONS,
-			gettext_noop("Log backtrace for errors in these functions."),
-			NULL,
-			GUC_NOT_IN_SAMPLE
-		},
-		&backtrace_functions,
-		"",
-		check_backtrace_functions, assign_backtrace_functions, NULL
-	},
-
-	{
-		{"debug_io_direct", PGC_POSTMASTER, DEVELOPER_OPTIONS,
-			gettext_noop("Use direct I/O for file access."),
-			gettext_noop("An empty string disables direct I/O."),
-			GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE
-		},
-		&debug_io_direct_string,
-		"",
-		check_debug_io_direct, assign_debug_io_direct, NULL
-	},
-
-	{
-		{"synchronized_standby_slots", PGC_SIGHUP, REPLICATION_PRIMARY,
-			gettext_noop("Lists streaming replication standby server replication slot "
-						 "names that logical WAL sender processes will wait for."),
-			gettext_noop("Logical WAL sender processes will send decoded "
-						 "changes to output plugins only after the specified "
-						 "replication slots have confirmed receiving WAL."),
-			GUC_LIST_INPUT
-		},
-		&synchronized_standby_slots,
-		"",
-		check_synchronized_standby_slots, assign_synchronized_standby_slots, NULL
-	},
-
-	{
-		{"restrict_nonsystem_relation_kind", PGC_USERSET, CLIENT_CONN_STATEMENT,
-			gettext_noop("Prohibits access to non-system relations of specified kinds."),
-			NULL,
-			GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE
-		},
-		&restrict_nonsystem_relation_kind_string,
-		"",
-		check_restrict_nonsystem_relation_kind, assign_restrict_nonsystem_relation_kind, NULL
-	},
-
-	{
-		{"oauth_validator_libraries", PGC_SIGHUP, CONN_AUTH_AUTH,
-			gettext_noop("Lists libraries that may be called to validate OAuth v2 bearer tokens."),
-			NULL,
-			GUC_LIST_INPUT | GUC_LIST_QUOTE | GUC_SUPERUSER_ONLY
-		},
-		&oauth_validator_libraries_string,
-		"",
-		NULL, NULL, NULL
-	},
-
-	{
-		{"log_connections", PGC_SU_BACKEND, LOGGING_WHAT,
-			gettext_noop("Logs specified aspects of connection establishment and setup."),
-			NULL,
-			GUC_LIST_INPUT
-		},
-		&log_connections_string,
-		"",
-		check_log_connections, assign_log_connections, NULL
-	},
-
-
-	/* End-of-list marker */
-	{
-		{NULL, 0, 0, NULL, NULL}, NULL, NULL, NULL, NULL, NULL
-	}
-};
-
-
-struct config_enum ConfigureNamesEnum[] =
-{
-	{
-		{"backslash_quote", PGC_USERSET, COMPAT_OPTIONS_PREVIOUS,
-			gettext_noop("Sets whether \"\\'\" is allowed in string literals."),
-			NULL
-		},
-		&backslash_quote,
-		BACKSLASH_QUOTE_SAFE_ENCODING, backslash_quote_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"bytea_output", PGC_USERSET, CLIENT_CONN_STATEMENT,
-			gettext_noop("Sets the output format for bytea."),
-			NULL
-		},
-		&bytea_output,
-		BYTEA_OUTPUT_HEX, bytea_output_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"client_min_messages", PGC_USERSET, CLIENT_CONN_STATEMENT,
-			gettext_noop("Sets the message levels that are sent to the client."),
-			gettext_noop("Each level includes all the levels that follow it. The later"
-						 " the level, the fewer messages are sent.")
-		},
-		&client_min_messages,
-		NOTICE, client_message_level_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"compute_query_id", PGC_SUSET, STATS_MONITORING,
-			gettext_noop("Enables in-core computation of query identifiers."),
-			NULL
-		},
-		&compute_query_id,
-		COMPUTE_QUERY_ID_AUTO, compute_query_id_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"constraint_exclusion", PGC_USERSET, QUERY_TUNING_OTHER,
-			gettext_noop("Enables the planner to use constraints to optimize queries."),
-			gettext_noop("Table scans will be skipped if their constraints"
-						 " guarantee that no rows match the query."),
-			GUC_EXPLAIN
-		},
-		&constraint_exclusion,
-		CONSTRAINT_EXCLUSION_PARTITION, constraint_exclusion_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"default_toast_compression", PGC_USERSET, CLIENT_CONN_STATEMENT,
-			gettext_noop("Sets the default compression method for compressible values."),
-			NULL
-		},
-		&default_toast_compression,
-		TOAST_PGLZ_COMPRESSION,
-		default_toast_compression_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"default_transaction_isolation", PGC_USERSET, CLIENT_CONN_STATEMENT,
-			gettext_noop("Sets the transaction isolation level of each new transaction."),
-			NULL
-		},
-		&DefaultXactIsoLevel,
-		XACT_READ_COMMITTED, isolation_level_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"transaction_isolation", PGC_USERSET, CLIENT_CONN_STATEMENT,
-			gettext_noop("Sets the current transaction's isolation level."),
-			NULL,
-			GUC_NO_RESET | GUC_NO_RESET_ALL | GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE
-		},
-		&XactIsoLevel,
-		XACT_READ_COMMITTED, isolation_level_options,
-		check_transaction_isolation, NULL, NULL
-	},
-
-	{
-		{"IntervalStyle", PGC_USERSET, CLIENT_CONN_LOCALE,
-			gettext_noop("Sets the display format for interval values."),
-			NULL,
-			GUC_REPORT
-		},
-		&IntervalStyle,
-		INTSTYLE_POSTGRES, intervalstyle_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"icu_validation_level", PGC_USERSET, CLIENT_CONN_LOCALE,
-			gettext_noop("Log level for reporting invalid ICU locale strings."),
-			NULL
-		},
-		&icu_validation_level,
-		WARNING, icu_validation_level_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"log_error_verbosity", PGC_SUSET, LOGGING_WHAT,
-			gettext_noop("Sets the verbosity of logged messages."),
-			NULL
-		},
-		&Log_error_verbosity,
-		PGERROR_DEFAULT, log_error_verbosity_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"log_min_messages", PGC_SUSET, LOGGING_WHEN,
-			gettext_noop("Sets the message levels that are logged."),
-			gettext_noop("Each level includes all the levels that follow it. The later"
-						 " the level, the fewer messages are sent.")
-		},
-		&log_min_messages,
-		WARNING, server_message_level_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"log_min_error_statement", PGC_SUSET, LOGGING_WHEN,
-			gettext_noop("Causes all statements generating error at or above this level to be logged."),
-			gettext_noop("Each level includes all the levels that follow it. The later"
-						 " the level, the fewer messages are sent.")
-		},
-		&log_min_error_statement,
-		ERROR, server_message_level_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"log_statement", PGC_SUSET, LOGGING_WHAT,
-			gettext_noop("Sets the type of statements logged."),
-			NULL
-		},
-		&log_statement,
-		LOGSTMT_NONE, log_statement_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"syslog_facility", PGC_SIGHUP, LOGGING_WHERE,
-			gettext_noop("Sets the syslog \"facility\" to be used when syslog enabled."),
-			NULL
-		},
-		&syslog_facility,
-		DEFAULT_SYSLOG_FACILITY,
-		syslog_facility_options,
-		NULL, assign_syslog_facility, NULL
-	},
-
-	{
-		{"session_replication_role", PGC_SUSET, CLIENT_CONN_STATEMENT,
-			gettext_noop("Sets the session's behavior for triggers and rewrite rules."),
-			NULL
-		},
-		&SessionReplicationRole,
-		SESSION_REPLICATION_ROLE_ORIGIN, session_replication_role_options,
-		NULL, assign_session_replication_role, NULL
-	},
-
-	{
-		{"synchronous_commit", PGC_USERSET, WAL_SETTINGS,
-			gettext_noop("Sets the current transaction's synchronization level."),
-			NULL
-		},
-		&synchronous_commit,
-		SYNCHRONOUS_COMMIT_ON, synchronous_commit_options,
-		NULL, assign_synchronous_commit, NULL
-	},
-
-	{
-		{"archive_mode", PGC_POSTMASTER, WAL_ARCHIVING,
-			gettext_noop("Allows archiving of WAL files using \"archive_command\"."),
-			NULL
-		},
-		&XLogArchiveMode,
-		ARCHIVE_MODE_OFF, archive_mode_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"recovery_target_action", PGC_POSTMASTER, WAL_RECOVERY_TARGET,
-			gettext_noop("Sets the action to perform upon reaching the recovery target."),
-			NULL
-		},
-		&recoveryTargetAction,
-		RECOVERY_TARGET_ACTION_PAUSE, recovery_target_action_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"track_functions", PGC_SUSET, STATS_CUMULATIVE,
-			gettext_noop("Collects function-level statistics on database activity."),
-			NULL
-		},
-		&pgstat_track_functions,
-		TRACK_FUNC_OFF, track_function_options,
-		NULL, NULL, NULL
-	},
-
-
-	{
-		{"stats_fetch_consistency", PGC_USERSET, STATS_CUMULATIVE,
-			gettext_noop("Sets the consistency of accesses to statistics data."),
-			NULL
-		},
-		&pgstat_fetch_consistency,
-		PGSTAT_FETCH_CONSISTENCY_CACHE, stats_fetch_consistency,
-		NULL, assign_stats_fetch_consistency, NULL
-	},
-
-	{
-		{"wal_compression", PGC_SUSET, WAL_SETTINGS,
-			gettext_noop("Compresses full-page writes written in WAL file with specified method."),
-			NULL
-		},
-		&wal_compression,
-		WAL_COMPRESSION_NONE, wal_compression_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"wal_level", PGC_POSTMASTER, WAL_SETTINGS,
-			gettext_noop("Sets the level of information written to the WAL."),
-			NULL
-		},
-		&wal_level,
-		WAL_LEVEL_REPLICA, wal_level_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"dynamic_shared_memory_type", PGC_POSTMASTER, RESOURCES_MEM,
-			gettext_noop("Selects the dynamic shared memory implementation used."),
-			NULL
-		},
-		&dynamic_shared_memory_type,
-		DEFAULT_DYNAMIC_SHARED_MEMORY_TYPE, dynamic_shared_memory_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"shared_memory_type", PGC_POSTMASTER, RESOURCES_MEM,
-			gettext_noop("Selects the shared memory implementation used for the main shared memory region."),
-			NULL
-		},
-		&shared_memory_type,
-		DEFAULT_SHARED_MEMORY_TYPE, shared_memory_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"file_copy_method", PGC_USERSET, RESOURCES_DISK,
-			gettext_noop("Selects the file copy method."),
-			NULL
-		},
-		&file_copy_method,
-		FILE_COPY_METHOD_COPY, file_copy_method_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"wal_sync_method", PGC_SIGHUP, WAL_SETTINGS,
-			gettext_noop("Selects the method used for forcing WAL updates to disk."),
-			NULL
-		},
-		&wal_sync_method,
-		DEFAULT_WAL_SYNC_METHOD, wal_sync_method_options,
-		NULL, assign_wal_sync_method, NULL
-	},
-
-	{
-		{"xmlbinary", PGC_USERSET, CLIENT_CONN_STATEMENT,
-			gettext_noop("Sets how binary values are to be encoded in XML."),
-			NULL
-		},
-		&xmlbinary,
-		XMLBINARY_BASE64, xmlbinary_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"xmloption", PGC_USERSET, CLIENT_CONN_STATEMENT,
-			gettext_noop("Sets whether XML data in implicit parsing and serialization "
-						 "operations is to be considered as documents or content fragments."),
-			NULL
-		},
-		&xmloption,
-		XMLOPTION_CONTENT, xmloption_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"huge_pages", PGC_POSTMASTER, RESOURCES_MEM,
-			gettext_noop("Use of huge pages on Linux or Windows."),
-			NULL
-		},
-		&huge_pages,
-		HUGE_PAGES_TRY, huge_pages_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"huge_pages_status", PGC_INTERNAL, PRESET_OPTIONS,
-			gettext_noop("Indicates the status of huge pages."),
-			NULL,
-			GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE
-		},
-		&huge_pages_status,
-		HUGE_PAGES_UNKNOWN, huge_pages_status_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"recovery_prefetch", PGC_SIGHUP, WAL_RECOVERY,
-			gettext_noop("Prefetch referenced blocks during recovery."),
-			gettext_noop("Look ahead in the WAL to find references to uncached data.")
-		},
-		&recovery_prefetch,
-		RECOVERY_PREFETCH_TRY, recovery_prefetch_options,
-		check_recovery_prefetch, assign_recovery_prefetch, NULL
-	},
-
-	{
-		{"debug_parallel_query", PGC_USERSET, DEVELOPER_OPTIONS,
-			gettext_noop("Forces the planner's use parallel query nodes."),
-			gettext_noop("This can be useful for testing the parallel query infrastructure "
-						 "by forcing the planner to generate plans that contain nodes "
-						 "that perform tuple communication between workers and the main process."),
-			GUC_NOT_IN_SAMPLE | GUC_EXPLAIN
-		},
-		&debug_parallel_query,
-		DEBUG_PARALLEL_OFF, debug_parallel_query_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"password_encryption", PGC_USERSET, CONN_AUTH_AUTH,
-			gettext_noop("Chooses the algorithm for encrypting passwords."),
-			NULL
-		},
-		&Password_encryption,
-		PASSWORD_TYPE_SCRAM_SHA_256, password_encryption_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"plan_cache_mode", PGC_USERSET, QUERY_TUNING_OTHER,
-			gettext_noop("Controls the planner's selection of custom or generic plan."),
-			gettext_noop("Prepared statements can have custom and generic plans, and the planner "
-						 "will attempt to choose which is better. This can be set to override "
-						 "the default behavior."),
-			GUC_EXPLAIN
-		},
-		&plan_cache_mode,
-		PLAN_CACHE_MODE_AUTO, plan_cache_mode_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"ssl_min_protocol_version", PGC_SIGHUP, CONN_AUTH_SSL,
-			gettext_noop("Sets the minimum SSL/TLS protocol version to use."),
-			NULL,
-			GUC_SUPERUSER_ONLY
-		},
-		&ssl_min_protocol_version,
-		PG_TLS1_2_VERSION,
-		ssl_protocol_versions_info + 1, /* don't allow PG_TLS_ANY */
-		NULL, NULL, NULL
-	},
-
-	{
-		{"ssl_max_protocol_version", PGC_SIGHUP, CONN_AUTH_SSL,
-			gettext_noop("Sets the maximum SSL/TLS protocol version to use."),
-			NULL,
-			GUC_SUPERUSER_ONLY
-		},
-		&ssl_max_protocol_version,
-		PG_TLS_ANY,
-		ssl_protocol_versions_info,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"recovery_init_sync_method", PGC_SIGHUP, ERROR_HANDLING_OPTIONS,
-			gettext_noop("Sets the method for synchronizing the data directory before crash recovery."),
-		},
-		&recovery_init_sync_method,
-		DATA_DIR_SYNC_METHOD_FSYNC, recovery_init_sync_method_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"debug_logical_replication_streaming", PGC_USERSET, DEVELOPER_OPTIONS,
-			gettext_noop("Forces immediate streaming or serialization of changes in large transactions."),
-			gettext_noop("On the publisher, it allows streaming or serializing each change in logical decoding. "
-						 "On the subscriber, it allows serialization of all changes to files and notifies the "
-						 "parallel apply workers to read and apply them at the end of the transaction."),
-			GUC_NOT_IN_SAMPLE
-		},
-		&debug_logical_replication_streaming,
-		DEBUG_LOGICAL_REP_STREAMING_BUFFERED, debug_logical_replication_streaming_options,
-		NULL, NULL, NULL
-	},
-
-	{
-		{"io_method", PGC_POSTMASTER, RESOURCES_IO,
-			gettext_noop("Selects the method for executing asynchronous I/O."),
-			NULL
-		},
-		&io_method,
-		DEFAULT_IO_METHOD, io_method_options,
-		NULL, assign_io_method, NULL
-	},
-
-	/* End-of-list marker */
-	{
-		{NULL, 0, 0, NULL, NULL}, NULL, 0, NULL, NULL, NULL, NULL
-	}
-};
+#include "utils/guc_tables.inc.c"
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index a9d8293474af5..26c0869356485 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -581,6 +581,7 @@
 
 # - What to Log -
 
+#debug_print_raw_parse = off
 #debug_print_parse = off
 #debug_print_rewritten = off
 #debug_print_plan = off
diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c
index e08b26e8c14f2..4df25944deb33 100644
--- a/src/backend/utils/misc/ps_status.c
+++ b/src/backend/utils/misc/ps_status.c
@@ -100,6 +100,17 @@ static void flush_ps_display(void);
 static int	save_argc;
 static char **save_argv;
 
+/*
+ * Valgrind seems not to consider the global "environ" variable as a valid
+ * root pointer; so when we allocate a new environment array, it claims that
+ * data is leaked. To fix that, keep our own statically-allocated copy of the
+ * pointer. (Oddly, this doesn't seem to be a problem for "argv".)
+ */
+#if defined(PS_USE_CLOBBER_ARGV) && defined(USE_VALGRIND)
+extern char **ps_status_new_environ;
+char	  **ps_status_new_environ;
+#endif
+
 
 /*
  * Call this early in startup to save the original argc/argv values.
@@ -206,6 +217,11 @@ save_ps_display_args(int argc, char **argv)
 		}
 		new_environ[i] = NULL;
 		environ = new_environ;
+
+		/* See notes about Valgrind above. */
+#ifdef USE_VALGRIND
+		ps_status_new_environ = new_environ;
+#endif
 	}
 
 	/*
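The comment added above reflects a general property of Valgrind's leak checker: a block is reported as lost only when no root pointer (a global, or something on a stack or in a register) still references it. A minimal standalone sketch of the workaround, outside PostgreSQL (the names here are invented for illustration):

	/* cc environ_root.c && valgrind --leak-check=full ./a.out */
	#include <stdlib.h>
	#include <string.h>

	extern char **environ;

	/* Extra static root; with it in place, the replacement array is
	 * reported as "still reachable" rather than leaked. */
	static char **environ_copy;

	int
	main(void)
	{
		char	  **new_environ = malloc(2 * sizeof(char *));

		new_environ[0] = strdup("FOO=bar");
		new_environ[1] = NULL;
		environ = new_environ;		/* per the patch, not seen as a root */
		environ_copy = new_environ; /* but this global is */
		return 0;
	}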
diff --git a/src/backend/utils/mmgr/alignedalloc.c b/src/backend/utils/mmgr/alignedalloc.c
index 7eea695de62c5..b1be742691497 100644
--- a/src/backend/utils/mmgr/alignedalloc.c
+++ b/src/backend/utils/mmgr/alignedalloc.c
@@ -45,6 +45,15 @@ AlignedAllocFree(void *pointer)
 			 GetMemoryChunkContext(unaligned)->name, chunk);
 #endif
 
+	/*
+	 * Create a dummy vchunk covering the start of the unaligned chunk, but
+	 * not overlapping the aligned chunk. This will be freed while pfree'ing
+	 * the unaligned chunk, keeping Valgrind happy. Then when we return to
+	 * the outer pfree, that will clean up the vchunk for the aligned chunk.
+	 */
+	VALGRIND_MEMPOOL_ALLOC(GetMemoryChunkContext(unaligned), unaligned,
+						   (char *) pointer - (char *) unaligned);
+
 	/* Recursively pfree the unaligned chunk */
 	pfree(unaligned);
 }
@@ -123,6 +132,15 @@ AlignedAllocRealloc(void *pointer, Size size, int flags)
 		VALGRIND_MAKE_MEM_DEFINED(pointer, old_size);
 	memcpy(newptr, pointer, Min(size, old_size));
 
+	/*
+	 * Create a dummy vchunk covering the start of the old unaligned chunk,
+	 * but not overlapping the aligned chunk. This will be freed while
+	 * pfree'ing the old unaligned chunk, keeping Valgrind happy. Then when
+	 * we return to repalloc, it will move the vchunk for the aligned chunk.
+	 */
+	VALGRIND_MEMPOOL_ALLOC(ctx, unaligned,
+						   (char *) pointer - (char *) unaligned);
+
 	pfree(unaligned);
 
 	return newptr;
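The dummy-vchunk trick in the two hunks above is easier to follow with the memory layout in mind. A rough editorial sketch (not from the patch) of an aligned chunk as produced by palloc_aligned:

	/*
	 *  unaligned                       pointer (aligned as requested)
	 *     |                               |
	 *     v                               v
	 *     +---------+---------------------+------------------------+
	 *     | padding | redirection header  | caller-visible memory  |
	 *     +---------+---------------------+------------------------+
	 *     |<-- dummy vchunk, size = ----->|
	 *           (char *) pointer - (char *) unaligned
	 */

The dummy vchunk ends exactly where the vchunk that mcxt.c maintains for the caller-visible chunk begins, so releasing the unaligned chunk never disturbs the aligned chunk's own vchunk.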
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index 666ecd8f78d0e..9ef109ca586bd 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -103,6 +103,8 @@
 
 #define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
 #define ALLOC_CHUNKHDRSZ	sizeof(MemoryChunk)
+#define FIRST_BLOCKHDRSZ	(MAXALIGN(sizeof(AllocSetContext)) + \
+							 ALLOC_BLOCKHDRSZ)
 
 typedef struct AllocBlockData *AllocBlock;	/* forward reference */
@@ -458,6 +460,21 @@ AllocSetContextCreateInternal(MemoryContext parent,
 	 * we'd leak the header/initial block if we ereport in this stretch.
 	 */
 
+	/* Create a vpool associated with the context */
+	VALGRIND_CREATE_MEMPOOL(set, 0, false);
+
+	/*
+	 * Create a vchunk covering both the AllocSetContext struct and the keeper
+	 * block's header. (Perhaps it would be more sensible for these to be two
+	 * separate vchunks, but doing that seems to tickle bugs in some versions
+	 * of Valgrind.) We must have these vchunks, and also a vchunk for each
+	 * subsequently-added block header, so that Valgrind considers the
+	 * pointers within them while checking for leaked memory. Note that
+	 * Valgrind doesn't distinguish between these vchunks and those created by
+	 * mcxt.c for the user-accessible-data chunks we allocate.
+	 */
+	VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ);
+
 	/* Fill in the initial block's block header */
 	block = KeeperBlock(set);
 	block->aset = set;
@@ -585,6 +602,14 @@ AllocSetReset(MemoryContext context)
 #ifdef CLOBBER_FREED_MEMORY
 			wipe_mem(block, block->freeptr - ((char *) block));
 #endif
+
+			/*
+			 * We need to free the block header's vchunk explicitly, although
+			 * the user-data vchunks within will go away in the TRIM below.
+			 * Otherwise Valgrind complains about leaked allocations.
+			 */
+			VALGRIND_MEMPOOL_FREE(set, block);
+
 			free(block);
 		}
 		block = next;
@@ -592,6 +617,14 @@ AllocSetReset(MemoryContext context)
 
 	Assert(context->mem_allocated == keepersize);
 
+	/*
+	 * Instruct Valgrind to throw away all the vchunks associated with this
+	 * context, except for the one covering the AllocSetContext and
+	 * keeper-block header. This gets rid of the vchunks for whatever user
+	 * data is getting discarded by the context reset.
+	 */
+	VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ);
+
 	/* Reset block size allocation sequence, too */
 	set->nextBlockSize = set->initBlockSize;
 }
@@ -648,6 +681,9 @@ AllocSetDelete(MemoryContext context)
 			freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
 			freelist->num_free--;
 
+			/* Destroy the context's vpool --- see notes below */
+			VALGRIND_DESTROY_MEMPOOL(oldset);
+
 			/* All that remains is to free the header/initial block */
 			free(oldset);
 		}
@@ -675,13 +711,24 @@ AllocSetDelete(MemoryContext context)
 #endif
 
 		if (!IsKeeperBlock(set, block))
+		{
+			/* As in AllocSetReset, free block-header vchunks explicitly */
+			VALGRIND_MEMPOOL_FREE(set, block);
 			free(block);
+		}
 
 		block = next;
 	}
 
 	Assert(context->mem_allocated == keepersize);
 
+	/*
+	 * Destroy the vpool. We don't seem to need to explicitly free the
+	 * initial block's header vchunk, nor any user-data vchunks that Valgrind
+	 * still knows about; they'll all go away automatically.
+	 */
+	VALGRIND_DESTROY_MEMPOOL(set);
+
 	/* Finally, free the context header, including the keeper block */
 	free(set);
 }
@@ -716,6 +763,9 @@ AllocSetAllocLarge(MemoryContext context, Size size, int flags)
 	if (block == NULL)
 		return MemoryContextAllocationFailure(context, size, flags);
 
+	/* Make a vchunk covering the new block's header */
+	VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ);
+
 	context->mem_allocated += blksize;
 
 	block->aset = set;
@@ -922,6 +972,9 @@ AllocSetAllocFromNewBlock(MemoryContext context, Size size, int flags,
 	if (block == NULL)
 		return MemoryContextAllocationFailure(context, size, flags);
 
+	/* Make a vchunk covering the new block's header */
+	VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ);
+
 	context->mem_allocated += blksize;
 
 	block->aset = set;
@@ -1104,6 +1157,10 @@ AllocSetFree(void *pointer)
 #ifdef CLOBBER_FREED_MEMORY
 		wipe_mem(block, block->freeptr - ((char *) block));
 #endif
+
+		/* As in AllocSetReset, free block-header vchunks explicitly */
+		VALGRIND_MEMPOOL_FREE(set, block);
+
 		free(block);
 	}
 	else
@@ -1184,6 +1241,7 @@ AllocSetRealloc(void *pointer, Size size, int flags)
 		 * realloc() to make the containing block bigger, or smaller, with
 		 * minimum space wastage.
 		 */
+		AllocBlock	newblock;
 		Size		chksize;
 		Size		blksize;
 		Size		oldblksize;
@@ -1223,14 +1281,21 @@ AllocSetRealloc(void *pointer, Size size, int flags)
 		blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
 		oldblksize = block->endptr - ((char *) block);
 
-		block = (AllocBlock) realloc(block, blksize);
-		if (block == NULL)
+		newblock = (AllocBlock) realloc(block, blksize);
+		if (newblock == NULL)
 		{
 			/* Disallow access to the chunk header. */
 			VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
 			return MemoryContextAllocationFailure(&set->header, size, flags);
 		}
 
+		/*
+		 * Move the block-header vchunk explicitly. (mcxt.c will take care of
+		 * moving the vchunk for the user data.)
+		 */
+		VALGRIND_MEMPOOL_CHANGE(set, block, newblock, ALLOC_BLOCKHDRSZ);
+		block = newblock;
+
 		/* updated separately, not to underflow when (oldblksize > blksize) */
 		set->header.mem_allocated -= oldblksize;
 		set->header.mem_allocated += blksize;
@@ -1294,7 +1359,7 @@ AllocSetRealloc(void *pointer, Size size, int flags)
 	/* Ensure any padding bytes are marked NOACCESS. */
 	VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
 
-	/* Disallow access to the chunk header . */
+	/* Disallow access to the chunk header. */
 	VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
 
 	return pointer;
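The bump.c and generation.c diffs below repeat the same protocol that aset.c establishes above. For reference, here it is reduced to a toy arena (illustrative only; the Arena type and function names are invented, but the client requests are the real ones from valgrind/memcheck.h and are harmless no-ops when the program is not run under Valgrind):

	#include <stdlib.h>
	#include <valgrind/memcheck.h>

	typedef struct Arena
	{
		char	   *block;		/* single backing block, for brevity */
		size_t		used;
	} Arena;

	static Arena *
	arena_create(size_t size)
	{
		Arena	   *a = malloc(sizeof(Arena));

		a->block = malloc(size);
		a->used = 0;
		/* One vpool per allocator instance, as the patch does per context */
		VALGRIND_CREATE_MEMPOOL(a, 0, 0);
		/* Management data needs its own vchunk, or the pointers inside it
		 * are invisible to the leak checker */
		VALGRIND_MEMPOOL_ALLOC(a, a, sizeof(Arena));
		return a;
	}

	static void *
	arena_alloc(Arena *a, size_t size)
	{
		void	   *p = a->block + a->used;

		a->used += size;
		/* One vchunk per caller-visible allocation */
		VALGRIND_MEMPOOL_ALLOC(a, p, size);
		return p;
	}

	static void
	arena_reset(Arena *a)
	{
		a->used = 0;
		/* Drop every vchunk outside [a, a + sizeof(Arena)), i.e. keep only
		 * the header vchunk -- the analogue of the TRIM calls above */
		VALGRIND_MEMPOOL_TRIM(a, a, sizeof(Arena));
	}

	static void
	arena_destroy(Arena *a)
	{
		/* Destroying the vpool releases its remaining vchunks */
		VALGRIND_DESTROY_MEMPOOL(a);
		free(a->block);
		free(a);
	}

	int
	main(void)
	{
		Arena	   *a = arena_create(1024);

		arena_alloc(a, 100);
		arena_reset(a);
		arena_destroy(a);
		return 0;
	}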
+ */ + VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ); + /* Reset block size allocation sequence, too */ set->nextBlockSize = set->initBlockSize; @@ -279,6 +295,10 @@ BumpDelete(MemoryContext context) { /* Reset to release all releasable BumpBlocks */ BumpReset(context); + + /* Destroy the vpool -- see notes in aset.c */ + VALGRIND_DESTROY_MEMPOOL(context); + /* And free the context header and keeper block */ free(context); } @@ -318,6 +338,9 @@ BumpAllocLarge(MemoryContext context, Size size, int flags) if (block == NULL) return MemoryContextAllocationFailure(context, size, flags); + /* Make a vchunk covering the new block's header */ + VALGRIND_MEMPOOL_ALLOC(set, block, Bump_BLOCKHDRSZ); + context->mem_allocated += blksize; /* the block is completely full */ @@ -455,6 +478,9 @@ BumpAllocFromNewBlock(MemoryContext context, Size size, int flags, if (block == NULL) return MemoryContextAllocationFailure(context, size, flags); + /* Make a vchunk covering the new block's header */ + VALGRIND_MEMPOOL_ALLOC(set, block, Bump_BLOCKHDRSZ); + context->mem_allocated += blksize; /* initialize the new block */ @@ -606,6 +632,9 @@ BumpBlockFree(BumpContext *set, BumpBlock *block) wipe_mem(block, ((char *) block->endptr - (char *) block)); #endif + /* As in aset.c, free block-header vchunks explicitly */ + VALGRIND_MEMPOOL_FREE(set, block); + free(block); } diff --git a/src/backend/utils/mmgr/generation.c b/src/backend/utils/mmgr/generation.c index 18679ad4f1e41..cfafc9bf0829d 100644 --- a/src/backend/utils/mmgr/generation.c +++ b/src/backend/utils/mmgr/generation.c @@ -45,6 +45,8 @@ #define Generation_BLOCKHDRSZ MAXALIGN(sizeof(GenerationBlock)) #define Generation_CHUNKHDRSZ sizeof(MemoryChunk) +#define FIRST_BLOCKHDRSZ (MAXALIGN(sizeof(GenerationContext)) + \ + Generation_BLOCKHDRSZ) #define Generation_CHUNK_FRACTION 8 @@ -221,6 +223,12 @@ GenerationContextCreate(MemoryContext parent, * Avoid writing code that can fail between here and MemoryContextCreate; * we'd leak the header if we ereport in this stretch. */ + + /* See comments about Valgrind interactions in aset.c */ + VALGRIND_CREATE_MEMPOOL(set, 0, false); + /* This vchunk covers the GenerationContext and the keeper block header */ + VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ); + dlist_init(&set->blocks); /* Fill in the initial block's block header */ @@ -309,6 +317,14 @@ GenerationReset(MemoryContext context) GenerationBlockFree(set, block); } + /* + * Instruct Valgrind to throw away all the vchunks associated with this + * context, except for the one covering the GenerationContext and + * keeper-block header. This gets rid of the vchunks for whatever user + * data is getting discarded by the context reset. 
+ */ + VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ); + /* set it so new allocations to make use of the keeper block */ set->block = KeeperBlock(set); @@ -329,6 +345,10 @@ GenerationDelete(MemoryContext context) { /* Reset to release all releasable GenerationBlocks */ GenerationReset(context); + + /* Destroy the vpool -- see notes in aset.c */ + VALGRIND_DESTROY_MEMPOOL(context); + /* And free the context header and keeper block */ free(context); } @@ -365,6 +385,9 @@ GenerationAllocLarge(MemoryContext context, Size size, int flags) if (block == NULL) return MemoryContextAllocationFailure(context, size, flags); + /* Make a vchunk covering the new block's header */ + VALGRIND_MEMPOOL_ALLOC(set, block, Generation_BLOCKHDRSZ); + context->mem_allocated += blksize; /* block with a single (used) chunk */ @@ -487,6 +510,9 @@ GenerationAllocFromNewBlock(MemoryContext context, Size size, int flags, if (block == NULL) return MemoryContextAllocationFailure(context, size, flags); + /* Make a vchunk covering the new block's header */ + VALGRIND_MEMPOOL_ALLOC(set, block, Generation_BLOCKHDRSZ); + context->mem_allocated += blksize; /* initialize the new block */ @@ -677,6 +703,9 @@ GenerationBlockFree(GenerationContext *set, GenerationBlock *block) wipe_mem(block, block->blksize); #endif + /* As in aset.c, free block-header vchunks explicitly */ + VALGRIND_MEMPOOL_FREE(set, block); + free(block); } diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c index ce01dce9861da..47fd774c7d280 100644 --- a/src/backend/utils/mmgr/mcxt.c +++ b/src/backend/utils/mmgr/mcxt.c @@ -8,6 +8,23 @@ * context-type-specific operations via the function pointers in a * context's MemoryContextMethods struct. * + * A note about Valgrind support: when USE_VALGRIND is defined, we provide + * support for memory leak tracking at the allocation-unit level. Valgrind + * does leak detection by tracking allocated "chunks", which can be grouped + * into "pools". The "chunk" terminology is overloaded, since we use that + * word for our allocation units, and it's sometimes important to distinguish + * those from the Valgrind objects that describe them. To reduce confusion, + * let's use the terms "vchunk" and "vpool" for the Valgrind objects. + * + * We use a separate vpool for each memory context. The context-type-specific + * code is responsible for creating and deleting the vpools, and also for + * creating vchunks to cover its management data structures such as block + * headers. (There must be a vchunk that includes every pointer we want + * Valgrind to consider for leak-tracking purposes.) This module creates + * and deletes the vchunks that cover the caller-visible allocated chunks. + * However, the context-type-specific code must handle cleaning up those + * vchunks too during memory context reset operations. 
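
The practical payoff of the scheme described in this header comment is that leaks become attributable to individual palloc chunks. A hypothetical illustration (not from the patch): under valgrind --leak-check=full, the overwritten chunk below now produces its own loss record, attributed to its allocation site, instead of disappearing inside the context's larger malloc'd block.

    #include "postgres.h"

    static void
    leak_demo(void)
    {
        char       *buf = palloc(100);  /* mcxt.c records a vchunk for buf */

        buf = palloc(200);              /* old chunk unreachable: a reportable leak */
        pfree(buf);                     /* properly freed: its vchunk is removed */
    }
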
+ * * * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California @@ -418,8 +435,6 @@ MemoryContextResetOnly(MemoryContext context) context->methods->reset(context); context->isReset = true; - VALGRIND_DESTROY_MEMPOOL(context); - VALGRIND_CREATE_MEMPOOL(context, 0, false); } } @@ -526,8 +541,6 @@ MemoryContextDeleteOnly(MemoryContext context) context->ident = NULL; context->methods->delete_context(context); - - VALGRIND_DESTROY_MEMPOOL(context); } /* @@ -1170,8 +1183,6 @@ MemoryContextCreate(MemoryContext node, node->nextchild = NULL; node->allowInCritSection = false; } - - VALGRIND_CREATE_MEMPOOL(node, 0, false); } /* @@ -1454,7 +1465,13 @@ MemoryContextAllocAligned(MemoryContext context, void *unaligned; void *aligned; - /* wouldn't make much sense to waste that much space */ + /* + * Restrict alignto to ensure that it can fit into the "value" field of + * the redirection MemoryChunk, and that the distance back to the start of + * the unaligned chunk will fit into the space available for that. This + * isn't a limitation in practice, since it wouldn't make much sense to + * waste that much space. + */ Assert(alignto < (128 * 1024 * 1024)); /* ensure alignto is a power of 2 */ @@ -1491,10 +1508,15 @@ MemoryContextAllocAligned(MemoryContext context, alloc_size += 1; #endif - /* perform the actual allocation */ - unaligned = MemoryContextAllocExtended(context, alloc_size, flags); + /* + * Perform the actual allocation, but do not pass down MCXT_ALLOC_ZERO. + * This ensures that wasted bytes beyond the aligned chunk do not become + * DEFINED. + */ + unaligned = MemoryContextAllocExtended(context, alloc_size, + flags & ~MCXT_ALLOC_ZERO); - /* set the aligned pointer */ + /* compute the aligned pointer */ aligned = (void *) TYPEALIGN(alignto, (char *) unaligned + sizeof(MemoryChunk)); @@ -1522,12 +1544,23 @@ MemoryContextAllocAligned(MemoryContext context, set_sentinel(aligned, size); #endif - /* Mark the bytes before the redirection header as noaccess */ - VALGRIND_MAKE_MEM_NOACCESS(unaligned, - (char *) alignedchunk - (char *) unaligned); + /* + * MemoryContextAllocExtended marked the whole unaligned chunk as a + * vchunk. Undo that, instead making just the aligned chunk be a vchunk. + * This prevents Valgrind from complaining that the vchunk is possibly + * leaked, since only pointers to the aligned chunk will exist. + * + * After these calls, the aligned chunk will be marked UNDEFINED, and all + * the rest of the unaligned chunk (the redirection chunk header, the + * padding bytes before it, and any wasted trailing bytes) will be marked + * NOACCESS, which is what we want. + */ + VALGRIND_MEMPOOL_FREE(context, unaligned); + VALGRIND_MEMPOOL_ALLOC(context, aligned, size); - /* Disallow access to the redirection chunk header. 
*/ - VALGRIND_MAKE_MEM_NOACCESS(alignedchunk, sizeof(MemoryChunk)); + /* Now zero (and make DEFINED) just the aligned chunk, if requested */ + if ((flags & MCXT_ALLOC_ZERO) != 0) + MemSetAligned(aligned, 0, size); return aligned; } @@ -1561,16 +1594,12 @@ void pfree(void *pointer) { #ifdef USE_VALGRIND - MemoryContextMethodID method = GetMemoryChunkMethodID(pointer); MemoryContext context = GetMemoryChunkContext(pointer); #endif MCXT_METHOD(pointer, free_p) (pointer); -#ifdef USE_VALGRIND - if (method != MCTX_ALIGNED_REDIRECT_ID) - VALGRIND_MEMPOOL_FREE(context, pointer); -#endif + VALGRIND_MEMPOOL_FREE(context, pointer); } /* @@ -1580,9 +1609,6 @@ pfree(void *pointer) void * repalloc(void *pointer, Size size) { -#ifdef USE_VALGRIND - MemoryContextMethodID method = GetMemoryChunkMethodID(pointer); -#endif #if defined(USE_ASSERT_CHECKING) || defined(USE_VALGRIND) MemoryContext context = GetMemoryChunkContext(pointer); #endif @@ -1605,10 +1631,7 @@ repalloc(void *pointer, Size size) */ ret = MCXT_METHOD(pointer, realloc) (pointer, size, 0); -#ifdef USE_VALGRIND - if (method != MCTX_ALIGNED_REDIRECT_ID) - VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size); -#endif + VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size); return ret; } diff --git a/src/backend/utils/mmgr/slab.c b/src/backend/utils/mmgr/slab.c index d32c0d318fbf4..0e35abcf5a055 100644 --- a/src/backend/utils/mmgr/slab.c +++ b/src/backend/utils/mmgr/slab.c @@ -377,6 +377,11 @@ SlabContextCreate(MemoryContext parent, * we'd leak the header if we ereport in this stretch. */ + /* See comments about Valgrind interactions in aset.c */ + VALGRIND_CREATE_MEMPOOL(slab, 0, false); + /* This vchunk covers the SlabContext only */ + VALGRIND_MEMPOOL_ALLOC(slab, slab, sizeof(SlabContext)); + /* Fill in SlabContext-specific header fields */ slab->chunkSize = (uint32) chunkSize; slab->fullChunkSize = (uint32) fullChunkSize; @@ -451,6 +456,10 @@ SlabReset(MemoryContext context) #ifdef CLOBBER_FREED_MEMORY wipe_mem(block, slab->blockSize); #endif + + /* As in aset.c, free block-header vchunks explicitly */ + VALGRIND_MEMPOOL_FREE(slab, block); + free(block); context->mem_allocated -= slab->blockSize; } @@ -467,11 +476,23 @@ SlabReset(MemoryContext context) #ifdef CLOBBER_FREED_MEMORY wipe_mem(block, slab->blockSize); #endif + + /* As in aset.c, free block-header vchunks explicitly */ + VALGRIND_MEMPOOL_FREE(slab, block); + free(block); context->mem_allocated -= slab->blockSize; } } + /* + * Instruct Valgrind to throw away all the vchunks associated with this + * context, except for the one covering the SlabContext. This gets rid of + * the vchunks for whatever user data is getting discarded by the context + * reset. 
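
Stepping back to the MemoryContextAllocAligned change above: the aligned pointer is computed with TYPEALIGN, and adding sizeof(MemoryChunk) before rounding guarantees room for the redirection header below the aligned address. For reference, the existing definition in src/include/c.h rounds an address up to the next multiple of a power-of-two alignment:

    /* Existing definition, reproduced from src/include/c.h for reference: */
    #define TYPEALIGN(ALIGNVAL,LEN)  \
        (((uintptr_t) (LEN) + ((ALIGNVAL) - 1)) & ~((uintptr_t) ((ALIGNVAL) - 1)))

    /* For example, TYPEALIGN(64, 0x1009) == 0x1040. */
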
+ */ + VALGRIND_MEMPOOL_TRIM(slab, slab, sizeof(SlabContext)); + slab->curBlocklistIndex = 0; Assert(context->mem_allocated == 0); @@ -486,6 +507,10 @@ SlabDelete(MemoryContext context) { /* Reset to release all the SlabBlocks */ SlabReset(context); + + /* Destroy the vpool -- see notes in aset.c */ + VALGRIND_DESTROY_MEMPOOL(context); + /* And free the context header */ free(context); } @@ -567,6 +592,9 @@ SlabAllocFromNewBlock(MemoryContext context, Size size, int flags) if (unlikely(block == NULL)) return MemoryContextAllocationFailure(context, size, flags); + /* Make a vchunk covering the new block's header */ + VALGRIND_MEMPOOL_ALLOC(slab, block, Slab_BLOCKHDRSZ); + block->slab = slab; context->mem_allocated += slab->blockSize; @@ -795,6 +823,10 @@ SlabFree(void *pointer) #ifdef CLOBBER_FREED_MEMORY wipe_mem(block, slab->blockSize); #endif + + /* As in aset.c, free block-header vchunks explicitly */ + VALGRIND_MEMPOOL_FREE(slab, block); + free(block); slab->header.mem_allocated -= slab->blockSize; } diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c index d39f3e1b655cd..fca84ded6ddc3 100644 --- a/src/backend/utils/resowner/resowner.c +++ b/src/backend/utils/resowner/resowner.c @@ -231,11 +231,8 @@ hash_resource_elem(Datum value, const ResourceOwnerDesc *kind) * 'kind' into the hash. Just add it with hash_combine(), it perturbs the * result enough for our purposes. */ -#if SIZEOF_DATUM == 8 - return hash_combine64(murmurhash64((uint64) value), (uint64) kind); -#else - return hash_combine(murmurhash32((uint32) value), (uint32) kind); -#endif + return hash_combine64(murmurhash64((uint64) value), + (uint64) (uintptr_t) kind); } /* diff --git a/src/backend/utils/sort/sortsupport.c b/src/backend/utils/sort/sortsupport.c index e0f500b9aa29c..f582c6624f11a 100644 --- a/src/backend/utils/sort/sortsupport.c +++ b/src/backend/utils/sort/sortsupport.c @@ -57,7 +57,7 @@ comparison_shim(Datum x, Datum y, SortSupport ssup) if (extra->fcinfo.isnull) elog(ERROR, "function %u returned NULL", extra->flinfo.fn_oid); - return result; + return DatumGetInt32(result); } /* diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index 65ab83fff8b26..5d4411dc33fee 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -512,7 +512,6 @@ qsort_tuple_unsigned_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state) return state->base.comparetup_tiebreak(a, b, state); } -#if SIZEOF_DATUM >= 8 /* Used if first key's comparator is ssup_datum_signed_cmp */ static pg_attribute_always_inline int qsort_tuple_signed_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state) @@ -535,7 +534,6 @@ qsort_tuple_signed_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state) return state->base.comparetup_tiebreak(a, b, state); } -#endif /* Used if first key's comparator is ssup_datum_int32_cmp */ static pg_attribute_always_inline int @@ -578,7 +576,6 @@ qsort_tuple_int32_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state) #define ST_DEFINE #include "lib/sort_template.h" -#if SIZEOF_DATUM >= 8 #define ST_SORT qsort_tuple_signed #define ST_ELEMENT_TYPE SortTuple #define ST_COMPARE(a, b, state) qsort_tuple_signed_compare(a, b, state) @@ -587,7 +584,6 @@ qsort_tuple_int32_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state) #define ST_SCOPE static #define ST_DEFINE #include "lib/sort_template.h" -#endif #define ST_SORT qsort_tuple_int32 #define ST_ELEMENT_TYPE SortTuple @@ -2692,7 +2688,6 @@ 
tuplesort_sort_memtuples(Tuplesortstate *state) state); return; } -#if SIZEOF_DATUM >= 8 else if (state->base.sortKeys[0].comparator == ssup_datum_signed_cmp) { qsort_tuple_signed(state->memtuples, @@ -2700,7 +2695,6 @@ tuplesort_sort_memtuples(Tuplesortstate *state) state); return; } -#endif else if (state->base.sortKeys[0].comparator == ssup_datum_int32_cmp) { qsort_tuple_int32(state->memtuples, @@ -3146,7 +3140,6 @@ ssup_datum_unsigned_cmp(Datum x, Datum y, SortSupport ssup) return 0; } -#if SIZEOF_DATUM >= 8 int ssup_datum_signed_cmp(Datum x, Datum y, SortSupport ssup) { @@ -3160,7 +3153,6 @@ ssup_datum_signed_cmp(Datum x, Datum y, SortSupport ssup) else return 0; } -#endif int ssup_datum_int32_cmp(Datum x, Datum y, SortSupport ssup) diff --git a/src/backend/utils/sort/tuplesortvariants.c b/src/backend/utils/sort/tuplesortvariants.c index 5f70e8dddac57..890cdbe120418 100644 --- a/src/backend/utils/sort/tuplesortvariants.c +++ b/src/backend/utils/sort/tuplesortvariants.c @@ -31,6 +31,7 @@ #include "utils/datum.h" #include "utils/guc.h" #include "utils/lsyscache.h" +#include "utils/rel.h" #include "utils/tuplesort.h" @@ -865,7 +866,7 @@ tuplesort_putbrintuple(Tuplesortstate *state, BrinTuple *tuple, Size size) memcpy(&bstup->tuple, tuple, size); stup.tuple = bstup; - stup.datum1 = tuple->bt_blkno; + stup.datum1 = UInt32GetDatum(tuple->bt_blkno); stup.isnull1 = false; /* GetMemoryChunkSpace is not supported for bump contexts */ @@ -1836,7 +1837,7 @@ removeabbrev_index_brin(Tuplesortstate *state, SortTuple *stups, int count) BrinSortTuple *tuple; tuple = stups[i].tuple; - stups[i].datum1 = tuple->tuple.bt_blkno; + stups[i].datum1 = UInt32GetDatum(tuple->tuple.bt_blkno); } } @@ -1893,7 +1894,7 @@ readtup_index_brin(Tuplesortstate *state, SortTuple *stup, stup->tuple = tuple; /* set up first-column key value, which is block number */ - stup->datum1 = tuple->tuple.bt_blkno; + stup->datum1 = UInt32GetDatum(tuple->tuple.bt_blkno); } /* diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c index ea35f30f49457..65561cc6bc337 100644 --- a/src/backend/utils/time/snapmgr.c +++ b/src/backend/utils/time/snapmgr.c @@ -271,12 +271,23 @@ Snapshot GetTransactionSnapshot(void) { /* - * This should not be called while doing logical decoding. Historic - * snapshots are only usable for catalog access, not for general-purpose - * queries. + * Return historic snapshot if doing logical decoding. + * + * Historic snapshots are only usable for catalog access, not for + * general-purpose queries. The caller is responsible for ensuring that + * the snapshot is used correctly! (PostgreSQL code never calls this + * during logical decoding, but extensions can do it.) */ if (HistoricSnapshotActive()) - elog(ERROR, "cannot take query snapshot during logical decoding"); + { + /* + * We'll never need a non-historic transaction snapshot in this + * (sub-)transaction, so there's no need to be careful to set one up + * for later calls to GetTransactionSnapshot(). + */ + Assert(!FirstSnapshotSet); + return HistoricSnapshot; + } /* First call in transaction? */ if (!FirstSnapshotSet) diff --git a/src/bin/initdb/Makefile b/src/bin/initdb/Makefile index 997e0a013e956..c0470efda92a3 100644 --- a/src/bin/initdb/Makefile +++ b/src/bin/initdb/Makefile @@ -20,7 +20,7 @@ include $(top_builddir)/src/Makefile.global # from libpq, else we have risks of version skew if we run with a libpq # shared library from a different PG version. Define # USE_PRIVATE_ENCODING_FUNCS to ensure that that happens. 
-override CPPFLAGS := -DUSE_PRIVATE_ENCODING_FUNCS -I$(libpq_srcdir) -I$(top_srcdir)/src/timezone $(ICU_CFLAGS) $(CPPFLAGS) +override CPPFLAGS := -DUSE_PRIVATE_ENCODING_FUNCS -I$(libpq_srcdir) -I$(top_srcdir)/src/timezone $(CPPFLAGS) $(ICU_CFLAGS) # We need libpq only because fe_utils does. LDFLAGS_INTERNAL += -L$(top_builddir)/src/fe_utils -lpgfeutils $(libpq_pgport) $(ICU_LIBS) diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c index 62bbd08d9f658..92fe2f531f7a8 100644 --- a/src/bin/initdb/initdb.c +++ b/src/bin/initdb/initdb.c @@ -1580,9 +1580,6 @@ bootstrap_template1(void) bki_lines = replace_token(bki_lines, "ALIGNOF_POINTER", (sizeof(Pointer) == 4) ? "i" : "d"); - bki_lines = replace_token(bki_lines, "FLOAT8PASSBYVAL", - FLOAT8PASSBYVAL ? "true" : "false"); - bki_lines = replace_token(bki_lines, "POSTGRES", escape_quotes_bki(username)); diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c index 55621f35fb6b7..0a3ca4315de1e 100644 --- a/src/bin/pg_basebackup/pg_basebackup.c +++ b/src/bin/pg_basebackup/pg_basebackup.c @@ -35,6 +35,7 @@ #include "fe_utils/option_utils.h" #include "fe_utils/recovery_gen.h" #include "getopt_long.h" +#include "libpq/protocol.h" #include "receivelog.h" #include "streamutil.h" @@ -1338,7 +1339,7 @@ ReceiveArchiveStreamChunk(size_t r, char *copybuf, void *callback_data) /* Each CopyData message begins with a type byte. */ switch (GetCopyDataByte(r, copybuf, &cursor)) { - case 'n': + case PqBackupMsg_NewArchive: { /* New archive. */ char *archive_name; @@ -1410,7 +1411,7 @@ ReceiveArchiveStreamChunk(size_t r, char *copybuf, void *callback_data) break; } - case 'd': + case PqMsg_CopyData: { /* Archive or manifest data. */ if (state->manifest_buffer != NULL) @@ -1446,7 +1447,7 @@ ReceiveArchiveStreamChunk(size_t r, char *copybuf, void *callback_data) break; } - case 'p': + case PqBackupMsg_ProgressReport: { /* * Progress report. @@ -1465,7 +1466,7 @@ ReceiveArchiveStreamChunk(size_t r, char *copybuf, void *callback_data) break; } - case 'm': + case PqBackupMsg_Manifest: { /* * Manifest data will be sent next. This message is not diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c index 8a5dd24e6c9ad..7a4d1a2d2ca66 100644 --- a/src/bin/pg_basebackup/pg_recvlogical.c +++ b/src/bin/pg_basebackup/pg_recvlogical.c @@ -24,6 +24,7 @@ #include "getopt_long.h" #include "libpq-fe.h" #include "libpq/pqsignal.h" +#include "libpq/protocol.h" #include "pqexpbuffer.h" #include "streamutil.h" @@ -149,7 +150,7 @@ sendFeedback(PGconn *conn, TimestampTz now, bool force, bool replyRequested) LSN_FORMAT_ARGS(output_fsync_lsn), replication_slot); - replybuf[len] = 'r'; + replybuf[len] = PqReplMsg_StandbyStatusUpdate; len += 1; fe_sendint64(output_written_lsn, &replybuf[len]); /* write */ len += 8; @@ -454,7 +455,7 @@ StreamLogicalLog(void) } /* Check the message type. */ - if (copybuf[0] == 'k') + if (copybuf[0] == PqReplMsg_Keepalive) { int pos; bool replyRequested; @@ -466,7 +467,7 @@ StreamLogicalLog(void) * We just check if the server requested a reply, and ignore the * rest. 
*/ - pos = 1; /* skip msgtype 'k' */ + pos = 1; /* skip msgtype PqReplMsg_Keepalive */ walEnd = fe_recvint64(©buf[pos]); output_written_lsn = Max(walEnd, output_written_lsn); @@ -509,7 +510,7 @@ StreamLogicalLog(void) continue; } - else if (copybuf[0] != 'w') + else if (copybuf[0] != PqReplMsg_WALData) { pg_log_error("unrecognized streaming header: \"%c\"", copybuf[0]); @@ -517,11 +518,11 @@ StreamLogicalLog(void) } /* - * Read the header of the XLogData message, enclosed in the CopyData + * Read the header of the WALData message, enclosed in the CopyData * message. We only need the WAL location field (dataStart), the rest * of the header is ignored. */ - hdr_len = 1; /* msgtype 'w' */ + hdr_len = 1; /* msgtype PqReplMsg_WALData */ hdr_len += 8; /* dataStart */ hdr_len += 8; /* walEnd */ hdr_len += 8; /* sendTime */ @@ -605,7 +606,7 @@ StreamLogicalLog(void) /* * We're doing a client-initiated clean exit and have sent CopyDone to * the server. Drain any messages, so we don't miss a last-minute - * ErrorResponse. The walsender stops generating XLogData records once + * ErrorResponse. The walsender stops generating WALData records once * it sees CopyDone, so expect this to finish quickly. After CopyDone, * it's too late for sendFeedback(), even if this were to take a long * time. Hence, use synchronous-mode PQgetCopyData(). diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c index d6b7f117fa3bb..25b13c7f55cd1 100644 --- a/src/bin/pg_basebackup/receivelog.c +++ b/src/bin/pg_basebackup/receivelog.c @@ -21,6 +21,7 @@ #include "access/xlog_internal.h" #include "common/logging.h" #include "libpq-fe.h" +#include "libpq/protocol.h" #include "receivelog.h" #include "streamutil.h" @@ -38,8 +39,8 @@ static int CopyStreamReceive(PGconn *conn, long timeout, pgsocket stop_socket, char **buffer); static bool ProcessKeepaliveMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len, XLogRecPtr blockpos, TimestampTz *last_status); -static bool ProcessXLogDataMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len, - XLogRecPtr *blockpos); +static bool ProcessWALDataMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len, + XLogRecPtr *blockpos); static PGresult *HandleEndOfCopyStream(PGconn *conn, StreamCtl *stream, char *copybuf, XLogRecPtr blockpos, XLogRecPtr *stoppos); static bool CheckCopyStreamStop(PGconn *conn, StreamCtl *stream, XLogRecPtr blockpos); @@ -338,7 +339,7 @@ sendFeedback(PGconn *conn, XLogRecPtr blockpos, TimestampTz now, bool replyReque char replybuf[1 + 8 + 8 + 8 + 8 + 1]; int len = 0; - replybuf[len] = 'r'; + replybuf[len] = PqReplMsg_StandbyStatusUpdate; len += 1; fe_sendint64(blockpos, &replybuf[len]); /* write */ len += 8; @@ -823,15 +824,15 @@ HandleCopyStream(PGconn *conn, StreamCtl *stream, } /* Check the message type. */ - if (copybuf[0] == 'k') + if (copybuf[0] == PqReplMsg_Keepalive) { if (!ProcessKeepaliveMsg(conn, stream, copybuf, r, blockpos, &last_status)) goto error; } - else if (copybuf[0] == 'w') + else if (copybuf[0] == PqReplMsg_WALData) { - if (!ProcessXLogDataMsg(conn, stream, copybuf, r, &blockpos)) + if (!ProcessWALDataMsg(conn, stream, copybuf, r, &blockpos)) goto error; /* @@ -1001,7 +1002,7 @@ ProcessKeepaliveMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len, * Parse the keepalive message, enclosed in the CopyData message. We just * check if the server requested a reply, and ignore the rest. 
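
For reference, the WALData message parsed here (and again in receivelog.c below) has the following wire layout inside the CopyData payload. This is a schematic derived from the hdr_len arithmetic above, not an actual struct in the sources; the code reads the fields with fe_recvint64() precisely because a C struct would insert padding after the type byte:

    /*
     * offset  size  field
     *      0     1  msgtype    PqReplMsg_WALData ('w')
     *      1     8  dataStart  WAL location of this payload (big-endian)
     *      9     8  walEnd     server's current end of WAL (big-endian)
     *     17     8  sendTime   server timestamp (big-endian)
     *     25     -  WAL data   payload bytes
     */
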
*/ - pos = 1; /* skip msgtype 'k' */ + pos = 1; /* skip msgtype PqReplMsg_Keepalive */ pos += 8; /* skip walEnd */ pos += 8; /* skip sendTime */ @@ -1041,11 +1042,11 @@ ProcessKeepaliveMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len, } /* - * Process XLogData message. + * Process WALData message. */ static bool -ProcessXLogDataMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len, - XLogRecPtr *blockpos) +ProcessWALDataMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len, + XLogRecPtr *blockpos) { int xlogoff; int bytes_left; @@ -1054,17 +1055,17 @@ ProcessXLogDataMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len, /* * Once we've decided we don't want to receive any more, just ignore any - * subsequent XLogData messages. + * subsequent WALData messages. */ if (!(still_sending)) return true; /* - * Read the header of the XLogData message, enclosed in the CopyData + * Read the header of the WALData message, enclosed in the CopyData * message. We only need the WAL location field (dataStart), the rest of * the header is ignored. */ - hdr_len = 1; /* msgtype 'w' */ + hdr_len = 1; /* msgtype PqReplMsg_WALData */ hdr_len += 8; /* dataStart */ hdr_len += 8; /* walEnd */ hdr_len += 8; /* sendTime */ @@ -1162,7 +1163,7 @@ ProcessXLogDataMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len, return false; } still_sending = false; - return true; /* ignore the rest of this XLogData packet */ + return true; /* ignore the rest of this WALData packet */ } } } diff --git a/src/bin/pg_combinebackup/t/002_compare_backups.pl b/src/bin/pg_combinebackup/t/002_compare_backups.pl index 2c7ca89b92f7f..a3e29c055091e 100644 --- a/src/bin/pg_combinebackup/t/002_compare_backups.pl +++ b/src/bin/pg_combinebackup/t/002_compare_backups.pl @@ -174,6 +174,7 @@ $pitr1->command_ok( [ 'pg_dumpall', + '--restrict-key' => 'test', '--no-sync', '--no-unlogged-table-data', '--file' => $dump1, @@ -183,6 +184,7 @@ $pitr2->command_ok( [ 'pg_dumpall', + '--restrict-key' => 'test', '--no-sync', '--no-unlogged-table-data', '--file' => $dump2, diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c index aa1589e3331d2..a1976fae607d6 100644 --- a/src/bin/pg_dump/common.c +++ b/src/bin/pg_dump/common.c @@ -17,6 +17,7 @@ #include +#include "catalog/pg_am_d.h" #include "catalog/pg_class_d.h" #include "catalog/pg_collation_d.h" #include "catalog/pg_extension_d.h" @@ -944,6 +945,24 @@ findOprByOid(Oid oid) return (OprInfo *) dobj; } +/* + * findAccessMethodByOid + * finds the DumpableObject for the access method with the given oid + * returns NULL if not found + */ +AccessMethodInfo * +findAccessMethodByOid(Oid oid) +{ + CatalogId catId; + DumpableObject *dobj; + + catId.tableoid = AccessMethodRelationId; + catId.oid = oid; + dobj = findObjectByCatalogId(catId); + Assert(dobj == NULL || dobj->objType == DO_ACCESS_METHOD); + return (AccessMethodInfo *) dobj; +} + /* * findCollationByOid * finds the DumpableObject for the collation with the given oid diff --git a/src/bin/pg_dump/compress_gzip.c b/src/bin/pg_dump/compress_gzip.c index 5a30ebf9bf5b5..4a067e1402c3b 100644 --- a/src/bin/pg_dump/compress_gzip.c +++ b/src/bin/pg_dump/compress_gzip.c @@ -251,34 +251,49 @@ InitCompressorGzip(CompressorState *cs, *---------------------- */ -static bool -Gzip_read(void *ptr, size_t size, size_t *rsize, CompressFileHandle *CFH) +static size_t +Gzip_read(void *ptr, size_t size, CompressFileHandle *CFH) { gzFile gzfp = (gzFile) CFH->private_data; int gzret; gzret = gzread(gzfp, ptr, size); - if (gzret <= 0 && 
!gzeof(gzfp)) + + /* + * gzread returns zero on EOF as well as some error conditions, and less + * than zero on other error conditions, so we need to inspect for EOF on + * zero. + */ + if (gzret <= 0) { int errnum; - const char *errmsg = gzerror(gzfp, &errnum); + const char *errmsg; + + if (gzret == 0 && gzeof(gzfp)) + return 0; + + errmsg = gzerror(gzfp, &errnum); pg_fatal("could not read from input file: %s", errnum == Z_ERRNO ? strerror(errno) : errmsg); } - if (rsize) - *rsize = (size_t) gzret; - - return true; + return (size_t) gzret; } -static bool +static void Gzip_write(const void *ptr, size_t size, CompressFileHandle *CFH) { gzFile gzfp = (gzFile) CFH->private_data; + int errnum; + const char *errmsg; - return gzwrite(gzfp, ptr, size) > 0; + if (gzwrite(gzfp, ptr, size) != size) + { + errmsg = gzerror(gzfp, &errnum); + pg_fatal("could not write to file: %s", + errnum == Z_ERRNO ? strerror(errno) : errmsg); + } } static int diff --git a/src/bin/pg_dump/compress_io.c b/src/bin/pg_dump/compress_io.c index 8c3d9c911c47b..9cadc6f2a3f34 100644 --- a/src/bin/pg_dump/compress_io.c +++ b/src/bin/pg_dump/compress_io.c @@ -269,6 +269,7 @@ InitDiscoverCompressFileHandle(const char *path, const char *mode) } CFH = InitCompressFileHandle(compression_spec); + errno = 0; if (!CFH->open_func(fname, -1, mode, CFH)) { free_keep_errno(CFH); @@ -289,6 +290,7 @@ EndCompressFileHandle(CompressFileHandle *CFH) { bool ret = false; + errno = 0; if (CFH->private_data) ret = CFH->close_func(CFH); diff --git a/src/bin/pg_dump/compress_io.h b/src/bin/pg_dump/compress_io.h index db9b38744c8e2..25a7bf0904d2e 100644 --- a/src/bin/pg_dump/compress_io.h +++ b/src/bin/pg_dump/compress_io.h @@ -123,21 +123,22 @@ struct CompressFileHandle CompressFileHandle *CFH); /* - * Read 'size' bytes of data from the file and store them into 'ptr'. - * Optionally it will store the number of bytes read in 'rsize'. + * Read up to 'size' bytes of data from the file and store them into + * 'ptr'. * - * Returns true on success and throws an internal error otherwise. + * Returns number of bytes read (this might be less than 'size' if EOF was + * reached). Exits via pg_fatal for all error conditions. */ - bool (*read_func) (void *ptr, size_t size, size_t *rsize, + size_t (*read_func) (void *ptr, size_t size, CompressFileHandle *CFH); /* * Write 'size' bytes of data into the file from 'ptr'. * - * Returns true on success and false on error. + * Returns nothing, exits via pg_fatal for all error conditions. */ - bool (*write_func) (const void *ptr, size_t size, - struct CompressFileHandle *CFH); + void (*write_func) (const void *ptr, size_t size, + CompressFileHandle *CFH); /* * Read at most size - 1 characters from the compress file handle into diff --git a/src/bin/pg_dump/compress_lz4.c b/src/bin/pg_dump/compress_lz4.c index e99f0cad71fcb..e2f7c46829308 100644 --- a/src/bin/pg_dump/compress_lz4.c +++ b/src/bin/pg_dump/compress_lz4.c @@ -12,6 +12,7 @@ *------------------------------------------------------------------------- */ #include "postgres_fe.h" +#include #include "compress_lz4.h" #include "pg_backup_utils.h" @@ -358,7 +359,6 @@ LZ4Stream_init(LZ4State *state, int size, bool compressing) return true; state->compressing = compressing; - state->inited = true; /* When compressing, write LZ4 header to the output stream. 
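
Under the revised compress_io.h contract just shown, a minimal conforming read_func over plain stdio looks like the sketch below (this is essentially what the no-compression implementation in compress_none.c now does):

    static size_t
    read_plain(void *ptr, size_t size, CompressFileHandle *CFH)
    {
        FILE       *fp = (FILE *) CFH->private_data;
        size_t      ret = fread(ptr, 1, size, fp);

        if (ferror(fp))
            pg_fatal("could not read from input file: %m");

        /* ret < size simply means EOF; only the caller knows if that's an error */
        return ret;
    }
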
*/ if (state->compressing) @@ -367,6 +367,7 @@ LZ4Stream_init(LZ4State *state, int size, bool compressing) if (!LZ4State_compression_init(state)) return false; + errno = 0; if (fwrite(state->buffer, 1, state->compressedlen, state->fp) != state->compressedlen) { errno = (errno) ? errno : ENOSPC; @@ -390,6 +391,7 @@ LZ4Stream_init(LZ4State *state, int size, bool compressing) state->overflowlen = 0; } + state->inited = true; return true; } @@ -457,7 +459,11 @@ LZ4Stream_read_internal(LZ4State *state, void *ptr, int ptrsize, bool eol_flag) /* Lazy init */ if (!LZ4Stream_init(state, size, false /* decompressing */ )) + { + pg_log_error("unable to initialize LZ4 library: %s", + LZ4F_getErrorName(state->errcode)); return -1; + } /* No work needs to be done for a zero-sized output buffer */ if (size <= 0) @@ -484,7 +490,10 @@ LZ4Stream_read_internal(LZ4State *state, void *ptr, int ptrsize, bool eol_flag) rsize = fread(readbuf, 1, size, state->fp); if (rsize < size && !feof(state->fp)) + { + pg_log_error("could not read from input file: %m"); return -1; + } rp = (char *) readbuf; rend = (char *) readbuf + rsize; @@ -501,6 +510,8 @@ LZ4Stream_read_internal(LZ4State *state, void *ptr, int ptrsize, bool eol_flag) if (LZ4F_isError(status)) { state->errcode = status; + pg_log_error("could not read from input file: %s", + LZ4F_getErrorName(state->errcode)); return -1; } @@ -558,7 +569,7 @@ LZ4Stream_read_internal(LZ4State *state, void *ptr, int ptrsize, bool eol_flag) /* * Compress size bytes from ptr and write them to the stream. */ -static bool +static void LZ4Stream_write(const void *ptr, size_t size, CompressFileHandle *CFH) { LZ4State *state = (LZ4State *) CFH->private_data; @@ -567,7 +578,8 @@ LZ4Stream_write(const void *ptr, size_t size, CompressFileHandle *CFH) /* Lazy init */ if (!LZ4Stream_init(state, size, true)) - return false; + pg_fatal("unable to initialize LZ4 library: %s", + LZ4F_getErrorName(state->errcode)); while (remaining > 0) { @@ -578,28 +590,24 @@ LZ4Stream_write(const void *ptr, size_t size, CompressFileHandle *CFH) status = LZ4F_compressUpdate(state->ctx, state->buffer, state->buflen, ptr, chunk, NULL); if (LZ4F_isError(status)) - { - state->errcode = status; - return false; - } + pg_fatal("error during writing: %s", LZ4F_getErrorName(status)); + errno = 0; if (fwrite(state->buffer, 1, status, state->fp) != status) { errno = (errno) ? errno : ENOSPC; - return false; + pg_fatal("error during writing: %m"); } ptr = ((const char *) ptr) + chunk; } - - return true; } /* * fread() equivalent implementation for LZ4 compressed files. 
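
The errno normalization in LZ4Stream_write above, repeated in the other compression backends in this patch, exists because fwrite() can return a short count without setting errno when the disk fills up; resetting errno first and substituting ENOSPC afterwards makes %m report something sensible:

    errno = 0;                          /* so a stale errno cannot leak through */
    if (fwrite(buf, 1, len, fp) != len)
    {
        /* a short write that sets no errno almost always means a full disk */
        if (errno == 0)
            errno = ENOSPC;
        pg_fatal("could not write to file: %m");
    }
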
*/ -static bool -LZ4Stream_read(void *ptr, size_t size, size_t *rsize, CompressFileHandle *CFH) +static size_t +LZ4Stream_read(void *ptr, size_t size, CompressFileHandle *CFH) { LZ4State *state = (LZ4State *) CFH->private_data; int ret; @@ -607,10 +615,7 @@ LZ4Stream_read(void *ptr, size_t size, size_t *rsize, CompressFileHandle *CFH) if ((ret = LZ4Stream_read_internal(state, ptr, size, false)) < 0) pg_fatal("could not read from input file: %s", LZ4Stream_get_error(CFH)); - if (rsize) - *rsize = (size_t) ret; - - return true; + return (size_t) ret; } /* @@ -643,11 +648,13 @@ LZ4Stream_gets(char *ptr, int size, CompressFileHandle *CFH) int ret; ret = LZ4Stream_read_internal(state, ptr, size - 1, true); - if (ret < 0 || (ret == 0 && !LZ4Stream_eof(CFH))) - pg_fatal("could not read from input file: %s", LZ4Stream_get_error(CFH)); - /* Done reading */ - if (ret == 0) + /* + * LZ4Stream_read_internal returning 0 or -1 means that it was either an + * EOF or an error, but gets_func is defined to return NULL in either case + * so we can treat both the same here. + */ + if (ret <= 0) return NULL; /* @@ -669,6 +676,7 @@ LZ4Stream_close(CompressFileHandle *CFH) FILE *fp; LZ4State *state = (LZ4State *) CFH->private_data; size_t status; + int ret; fp = state->fp; if (state->inited) @@ -677,25 +685,31 @@ LZ4Stream_close(CompressFileHandle *CFH) { status = LZ4F_compressEnd(state->ctx, state->buffer, state->buflen, NULL); if (LZ4F_isError(status)) - pg_fatal("could not end compression: %s", - LZ4F_getErrorName(status)); - else if (fwrite(state->buffer, 1, status, state->fp) != status) { - errno = (errno) ? errno : ENOSPC; - WRITE_ERROR_EXIT; + pg_log_error("could not end compression: %s", + LZ4F_getErrorName(status)); + } + else + { + errno = 0; + if (fwrite(state->buffer, 1, status, state->fp) != status) + { + errno = (errno) ? 
errno : ENOSPC; + pg_log_error("could not write to output file: %m"); + } } status = LZ4F_freeCompressionContext(state->ctx); if (LZ4F_isError(status)) - pg_fatal("could not end compression: %s", - LZ4F_getErrorName(status)); + pg_log_error("could not end compression: %s", + LZ4F_getErrorName(status)); } else { status = LZ4F_freeDecompressionContext(state->dtx); if (LZ4F_isError(status)) - pg_fatal("could not end decompression: %s", - LZ4F_getErrorName(status)); + pg_log_error("could not end decompression: %s", + LZ4F_getErrorName(status)); pg_free(state->overflowbuf); } @@ -703,29 +717,35 @@ LZ4Stream_close(CompressFileHandle *CFH) } pg_free(state); + CFH->private_data = NULL; - return fclose(fp) == 0; + errno = 0; + ret = fclose(fp); + if (ret != 0) + { + pg_log_error("could not close file: %m"); + return false; + } + + return true; } static bool LZ4Stream_open(const char *path, int fd, const char *mode, CompressFileHandle *CFH) { - FILE *fp; LZ4State *state = (LZ4State *) CFH->private_data; if (fd >= 0) - fp = fdopen(fd, mode); + state->fp = fdopen(dup(fd), mode); else - fp = fopen(path, mode); - if (fp == NULL) + state->fp = fopen(path, mode); + if (state->fp == NULL) { state->errcode = errno; return false; } - state->fp = fp; - return true; } diff --git a/src/bin/pg_dump/compress_none.c b/src/bin/pg_dump/compress_none.c index 3fc89c9985461..4abb2e95abc88 100644 --- a/src/bin/pg_dump/compress_none.c +++ b/src/bin/pg_dump/compress_none.c @@ -83,35 +83,31 @@ InitCompressorNone(CompressorState *cs, * Private routines */ -static bool -read_none(void *ptr, size_t size, size_t *rsize, CompressFileHandle *CFH) +static size_t +read_none(void *ptr, size_t size, CompressFileHandle *CFH) { FILE *fp = (FILE *) CFH->private_data; size_t ret; - if (size == 0) - return true; - ret = fread(ptr, 1, size, fp); - if (ret != size && !feof(fp)) + if (ferror(fp)) pg_fatal("could not read from input file: %m"); - if (rsize) - *rsize = ret; - - return true; + return ret; } -static bool +static void write_none(const void *ptr, size_t size, CompressFileHandle *CFH) { size_t ret; + errno = 0; ret = fwrite(ptr, 1, size, (FILE *) CFH->private_data); if (ret != size) - return false; - - return true; + { + errno = (errno) ? errno : ENOSPC; + pg_fatal("could not write to file: %m"); + } } static const char * @@ -153,7 +149,12 @@ close_none(CompressFileHandle *CFH) CFH->private_data = NULL; if (fp) + { + errno = 0; ret = fclose(fp); + if (ret != 0) + pg_log_error("could not close file: %m"); + } return ret == 0; } diff --git a/src/bin/pg_dump/compress_zstd.c b/src/bin/pg_dump/compress_zstd.c index cb595b10c2d32..e24d45e1bbe07 100644 --- a/src/bin/pg_dump/compress_zstd.c +++ b/src/bin/pg_dump/compress_zstd.c @@ -13,6 +13,7 @@ */ #include "postgres_fe.h" +#include #include "compress_zstd.h" #include "pg_backup_utils.h" @@ -258,8 +259,8 @@ InitCompressorZstd(CompressorState *cs, * Compressed stream API */ -static bool -Zstd_read(void *ptr, size_t size, size_t *rdsize, CompressFileHandle *CFH) +static size_t +Zstd_read_internal(void *ptr, size_t size, CompressFileHandle *CFH, bool exit_on_error) { ZstdCompressorState *zstdcs = (ZstdCompressorState *) CFH->private_data; ZSTD_inBuffer *input = &zstdcs->input; @@ -268,6 +269,22 @@ Zstd_read(void *ptr, size_t size, size_t *rdsize, CompressFileHandle *CFH) size_t res, cnt; + /* + * If this is the first call to the reading function, initialize the + * required datastructures. 
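
Both LZ4Stream_open above and Zstd_open below now duplicate the incoming descriptor before fdopen(). The patch does not spell out the reason, but the inference from the code is descriptor ownership: fdopen() does not dup, so the eventual fclose() would otherwise close the caller's fd as well. A sketch of the pattern (the error-path close() is an illustrative addition, not patch text):

    #include <stdio.h>
    #include <unistd.h>

    /* Open a stdio stream without taking ownership of the caller's fd. */
    static FILE *
    fdopen_dup(int fd, const char *mode)
    {
        int         ownfd = dup(fd);
        FILE       *fp;

        if (ownfd < 0)
            return NULL;                /* errno set by dup() */
        fp = fdopen(ownfd, mode);
        if (fp == NULL)
            close(ownfd);               /* don't leak the duplicate on failure */
        return fp;                      /* fclose(fp) later closes ownfd, not fd */
    }
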
+ */ + if (zstdcs->dstream == NULL) + { + zstdcs->input.src = pg_malloc0(input_allocated_size); + zstdcs->dstream = ZSTD_createDStream(); + if (zstdcs->dstream == NULL) + { + if (exit_on_error) + pg_fatal("could not initialize compression library"); + return -1; + } + } + output->size = size; output->dst = ptr; output->pos = 0; @@ -292,6 +309,13 @@ Zstd_read(void *ptr, size_t size, size_t *rdsize, CompressFileHandle *CFH) if (input->pos == input->size) { cnt = fread(unconstify(void *, input->src), 1, input_allocated_size, zstdcs->fp); + if (ferror(zstdcs->fp)) + { + if (exit_on_error) + pg_fatal("could not read from input file: %m"); + return -1; + } + input->size = cnt; Assert(cnt <= input_allocated_size); @@ -307,7 +331,11 @@ Zstd_read(void *ptr, size_t size, size_t *rdsize, CompressFileHandle *CFH) res = ZSTD_decompressStream(zstdcs->dstream, output, input); if (ZSTD_isError(res)) - pg_fatal("could not decompress data: %s", ZSTD_getErrorName(res)); + { + if (exit_on_error) + pg_fatal("could not decompress data: %s", ZSTD_getErrorName(res)); + return -1; + } if (output->pos == output->size) break; /* No more room for output */ @@ -320,13 +348,10 @@ Zstd_read(void *ptr, size_t size, size_t *rdsize, CompressFileHandle *CFH) break; /* We read all the data that fits */ } - if (rdsize != NULL) - *rdsize = output->pos; - - return true; + return output->pos; } -static bool +static void Zstd_write(const void *ptr, size_t size, CompressFileHandle *CFH) { ZstdCompressorState *zstdcs = (ZstdCompressorState *) CFH->private_data; @@ -339,41 +364,40 @@ Zstd_write(const void *ptr, size_t size, CompressFileHandle *CFH) input->size = size; input->pos = 0; + if (zstdcs->cstream == NULL) + { + zstdcs->output.size = ZSTD_CStreamOutSize(); + zstdcs->output.dst = pg_malloc0(zstdcs->output.size); + zstdcs->cstream = _ZstdCStreamParams(CFH->compression_spec); + if (zstdcs->cstream == NULL) + pg_fatal("could not initialize compression library"); + } + /* Consume all input, to be flushed later */ while (input->pos != input->size) { output->pos = 0; res = ZSTD_compressStream2(zstdcs->cstream, output, input, ZSTD_e_continue); if (ZSTD_isError(res)) - { - zstdcs->zstderror = ZSTD_getErrorName(res); - return false; - } + pg_fatal("could not write to file: %s", ZSTD_getErrorName(res)); + errno = 0; cnt = fwrite(output->dst, 1, output->pos, zstdcs->fp); if (cnt != output->pos) { - zstdcs->zstderror = strerror(errno); - return false; + errno = (errno) ? errno : ENOSPC; + pg_fatal("could not write to file: %m"); } } - - return size; } static int Zstd_getc(CompressFileHandle *CFH) { - ZstdCompressorState *zstdcs = (ZstdCompressorState *) CFH->private_data; - int ret; + unsigned char ret; - if (CFH->read_func(&ret, 1, NULL, CFH) != 1) - { - if (feof(zstdcs->fp)) - pg_fatal("could not read from input file: end of file"); - else - pg_fatal("could not read from input file: %m"); - } + if (CFH->read_func(&ret, 1, CFH) != 1) + pg_fatal("could not read from input file: end of file"); return ret; } @@ -390,11 +414,7 @@ Zstd_gets(char *buf, int len, CompressFileHandle *CFH) */ for (i = 0; i < len - 1; ++i) { - size_t readsz; - - if (!CFH->read_func(&buf[i], 1, &readsz, CFH)) - break; - if (readsz != 1) + if (Zstd_read_internal(&buf[i], 1, CFH, false) != 1) break; if (buf[i] == '\n') { @@ -406,10 +426,17 @@ Zstd_gets(char *buf, int len, CompressFileHandle *CFH) return i > 0 ? 
buf : NULL; } +static size_t +Zstd_read(void *ptr, size_t size, CompressFileHandle *CFH) +{ + return Zstd_read_internal(ptr, size, CFH, true); +} + static bool Zstd_close(CompressFileHandle *CFH) { ZstdCompressorState *zstdcs = (ZstdCompressorState *) CFH->private_data; + bool success = true; if (zstdcs->cstream) { @@ -426,14 +453,18 @@ Zstd_close(CompressFileHandle *CFH) if (ZSTD_isError(res)) { zstdcs->zstderror = ZSTD_getErrorName(res); - return false; + success = false; + break; } + errno = 0; cnt = fwrite(output->dst, 1, output->pos, zstdcs->fp); if (cnt != output->pos) { + errno = (errno) ? errno : ENOSPC; zstdcs->zstderror = strerror(errno); - return false; + success = false; + break; } if (res == 0) @@ -450,11 +481,16 @@ Zstd_close(CompressFileHandle *CFH) pg_free(unconstify(void *, zstdcs->input.src)); } + errno = 0; if (fclose(zstdcs->fp) != 0) - return false; + { + zstdcs->zstderror = strerror(errno); + success = false; + } pg_free(zstdcs); - return true; + CFH->private_data = NULL; + return success; } static bool @@ -472,35 +508,33 @@ Zstd_open(const char *path, int fd, const char *mode, FILE *fp; ZstdCompressorState *zstdcs; + /* + * Clear state storage to avoid having the fd point to non-NULL memory on + * error return. + */ + CFH->private_data = NULL; + + zstdcs = (ZstdCompressorState *) pg_malloc_extended(sizeof(*zstdcs), + MCXT_ALLOC_NO_OOM | MCXT_ALLOC_ZERO); + if (!zstdcs) + { + errno = ENOMEM; + return false; + } + if (fd >= 0) - fp = fdopen(fd, mode); + fp = fdopen(dup(fd), mode); else fp = fopen(path, mode); if (fp == NULL) + { + pg_free(zstdcs); return false; + } - zstdcs = (ZstdCompressorState *) pg_malloc0(sizeof(*zstdcs)); - CFH->private_data = zstdcs; zstdcs->fp = fp; - - if (mode[0] == 'r') - { - zstdcs->input.src = pg_malloc0(ZSTD_DStreamInSize()); - zstdcs->dstream = ZSTD_createDStream(); - if (zstdcs->dstream == NULL) - pg_fatal("could not initialize compression library"); - } - else if (mode[0] == 'w' || mode[0] == 'a') - { - zstdcs->output.size = ZSTD_CStreamOutSize(); - zstdcs->output.dst = pg_malloc0(zstdcs->output.size); - zstdcs->cstream = _ZstdCStreamParams(CFH->compression_spec); - if (zstdcs->cstream == NULL) - pg_fatal("could not initialize compression library"); - } - else - pg_fatal("unhandled mode \"%s\"", mode); + CFH->private_data = zstdcs; return true; } diff --git a/src/bin/pg_dump/dumputils.c b/src/bin/pg_dump/dumputils.c index 73ce34346b278..05b84c0d6e7f7 100644 --- a/src/bin/pg_dump/dumputils.c +++ b/src/bin/pg_dump/dumputils.c @@ -21,6 +21,7 @@ #include "dumputils.h" #include "fe_utils/string_utils.h" +static const char restrict_chars[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; static bool parseAclItem(const char *item, const char *type, const char *name, const char *subname, int remoteVersion, @@ -31,6 +32,43 @@ static void AddAcl(PQExpBuffer aclbuf, const char *keyword, const char *subname); +/* + * Sanitize a string to be included in an SQL comment or TOC listing, by + * replacing any newlines with spaces. This ensures each logical output line + * is in fact one physical output line, to prevent corruption of the dump + * (which could, in the worst case, present an SQL injection vulnerability + * if someone were to incautiously load a dump containing objects with + * maliciously crafted names). + * + * The result is a freshly malloc'd string. If the input string is NULL, + * return a malloc'ed empty string, unless want_hyphen, in which case return a + * malloc'ed hyphen. 
+ * + * Note that we currently don't bother to quote names, meaning that the name + * fields aren't automatically parseable. "pg_restore -L" doesn't care because + * it only examines the dumpId field, but someday we might want to try harder. + */ +char * +sanitize_line(const char *str, bool want_hyphen) +{ + char *result; + char *s; + + if (!str) + return pg_strdup(want_hyphen ? "-" : ""); + + result = pg_strdup(str); + + for (s = result; *s != '\0'; s++) + { + if (*s == '\n' || *s == '\r') + *s = ' '; + } + + return result; +} + + /* * Build GRANT/REVOKE command(s) for an object. * @@ -686,7 +724,7 @@ emitShSecLabels(PGconn *conn, PGresult *res, PQExpBuffer buffer, * currently known to guc.c, so that it'd be unsafe for extensions to declare * GUC_LIST_QUOTE variables anyway. Lacking a solution for that, it doesn't * seem worth the work to do more than have this list, which must be kept in - * sync with the variables actually marked GUC_LIST_QUOTE in guc_tables.c. + * sync with the variables actually marked GUC_LIST_QUOTE in guc_parameters.dat. */ bool variable_is_guc_list_quote(const char *name) @@ -920,3 +958,40 @@ create_or_open_dir(const char *dirname) pg_fatal("directory \"%s\" is not empty", dirname); } } + +/* + * Generates a valid restrict key (i.e., an alphanumeric string) for use with + * psql's \restrict and \unrestrict meta-commands. For safety, the value is + * chosen at random. + */ +char * +generate_restrict_key(void) +{ + uint8 buf[64]; + char *ret = palloc(sizeof(buf)); + + if (!pg_strong_random(buf, sizeof(buf))) + return NULL; + + for (int i = 0; i < sizeof(buf) - 1; i++) + { + uint8 idx = buf[i] % strlen(restrict_chars); + + ret[i] = restrict_chars[idx]; + } + ret[sizeof(buf) - 1] = '\0'; + + return ret; +} + +/* + * Checks that a given restrict key (intended for use with psql's \restrict and + * \unrestrict meta-commands) contains only alphanumeric characters. + */ +bool +valid_restrict_key(const char *restrict_key) +{ + return restrict_key != NULL && + restrict_key[0] != '\0' && + strspn(restrict_key, restrict_chars) == strlen(restrict_key); +} diff --git a/src/bin/pg_dump/dumputils.h b/src/bin/pg_dump/dumputils.h index 91c6e612e282e..10f6e27c0a0fa 100644 --- a/src/bin/pg_dump/dumputils.h +++ b/src/bin/pg_dump/dumputils.h @@ -36,6 +36,7 @@ #endif +extern char *sanitize_line(const char *str, bool want_hyphen); extern bool buildACLCommands(const char *name, const char *subname, const char *nspname, const char *type, const char *acls, const char *baseacls, const char *owner, const char *prefix, int remoteVersion, @@ -64,4 +65,7 @@ extern void makeAlterConfigCommand(PGconn *conn, const char *configitem, PQExpBuffer buf); extern void create_or_open_dir(const char *dirname); +extern char *generate_restrict_key(void); +extern bool valid_restrict_key(const char *restrict_key); + #endif /* DUMPUTILS_H */ diff --git a/src/bin/pg_dump/filter.c b/src/bin/pg_dump/filter.c index 7214d51413771..e3cdcf4097563 100644 --- a/src/bin/pg_dump/filter.c +++ b/src/bin/pg_dump/filter.c @@ -171,9 +171,8 @@ pg_log_filter_error(FilterStateData *fstate, const char *fmt,...) /* * filter_get_keyword - read the next filter keyword from buffer * - * Search for keywords (limited to ascii alphabetic characters) in - * the passed in line buffer. Returns NULL when the buffer is empty or the first - * char is not alpha. The char '_' is allowed, except as the first character. + * Search for keywords (strings of non-whitespace characters) in the passed + * in line buffer. 
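
The strspn() test in valid_restrict_key above works because strspn returns the length of the longest initial run consisting only of accepted characters, so any disallowed byte makes the result shorter than the full string. A quick standalone illustration with hypothetical inputs:

    #include <assert.h>
    #include <string.h>

    static const char accept[] =
        "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";

    int
    main(void)
    {
        assert(strspn("Xf2abc", accept) == strlen("Xf2abc"));   /* valid key */
        assert(strspn("ab;rm", accept) == 2);                   /* ';' stops the scan */
        return 0;
    }
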
Returns NULL when the buffer is empty or no keyword exists. * The length of the found keyword is returned in the size parameter. */ static const char * @@ -182,6 +181,9 @@ filter_get_keyword(const char **line, int *size) const char *ptr = *line; const char *result = NULL; + /* The passed buffer must not be NULL */ + Assert(*line != NULL); + /* Set returned length preemptively in case no keyword is found */ *size = 0; @@ -189,11 +191,12 @@ filter_get_keyword(const char **line, int *size) while (isspace((unsigned char) *ptr)) ptr++; - if (isalpha((unsigned char) *ptr)) + /* Grab one keyword that's the string of non-whitespace characters */ + if (*ptr != '\0' && !isspace((unsigned char) *ptr)) { result = ptr++; - while (isalpha((unsigned char) *ptr) || *ptr == '_') + while (*ptr != '\0' && !isspace((unsigned char) *ptr)) ptr++; *size = ptr - result; diff --git a/src/bin/pg_dump/meson.build b/src/bin/pg_dump/meson.build index 4a4ebbd8ec94f..a2233b0a1b431 100644 --- a/src/bin/pg_dump/meson.build +++ b/src/bin/pg_dump/meson.build @@ -102,7 +102,6 @@ tests += { 't/003_pg_dump_with_server.pl', 't/004_pg_dump_parallel.pl', 't/005_pg_dump_filterfile.pl', - 't/006_pg_dumpall.pl', 't/010_dump_connstr.pl', ], }, diff --git a/src/bin/pg_dump/parallel.c b/src/bin/pg_dump/parallel.c index 5974d6706fd57..086adcdc50295 100644 --- a/src/bin/pg_dump/parallel.c +++ b/src/bin/pg_dump/parallel.c @@ -333,16 +333,6 @@ on_exit_close_archive(Archive *AHX) on_exit_nicely(archive_close_connection, &shutdown_info); } -/* - * When pg_restore restores multiple databases, then update already added entry - * into array for cleanup. - */ -void -replace_on_exit_close_archive(Archive *AHX) -{ - shutdown_info.AHX = AHX; -} - /* * on_exit_nicely handler for shutting down database connections and * worker processes cleanly. 
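
Before moving on to pg_backup.h, some context for the filter.c change above: the keywords come from pg_dump --filter files, whose lines look like the hypothetical example below. The relaxed scanner now grabs any whitespace-delimited token and leaves it to the caller to match it against the expected keywords, instead of silently stopping at the first non-alphabetic character.

    # lines starting with '#' are comments
    include table measurement
    exclude table_data raw_events
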
diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h index af0007fb6d2f1..d9041dad72068 100644 --- a/src/bin/pg_dump/pg_backup.h +++ b/src/bin/pg_dump/pg_backup.h @@ -163,6 +163,8 @@ typedef struct _restoreOptions bool dumpSchema; bool dumpData; bool dumpStatistics; + + char *restrict_key; } RestoreOptions; typedef struct _dumpOptions @@ -213,6 +215,8 @@ typedef struct _dumpOptions bool dumpSchema; bool dumpData; bool dumpStatistics; + + char *restrict_key; } DumpOptions; /* @@ -308,7 +312,7 @@ extern void SetArchiveOptions(Archive *AH, DumpOptions *dopt, RestoreOptions *ro extern void ProcessArchiveRestoreOptions(Archive *AHX); -extern void RestoreArchive(Archive *AHX, bool append_data); +extern void RestoreArchive(Archive *AHX); /* Open an existing archive */ extern Archive *OpenArchive(const char *FileSpec, const ArchiveFormat fmt); diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c index 30e0da31aa340..058b5d659bacf 100644 --- a/src/bin/pg_dump/pg_backup_archiver.c +++ b/src/bin/pg_dump/pg_backup_archiver.c @@ -59,7 +59,6 @@ static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt, DataDirSyncMethod sync_method); static void _getObjectDescription(PQExpBuffer buf, const TocEntry *te); static void _printTocEntry(ArchiveHandle *AH, TocEntry *te, const char *pfx); -static char *sanitize_line(const char *str, bool want_hyphen); static void _doSetFixedOutputState(ArchiveHandle *AH); static void _doSetSessionAuth(ArchiveHandle *AH, const char *user); static void _reconnectToDB(ArchiveHandle *AH, const char *dbname); @@ -87,7 +86,7 @@ static int RestoringToDB(ArchiveHandle *AH); static void dump_lo_buf(ArchiveHandle *AH); static void dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim); static void SetOutput(ArchiveHandle *AH, const char *filename, - const pg_compress_specification compression_spec, bool append_data); + const pg_compress_specification compression_spec); static CompressFileHandle *SaveOutput(ArchiveHandle *AH); static void RestoreOutput(ArchiveHandle *AH, CompressFileHandle *savedOutput); @@ -198,6 +197,7 @@ dumpOptionsFromRestoreOptions(RestoreOptions *ropt) dopt->include_everything = ropt->include_everything; dopt->enable_row_security = ropt->enable_row_security; dopt->sequence_data = ropt->sequence_data; + dopt->restrict_key = ropt->restrict_key ? pg_strdup(ropt->restrict_key) : NULL; return dopt; } @@ -339,14 +339,9 @@ ProcessArchiveRestoreOptions(Archive *AHX) StrictNamesCheck(ropt); } -/* - * RestoreArchive - * - * If append_data is set, then append data into file as we are restoring dump - * of multiple databases which was taken by pg_dumpall. - */ +/* Public */ void -RestoreArchive(Archive *AHX, bool append_data) +RestoreArchive(Archive *AHX) { ArchiveHandle *AH = (ArchiveHandle *) AHX; RestoreOptions *ropt = AH->public.ropt; @@ -463,10 +458,21 @@ RestoreArchive(Archive *AHX, bool append_data) */ sav = SaveOutput(AH); if (ropt->filename || ropt->compression_spec.algorithm != PG_COMPRESSION_NONE) - SetOutput(AH, ropt->filename, ropt->compression_spec, append_data); + SetOutput(AH, ropt->filename, ropt->compression_spec); ahprintf(AH, "--\n-- PostgreSQL database dump\n--\n\n"); + /* + * If generating plain-text output, enter restricted mode to block any + * unexpected psql meta-commands. A malicious source might try to inject + * a variety of things via bogus responses to queries. 
While we cannot + * prevent such sources from affecting the destination at restore time, we + * can block psql meta-commands so that the client machine that runs psql + * with the dump output remains unaffected. + */ + if (ropt->restrict_key) + ahprintf(AH, "\\restrict %s\n\n", ropt->restrict_key); + if (AH->archiveRemoteVersion) ahprintf(AH, "-- Dumped from database version %s\n", AH->archiveRemoteVersion); @@ -807,6 +813,14 @@ RestoreArchive(Archive *AHX, bool append_data) ahprintf(AH, "--\n-- PostgreSQL database dump complete\n--\n\n"); + /* + * If generating plain-text output, exit restricted mode at the very end + * of the script. This is not pro forma; in particular, pg_dumpall + * requires this when transitioning from one database to another. + */ + if (ropt->restrict_key) + ahprintf(AH, "\\unrestrict %s\n\n", ropt->restrict_key); + /* * Clean up & we're done. */ @@ -1302,7 +1316,7 @@ PrintTOCSummary(Archive *AHX) sav = SaveOutput(AH); if (ropt->filename) - SetOutput(AH, ropt->filename, out_compression_spec, false); + SetOutput(AH, ropt->filename, out_compression_spec); if (strftime(stamp_str, sizeof(stamp_str), PGDUMP_STRFTIME_FMT, localtime(&AH->createDate)) == 0) @@ -1681,8 +1695,7 @@ archprintf(Archive *AH, const char *fmt,...) static void SetOutput(ArchiveHandle *AH, const char *filename, - const pg_compress_specification compression_spec, - bool append_data) + const pg_compress_specification compression_spec) { CompressFileHandle *CFH; const char *mode; @@ -1702,7 +1715,7 @@ SetOutput(ArchiveHandle *AH, const char *filename, else fn = fileno(stdout); - if (append_data || AH->mode == archModeAppend) + if (AH->mode == archModeAppend) mode = PG_BINARY_A; else mode = PG_BINARY_W; @@ -1870,8 +1883,8 @@ ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH) { CompressFileHandle *CFH = (CompressFileHandle *) AH->OF; - if (CFH->write_func(ptr, size * nmemb, CFH)) - bytes_written = size * nmemb; + CFH->write_func(ptr, size * nmemb, CFH); + bytes_written = size * nmemb; } if (bytes_written != size * nmemb) @@ -3459,11 +3472,21 @@ _reconnectToDB(ArchiveHandle *AH, const char *dbname) else { PQExpBufferData connectbuf; + RestoreOptions *ropt = AH->public.ropt; + + /* + * We must temporarily exit restricted mode for \connect, etc. + * Anything added between this line and the following \restrict must + * be careful to avoid any possible meta-command injection vectors. + */ + ahprintf(AH, "\\unrestrict %s\n", ropt->restrict_key); initPQExpBuffer(&connectbuf); appendPsqlMetaConnect(&connectbuf, dbname); - ahprintf(AH, "%s\n", connectbuf.data); + ahprintf(AH, "%s", connectbuf.data); termPQExpBuffer(&connectbuf); + + ahprintf(AH, "\\restrict %s\n\n", ropt->restrict_key); } /* @@ -4056,42 +4079,6 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, const char *pfx) } } -/* - * Sanitize a string to be included in an SQL comment or TOC listing, by - * replacing any newlines with spaces. This ensures each logical output line - * is in fact one physical output line, to prevent corruption of the dump - * (which could, in the worst case, present an SQL injection vulnerability - * if someone were to incautiously load a dump containing objects with - * maliciously crafted names). - * - * The result is a freshly malloc'd string. If the input string is NULL, - * return a malloc'ed empty string, unless want_hyphen, in which case return a - * malloc'ed hyphen. - * - * Note that we currently don't bother to quote names, meaning that the name - * fields aren't automatically parseable. 
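
Putting the pieces together, a plain-format dump produced with a restrict key now has this overall shape (key shortened, content elided; illustrative rather than verbatim output). Note how the reconnect sequence emitted by _reconnectToDB briefly exits restricted mode, exactly as the code above arranges:

    --
    -- PostgreSQL database dump
    --

    \restrict Xf2abc

    -- ... SQL statements; other psql meta-commands are rejected here ...

    \unrestrict Xf2abc
    \connect otherdb
    \restrict Xf2abc

    -- ... statements for the next database ...

    \unrestrict Xf2abc
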
"pg_restore -L" doesn't care because - * it only examines the dumpId field, but someday we might want to try harder. - */ -static char * -sanitize_line(const char *str, bool want_hyphen) -{ - char *result; - char *s; - - if (!str) - return pg_strdup(want_hyphen ? "-" : ""); - - result = pg_strdup(str); - - for (s = result; *s != '\0'; s++) - { - if (*s == '\n' || *s == '\r') - *s = ' '; - } - - return result; -} - /* * Write the file header for a custom-format archive */ diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h index 365073b3eae45..325b53fc9bd4b 100644 --- a/src/bin/pg_dump/pg_backup_archiver.h +++ b/src/bin/pg_dump/pg_backup_archiver.h @@ -394,7 +394,6 @@ struct _tocEntry extern int parallel_restore(ArchiveHandle *AH, TocEntry *te); extern void on_exit_close_archive(Archive *AHX); -extern void replace_on_exit_close_archive(Archive *AHX); extern void warn_or_exit_horribly(ArchiveHandle *AH, const char *fmt,...) pg_attribute_printf(2, 3); diff --git a/src/bin/pg_dump/pg_backup_directory.c b/src/bin/pg_dump/pg_backup_directory.c index bc2a2fb479741..94d401d8a4e5a 100644 --- a/src/bin/pg_dump/pg_backup_directory.c +++ b/src/bin/pg_dump/pg_backup_directory.c @@ -316,15 +316,9 @@ _WriteData(ArchiveHandle *AH, const void *data, size_t dLen) lclContext *ctx = (lclContext *) AH->formatData; CompressFileHandle *CFH = ctx->dataFH; - errno = 0; - if (dLen > 0 && !CFH->write_func(data, dLen, CFH)) - { - /* if write didn't set errno, assume problem is no disk space */ - if (errno == 0) - errno = ENOSPC; - pg_fatal("could not write to output file: %s", - CFH->get_error_func(CFH)); - } + if (dLen <= 0) + return; + CFH->write_func(data, dLen, CFH); } /* @@ -351,7 +345,7 @@ _EndData(ArchiveHandle *AH, TocEntry *te) static void _PrintFileData(ArchiveHandle *AH, char *filename) { - size_t cnt = 0; + size_t cnt; char *buf; size_t buflen; CompressFileHandle *CFH; @@ -366,7 +360,7 @@ _PrintFileData(ArchiveHandle *AH, char *filename) buflen = DEFAULT_IO_BUFFER_SIZE; buf = pg_malloc(buflen); - while (CFH->read_func(buf, buflen, &cnt, CFH) && cnt > 0) + while ((cnt = CFH->read_func(buf, buflen, CFH)) > 0) { ahwrite(buf, 1, cnt, AH); } @@ -470,16 +464,7 @@ _WriteByte(ArchiveHandle *AH, const int i) lclContext *ctx = (lclContext *) AH->formatData; CompressFileHandle *CFH = ctx->dataFH; - errno = 0; - if (!CFH->write_func(&c, 1, CFH)) - { - /* if write didn't set errno, assume problem is no disk space */ - if (errno == 0) - errno = ENOSPC; - pg_fatal("could not write to output file: %s", - CFH->get_error_func(CFH)); - } - + CFH->write_func(&c, 1, CFH); return 1; } @@ -508,15 +493,7 @@ _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len) lclContext *ctx = (lclContext *) AH->formatData; CompressFileHandle *CFH = ctx->dataFH; - errno = 0; - if (!CFH->write_func(buf, len, CFH)) - { - /* if write didn't set errno, assume problem is no disk space */ - if (errno == 0) - errno = ENOSPC; - pg_fatal("could not write to output file: %s", - CFH->get_error_func(CFH)); - } + CFH->write_func(buf, len, CFH); } /* @@ -531,10 +508,10 @@ _ReadBuf(ArchiveHandle *AH, void *buf, size_t len) CompressFileHandle *CFH = ctx->dataFH; /* - * If there was an I/O error, we already exited in readF(), so here we - * exit on short reads. + * We do not expect a short read, so fail if we get one. The read_func + * already dealt with any outright I/O error. 
*/ - if (!CFH->read_func(buf, len, NULL, CFH)) + if (CFH->read_func(buf, len, CFH) != len) pg_fatal("could not read from input file: end of file"); } @@ -677,14 +654,7 @@ _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid) /* register the LO in blobs_NNN.toc */ len = snprintf(buf, sizeof(buf), "%u blob_%u.dat\n", oid, oid); - if (!CFH->write_func(buf, len, CFH)) - { - /* if write didn't set errno, assume problem is no disk space */ - if (errno == 0) - errno = ENOSPC; - pg_fatal("could not write to LOs TOC file: %s", - CFH->get_error_func(CFH)); - } + CFH->write_func(buf, len, CFH); } /* diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c index d94d0de2a5d17..b5ba3b46dd999 100644 --- a/src/bin/pg_dump/pg_backup_tar.c +++ b/src/bin/pg_dump/pg_backup_tar.c @@ -826,7 +826,7 @@ _CloseArchive(ArchiveHandle *AH) savVerbose = AH->public.verbose; AH->public.verbose = 0; - RestoreArchive((Archive *) AH, false); + RestoreArchive((Archive *) AH); SetArchiveOptions((Archive *) AH, savDopt, savRopt); diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 6298edb26b5df..b4c45ad803e94 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -449,8 +449,6 @@ main(int argc, char **argv) bool data_only = false; bool schema_only = false; bool statistics_only = false; - bool with_data = false; - bool with_schema = false; bool with_statistics = false; bool no_data = false; bool no_schema = false; @@ -514,6 +512,7 @@ main(int argc, char **argv) {"section", required_argument, NULL, 5}, {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1}, {"snapshot", required_argument, NULL, 6}, + {"statistics", no_argument, NULL, 22}, {"statistics-only", no_argument, NULL, 18}, {"strict-names", no_argument, &strict_names, 1}, {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1}, @@ -528,9 +527,6 @@ main(int argc, char **argv) {"no-toast-compression", no_argument, &dopt.no_toast_compression, 1}, {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1}, {"no-sync", no_argument, NULL, 7}, - {"with-data", no_argument, NULL, 22}, - {"with-schema", no_argument, NULL, 23}, - {"with-statistics", no_argument, NULL, 24}, {"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1}, {"rows-per-insert", required_argument, NULL, 10}, {"include-foreign-data", required_argument, NULL, 11}, @@ -541,6 +537,7 @@ main(int argc, char **argv) {"filter", required_argument, NULL, 16}, {"exclude-extension", required_argument, NULL, 17}, {"sequence-data", no_argument, &dopt.sequence_data, 1}, + {"restrict-key", required_argument, NULL, 25}, {NULL, 0, NULL, 0} }; @@ -798,15 +795,11 @@ main(int argc, char **argv) break; case 22: - with_data = true; - break; - - case 23: - with_schema = true; + with_statistics = true; break; - case 24: - with_statistics = true; + case 25: + dopt.restrict_key = pg_strdup(optarg); break; default: @@ -852,13 +845,17 @@ main(int argc, char **argv) if (statistics_only && no_statistics) pg_fatal("options --statistics-only and --no-statistics cannot be used together"); - /* reject conflicting "with-" and "no-" options */ - if (with_data && no_data) - pg_fatal("options --with-data and --no-data cannot be used together"); - if (with_schema && no_schema) - pg_fatal("options --with-schema and --no-schema cannot be used together"); + /* reject conflicting "no-" options */ if (with_statistics && no_statistics) - pg_fatal("options --with-statistics and --no-statistics cannot be used together"); + pg_fatal("options 
--statistics and --no-statistics cannot be used together"); + + /* reject conflicting "-only" options */ + if (data_only && with_statistics) + pg_fatal("options %s and %s cannot be used together", + "-a/--data-only", "--statistics"); + if (schema_only && with_statistics) + pg_fatal("options %s and %s cannot be used together", + "-s/--schema-only", "--statistics"); if (schema_only && foreign_servers_include_patterns.head != NULL) pg_fatal("options -s/--schema-only and --include-foreign-data cannot be used together"); @@ -873,16 +870,14 @@ main(int argc, char **argv) pg_fatal("option --if-exists requires option -c/--clean"); /* - * Set derivative flags. An "-only" option may be overridden by an - * explicit "with-" option; e.g. "--schema-only --with-statistics" will - * include schema and statistics. Other ambiguous or nonsensical - * combinations, e.g. "--schema-only --no-schema", will have already - * caused an error in one of the checks above. + * Set derivative flags. Ambiguous or nonsensical combinations, e.g. + * "--schema-only --no-schema", will have already caused an error in one + * of the checks above. */ dopt.dumpData = ((dopt.dumpData && !schema_only && !statistics_only) || - (data_only || with_data)) && !no_data; + data_only) && !no_data; dopt.dumpSchema = ((dopt.dumpSchema && !data_only && !statistics_only) || - (schema_only || with_schema)) && !no_schema; + schema_only) && !no_schema; dopt.dumpStatistics = ((dopt.dumpStatistics && !schema_only && !data_only) || (statistics_only || with_statistics)) && !no_statistics; @@ -899,8 +894,22 @@ main(int argc, char **argv) /* archiveFormat specific setup */ if (archiveFormat == archNull) + { plainText = 1; + /* + * If you don't provide a restrict key, one will be appointed for you. + */ + if (!dopt.restrict_key) + dopt.restrict_key = generate_restrict_key(); + if (!dopt.restrict_key) + pg_fatal("could not generate restrict key"); + if (!valid_restrict_key(dopt.restrict_key)) + pg_fatal("invalid restrict key"); + } + else if (dopt.restrict_key) + pg_fatal("option --restrict-key can only be used with --format=plain"); + /* * Custom and directory formats are compressed by default with gzip when * available, not the others. If gzip is not available, no compression is @@ -1122,6 +1131,23 @@ main(int argc, char **argv) shdepend->dataObj->filtercond = "WHERE classid = 'pg_largeobject'::regclass " "AND dbid = (SELECT oid FROM pg_database " " WHERE datname = current_database())"; + + /* + * If upgrading from v16 or newer, only dump large objects with + * comments/seclabels. For these upgrades, pg_upgrade can copy/link + * pg_largeobject_metadata's files (which is usually faster) but we + * still need to dump LOs with comments/seclabels here so that the + * subsequent COMMENT and SECURITY LABEL commands work. pg_upgrade + * can't copy/link the files from older versions because aclitem + * (needed by pg_largeobject_metadata.lomacl) changed its storage + * format in v16. + */ + if (fout->remoteVersion >= 160000) + lo_metadata->dataObj->filtercond = "WHERE oid IN " + "(SELECT objoid FROM pg_description " + "WHERE classoid = " CppAsString2(LargeObjectRelationId) " " + "UNION SELECT objoid FROM pg_seclabel " + "WHERE classoid = " CppAsString2(LargeObjectRelationId) ")"; } /* @@ -1239,6 +1265,7 @@ main(int argc, char **argv) ropt->enable_row_security = dopt.enable_row_security; ropt->sequence_data = dopt.sequence_data; ropt->binary_upgrade = dopt.binary_upgrade; + ropt->restrict_key = dopt.restrict_key ? 
pg_strdup(dopt.restrict_key) : NULL; ropt->compression_spec = compression_spec; @@ -1265,7 +1292,7 @@ main(int argc, char **argv) * right now. */ if (plainText) - RestoreArchive(fout, false); + RestoreArchive(fout); CloseArchive(fout); @@ -1350,11 +1377,13 @@ help(const char *progname) printf(_(" --no-unlogged-table-data do not dump unlogged table data\n")); printf(_(" --on-conflict-do-nothing add ON CONFLICT DO NOTHING to INSERT commands\n")); printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n")); + printf(_(" --restrict-key=RESTRICT_KEY use provided string as psql \\restrict key\n")); printf(_(" --rows-per-insert=NROWS number of rows per INSERT; implies --inserts\n")); printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n")); printf(_(" --sequence-data include sequence data in dump\n")); printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n")); printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n")); + printf(_(" --statistics dump the statistics\n")); printf(_(" --statistics-only dump only the statistics, not schema or data\n")); printf(_(" --strict-names require table and/or schema include patterns to\n" " match at least one entity each\n")); @@ -1363,9 +1392,6 @@ help(const char *progname) printf(_(" --use-set-session-authorization\n" " use SET SESSION AUTHORIZATION commands instead of\n" " ALTER OWNER commands to set ownership\n")); - printf(_(" --with-data dump the data\n")); - printf(_(" --with-schema dump the schema\n")); - printf(_(" --with-statistics dump the statistics\n")); printf(_("\nConnection options:\n")); printf(_(" -d, --dbname=DBNAME database to dump\n")); @@ -2207,6 +2233,13 @@ selectDumpableProcLang(ProcLangInfo *plang, Archive *fout) static void selectDumpableAccessMethod(AccessMethodInfo *method, Archive *fout) { + /* see getAccessMethods() comment about v9.6. */ + if (fout->remoteVersion < 90600) + { + method->dobj.dump = DUMP_COMPONENT_NONE; + return; + } + if (checkExtensionMembership(&method->dobj, fout)) return; /* extension membership overrides all else */ @@ -2845,11 +2878,14 @@ dumpTableData(Archive *fout, const TableDataInfo *tdinfo) forcePartitionRootLoad(tbinfo))) { TableInfo *parentTbinfo; + char *sanitized; parentTbinfo = getRootTableInfo(tbinfo); copyFrom = fmtQualifiedDumpable(parentTbinfo); + sanitized = sanitize_line(copyFrom, true); printfPQExpBuffer(copyBuf, "-- load via partition root %s", - copyFrom); + sanitized); + free(sanitized); tdDefn = pg_strdup(copyBuf->data); } else @@ -3610,26 +3646,32 @@ dumpDatabase(Archive *fout) /* * pg_largeobject comes from the old system intact, so set its * relfrozenxids, relminmxids and relfilenode. + * + * pg_largeobject_metadata also comes from the old system intact for + * upgrades from v16 and newer, so set its relfrozenxids, relminmxids, and + * relfilenode, too. pg_upgrade can't copy/link the files from older + * versions because aclitem (needed by pg_largeobject_metadata.lomacl) + * changed its storage format in v16. 
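
The binary-upgrade block below assembles those preservation commands into a new lomOutQry buffer; for an upgrade from v16 or newer the archive gains roughly this entry (relfilenode, xid, and mxid values illustrative):

    -- For binary upgrade, preserve pg_largeobject_metadata and index relfilenodes
    SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('2995'::pg_catalog.oid);
    SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('2996'::pg_catalog.oid);
    TRUNCATE pg_catalog.pg_largeobject_metadata;

    -- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid
    UPDATE pg_catalog.pg_class
    SET relfrozenxid = '748', relminmxid = '1'
    WHERE oid = 2995;
    UPDATE pg_catalog.pg_class
    SET relfrozenxid = '748', relminmxid = '1'
    WHERE oid = 2996;
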
*/ if (dopt->binary_upgrade) { PGresult *lo_res; PQExpBuffer loFrozenQry = createPQExpBuffer(); PQExpBuffer loOutQry = createPQExpBuffer(); + PQExpBuffer lomOutQry = createPQExpBuffer(); PQExpBuffer loHorizonQry = createPQExpBuffer(); + PQExpBuffer lomHorizonQry = createPQExpBuffer(); int ii_relfrozenxid, ii_relfilenode, ii_oid, ii_relminmxid; - /* - * pg_largeobject - */ if (fout->remoteVersion >= 90300) appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid, relfilenode, oid\n" "FROM pg_catalog.pg_class\n" - "WHERE oid IN (%u, %u);\n", - LargeObjectRelationId, LargeObjectLOidPNIndexId); + "WHERE oid IN (%u, %u, %u, %u);\n", + LargeObjectRelationId, LargeObjectLOidPNIndexId, + LargeObjectMetadataRelationId, LargeObjectMetadataOidIndexId); else appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid, relfilenode, oid\n" "FROM pg_catalog.pg_class\n" @@ -3644,35 +3686,57 @@ dumpDatabase(Archive *fout) ii_oid = PQfnumber(lo_res, "oid"); appendPQExpBufferStr(loHorizonQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n"); + appendPQExpBufferStr(lomHorizonQry, "\n-- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid\n"); appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, preserve pg_largeobject and index relfilenodes\n"); + appendPQExpBufferStr(lomOutQry, "\n-- For binary upgrade, preserve pg_largeobject_metadata and index relfilenodes\n"); for (int i = 0; i < PQntuples(lo_res); ++i) { Oid oid; RelFileNumber relfilenumber; + PQExpBuffer horizonQry; + PQExpBuffer outQry; + + oid = atooid(PQgetvalue(lo_res, i, ii_oid)); + relfilenumber = atooid(PQgetvalue(lo_res, i, ii_relfilenode)); + + if (oid == LargeObjectRelationId || + oid == LargeObjectLOidPNIndexId) + { + horizonQry = loHorizonQry; + outQry = loOutQry; + } + else + { + horizonQry = lomHorizonQry; + outQry = lomOutQry; + } - appendPQExpBuffer(loHorizonQry, "UPDATE pg_catalog.pg_class\n" + appendPQExpBuffer(horizonQry, "UPDATE pg_catalog.pg_class\n" "SET relfrozenxid = '%u', relminmxid = '%u'\n" "WHERE oid = %u;\n", atooid(PQgetvalue(lo_res, i, ii_relfrozenxid)), atooid(PQgetvalue(lo_res, i, ii_relminmxid)), atooid(PQgetvalue(lo_res, i, ii_oid))); - oid = atooid(PQgetvalue(lo_res, i, ii_oid)); - relfilenumber = atooid(PQgetvalue(lo_res, i, ii_relfilenode)); - - if (oid == LargeObjectRelationId) - appendPQExpBuffer(loOutQry, + if (oid == LargeObjectRelationId || + oid == LargeObjectMetadataRelationId) + appendPQExpBuffer(outQry, "SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('%u'::pg_catalog.oid);\n", relfilenumber); - else if (oid == LargeObjectLOidPNIndexId) - appendPQExpBuffer(loOutQry, + else if (oid == LargeObjectLOidPNIndexId || + oid == LargeObjectMetadataOidIndexId) + appendPQExpBuffer(outQry, "SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n", relfilenumber); } appendPQExpBufferStr(loOutQry, "TRUNCATE pg_catalog.pg_largeobject;\n"); + appendPQExpBufferStr(lomOutQry, + "TRUNCATE pg_catalog.pg_largeobject_metadata;\n"); + appendPQExpBufferStr(loOutQry, loHorizonQry->data); + appendPQExpBufferStr(lomOutQry, lomHorizonQry->data); ArchiveEntry(fout, nilCatalogId, createDumpId(), ARCHIVE_OPTS(.tag = "pg_largeobject", @@ -3680,11 +3744,20 @@ dumpDatabase(Archive *fout) .section = SECTION_PRE_DATA, .createStmt = loOutQry->data)); + if (fout->remoteVersion >= 160000) + ArchiveEntry(fout, nilCatalogId, createDumpId(), + ARCHIVE_OPTS(.tag = "pg_largeobject_metadata", + .description = "pg_largeobject_metadata", + 
.section = SECTION_PRE_DATA, + .createStmt = lomOutQry->data)); + PQclear(lo_res); destroyPQExpBuffer(loFrozenQry); destroyPQExpBuffer(loHorizonQry); + destroyPQExpBuffer(lomHorizonQry); destroyPQExpBuffer(loOutQry); + destroyPQExpBuffer(lomOutQry); } PQclear(res); @@ -5029,6 +5102,7 @@ getSubscriptions(Archive *fout) int i_subenabled; int i_subfailover; int i_subretaindeadtuples; + int i_submaxretention; int i, ntups; @@ -5108,10 +5182,17 @@ getSubscriptions(Archive *fout) if (fout->remoteVersion >= 190000) appendPQExpBufferStr(query, - " s.subretaindeadtuples\n"); + " s.subretaindeadtuples,\n"); else appendPQExpBufferStr(query, - " false AS subretaindeadtuples\n"); + " false AS subretaindeadtuples,\n"); + + if (fout->remoteVersion >= 190000) + appendPQExpBufferStr(query, + " s.submaxretention\n"); + else + appendPQExpBuffer(query, + " 0 AS submaxretention\n"); appendPQExpBufferStr(query, "FROM pg_subscription s\n"); @@ -5146,6 +5227,7 @@ getSubscriptions(Archive *fout) i_subrunasowner = PQfnumber(res, "subrunasowner"); i_subfailover = PQfnumber(res, "subfailover"); i_subretaindeadtuples = PQfnumber(res, "subretaindeadtuples"); + i_submaxretention = PQfnumber(res, "submaxretention"); i_subconninfo = PQfnumber(res, "subconninfo"); i_subslotname = PQfnumber(res, "subslotname"); i_subsynccommit = PQfnumber(res, "subsynccommit"); @@ -5181,6 +5263,8 @@ getSubscriptions(Archive *fout) (strcmp(PQgetvalue(res, i, i_subfailover), "t") == 0); subinfo[i].subretaindeadtuples = (strcmp(PQgetvalue(res, i, i_subretaindeadtuples), "t") == 0); + subinfo[i].submaxretention = + atoi(PQgetvalue(res, i, i_submaxretention)); subinfo[i].subconninfo = pg_strdup(PQgetvalue(res, i, i_subconninfo)); if (PQgetisnull(res, i, i_subslotname)) @@ -5442,6 +5526,9 @@ dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo) if (subinfo->subretaindeadtuples) appendPQExpBufferStr(query, ", retain_dead_tuples = true"); + if (subinfo->submaxretention) + appendPQExpBuffer(query, ", max_retention_duration = %d", subinfo->submaxretention); + if (strcmp(subinfo->subsynccommit, "off") != 0) appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit)); @@ -6262,6 +6349,8 @@ getOperators(Archive *fout) int i_oprnamespace; int i_oprowner; int i_oprkind; + int i_oprleft; + int i_oprright; int i_oprcode; /* @@ -6273,6 +6362,8 @@ getOperators(Archive *fout) "oprnamespace, " "oprowner, " "oprkind, " + "oprleft, " + "oprright, " "oprcode::oid AS oprcode " "FROM pg_operator"); @@ -6288,6 +6379,8 @@ getOperators(Archive *fout) i_oprnamespace = PQfnumber(res, "oprnamespace"); i_oprowner = PQfnumber(res, "oprowner"); i_oprkind = PQfnumber(res, "oprkind"); + i_oprleft = PQfnumber(res, "oprleft"); + i_oprright = PQfnumber(res, "oprright"); i_oprcode = PQfnumber(res, "oprcode"); for (i = 0; i < ntups; i++) @@ -6301,6 +6394,8 @@ getOperators(Archive *fout) findNamespace(atooid(PQgetvalue(res, i, i_oprnamespace))); oprinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_oprowner)); oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0]; + oprinfo[i].oprleft = atooid(PQgetvalue(res, i, i_oprleft)); + oprinfo[i].oprright = atooid(PQgetvalue(res, i, i_oprright)); oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode)); /* Decide whether we want to dump it */ @@ -6329,6 +6424,7 @@ getCollations(Archive *fout) int i_collname; int i_collnamespace; int i_collowner; + int i_collencoding; query = createPQExpBuffer(); @@ -6339,7 +6435,8 @@ getCollations(Archive *fout) appendPQExpBufferStr(query, "SELECT tableoid, oid, 
collname, " "collnamespace, " - "collowner " + "collowner, " + "collencoding " "FROM pg_collation"); res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK); @@ -6353,6 +6450,7 @@ getCollations(Archive *fout) i_collname = PQfnumber(res, "collname"); i_collnamespace = PQfnumber(res, "collnamespace"); i_collowner = PQfnumber(res, "collowner"); + i_collencoding = PQfnumber(res, "collencoding"); for (i = 0; i < ntups; i++) { @@ -6364,6 +6462,7 @@ getCollations(Archive *fout) collinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_collnamespace))); collinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_collowner)); + collinfo[i].collencoding = atoi(PQgetvalue(res, i, i_collencoding)); /* Decide whether we want to dump it */ selectDumpableObject(&(collinfo[i].dobj), fout); @@ -6454,16 +6553,28 @@ getAccessMethods(Archive *fout) int i_amhandler; int i_amtype; - /* Before 9.6, there are no user-defined access methods */ - if (fout->remoteVersion < 90600) - return; - query = createPQExpBuffer(); - /* Select all access methods from pg_am table */ - appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, amtype, " - "amhandler::pg_catalog.regproc AS amhandler " - "FROM pg_am"); + /* + * Select all access methods from pg_am table. v9.6 introduced CREATE + * ACCESS METHOD, so earlier versions usually have only built-in access + * methods. v9.6 also changed the access method API, replacing dozens of + * pg_am columns with amhandler. Even if a user created an access method + * by "INSERT INTO pg_am", we have no way to translate pre-v9.6 pg_am + * columns to a v9.6+ CREATE ACCESS METHOD. Hence, before v9.6, read + * pg_am just to facilitate findAccessMethodByOid() providing the + * OID-to-name mapping. + */ + appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, "); + if (fout->remoteVersion >= 90600) + appendPQExpBufferStr(query, + "amtype, " + "amhandler::pg_catalog.regproc AS amhandler "); + else + appendPQExpBufferStr(query, + "'i'::pg_catalog.\"char\" AS amtype, " + "'-'::pg_catalog.regproc AS amhandler "); + appendPQExpBufferStr(query, "FROM pg_am"); res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK); @@ -6512,6 +6623,7 @@ getOpclasses(Archive *fout) OpclassInfo *opcinfo; int i_tableoid; int i_oid; + int i_opcmethod; int i_opcname; int i_opcnamespace; int i_opcowner; @@ -6521,7 +6633,7 @@ getOpclasses(Archive *fout) * system-defined opclasses at dump-out time. */ - appendPQExpBufferStr(query, "SELECT tableoid, oid, opcname, " + appendPQExpBufferStr(query, "SELECT tableoid, oid, opcmethod, opcname, " "opcnamespace, " "opcowner " "FROM pg_opclass"); @@ -6534,6 +6646,7 @@ getOpclasses(Archive *fout) i_tableoid = PQfnumber(res, "tableoid"); i_oid = PQfnumber(res, "oid"); + i_opcmethod = PQfnumber(res, "opcmethod"); i_opcname = PQfnumber(res, "opcname"); i_opcnamespace = PQfnumber(res, "opcnamespace"); i_opcowner = PQfnumber(res, "opcowner"); @@ -6547,6 +6660,7 @@ getOpclasses(Archive *fout) opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname)); opcinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_opcnamespace))); + opcinfo[i].opcmethod = atooid(PQgetvalue(res, i, i_opcmethod)); opcinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opcowner)); /* Decide whether we want to dump it */ @@ -6572,6 +6686,7 @@ getOpfamilies(Archive *fout) OpfamilyInfo *opfinfo; int i_tableoid; int i_oid; + int i_opfmethod; int i_opfname; int i_opfnamespace; int i_opfowner; @@ -6583,7 +6698,7 @@ getOpfamilies(Archive *fout) * system-defined opfamilies at dump-out time. 
*/ - appendPQExpBufferStr(query, "SELECT tableoid, oid, opfname, " + appendPQExpBufferStr(query, "SELECT tableoid, oid, opfmethod, opfname, " "opfnamespace, " "opfowner " "FROM pg_opfamily"); @@ -6597,6 +6712,7 @@ getOpfamilies(Archive *fout) i_tableoid = PQfnumber(res, "tableoid"); i_oid = PQfnumber(res, "oid"); i_opfname = PQfnumber(res, "opfname"); + i_opfmethod = PQfnumber(res, "opfmethod"); i_opfnamespace = PQfnumber(res, "opfnamespace"); i_opfowner = PQfnumber(res, "opfowner"); @@ -6609,6 +6725,7 @@ getOpfamilies(Archive *fout) opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname)); opfinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_opfnamespace))); + opfinfo[i].opfmethod = atooid(PQgetvalue(res, i, i_opfmethod)); opfinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opfowner)); /* Decide whether we want to dump it */ diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h index 93a4475d51b80..bcc94ff07cc4b 100644 --- a/src/bin/pg_dump/pg_dump.h +++ b/src/bin/pg_dump/pg_dump.h @@ -260,6 +260,8 @@ typedef struct _oprInfo DumpableObject dobj; const char *rolname; char oprkind; + Oid oprleft; + Oid oprright; Oid oprcode; } OprInfo; @@ -273,12 +275,14 @@ typedef struct _accessMethodInfo typedef struct _opclassInfo { DumpableObject dobj; + Oid opcmethod; const char *rolname; } OpclassInfo; typedef struct _opfamilyInfo { DumpableObject dobj; + Oid opfmethod; const char *rolname; } OpfamilyInfo; @@ -286,6 +290,7 @@ typedef struct _collInfo { DumpableObject dobj; const char *rolname; + int collencoding; } CollInfo; typedef struct _convInfo @@ -712,6 +717,7 @@ typedef struct _SubscriptionInfo bool subrunasowner; bool subfailover; bool subretaindeadtuples; + int submaxretention; char *subconninfo; char *subslotname; char *subsynccommit; @@ -760,6 +766,7 @@ extern TableInfo *findTableByOid(Oid oid); extern TypeInfo *findTypeByOid(Oid oid); extern FuncInfo *findFuncByOid(Oid oid); extern OprInfo *findOprByOid(Oid oid); +extern AccessMethodInfo *findAccessMethodByOid(Oid oid); extern CollInfo *findCollationByOid(Oid oid); extern NamespaceInfo *findNamespaceByOid(Oid oid); extern ExtensionInfo *findExtensionByOid(Oid oid); diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c index f99a0797ea7fb..2d02456664b59 100644 --- a/src/bin/pg_dump/pg_dump_sort.c +++ b/src/bin/pg_dump/pg_dump_sort.c @@ -162,6 +162,8 @@ static DumpId postDataBoundId; static int DOTypeNameCompare(const void *p1, const void *p2); +static int pgTypeNameCompare(Oid typid1, Oid typid2); +static int accessMethodNameCompare(Oid am1, Oid am2); static bool TopoSort(DumpableObject **objs, int numObjs, DumpableObject **ordering, @@ -228,12 +230,39 @@ DOTypeNameCompare(const void *p1, const void *p2) else if (obj2->namespace) return 1; - /* Sort by name */ + /* + * Sort by name. With a few exceptions, names here are single catalog + * columns. To get a fuller picture, grep pg_dump.c for "dobj.name = ". + * Names here don't match "Name:" in plain format output, which is a + * _tocEntry.tag. For example, DumpableObject.name of a constraint is + * pg_constraint.conname, but _tocEntry.tag of a constraint is relname and + * conname joined with a space. + */ cmpval = strcmp(obj1->name, obj2->name); if (cmpval != 0) return cmpval; - /* To have a stable sort order, break ties for some object types */ + /* + * Sort by type. This helps types that share a type priority without + * sharing a unique name constraint, e.g. opclass and opfamily. 
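
Such ties are routine rather than hypothetical: CREATE OPERATOR CLASS implicitly creates an operator family of the same name when no FAMILY clause is given, so a single statement yields a pg_opclass row and a pg_opfamily row sharing namespace and name, distinguishable only by the objType comparison below:

    CREATE OPERATOR CLASS uint_ops FOR TYPE int4 USING btree AS
        OPERATOR 1 <, OPERATOR 2 <=, OPERATOR 3 =,
        OPERATOR 4 >=, OPERATOR 5 >,
        FUNCTION 1 btint4cmp(int4, int4);
    -- pg_opclass and pg_opfamily now each contain an entry named "uint_ops"
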
+ */ + cmpval = obj1->objType - obj2->objType; + if (cmpval != 0) + return cmpval; + + /* + * To have a stable sort order, break ties for some object types. Most + * catalogs have a natural key, e.g. pg_proc_proname_args_nsp_index. Where + * the above "namespace" and "name" comparisons don't cover all natural + * key columns, compare the rest here. + * + * The natural key usually refers to other catalogs by surrogate keys. + * Hence, this translates each of those references to the natural key of + * the referenced catalog. That may descend through multiple levels of + * catalog references. For example, to sort by pg_proc.proargtypes, + * descend to each pg_type and then further to its pg_namespace, for an + * overall sort by (nspname, typname). + */ if (obj1->objType == DO_FUNC || obj1->objType == DO_AGG) { FuncInfo *fobj1 = *(FuncInfo *const *) p1; @@ -246,22 +275,10 @@ DOTypeNameCompare(const void *p1, const void *p2) return cmpval; for (i = 0; i < fobj1->nargs; i++) { - TypeInfo *argtype1 = findTypeByOid(fobj1->argtypes[i]); - TypeInfo *argtype2 = findTypeByOid(fobj2->argtypes[i]); - - if (argtype1 && argtype2) - { - if (argtype1->dobj.namespace && argtype2->dobj.namespace) - { - cmpval = strcmp(argtype1->dobj.namespace->dobj.name, - argtype2->dobj.namespace->dobj.name); - if (cmpval != 0) - return cmpval; - } - cmpval = strcmp(argtype1->dobj.name, argtype2->dobj.name); - if (cmpval != 0) - return cmpval; - } + cmpval = pgTypeNameCompare(fobj1->argtypes[i], + fobj2->argtypes[i]); + if (cmpval != 0) + return cmpval; } } else if (obj1->objType == DO_OPERATOR) @@ -273,6 +290,57 @@ DOTypeNameCompare(const void *p1, const void *p2) cmpval = (oobj2->oprkind - oobj1->oprkind); if (cmpval != 0) return cmpval; + /* Within an oprkind, sort by argument type names */ + cmpval = pgTypeNameCompare(oobj1->oprleft, oobj2->oprleft); + if (cmpval != 0) + return cmpval; + cmpval = pgTypeNameCompare(oobj1->oprright, oobj2->oprright); + if (cmpval != 0) + return cmpval; + } + else if (obj1->objType == DO_OPCLASS) + { + OpclassInfo *opcobj1 = *(OpclassInfo *const *) p1; + OpclassInfo *opcobj2 = *(OpclassInfo *const *) p2; + + /* Sort by access method name, per pg_opclass_am_name_nsp_index */ + cmpval = accessMethodNameCompare(opcobj1->opcmethod, + opcobj2->opcmethod); + if (cmpval != 0) + return cmpval; + } + else if (obj1->objType == DO_OPFAMILY) + { + OpfamilyInfo *opfobj1 = *(OpfamilyInfo *const *) p1; + OpfamilyInfo *opfobj2 = *(OpfamilyInfo *const *) p2; + + /* Sort by access method name, per pg_opfamily_am_name_nsp_index */ + cmpval = accessMethodNameCompare(opfobj1->opfmethod, + opfobj2->opfmethod); + if (cmpval != 0) + return cmpval; + } + else if (obj1->objType == DO_COLLATION) + { + CollInfo *cobj1 = *(CollInfo *const *) p1; + CollInfo *cobj2 = *(CollInfo *const *) p2; + + /* + * Sort by encoding, per pg_collation_name_enc_nsp_index. Technically, + * this is not necessary, because wherever this changes dump order, + * restoring the dump fails anyway. CREATE COLLATION can't create a + * tie for this to break, because it imposes restrictions to make + * (nspname, collname) uniquely identify a collation within a given + * DatabaseEncoding. While pg_import_system_collations() can create a + * tie, pg_dump+restore fails after + * pg_import_system_collations('my_schema') does so. However, there's + * little to gain by ignoring one natural key column on the basis of + * those limitations elsewhere, so respect the full natural key like + * we do for other object types. 
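
A worked instance of the descent described above, for two single-argument functions sharing proname and namespace:

    f(s1.t)  vs  f(s2.t)   -- pgTypeNameCompare() compares ("s1", "t") with ("s2", "t")
    f(a.t)   vs  f(a.u)    -- equal namespaces, so "t" vs "u" breaks the tie

Overloads therefore sort by the schema-qualified names of their argument types, independent of the order in which type OIDs happened to be assigned.
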
+ */ + cmpval = cobj1->collencoding - cobj2->collencoding; + if (cmpval != 0) + return cmpval; } else if (obj1->objType == DO_ATTRDEF) { @@ -317,11 +385,156 @@ DOTypeNameCompare(const void *p1, const void *p2) if (cmpval != 0) return cmpval; } + else if (obj1->objType == DO_CONSTRAINT) + { + ConstraintInfo *robj1 = *(ConstraintInfo *const *) p1; + ConstraintInfo *robj2 = *(ConstraintInfo *const *) p2; + + /* + * Sort domain constraints before table constraints, for consistency + * with our decision to sort CREATE DOMAIN before CREATE TABLE. + */ + if (robj1->condomain) + { + if (robj2->condomain) + { + /* Sort by domain name (domain namespace was considered) */ + cmpval = strcmp(robj1->condomain->dobj.name, + robj2->condomain->dobj.name); + if (cmpval != 0) + return cmpval; + } + else + return PRIO_TYPE - PRIO_TABLE; + } + else if (robj2->condomain) + return PRIO_TABLE - PRIO_TYPE; + else + { + /* Sort by table name (table namespace was considered already) */ + cmpval = strcmp(robj1->contable->dobj.name, + robj2->contable->dobj.name); + if (cmpval != 0) + return cmpval; + } + } + else if (obj1->objType == DO_DEFAULT_ACL) + { + DefaultACLInfo *daclobj1 = *(DefaultACLInfo *const *) p1; + DefaultACLInfo *daclobj2 = *(DefaultACLInfo *const *) p2; + + /* + * Sort by defaclrole, per pg_default_acl_role_nsp_obj_index. The + * (namespace, name) match (defaclnamespace, defaclobjtype). + */ + cmpval = strcmp(daclobj1->defaclrole, daclobj2->defaclrole); + if (cmpval != 0) + return cmpval; + } + else if (obj1->objType == DO_PUBLICATION_REL) + { + PublicationRelInfo *probj1 = *(PublicationRelInfo *const *) p1; + PublicationRelInfo *probj2 = *(PublicationRelInfo *const *) p2; - /* Usually shouldn't get here, but if we do, sort by OID */ + /* Sort by publication name, since (namespace, name) match the rel */ + cmpval = strcmp(probj1->publication->dobj.name, + probj2->publication->dobj.name); + if (cmpval != 0) + return cmpval; + } + else if (obj1->objType == DO_PUBLICATION_TABLE_IN_SCHEMA) + { + PublicationSchemaInfo *psobj1 = *(PublicationSchemaInfo *const *) p1; + PublicationSchemaInfo *psobj2 = *(PublicationSchemaInfo *const *) p2; + + /* Sort by publication name, since ->name is just nspname */ + cmpval = strcmp(psobj1->publication->dobj.name, + psobj2->publication->dobj.name); + if (cmpval != 0) + return cmpval; + } + + /* + * Shouldn't get here except after catalog corruption, but if we do, sort + * by OID. This may make logically-identical databases differ in the + * order of objects in dump output. Users will get spurious schema diffs. + * Expect flaky failures of 002_pg_upgrade.pl test 'dump outputs from + * original and restored regression databases match' if the regression + * database contains objects allowing that test to reach here. That's a + * consequence of the test using "pg_restore -j", which doesn't fully + * constrain OID assignment order. + */ + Assert(false); return oidcmp(obj1->catId.oid, obj2->catId.oid); } +/* Compare two OID-identified pg_type values by nspname, then by typname. */ +static int +pgTypeNameCompare(Oid typid1, Oid typid2) +{ + TypeInfo *typobj1; + TypeInfo *typobj2; + int cmpval; + + if (typid1 == typid2) + return 0; + + typobj1 = findTypeByOid(typid1); + typobj2 = findTypeByOid(typid2); + + if (!typobj1 || !typobj2) + { + /* + * getTypes() didn't find some OID. Assume catalog corruption, e.g. + * an oprright value without the corresponding OID in a pg_type row. + * Report as "equal", so the caller uses the next available basis for + * comparison, e.g. 
the next function argument. + * + * Unary operators have InvalidOid in oprleft (if oprkind='r') or in + * oprright (if oprkind='l'). Caller already sorted by oprkind, + * calling us only for like-kind operators. Hence, "typid1 == typid2" + * took care of InvalidOid. (v14 removed postfix operator support. + * Hence, when dumping from v14+, only oprleft can be InvalidOid.) + */ + Assert(false); + return 0; + } + + if (!typobj1->dobj.namespace || !typobj2->dobj.namespace) + Assert(false); /* catalog corruption */ + else + { + cmpval = strcmp(typobj1->dobj.namespace->dobj.name, + typobj2->dobj.namespace->dobj.name); + if (cmpval != 0) + return cmpval; + } + return strcmp(typobj1->dobj.name, typobj2->dobj.name); +} + +/* Compare two OID-identified pg_am values by amname. */ +static int +accessMethodNameCompare(Oid am1, Oid am2) +{ + AccessMethodInfo *amobj1; + AccessMethodInfo *amobj2; + + if (am1 == am2) + return 0; + + amobj1 = findAccessMethodByOid(am1); + amobj2 = findAccessMethodByOid(am2); + + if (!amobj1 || !amobj2) + { + /* catalog corruption: handle like pgTypeNameCompare() does */ + Assert(false); + return 0; + } + + return strcmp(amobj1->dobj.name, amobj2->dobj.name); +} + /* * Sort the given objects into a safe dump order using dependency diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c index 100317b1aa949..bb451c1bae144 100644 --- a/src/bin/pg_dump/pg_dumpall.c +++ b/src/bin/pg_dump/pg_dumpall.c @@ -65,10 +65,9 @@ static void dropTablespaces(PGconn *conn); static void dumpTablespaces(PGconn *conn); static void dropDBs(PGconn *conn); static void dumpUserConfig(PGconn *conn, const char *username); -static void dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat); +static void dumpDatabases(PGconn *conn); static void dumpTimestamp(const char *msg); -static int runPgDump(const char *dbname, const char *create_opts, - char *dbfile, ArchiveFormat archDumpFormat); +static int runPgDump(const char *dbname, const char *create_opts); static void buildShSecLabels(PGconn *conn, const char *catalog_name, Oid objectId, const char *objtype, const char *objname, @@ -77,7 +76,6 @@ static void executeCommand(PGconn *conn, const char *query); static void expand_dbname_patterns(PGconn *conn, SimpleStringList *patterns, SimpleStringList *names); static void read_dumpall_filters(const char *filename, SimpleStringList *pattern); -static ArchiveFormat parseDumpFormat(const char *format); static char pg_dump_bin[MAXPGPATH]; static PQExpBuffer pgdumpopts; @@ -107,8 +105,6 @@ static int no_subscriptions = 0; static int no_toast_compression = 0; static int no_unlogged_table_data = 0; static int no_role_passwords = 0; -static int with_data = 0; -static int with_schema = 0; static int with_statistics = 0; static int server_version; static int load_via_partition_root = 0; @@ -126,6 +122,8 @@ static char *filename = NULL; static SimpleStringList database_exclude_patterns = {NULL, NULL}; static SimpleStringList database_exclude_names = {NULL, NULL}; +static char *restrict_key; + int main(int argc, char *argv[]) { @@ -150,7 +148,6 @@ main(int argc, char *argv[]) {"password", no_argument, NULL, 'W'}, {"no-privileges", no_argument, NULL, 'x'}, {"no-acl", no_argument, NULL, 'x'}, - {"format", required_argument, NULL, 'F'}, /* * the following options don't have an equivalent short option letter @@ -183,14 +180,13 @@ main(int argc, char *argv[]) {"no-sync", no_argument, NULL, 4}, {"no-toast-compression", no_argument, &no_toast_compression, 1}, {"no-unlogged-table-data", no_argument, 
&no_unlogged_table_data, 1}, - {"with-data", no_argument, &with_data, 1}, - {"with-schema", no_argument, &with_schema, 1}, - {"with-statistics", no_argument, &with_statistics, 1}, {"on-conflict-do-nothing", no_argument, &on_conflict_do_nothing, 1}, {"rows-per-insert", required_argument, NULL, 7}, + {"statistics", no_argument, &with_statistics, 1}, {"statistics-only", no_argument, &statistics_only, 1}, {"filter", required_argument, NULL, 8}, {"sequence-data", no_argument, &sequence_data, 1}, + {"restrict-key", required_argument, NULL, 9}, {NULL, 0, NULL, 0} }; @@ -201,8 +197,6 @@ main(int argc, char *argv[]) char *pgdb = NULL; char *use_role = NULL; const char *dumpencoding = NULL; - ArchiveFormat archDumpFormat = archNull; - const char *formatName = "p"; trivalue prompt_password = TRI_DEFAULT; bool data_only = false; bool globals_only = false; @@ -252,7 +246,7 @@ main(int argc, char *argv[]) pgdumpopts = createPQExpBuffer(); - while ((c = getopt_long(argc, argv, "acd:E:f:F:gh:l:Op:rsS:tU:vwWx", long_options, &optindex)) != -1) + while ((c = getopt_long(argc, argv, "acd:E:f:gh:l:Op:rsS:tU:vwWx", long_options, &optindex)) != -1) { switch (c) { @@ -280,9 +274,7 @@ main(int argc, char *argv[]) appendPQExpBufferStr(pgdumpopts, " -f "); appendShellString(pgdumpopts, filename); break; - case 'F': - formatName = pg_strdup(optarg); - break; + case 'g': globals_only = true; break; @@ -382,6 +374,12 @@ main(int argc, char *argv[]) read_dumpall_filters(optarg, &database_exclude_patterns); break; + case 9: + restrict_key = pg_strdup(optarg); + appendPQExpBufferStr(pgdumpopts, " --restrict-key "); + appendShellString(pgdumpopts, optarg); + break; + default: /* getopt_long already emitted a complaint */ pg_log_error_hint("Try \"%s --help\" for more information.", progname); @@ -431,21 +429,6 @@ main(int argc, char *argv[]) exit_nicely(1); } - /* Get format for dump. */ - archDumpFormat = parseDumpFormat(formatName); - - /* - * If a non-plain format is specified, a file name is also required as the - * path to the main directory. - */ - if (archDumpFormat != archNull && - (!filename || strcmp(filename, "") == 0)) - { - pg_log_error("option -F/--format=d|c|t requires option -f/--file"); - pg_log_error_hint("Try \"%s --help\" for more information.", progname); - exit_nicely(1); - } - /* * If password values are not required in the dump, switch to using * pg_roles which is equally useful, just more likely to have unrestricted @@ -497,12 +480,8 @@ main(int argc, char *argv[]) appendPQExpBufferStr(pgdumpopts, " --no-toast-compression"); if (no_unlogged_table_data) appendPQExpBufferStr(pgdumpopts, " --no-unlogged-table-data"); - if (with_data) - appendPQExpBufferStr(pgdumpopts, " --with-data"); - if (with_schema) - appendPQExpBufferStr(pgdumpopts, " --with-schema"); if (with_statistics) - appendPQExpBufferStr(pgdumpopts, " --with-statistics"); + appendPQExpBufferStr(pgdumpopts, " --statistics"); if (on_conflict_do_nothing) appendPQExpBufferStr(pgdumpopts, " --on-conflict-do-nothing"); if (statistics_only) @@ -511,31 +490,14 @@ main(int argc, char *argv[]) appendPQExpBufferStr(pgdumpopts, " --sequence-data"); /* - * Open the output file if required, otherwise use stdout. If required, - * then create new directory and global.dat file. + * If you don't provide a restrict key, one will be appointed for you. */ - if (archDumpFormat != archNull) - { - char global_path[MAXPGPATH]; - - /* Create new directory or accept the empty existing directory. 
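
Because the new case 9 above both keeps the key for pg_dumpall's own use and appends it (shell-quoted) to pgdumpopts, every child pg_dump invocation receives the same key, so one value brackets the globals and each per-database section alike; for example (key illustrative):

    $ pg_dumpall --restrict-key=ClusterKey42 -f cluster.sql
    # cluster.sql now pairs \restrict ClusterKey42 with \unrestrict ClusterKey42
    # around the globals, and again inside each database dump emitted by pg_dump
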
*/ - create_or_open_dir(filename); - - snprintf(global_path, MAXPGPATH, "%s/global.dat", filename); - - OPF = fopen(global_path, PG_BINARY_W); - if (!OPF) - pg_fatal("could not open file \"%s\": %m", global_path); - } - else if (filename) - { - OPF = fopen(filename, PG_BINARY_W); - if (!OPF) - pg_fatal("could not open output file \"%s\": %m", - filename); - } - else - OPF = stdout; + if (!restrict_key) + restrict_key = generate_restrict_key(); + if (!restrict_key) + pg_fatal("could not generate restrict key"); + if (!valid_restrict_key(restrict_key)) + pg_fatal("invalid restrict key"); /* * If there was a database specified on the command line, use that, @@ -576,6 +538,19 @@ main(int argc, char *argv[]) expand_dbname_patterns(conn, &database_exclude_patterns, &database_exclude_names); + /* + * Open the output file if required, otherwise use stdout + */ + if (filename) + { + OPF = fopen(filename, PG_BINARY_W); + if (!OPF) + pg_fatal("could not open output file \"%s\": %m", + filename); + } + else + OPF = stdout; + /* * Set the client encoding if requested. */ @@ -614,6 +589,16 @@ main(int argc, char *argv[]) if (verbose) dumpTimestamp("Started on"); + /* + * Enter restricted mode to block any unexpected psql meta-commands. A + * malicious source might try to inject a variety of things via bogus + * responses to queries. While we cannot prevent such sources from + * affecting the destination at restore time, we can block psql + * meta-commands so that the client machine that runs psql with the dump + * output remains unaffected. + */ + fprintf(OPF, "\\restrict %s\n\n", restrict_key); + /* * We used to emit \connect postgres here, but that served no purpose * other than to break things for installations without a postgres @@ -674,8 +659,14 @@ main(int argc, char *argv[]) dumpTablespaces(conn); } + /* + * Exit restricted mode just before dumping the databases. pg_dump will + * handle entering restricted mode again as appropriate. 
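
generate_restrict_key() and valid_restrict_key() are defined outside this excerpt (the pg_restore.c hunk below gains #include "dumputils.h" for them). A plausible sketch of the pair, assuming keys are random alphanumeric strings so that a key can never terminate, or be confused with, another psql meta-command argument; the shared implementations may differ in length and character-selection details:

    #include "postgres_fe.h"
    #include "common/fe_memutils.h"     /* pg_malloc() */

    static const char restrict_chars[] =
        "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";

    char *
    generate_restrict_key(void)
    {
        uint8       buf[16];
        char       *key = pg_malloc(sizeof(buf) + 1);

        if (!pg_strong_random(buf, sizeof(buf)))
            return NULL;            /* callers pg_fatal() on NULL */

        /* map random bytes onto the alphabet; modulo bias is harmless here */
        for (int i = 0; i < (int) sizeof(buf); i++)
            key[i] = restrict_chars[buf[i] % (sizeof(restrict_chars) - 1)];
        key[sizeof(buf)] = '\0';
        return key;
    }

    bool
    valid_restrict_key(const char *restrict_key)
    {
        /* nonempty, and made up exclusively of the characters above */
        return restrict_key[0] != '\0' &&
            strspn(restrict_key, restrict_chars) == strlen(restrict_key);
    }
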
+ */ + fprintf(OPF, "\\unrestrict %s\n\n", restrict_key); + if (!globals_only && !roles_only && !tablespaces_only) - dumpDatabases(conn, archDumpFormat); + dumpDatabases(conn); PQfinish(conn); @@ -688,7 +679,7 @@ main(int argc, char *argv[]) fclose(OPF); /* sync the resulting file, errors are not fatal */ - if (dosync && (archDumpFormat == archNull)) + if (dosync) (void) fsync_fname(filename, false); } @@ -699,14 +690,12 @@ main(int argc, char *argv[]) static void help(void) { - printf(_("%s exports a PostgreSQL database cluster as an SQL script or to other formats.\n\n"), progname); + printf(_("%s exports a PostgreSQL database cluster as an SQL script.\n\n"), progname); printf(_("Usage:\n")); printf(_(" %s [OPTION]...\n"), progname); printf(_("\nGeneral options:\n")); printf(_(" -f, --file=FILENAME output file name\n")); - printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n" - " plain text (default))\n")); printf(_(" -v, --verbose verbose mode\n")); printf(_(" -V, --version output version information, then exit\n")); printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n")); @@ -748,15 +737,14 @@ help(void) printf(_(" --no-unlogged-table-data do not dump unlogged table data\n")); printf(_(" --on-conflict-do-nothing add ON CONFLICT DO NOTHING to INSERT commands\n")); printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n")); + printf(_(" --restrict-key=RESTRICT_KEY use provided string as psql \\restrict key\n")); printf(_(" --rows-per-insert=NROWS number of rows per INSERT; implies --inserts\n")); printf(_(" --sequence-data include sequence data in dump\n")); + printf(_(" --statistics dump the statistics\n")); printf(_(" --statistics-only dump only the statistics, not schema or data\n")); printf(_(" --use-set-session-authorization\n" " use SET SESSION AUTHORIZATION commands instead of\n" " ALTER OWNER commands to set ownership\n")); - printf(_(" --with-data dump the data\n")); - printf(_(" --with-schema dump the schema\n")); - printf(_(" --with-statistics dump the statistics\n")); printf(_("\nConnection options:\n")); printf(_(" -d, --dbname=CONNSTR connect using connection string\n")); @@ -1013,6 +1001,9 @@ dumpRoles(PGconn *conn) * We do it this way because config settings for roles could mention the * names of other roles. */ + if (PQntuples(res) > 0) + fprintf(OPF, "\n--\n-- User Configurations\n--\n"); + for (i = 0; i < PQntuples(res); i++) dumpUserConfig(conn, PQgetvalue(res, i, i_rolname)); @@ -1526,7 +1517,6 @@ dumpUserConfig(PGconn *conn, const char *username) { PQExpBuffer buf = createPQExpBuffer(); PGresult *res; - static bool header_done = false; printfPQExpBuffer(buf, "SELECT unnest(setconfig) FROM pg_db_role_setting " "WHERE setdatabase = 0 AND setrole = " @@ -1539,11 +1529,11 @@ dumpUserConfig(PGconn *conn, const char *username) if (PQntuples(res) > 0) { - if (!header_done) - fprintf(OPF, "\n--\n-- User Configurations\n--\n"); - header_done = true; + char *sanitized; - fprintf(OPF, "\n--\n-- User Config \"%s\"\n--\n\n", username); + sanitized = sanitize_line(username, true); + fprintf(OPF, "\n--\n-- User Config \"%s\"\n--\n\n", sanitized); + free(sanitized); } for (int i = 0; i < PQntuples(res); i++) @@ -1618,13 +1608,10 @@ expand_dbname_patterns(PGconn *conn, * Dump contents of databases. 
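
The sanitize_line() call above has the same contract as the copy removed from pg_backup_archiver.c earlier in this patch (newlines and carriage returns become spaces; NULL becomes "-" under want_hyphen), so a hostile role name can no longer start a fresh physical line in the script. For example, a role named "alice\n\\! id" renders as the single-line header

    --
    -- User Config "alice \! id"
    --

keeping the backslash sequence inside an SQL comment where psql will never execute it.
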
*/ static void -dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) +dumpDatabases(PGconn *conn) { PGresult *res; int i; - char db_subdir[MAXPGPATH]; - char dbfilepath[MAXPGPATH]; - FILE *map_file = NULL; /* * Skip databases marked not datallowconn, since we'd be unable to connect @@ -1638,42 +1625,19 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) * doesn't have some failure mode with --clean. */ res = executeQuery(conn, - "SELECT datname, oid " + "SELECT datname " "FROM pg_database d " "WHERE datallowconn AND datconnlimit != -2 " "ORDER BY (datname <> 'template1'), datname"); - if (archDumpFormat == archNull && PQntuples(res) > 0) + if (PQntuples(res) > 0) fprintf(OPF, "--\n-- Databases\n--\n\n"); - /* - * If directory/tar/custom format is specified, create a subdirectory - * under the main directory and each database dump file or subdirectory - * will be created in that subdirectory by pg_dump. - */ - if (archDumpFormat != archNull) - { - char map_file_path[MAXPGPATH]; - - snprintf(db_subdir, MAXPGPATH, "%s/databases", filename); - - /* Create a subdirectory with 'databases' name under main directory. */ - if (mkdir(db_subdir, pg_dir_create_mode) != 0) - pg_fatal("could not create directory \"%s\": %m", db_subdir); - - snprintf(map_file_path, MAXPGPATH, "%s/map.dat", filename); - - /* Create a map file (to store dboid and dbname) */ - map_file = fopen(map_file_path, PG_BINARY_W); - if (!map_file) - pg_fatal("could not open file \"%s\": %m", map_file_path); - } - for (i = 0; i < PQntuples(res); i++) { char *dbname = PQgetvalue(res, i, 0); - char *oid = PQgetvalue(res, i, 1); - const char *create_opts = ""; + char *sanitized; + const char *create_opts; int ret; /* Skip template0, even if it's not marked !datallowconn. */ @@ -1687,27 +1651,11 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) continue; } - /* - * If this is not a plain format dump, then append dboid and dbname to - * the map.dat file. - */ - if (archDumpFormat != archNull) - { - if (archDumpFormat == archCustom) - snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\".dmp", db_subdir, oid); - else if (archDumpFormat == archTar) - snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\".tar", db_subdir, oid); - else - snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\"", db_subdir, oid); - - /* Put one line entry for dboid and dbname in map file. 
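
With the map-file branch gone, the surviving code emits one section per database into the single plain-text stream, template1 first per the ORDER BY above (names illustrative):

    --
    -- Databases
    --

    --
    -- Database "template1" dump
    --

    \connect template1
    ...

    --
    -- Database "mydb" dump
    --
    ...
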
*/ - fprintf(map_file, "%s %s\n", oid, dbname); - } - pg_log_info("dumping database \"%s\"", dbname); - if (archDumpFormat == archNull) - fprintf(OPF, "--\n-- Database \"%s\" dump\n--\n\n", dbname); + sanitized = sanitize_line(dbname, true); + fprintf(OPF, "--\n-- Database \"%s\" dump\n--\n\n", sanitized); + free(sanitized); /* * We assume that "template1" and "postgres" already exist in the @@ -1721,9 +1669,12 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) { if (output_clean) create_opts = "--clean --create"; - /* Since pg_dump won't emit a \connect command, we must */ - else if (archDumpFormat == archNull) + else + { + create_opts = ""; + /* Since pg_dump won't emit a \connect command, we must */ fprintf(OPF, "\\connect %s\n\n", dbname); + } } else create_opts = "--create"; @@ -1731,30 +1682,19 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) if (filename) fclose(OPF); - ret = runPgDump(dbname, create_opts, dbfilepath, archDumpFormat); + ret = runPgDump(dbname, create_opts); if (ret != 0) pg_fatal("pg_dump failed on database \"%s\", exiting", dbname); if (filename) { - char global_path[MAXPGPATH]; - - if (archDumpFormat != archNull) - snprintf(global_path, MAXPGPATH, "%s/global.dat", filename); - else - snprintf(global_path, MAXPGPATH, "%s", filename); - - OPF = fopen(global_path, PG_BINARY_A); + OPF = fopen(filename, PG_BINARY_A); if (!OPF) pg_fatal("could not re-open the output file \"%s\": %m", - global_path); + filename); } } - /* Close map file */ - if (archDumpFormat != archNull) - fclose(map_file); - PQclear(res); } @@ -1764,8 +1704,7 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) * Run pg_dump on dbname, with specified options. */ static int -runPgDump(const char *dbname, const char *create_opts, char *dbfile, - ArchiveFormat archDumpFormat) +runPgDump(const char *dbname, const char *create_opts) { PQExpBufferData connstrbuf; PQExpBufferData cmd; @@ -1774,36 +1713,17 @@ runPgDump(const char *dbname, const char *create_opts, char *dbfile, initPQExpBuffer(&connstrbuf); initPQExpBuffer(&cmd); + printfPQExpBuffer(&cmd, "\"%s\" %s %s", pg_dump_bin, + pgdumpopts->data, create_opts); + /* - * If this is not a plain format dump, then append file name and dump - * format to the pg_dump command to get archive dump. + * If we have a filename, use the undocumented plain-append pg_dump + * format. */ - if (archDumpFormat != archNull) - { - printfPQExpBuffer(&cmd, "\"%s\" -f %s %s", pg_dump_bin, - dbfile, create_opts); - - if (archDumpFormat == archDirectory) - appendPQExpBufferStr(&cmd, " --format=directory "); - else if (archDumpFormat == archCustom) - appendPQExpBufferStr(&cmd, " --format=custom "); - else if (archDumpFormat == archTar) - appendPQExpBufferStr(&cmd, " --format=tar "); - } + if (filename) + appendPQExpBufferStr(&cmd, " -Fa "); else - { - printfPQExpBuffer(&cmd, "\"%s\" %s %s", pg_dump_bin, - pgdumpopts->data, create_opts); - - /* - * If we have a filename, use the undocumented plain-append pg_dump - * format. - */ - if (filename) - appendPQExpBufferStr(&cmd, " -Fa "); - else - appendPQExpBufferStr(&cmd, " -Fp "); - } + appendPQExpBufferStr(&cmd, " -Fp "); /* * Append the database name to the already-constructed stem of connection @@ -1948,36 +1868,3 @@ read_dumpall_filters(const char *filename, SimpleStringList *pattern) filter_free(&fstate); } - -/* - * parseDumpFormat - * - * This will validate dump formats. 
- */ -static ArchiveFormat -parseDumpFormat(const char *format) -{ - ArchiveFormat archDumpFormat; - - if (pg_strcasecmp(format, "c") == 0) - archDumpFormat = archCustom; - else if (pg_strcasecmp(format, "custom") == 0) - archDumpFormat = archCustom; - else if (pg_strcasecmp(format, "d") == 0) - archDumpFormat = archDirectory; - else if (pg_strcasecmp(format, "directory") == 0) - archDumpFormat = archDirectory; - else if (pg_strcasecmp(format, "p") == 0) - archDumpFormat = archNull; - else if (pg_strcasecmp(format, "plain") == 0) - archDumpFormat = archNull; - else if (pg_strcasecmp(format, "t") == 0) - archDumpFormat = archTar; - else if (pg_strcasecmp(format, "tar") == 0) - archDumpFormat = archTar; - else - pg_fatal("unrecognized output format \"%s\"; please specify \"c\", \"d\", \"p\", or \"t\"", - format); - - return archDumpFormat; -} diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c index 6ef789cb06d63..c9776306c5c42 100644 --- a/src/bin/pg_dump/pg_restore.c +++ b/src/bin/pg_dump/pg_restore.c @@ -2,7 +2,7 @@ * * pg_restore.c * pg_restore is an utility extracting postgres database definitions - * from a backup archive created by pg_dump/pg_dumpall using the archiver + * from a backup archive created by pg_dump using the archiver * interface. * * pg_restore will read the backup archive and @@ -41,15 +41,12 @@ #include "postgres_fe.h" #include -#include #ifdef HAVE_TERMIOS_H #include #endif -#include "common/string.h" -#include "connectdb.h" +#include "dumputils.h" #include "fe_utils/option_utils.h" -#include "fe_utils/string_utils.h" #include "filter.h" #include "getopt_long.h" #include "parallel.h" @@ -57,43 +54,18 @@ static void usage(const char *progname); static void read_restore_filters(const char *filename, RestoreOptions *opts); -static bool file_exists_in_directory(const char *dir, const char *filename); -static int restore_one_database(const char *inputFileSpec, RestoreOptions *opts, - int numWorkers, bool append_data, int num); -static int read_one_statement(StringInfo inBuf, FILE *pfile); -static int restore_all_databases(PGconn *conn, const char *dumpdirpath, - SimpleStringList db_exclude_patterns, RestoreOptions *opts, int numWorkers); -static int process_global_sql_commands(PGconn *conn, const char *dumpdirpath, - const char *outfile); -static void copy_or_print_global_file(const char *outfile, FILE *pfile); -static int get_dbnames_list_to_restore(PGconn *conn, - SimplePtrList *dbname_oid_list, - SimpleStringList db_exclude_patterns); -static int get_dbname_oid_list_from_mfile(const char *dumpdirpath, - SimplePtrList *dbname_oid_list); - -/* - * Stores a database OID and the corresponding name. 
- */ -typedef struct DbOidName -{ - Oid oid; - char str[FLEXIBLE_ARRAY_MEMBER]; /* null-terminated string here */ -} DbOidName; - int main(int argc, char **argv) { RestoreOptions *opts; int c; + int exit_code; int numWorkers = 1; + Archive *AH; char *inputFileSpec; bool data_only = false; bool schema_only = false; - int n_errors = 0; - bool globals_only = false; - SimpleStringList db_exclude_patterns = {NULL, NULL}; static int disable_triggers = 0; static int enable_row_security = 0; static int if_exists = 0; @@ -111,15 +83,12 @@ main(int argc, char **argv) static int no_subscriptions = 0; static int strict_names = 0; static int statistics_only = 0; - static int with_data = 0; - static int with_schema = 0; static int with_statistics = 0; struct option cmdopts[] = { {"clean", 0, NULL, 'c'}, {"create", 0, NULL, 'C'}, {"data-only", 0, NULL, 'a'}, - {"globals-only", 0, NULL, 'g'}, {"dbname", 1, NULL, 'd'}, {"exit-on-error", 0, NULL, 'e'}, {"exclude-schema", 1, NULL, 'N'}, @@ -169,12 +138,10 @@ main(int argc, char **argv) {"no-security-labels", no_argument, &no_security_labels, 1}, {"no-subscriptions", no_argument, &no_subscriptions, 1}, {"no-statistics", no_argument, &no_statistics, 1}, - {"with-data", no_argument, &with_data, 1}, - {"with-schema", no_argument, &with_schema, 1}, - {"with-statistics", no_argument, &with_statistics, 1}, + {"statistics", no_argument, &with_statistics, 1}, {"statistics-only", no_argument, &statistics_only, 1}, {"filter", required_argument, NULL, 4}, - {"exclude-database", required_argument, NULL, 6}, + {"restrict-key", required_argument, NULL, 6}, {NULL, 0, NULL, 0} }; @@ -203,7 +170,7 @@ main(int argc, char **argv) } } - while ((c = getopt_long(argc, argv, "acCd:ef:F:gh:I:j:lL:n:N:Op:P:RsS:t:T:U:vwWx1", + while ((c = getopt_long(argc, argv, "acCd:ef:F:h:I:j:lL:n:N:Op:P:RsS:t:T:U:vwWx1", cmdopts, NULL)) != -1) { switch (c) @@ -230,14 +197,11 @@ main(int argc, char **argv) if (strlen(optarg) != 0) opts->formatName = pg_strdup(optarg); break; - case 'g': - /* restore only global.dat file from directory */ - globals_only = true; - break; case 'h': if (strlen(optarg) != 0) opts->cparams.pghost = pg_strdup(optarg); break; + case 'j': /* number of restore jobs */ if (!option_parse_int(optarg, "-j/--jobs", 1, PG_MAX_JOBS, @@ -352,8 +316,9 @@ main(int argc, char **argv) exit(1); opts->exit_on_error = true; break; - case 6: /* database patterns to skip */ - simple_string_list_append(&db_exclude_patterns, optarg); + + case 6: + opts->restrict_key = pg_strdup(optarg); break; default: @@ -382,13 +347,6 @@ main(int argc, char **argv) if (!opts->cparams.dbname && !opts->filename && !opts->tocSummary) pg_fatal("one of -d/--dbname and -f/--file must be specified"); - if (db_exclude_patterns.head != NULL && globals_only) - { - pg_log_error("option --exclude-database cannot be used together with -g/--globals-only"); - pg_log_error_hint("Try \"%s --help\" for more information.", progname); - exit_nicely(1); - } - /* Should get at most one of -d and -f, else user is confused */ if (opts->cparams.dbname) { @@ -398,8 +356,24 @@ main(int argc, char **argv) pg_log_error_hint("Try \"%s --help\" for more information.", progname); exit_nicely(1); } + + if (opts->restrict_key) + pg_fatal("options -d/--dbname and --restrict-key cannot be used together"); + opts->useDB = 1; } + else + { + /* + * If you don't provide a restrict key, one will be appointed for you. 
+ */ + if (!opts->restrict_key) + opts->restrict_key = generate_restrict_key(); + if (!opts->restrict_key) + pg_fatal("could not generate restrict key"); + if (!valid_restrict_key(opts->restrict_key)) + pg_fatal("invalid restrict key"); + } /* reject conflicting "-only" options */ if (data_only && schema_only) @@ -417,13 +391,17 @@ main(int argc, char **argv) if (statistics_only && no_statistics) pg_fatal("options --statistics-only and --no-statistics cannot be used together"); - /* reject conflicting "with-" and "no-" options */ - if (with_data && no_data) - pg_fatal("options --with-data and --no-data cannot be used together"); - if (with_schema && no_schema) - pg_fatal("options --with-schema and --no-schema cannot be used together"); + /* reject conflicting "no-" options */ if (with_statistics && no_statistics) - pg_fatal("options --with-statistics and --no-statistics cannot be used together"); + pg_fatal("options --statistics and --no-statistics cannot be used together"); + + /* reject conflicting "only-" options */ + if (data_only && with_statistics) + pg_fatal("options %s and %s cannot be used together", + "-a/--data-only", "--statistics"); + if (schema_only && with_statistics) + pg_fatal("options %s and %s cannot be used together", + "-s/--schema-only", "--statistics"); if (data_only && opts->dropSchema) pg_fatal("options -c/--clean and -a/--data-only cannot be used together"); @@ -443,16 +421,14 @@ main(int argc, char **argv) pg_fatal("cannot specify both --single-transaction and multiple jobs"); /* - * Set derivative flags. An "-only" option may be overridden by an - * explicit "with-" option; e.g. "--schema-only --with-statistics" will - * include schema and statistics. Other ambiguous or nonsensical - * combinations, e.g. "--schema-only --no-schema", will have already - * caused an error in one of the checks above. + * Set derivative flags. Ambiguous or nonsensical combinations, e.g. + * "--schema-only --no-schema", will have already caused an error in one + * of the checks above. */ opts->dumpData = ((opts->dumpData && !schema_only && !statistics_only) || - (data_only || with_data)) && !no_data; + data_only) && !no_data; opts->dumpSchema = ((opts->dumpSchema && !data_only && !statistics_only) || - (schema_only || with_schema)) && !no_schema; + schema_only) && !no_schema; opts->dumpStatistics = ((opts->dumpStatistics && !schema_only && !data_only) || (statistics_only || with_statistics)) && !no_statistics; @@ -496,114 +472,6 @@ main(int argc, char **argv) opts->formatName); } - /* - * If toc.dat file is not present in the current path, then check for - * global.dat. If global.dat file is present, then restore all the - * databases from map.dat (if it exists), but skip restoring those - * matching --exclude-database patterns. - */ - if (inputFileSpec != NULL && !file_exists_in_directory(inputFileSpec, "toc.dat") && - file_exists_in_directory(inputFileSpec, "global.dat")) - { - PGconn *conn = NULL; /* Connection to restore global sql - * commands. */ - - /* - * Can only use --list or --use-list options with a single database - * dump. - */ - if (opts->tocSummary) - pg_fatal("option -l/--list cannot be used when restoring an archive created by pg_dumpall"); - else if (opts->tocFile) - pg_fatal("option -L/--use-list cannot be used when restoring an archive created by pg_dumpall"); - - /* - * To restore from a pg_dumpall archive, -C (create database) option - * must be specified unless we are only restoring globals. 
- */ - if (!globals_only && opts->createDB != 1) - { - pg_log_error("option -C/--create must be specified when restoring an archive created by pg_dumpall"); - pg_log_error_hint("Try \"%s --help\" for more information.", progname); - pg_log_error_hint("Individual databases can be restored using their specific archives."); - exit_nicely(1); - } - - /* - * Connect to the database to execute global sql commands from - * global.dat file. - */ - if (opts->cparams.dbname) - { - conn = ConnectDatabase(opts->cparams.dbname, NULL, opts->cparams.pghost, - opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT, - false, progname, NULL, NULL, NULL, NULL); - - - if (!conn) - pg_fatal("could not connect to database \"%s\"", opts->cparams.dbname); - } - - /* If globals-only, then return from here. */ - if (globals_only) - { - /* - * Open global.dat file and execute/append all the global sql - * commands. - */ - n_errors = process_global_sql_commands(conn, inputFileSpec, - opts->filename); - - if (conn) - PQfinish(conn); - - pg_log_info("database restoring skipped because option -g/--globals-only was specified"); - } - else - { - /* Now restore all the databases from map.dat */ - n_errors = restore_all_databases(conn, inputFileSpec, db_exclude_patterns, - opts, numWorkers); - } - - /* Free db pattern list. */ - simple_string_list_destroy(&db_exclude_patterns); - } - else /* process if global.dat file does not exist. */ - { - if (db_exclude_patterns.head != NULL) - pg_fatal("option --exclude-database can be used only when restoring an archive created by pg_dumpall"); - - if (globals_only) - pg_fatal("option -g/--globals-only can be used only when restoring an archive created by pg_dumpall"); - - n_errors = restore_one_database(inputFileSpec, opts, numWorkers, false, 0); - } - - /* Done, print a summary of ignored errors during restore. */ - if (n_errors) - { - pg_log_warning("errors ignored on restore: %d", n_errors); - return 1; - } - - return 0; -} - -/* - * restore_one_database - * - * This will restore one database using toc.dat file. - * - * returns the number of errors while doing restore. - */ -static int -restore_one_database(const char *inputFileSpec, RestoreOptions *opts, - int numWorkers, bool append_data, int num) -{ - Archive *AH; - int n_errors; - AH = OpenArchive(inputFileSpec, opts->format); SetArchiveOptions(AH, NULL, opts); @@ -611,15 +479,9 @@ restore_one_database(const char *inputFileSpec, RestoreOptions *opts, /* * We don't have a connection yet but that doesn't matter. The connection * is initialized to NULL and if we terminate through exit_nicely() while - * it's still NULL, the cleanup function will just be a no-op. If we are - * restoring multiple databases, then only update AX handle for cleanup as - * the previous entry was already in the array and we had closed previous - * connection, so we can use the same array slot. + * it's still NULL, the cleanup function will just be a no-op. */ - if (!append_data || num == 0) - on_exit_close_archive(AH); - else - replace_on_exit_close_archive(AH); + on_exit_close_archive(AH); /* Let the archiver know how noisy to be */ AH->verbose = opts->verbose; @@ -639,21 +501,25 @@ restore_one_database(const char *inputFileSpec, RestoreOptions *opts, else { ProcessArchiveRestoreOptions(AH); - RestoreArchive(AH, append_data); + RestoreArchive(AH); } - n_errors = AH->n_errors; + /* done, print a summary of ignored errors */ + if (AH->n_errors) + pg_log_warning("errors ignored on restore: %d", AH->n_errors); /* AH may be freed in CloseArchive? 
*/ + exit_code = AH->n_errors ? 1 : 0; + CloseArchive(AH); - return n_errors; + return exit_code; } static void usage(const char *progname) { - printf(_("%s restores PostgreSQL databases from archives created by pg_dump or pg_dumpall.\n\n"), progname); + printf(_("%s restores a PostgreSQL database from an archive created by pg_dump.\n\n"), progname); printf(_("Usage:\n")); printf(_(" %s [OPTION]... [FILE]\n"), progname); @@ -671,7 +537,6 @@ usage(const char *progname) printf(_(" -c, --clean clean (drop) database objects before recreating\n")); printf(_(" -C, --create create the target database\n")); printf(_(" -e, --exit-on-error exit on error, default is to continue\n")); - printf(_(" -g, --globals-only restore only global objects, no databases\n")); printf(_(" -I, --index=NAME restore named index\n")); printf(_(" -j, --jobs=NUM use this many parallel jobs to restore\n")); printf(_(" -L, --use-list=FILENAME use table of contents from this file for\n" @@ -688,7 +553,6 @@ usage(const char *progname) printf(_(" -1, --single-transaction restore as a single transaction\n")); printf(_(" --disable-triggers disable triggers during data-only restore\n")); printf(_(" --enable-row-security enable row security\n")); - printf(_(" --exclude-database=PATTERN do not restore the specified database(s)\n")); printf(_(" --filter=FILENAME restore or skip objects based on expressions\n" " in FILENAME\n")); printf(_(" --if-exists use IF EXISTS when dropping objects\n")); @@ -704,7 +568,9 @@ usage(const char *progname) printf(_(" --no-subscriptions do not restore subscriptions\n")); printf(_(" --no-table-access-method do not restore table access methods\n")); printf(_(" --no-tablespaces do not restore tablespace assignments\n")); + printf(_(" --restrict-key=RESTRICT_KEY use provided string as psql \\restrict key\n")); printf(_(" --section=SECTION restore named section (pre-data, data, or post-data)\n")); + printf(_(" --statistics restore the statistics\n")); printf(_(" --statistics-only restore only the statistics, not schema or data\n")); printf(_(" --strict-names require table and/or schema include patterns to\n" " match at least one entity each\n")); @@ -712,9 +578,6 @@ usage(const char *progname) printf(_(" --use-set-session-authorization\n" " use SET SESSION AUTHORIZATION commands instead of\n" " ALTER OWNER commands to set ownership\n")); - printf(_(" --with-data restore the data\n")); - printf(_(" --with-schema restore the schema\n")); - printf(_(" --with-statistics restore the statistics\n")); printf(_("\nConnection options:\n")); printf(_(" -h, --host=HOSTNAME database server host or socket directory\n")); @@ -725,8 +588,8 @@ usage(const char *progname) printf(_(" --role=ROLENAME do SET ROLE before restore\n")); printf(_("\n" - "The options -I, -n, -N, -P, -t, -T, --section, and --exclude-database can be\n" - "combined and specified multiple times to select multiple objects.\n")); + "The options -I, -n, -N, -P, -t, -T, and --section can be combined and specified\n" + "multiple times to select multiple objects.\n")); printf(_("\nIf no input file name is supplied, then standard input is used.\n\n")); printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT); printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL); @@ -831,585 +694,3 @@ read_restore_filters(const char *filename, RestoreOptions *opts) filter_free(&fstate); } - -/* - * file_exists_in_directory - * - * Returns true if the file exists in the given directory. 
- */ -static bool -file_exists_in_directory(const char *dir, const char *filename) -{ - struct stat st; - char buf[MAXPGPATH]; - - if (snprintf(buf, MAXPGPATH, "%s/%s", dir, filename) >= MAXPGPATH) - pg_fatal("directory name too long: \"%s\"", dir); - - return (stat(buf, &st) == 0 && S_ISREG(st.st_mode)); -} - -/* - * read_one_statement - * - * This will start reading from passed file pointer using fgetc and read till - * semicolon(sql statement terminator for global.dat file) - * - * EOF is returned if end-of-file input is seen; time to shut down. - */ - -static int -read_one_statement(StringInfo inBuf, FILE *pfile) -{ - int c; /* character read from getc() */ - int m; - - StringInfoData q; - - initStringInfo(&q); - - resetStringInfo(inBuf); - - /* - * Read characters until EOF or the appropriate delimiter is seen. - */ - while ((c = fgetc(pfile)) != EOF) - { - if (c != '\'' && c != '"' && c != '\n' && c != ';') - { - appendStringInfoChar(inBuf, (char) c); - while ((c = fgetc(pfile)) != EOF) - { - if (c != '\'' && c != '"' && c != ';' && c != '\n') - appendStringInfoChar(inBuf, (char) c); - else - break; - } - } - - if (c == '\'' || c == '"') - { - appendStringInfoChar(&q, (char) c); - m = c; - - while ((c = fgetc(pfile)) != EOF) - { - appendStringInfoChar(&q, (char) c); - - if (c == m) - { - appendStringInfoString(inBuf, q.data); - resetStringInfo(&q); - break; - } - } - } - - if (c == ';') - { - appendStringInfoChar(inBuf, (char) ';'); - break; - } - - if (c == '\n') - appendStringInfoChar(inBuf, (char) '\n'); - } - - pg_free(q.data); - - /* No input before EOF signal means time to quit. */ - if (c == EOF && inBuf->len == 0) - return EOF; - - /* return something that's not EOF */ - return 'Q'; -} - -/* - * get_dbnames_list_to_restore - * - * This will mark for skipping any entries from dbname_oid_list that pattern match an - * entry in the db_exclude_patterns list. - * - * Returns the number of database to be restored. - * - */ -static int -get_dbnames_list_to_restore(PGconn *conn, - SimplePtrList *dbname_oid_list, - SimpleStringList db_exclude_patterns) -{ - int count_db = 0; - PQExpBuffer query; - PGresult *res; - - query = createPQExpBuffer(); - - if (!conn) - pg_log_info("considering PATTERN as NAME for --exclude-database option as no database connection while doing pg_restore"); - - /* - * Process one by one all dbnames and if specified to skip restoring, then - * remove dbname from list. 
- */ - for (SimplePtrListCell *db_cell = dbname_oid_list->head; - db_cell; db_cell = db_cell->next) - { - DbOidName *dbidname = (DbOidName *) db_cell->ptr; - bool skip_db_restore = false; - PQExpBuffer db_lit = createPQExpBuffer(); - - appendStringLiteralConn(db_lit, dbidname->str, conn); - - for (SimpleStringListCell *pat_cell = db_exclude_patterns.head; pat_cell; pat_cell = pat_cell->next) - { - /* - * If there is an exact match then we don't need to try a pattern - * match - */ - if (pg_strcasecmp(dbidname->str, pat_cell->val) == 0) - skip_db_restore = true; - /* Otherwise, try a pattern match if there is a connection */ - else if (conn) - { - int dotcnt; - - appendPQExpBufferStr(query, "SELECT 1 "); - processSQLNamePattern(conn, query, pat_cell->val, false, - false, NULL, db_lit->data, - NULL, NULL, NULL, &dotcnt); - - if (dotcnt > 0) - { - pg_log_error("improper qualified name (too many dotted names): %s", - dbidname->str); - PQfinish(conn); - exit_nicely(1); - } - - res = executeQuery(conn, query->data); - - if ((PQresultStatus(res) == PGRES_TUPLES_OK) && PQntuples(res)) - { - skip_db_restore = true; - pg_log_info("database name \"%s\" matches exclude pattern \"%s\"", dbidname->str, pat_cell->val); - } - - PQclear(res); - resetPQExpBuffer(query); - } - - if (skip_db_restore) - break; - } - - destroyPQExpBuffer(db_lit); - - /* - * Mark db to be skipped or increment the counter of dbs to be - * restored - */ - if (skip_db_restore) - { - pg_log_info("excluding database \"%s\"", dbidname->str); - dbidname->oid = InvalidOid; - } - else - { - count_db++; - } - } - - destroyPQExpBuffer(query); - - return count_db; -} - -/* - * get_dbname_oid_list_from_mfile - * - * Open map.dat file and read line by line and then prepare a list of database - * names and corresponding db_oid. - * - * Returns, total number of database names in map.dat file. - */ -static int -get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oid_list) -{ - StringInfoData linebuf; - FILE *pfile; - char map_file_path[MAXPGPATH]; - int count = 0; - - - /* - * If there is only global.dat file in dump, then return from here as - * there is no database to restore. - */ - if (!file_exists_in_directory(dumpdirpath, "map.dat")) - { - pg_log_info("database restoring is skipped because file \"%s\" does not exist in directory \"%s\"", "map.dat", dumpdirpath); - return 0; - } - - snprintf(map_file_path, MAXPGPATH, "%s/map.dat", dumpdirpath); - - /* Open map.dat file. */ - pfile = fopen(map_file_path, PG_BINARY_R); - - if (pfile == NULL) - pg_fatal("could not open file \"%s\": %m", map_file_path); - - initStringInfo(&linebuf); - - /* Append all the dbname/db_oid combinations to the list. */ - while (pg_get_line_buf(pfile, &linebuf)) - { - Oid db_oid = InvalidOid; - char *dbname; - DbOidName *dbidname; - int namelen; - char *p = linebuf.data; - - /* Extract dboid. */ - while (isdigit((unsigned char) *p)) - p++; - if (p > linebuf.data && *p == ' ') - { - sscanf(linebuf.data, "%u", &db_oid); - p++; - } - - /* dbname is the rest of the line */ - dbname = p; - namelen = strlen(dbname); - - /* Report error and exit if the file has any corrupted data. 
*/ - if (!OidIsValid(db_oid) || namelen <= 1) - pg_fatal("invalid entry in file \"%s\" on line %d", map_file_path, - count + 1); - - pg_log_info("found database \"%s\" (OID: %u) in file \"%s\"", - dbname, db_oid, map_file_path); - - dbidname = pg_malloc(offsetof(DbOidName, str) + namelen + 1); - dbidname->oid = db_oid; - strlcpy(dbidname->str, dbname, namelen); - - simple_ptr_list_append(dbname_oid_list, dbidname); - count++; - } - - /* Close map.dat file. */ - fclose(pfile); - - return count; -} - -/* - * restore_all_databases - * - * This will restore databases those dumps are present in - * directory based on map.dat file mapping. - * - * This will skip restoring for databases that are specified with - * exclude-database option. - * - * returns, number of errors while doing restore. - */ -static int -restore_all_databases(PGconn *conn, const char *dumpdirpath, - SimpleStringList db_exclude_patterns, RestoreOptions *opts, - int numWorkers) -{ - SimplePtrList dbname_oid_list = {NULL, NULL}; - int num_db_restore = 0; - int num_total_db; - int n_errors_total; - int count = 0; - char *connected_db = NULL; - bool dumpData = opts->dumpData; - bool dumpSchema = opts->dumpSchema; - bool dumpStatistics = opts->dumpSchema; - - /* Save db name to reuse it for all the database. */ - if (opts->cparams.dbname) - connected_db = opts->cparams.dbname; - - num_total_db = get_dbname_oid_list_from_mfile(dumpdirpath, &dbname_oid_list); - - /* If map.dat has no entries, return after processing global.dat */ - if (dbname_oid_list.head == NULL) - return process_global_sql_commands(conn, dumpdirpath, opts->filename); - - pg_log_info(ngettext("found %d database name in \"%s\"", - "found %d database names in \"%s\"", - num_total_db), - num_total_db, "map.dat"); - - if (!conn) - { - pg_log_info("trying to connect to database \"%s\"", "postgres"); - - conn = ConnectDatabase("postgres", NULL, opts->cparams.pghost, - opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT, - false, progname, NULL, NULL, NULL, NULL); - - /* Try with template1. */ - if (!conn) - { - pg_log_info("trying to connect to database \"%s\"", "template1"); - - conn = ConnectDatabase("template1", NULL, opts->cparams.pghost, - opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT, - false, progname, NULL, NULL, NULL, NULL); - } - } - - /* - * filter the db list according to the exclude patterns - */ - num_db_restore = get_dbnames_list_to_restore(conn, &dbname_oid_list, - db_exclude_patterns); - - /* Open global.dat file and execute/append all the global sql commands. */ - n_errors_total = process_global_sql_commands(conn, dumpdirpath, opts->filename); - - /* Close the db connection as we are done with globals and patterns. */ - if (conn) - PQfinish(conn); - - /* Exit if no db needs to be restored. */ - if (dbname_oid_list.head == NULL || num_db_restore == 0) - { - pg_log_info(ngettext("no database needs restoring out of %d database", - "no database needs restoring out of %d databases", num_total_db), - num_total_db); - return n_errors_total; - } - - pg_log_info("need to restore %d databases out of %d databases", num_db_restore, num_total_db); - - /* - * We have a list of databases to restore after processing the - * exclude-database switch(es). Now we can restore them one by one. 
- */ - for (SimplePtrListCell *db_cell = dbname_oid_list.head; - db_cell; db_cell = db_cell->next) - { - DbOidName *dbidname = (DbOidName *) db_cell->ptr; - char subdirpath[MAXPGPATH]; - char subdirdbpath[MAXPGPATH]; - char dbfilename[MAXPGPATH]; - int n_errors; - - /* ignore dbs marked for skipping */ - if (dbidname->oid == InvalidOid) - continue; - - /* - * We need to reset override_dbname so that objects can be restored - * into an already created database. (used with -d/--dbname option) - */ - if (opts->cparams.override_dbname) - { - pfree(opts->cparams.override_dbname); - opts->cparams.override_dbname = NULL; - } - - snprintf(subdirdbpath, MAXPGPATH, "%s/databases", dumpdirpath); - - /* - * Look for the database dump file/dir. If there is an {oid}.tar or - * {oid}.dmp file, use it. Otherwise try to use a directory called - * {oid} - */ - snprintf(dbfilename, MAXPGPATH, "%u.tar", dbidname->oid); - if (file_exists_in_directory(subdirdbpath, dbfilename)) - snprintf(subdirpath, MAXPGPATH, "%s/databases/%u.tar", dumpdirpath, dbidname->oid); - else - { - snprintf(dbfilename, MAXPGPATH, "%u.dmp", dbidname->oid); - - if (file_exists_in_directory(subdirdbpath, dbfilename)) - snprintf(subdirpath, MAXPGPATH, "%s/databases/%u.dmp", dumpdirpath, dbidname->oid); - else - snprintf(subdirpath, MAXPGPATH, "%s/databases/%u", dumpdirpath, dbidname->oid); - } - - pg_log_info("restoring database \"%s\"", dbidname->str); - - /* If database is already created, then don't set createDB flag. */ - if (opts->cparams.dbname) - { - PGconn *test_conn; - - test_conn = ConnectDatabase(dbidname->str, NULL, opts->cparams.pghost, - opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT, - false, progname, NULL, NULL, NULL, NULL); - if (test_conn) - { - PQfinish(test_conn); - - /* Use already created database for connection. */ - opts->createDB = 0; - opts->cparams.dbname = dbidname->str; - } - else - { - /* we'll have to create it */ - opts->createDB = 1; - opts->cparams.dbname = connected_db; - } - } - - /* - * Reset flags - might have been reset in pg_backup_archiver.c by the - * previous restore. - */ - opts->dumpData = dumpData; - opts->dumpSchema = dumpSchema; - opts->dumpStatistics = dumpStatistics; - - /* Restore the single database. */ - n_errors = restore_one_database(subdirpath, opts, numWorkers, true, count); - - /* Print a summary of ignored errors during single database restore. */ - if (n_errors) - { - n_errors_total += n_errors; - pg_log_warning("errors ignored on database \"%s\" restore: %d", dbidname->str, n_errors); - } - - count++; - } - - /* Log number of processed databases. */ - pg_log_info("number of restored databases is %d", num_db_restore); - - /* Free dbname and dboid list. */ - simple_ptr_list_destroy(&dbname_oid_list); - - return n_errors_total; -} - -/* - * process_global_sql_commands - * - * Open global.dat and execute or copy the sql commands one by one. - * - * If outfile is not NULL, copy all sql commands into outfile rather than - * executing them. - * - * Returns the number of errors while processing global.dat - */ -static int -process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *outfile) -{ - char global_file_path[MAXPGPATH]; - PGresult *result; - StringInfoData sqlstatement, - user_create; - FILE *pfile; - int n_errors = 0; - - snprintf(global_file_path, MAXPGPATH, "%s/global.dat", dumpdirpath); - - /* Open global.dat file. 
*/ - pfile = fopen(global_file_path, PG_BINARY_R); - - if (pfile == NULL) - pg_fatal("could not open file \"%s\": %m", global_file_path); - - /* - * If outfile is given, then just copy all global.dat file data into - * outfile. - */ - if (outfile) - { - copy_or_print_global_file(outfile, pfile); - return 0; - } - - /* Init sqlstatement to append commands. */ - initStringInfo(&sqlstatement); - - /* creation statement for our current role */ - initStringInfo(&user_create); - appendStringInfoString(&user_create, "CREATE ROLE "); - /* should use fmtId here, but we don't know the encoding */ - appendStringInfoString(&user_create, PQuser(conn)); - appendStringInfoChar(&user_create, ';'); - - /* Process file till EOF and execute sql statements. */ - while (read_one_statement(&sqlstatement, pfile) != EOF) - { - /* don't try to create the role we are connected as */ - if (strstr(sqlstatement.data, user_create.data)) - continue; - - pg_log_info("executing query: %s", sqlstatement.data); - result = PQexec(conn, sqlstatement.data); - - switch (PQresultStatus(result)) - { - case PGRES_COMMAND_OK: - case PGRES_TUPLES_OK: - case PGRES_EMPTY_QUERY: - break; - default: - n_errors++; - pg_log_error("could not execute query: %s", PQerrorMessage(conn)); - pg_log_error_detail("Command was: %s", sqlstatement.data); - } - PQclear(result); - } - - /* Print a summary of ignored errors during global.dat. */ - if (n_errors) - pg_log_warning(ngettext("ignored %d error in file \"%s\"", - "ignored %d errors in file \"%s\"", n_errors), - n_errors, global_file_path); - fclose(pfile); - - return n_errors; -} - -/* - * copy_or_print_global_file - * - * Copy global.dat into the output file. If "-" is used as outfile, - * then print commands to stdout. - */ -static void -copy_or_print_global_file(const char *outfile, FILE *pfile) -{ - char out_file_path[MAXPGPATH]; - FILE *OPF; - int c; - - /* "-" is used for stdout. */ - if (strcmp(outfile, "-") == 0) - OPF = stdout; - else - { - snprintf(out_file_path, MAXPGPATH, "%s", outfile); - OPF = fopen(out_file_path, PG_BINARY_W); - - if (OPF == NULL) - { - fclose(pfile); - pg_fatal("could not open file: \"%s\"", outfile); - } - } - - /* Append global.dat into output file or print to stdout. */ - while ((c = fgetc(pfile)) != EOF) - fputc(c, OPF); - - fclose(pfile); - - /* Close output file. 
*/ - if (strcmp(outfile, "-") != 0) - fclose(OPF); -} diff --git a/src/bin/pg_dump/t/001_basic.pl b/src/bin/pg_dump/t/001_basic.pl index c3c5fae11eaaf..37d893d5e6a5f 100644 --- a/src/bin/pg_dump/t/001_basic.pl +++ b/src/bin/pg_dump/t/001_basic.pl @@ -237,24 +237,6 @@ 'pg_restore: options -C\/--create and -1\/--single-transaction cannot be used together' ); -command_fails_like( - [ 'pg_restore', '--exclude-database=foo', '--globals-only', '-d', 'xxx' ], - qr/\Qpg_restore: error: option --exclude-database cannot be used together with -g\/--globals-only\E/, - 'pg_restore: option --exclude-database cannot be used together with -g/--globals-only' -); - -command_fails_like( - [ 'pg_restore', '--exclude-database=foo', '-d', 'xxx', 'dumpdir' ], - qr/\Qpg_restore: error: option --exclude-database can be used only when restoring an archive created by pg_dumpall\E/, - 'When option --exclude-database is used in pg_restore with dump of pg_dump' -); - -command_fails_like( - [ 'pg_restore', '--globals-only', '-d', 'xxx', 'dumpdir' ], - qr/\Qpg_restore: error: option -g\/--globals-only can be used only when restoring an archive created by pg_dumpall\E/, - 'When option --globals-only is not used in pg_restore with dump of pg_dump' -); - # also fails for -r and -t, but it seems pointless to add more tests for those. command_fails_like( [ 'pg_dumpall', '--exclude-database=foo', '--globals-only' ], @@ -262,8 +244,4 @@ 'pg_dumpall: option --exclude-database cannot be used together with -g/--globals-only' ); -command_fails_like( - [ 'pg_dumpall', '--format', 'x' ], - qr/\Qpg_dumpall: error: unrecognized output format "x";\E/, - 'pg_dumpall: unrecognized output format'); done_testing(); diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl index 6c7ec80e271ce..e7a2d64f74130 100644 --- a/src/bin/pg_dump/t/002_pg_dump.pl +++ b/src/bin/pg_dump/t/002_pg_dump.pl @@ -68,7 +68,7 @@ '--no-data', '--sequence-data', '--binary-upgrade', - '--with-statistics', + '--statistics', '--dbname' => 'postgres', # alternative way to specify database ], restore_cmd => [ @@ -76,7 +76,7 @@ '--format' => 'custom', '--verbose', '--file' => "$tempdir/binary_upgrade.sql", - '--with-statistics', + '--statistics', "$tempdir/binary_upgrade.dump", ], }, @@ -90,13 +90,13 @@ '--format' => 'custom', '--compress' => '1', '--file' => "$tempdir/compression_gzip_custom.dump", - '--with-statistics', + '--statistics', 'postgres', ], restore_cmd => [ 'pg_restore', '--file' => "$tempdir/compression_gzip_custom.sql", - '--with-statistics', + '--statistics', "$tempdir/compression_gzip_custom.dump", ], command_like => { @@ -119,7 +119,7 @@ '--format' => 'directory', '--compress' => 'gzip:1', '--file' => "$tempdir/compression_gzip_dir", - '--with-statistics', + '--statistics', 'postgres', ], # Give coverage for manually compressed blobs.toc files during @@ -137,7 +137,7 @@ 'pg_restore', '--jobs' => '2', '--file' => "$tempdir/compression_gzip_dir.sql", - '--with-statistics', + '--statistics', "$tempdir/compression_gzip_dir", ], }, @@ -150,7 +150,7 @@ '--format' => 'plain', '--compress' => '1', '--file' => "$tempdir/compression_gzip_plain.sql.gz", - '--with-statistics', + '--statistics', 'postgres', ], # Decompress the generated file to run through the tests. 
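
The pg_restore.c hunks above auto-generate a restrict key whenever
-d/--dbname is not given.  For illustration only, a minimal sketch of such a
generator, assuming PostgreSQL's pg_strong_random() port function and the
frontend pg_malloc(); the committed generate_restrict_key() (reached via the
newly included dumputils.h) may differ in key length and construction, and
sketch_generate_restrict_key is a hypothetical name:

    /* assumes postgres_fe.h and common/fe_memutils.h */
    static char *
    sketch_generate_restrict_key(void)
    {
        /* [A-Za-z0-9]: the shape a psql \restrict argument must have */
        static const char alphabet[] =
            "abcdefghijklmnopqrstuvwxyz"
            "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
        uint8       raw[16];
        char       *key = pg_malloc(sizeof(raw) + 1);

        if (!pg_strong_random(raw, sizeof(raw)))
            return NULL;        /* caller turns this into pg_fatal() */

        /* map each random byte onto the alphabet; slight bias is harmless */
        for (int i = 0; i < (int) sizeof(raw); i++)
            key[i] = alphabet[raw[i] % (sizeof(alphabet) - 1)];
        key[sizeof(raw)] = '\0';

        return key;
    }
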
@@ -169,13 +169,13 @@ '--format' => 'custom', '--compress' => 'lz4', '--file' => "$tempdir/compression_lz4_custom.dump", - '--with-statistics', + '--statistics', 'postgres', ], restore_cmd => [ 'pg_restore', '--file' => "$tempdir/compression_lz4_custom.sql", - '--with-statistics', + '--statistics', "$tempdir/compression_lz4_custom.dump", ], command_like => { @@ -198,7 +198,7 @@ '--format' => 'directory', '--compress' => 'lz4:1', '--file' => "$tempdir/compression_lz4_dir", - '--with-statistics', + '--statistics', 'postgres', ], # Verify that data files were compressed @@ -210,7 +210,7 @@ 'pg_restore', '--jobs' => '2', '--file' => "$tempdir/compression_lz4_dir.sql", - '--with-statistics', + '--statistics', "$tempdir/compression_lz4_dir", ], }, @@ -223,7 +223,7 @@ '--format' => 'plain', '--compress' => 'lz4', '--file' => "$tempdir/compression_lz4_plain.sql.lz4", - '--with-statistics', + '--statistics', 'postgres', ], # Decompress the generated file to run through the tests. @@ -245,13 +245,13 @@ '--format' => 'custom', '--compress' => 'zstd', '--file' => "$tempdir/compression_zstd_custom.dump", - '--with-statistics', + '--statistics', 'postgres', ], restore_cmd => [ 'pg_restore', '--file' => "$tempdir/compression_zstd_custom.sql", - '--with-statistics', + '--statistics', "$tempdir/compression_zstd_custom.dump", ], command_like => { @@ -273,7 +273,7 @@ '--format' => 'directory', '--compress' => 'zstd:1', '--file' => "$tempdir/compression_zstd_dir", - '--with-statistics', + '--statistics', 'postgres', ], # Give coverage for manually compressed blobs.toc files during @@ -294,7 +294,7 @@ 'pg_restore', '--jobs' => '2', '--file' => "$tempdir/compression_zstd_dir.sql", - '--with-statistics', + '--statistics', "$tempdir/compression_zstd_dir", ], }, @@ -308,7 +308,7 @@ '--format' => 'plain', '--compress' => 'zstd:long', '--file' => "$tempdir/compression_zstd_plain.sql.zst", - '--with-statistics', + '--statistics', 'postgres', ], # Decompress the generated file to run through the tests. 
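
For completeness, validation can be sketched the same way (hypothetical
helper name; the real valid_restrict_key() may differ): a key must be
nonempty and purely alphanumeric, which is also the [a-zA-Z0-9]+ shape the
new 'restrict'/'unrestrict' TAP tests further down expect after \restrict
and \unrestrict:

    #include <ctype.h>
    #include <stdbool.h>

    static bool
    sketch_valid_restrict_key(const char *key)
    {
        /* an empty key is unusable as a \restrict argument */
        if (key == NULL || key[0] == '\0')
            return false;
        /* reject anything outside [A-Za-z0-9] (exact in the C locale) */
        for (const char *p = key; *p; p++)
        {
            if (!isalnum((unsigned char) *p))
                return false;
        }
        return true;
    }
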
@@ -327,7 +327,7 @@ 'pg_dump', '--no-sync', '--file' => "$tempdir/clean.sql", '--clean', - '--with-statistics', + '--statistics', '--dbname' => 'postgres', # alternative way to specify database ], }, @@ -338,7 +338,7 @@ '--clean', '--if-exists', '--encoding' => 'UTF8', # no-op, just for testing - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -357,7 +357,7 @@ '--create', '--no-reconnect', # no-op, just for testing '--verbose', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -376,7 +376,7 @@ dump_cmd => [ 'pg_dump', '--no-sync', '--file' => "$tempdir/defaults.sql", - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -385,7 +385,7 @@ dump_cmd => [ 'pg_dump', '--no-sync', '--file' => "$tempdir/defaults_no_public.sql", - '--with-statistics', + '--statistics', 'regress_pg_dump_test', ], }, @@ -395,7 +395,7 @@ 'pg_dump', '--no-sync', '--clean', '--file' => "$tempdir/defaults_no_public_clean.sql", - '--with-statistics', + '--statistics', 'regress_pg_dump_test', ], }, @@ -404,7 +404,7 @@ dump_cmd => [ 'pg_dump', '--no-sync', '--file' => "$tempdir/defaults_public_owner.sql", - '--with-statistics', + '--statistics', 'regress_public_owner', ], }, @@ -419,14 +419,14 @@ 'pg_dump', '--format' => 'custom', '--file' => "$tempdir/defaults_custom_format.dump", - '--with-statistics', + '--statistics', 'postgres', ], restore_cmd => [ 'pg_restore', '--format' => 'custom', '--file' => "$tempdir/defaults_custom_format.sql", - '--with-statistics', + '--statistics', "$tempdir/defaults_custom_format.dump", ], command_like => { @@ -451,14 +451,14 @@ 'pg_dump', '--format' => 'directory', '--file' => "$tempdir/defaults_dir_format", - '--with-statistics', + '--statistics', 'postgres', ], restore_cmd => [ 'pg_restore', '--format' => 'directory', '--file' => "$tempdir/defaults_dir_format.sql", - '--with-statistics', + '--statistics', "$tempdir/defaults_dir_format", ], command_like => { @@ -484,13 +484,13 @@ '--format' => 'directory', '--jobs' => 2, '--file' => "$tempdir/defaults_parallel", - '--with-statistics', + '--statistics', 'postgres', ], restore_cmd => [ 'pg_restore', '--file' => "$tempdir/defaults_parallel.sql", - '--with-statistics', + '--statistics', "$tempdir/defaults_parallel", ], }, @@ -502,14 +502,14 @@ 'pg_dump', '--format' => 'tar', '--file' => "$tempdir/defaults_tar_format.tar", - '--with-statistics', + '--statistics', 'postgres', ], restore_cmd => [ 'pg_restore', '--format' => 'tar', '--file' => "$tempdir/defaults_tar_format.sql", - '--with-statistics', + '--statistics', "$tempdir/defaults_tar_format.tar", ], }, @@ -518,7 +518,7 @@ 'pg_dump', '--no-sync', '--file' => "$tempdir/exclude_dump_test_schema.sql", '--exclude-schema' => 'dump_test', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -527,7 +527,7 @@ 'pg_dump', '--no-sync', '--file' => "$tempdir/exclude_test_table.sql", '--exclude-table' => 'dump_test.test_table', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -536,7 +536,7 @@ 'pg_dump', '--no-sync', '--file' => "$tempdir/exclude_measurement.sql", '--exclude-table-and-children' => 'dump_test.measurement', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -546,7 +546,7 @@ '--file' => "$tempdir/exclude_measurement_data.sql", '--exclude-table-data-and-children' => 'dump_test.measurement', '--no-unlogged-table-data', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -556,7 +556,7 @@ '--file' => "$tempdir/exclude_test_table_data.sql", '--exclude-table-data' => 'dump_test.test_table', '--no-unlogged-table-data', - 
'--with-statistics', + '--statistics', 'postgres', ], }, @@ -575,7 +575,7 @@ '--file' => "$tempdir/pg_dumpall_globals.sql", '--globals-only', '--no-sync', - '--with-statistics', + '--statistics', ], }, pg_dumpall_globals_clean => { @@ -585,14 +585,14 @@ '--globals-only', '--clean', '--no-sync', - '--with-statistics', + '--statistics', ], }, pg_dumpall_dbprivs => { dump_cmd => [ 'pg_dumpall', '--no-sync', '--file' => "$tempdir/pg_dumpall_dbprivs.sql", - '--with-statistics', + '--statistics', ], }, pg_dumpall_exclude => { @@ -602,7 +602,7 @@ '--file' => "$tempdir/pg_dumpall_exclude.sql", '--exclude-database' => '*dump_test*', '--no-sync', - '--with-statistics', + '--statistics', ], }, no_toast_compression => { @@ -610,7 +610,7 @@ 'pg_dump', '--no-sync', '--file' => "$tempdir/no_toast_compression.sql", '--no-toast-compression', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -619,7 +619,7 @@ 'pg_dump', '--no-sync', '--file' => "$tempdir/no_large_objects.sql", '--no-large-objects', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -628,7 +628,7 @@ 'pg_dump', '--no-sync', '--file' => "$tempdir/no_policies.sql", '--no-policies', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -637,7 +637,7 @@ 'pg_dump', '--no-sync', '--file' => "$tempdir/no_privs.sql", '--no-privileges', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -646,7 +646,7 @@ 'pg_dump', '--no-sync', '--file' => "$tempdir/no_owner.sql", '--no-owner', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -655,7 +655,7 @@ 'pg_dump', '--no-sync', '--file' => "$tempdir/no_table_access_method.sql", '--no-table-access-method', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -664,7 +664,7 @@ 'pg_dump', '--no-sync', '--file' => "$tempdir/only_dump_test_schema.sql", '--schema' => 'dump_test', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -675,7 +675,7 @@ '--table' => 'dump_test.test_table', '--lock-wait-timeout' => (1000 * $PostgreSQL::Test::Utils::timeout_default), - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -686,7 +686,7 @@ '--table-and-children' => 'dump_test.measurement', '--lock-wait-timeout' => (1000 * $PostgreSQL::Test::Utils::timeout_default), - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -696,7 +696,7 @@ '--file' => "$tempdir/role.sql", '--role' => 'regress_dump_test_role', '--schema' => 'dump_test_second_schema', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -709,13 +709,13 @@ '--file' => "$tempdir/role_parallel", '--role' => 'regress_dump_test_role', '--schema' => 'dump_test_second_schema', - '--with-statistics', + '--statistics', 'postgres', ], restore_cmd => [ 'pg_restore', '--file' => "$tempdir/role_parallel.sql", - '--with-statistics', + '--statistics', "$tempdir/role_parallel", ], }, @@ -744,7 +744,7 @@ 'pg_dump', '--no-sync', '--file' => "$tempdir/section_pre_data.sql", '--section' => 'pre-data', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -753,7 +753,7 @@ 'pg_dump', '--no-sync', '--file' => "$tempdir/section_data.sql", '--section' => 'data', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -762,7 +762,7 @@ 'pg_dump', '--no-sync', '--file' => "$tempdir/section_post_data.sql", '--section' => 'post-data', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -773,7 +773,7 @@ '--schema' => 'dump_test', '--large-objects', '--no-large-objects', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -789,7 +789,7 @@ 'pg_dump', '--no-sync', 
"--file=$tempdir/no_data_no_schema.sql", '--no-data', '--no-schema', 'postgres', - '--with-statistics', + '--statistics', ], }, statistics_only => { @@ -799,18 +799,11 @@ 'postgres', ], }, - schema_only_with_statistics => { - dump_cmd => [ - 'pg_dump', '--no-sync', - "--file=$tempdir/schema_only_with_statistics.sql", - '--schema-only', '--with-statistics', 'postgres', - ], - }, no_schema => { dump_cmd => [ 'pg_dump', '--no-sync', "--file=$tempdir/no_schema.sql", '--no-schema', - '--with-statistics', 'postgres', + '--statistics', 'postgres', ], },); @@ -888,6 +881,16 @@ # This is where the actual tests are defined. my %tests = ( + 'restrict' => { + all_runs => 1, + regexp => qr/^\\restrict [a-zA-Z0-9]+$/m, + }, + + 'unrestrict' => { + all_runs => 1, + regexp => qr/^\\unrestrict [a-zA-Z0-9]+$/m, + }, + 'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role GRANT' => { create_order => 14, create_sql => 'ALTER DEFAULT PRIVILEGES @@ -2231,6 +2234,27 @@ }, }, + 'newline of role or table name in comment' => { + create_sql => qq{CREATE ROLE regress_newline; + ALTER ROLE regress_newline SET enable_seqscan = off; + ALTER ROLE regress_newline + RENAME TO "regress_newline\nattack"; + + -- meet getPartitioningInfo() "unsafe" condition + CREATE TYPE pp_colors AS + ENUM ('green', 'blue', 'black'); + CREATE TABLE pp_enumpart (a pp_colors) + PARTITION BY HASH (a); + CREATE TABLE pp_enumpart1 PARTITION OF pp_enumpart + FOR VALUES WITH (MODULUS 2, REMAINDER 0); + CREATE TABLE pp_enumpart2 PARTITION OF pp_enumpart + FOR VALUES WITH (MODULUS 2, REMAINDER 1); + ALTER TABLE pp_enumpart + RENAME TO "pp_enumpart\nattack";}, + regexp => qr/\n--[^\n]*\nattack/s, + like => {}, + }, + 'CREATE TABLESPACE regress_dump_tablespace' => { create_order => 2, create_sql => q( @@ -4225,7 +4249,6 @@ }, 'ALTER TABLE measurement PRIMARY KEY' => { - all_runs => 1, catch_all => 'CREATE ... commands', create_order => 93, create_sql => @@ -4277,7 +4300,6 @@ }, 'ALTER INDEX ... ATTACH PARTITION (primary key)' => { - all_runs => 1, catch_all => 'CREATE ... commands', regexp => qr/^ \QALTER INDEX dump_test.measurement_pkey ATTACH PARTITION dump_test_second_schema.measurement_y2006m2_pkey\E @@ -5207,6 +5229,17 @@ qr/\Qpg_dump: error: no matching schemas were found for pattern\E/, 'no matching schemas'); +command_fails_like( + [ + 'pg_dump', + '--port' => $port, + '--strict-names', + '--schema-only', + '--statistics', + ], + qr/\Qpg_dump: error: options -s\/--schema-only and --statistics cannot be used together\E/, + 'cannot use --schema-only and --statistics together'); + command_fails_like( [ 'pg_dump', @@ -5399,9 +5432,10 @@ # Check for proper test definitions # - # There should be a "like" list, even if it is empty. (This - # makes the test more self-documenting.) - if (!defined($tests{$test}->{like})) + # Either "all_runs" should be set or there should be a "like" list, + # even if it is empty. (This makes the test more self-documenting.) + if (!defined($tests{$test}->{all_runs}) + && !defined($tests{$test}->{like})) { die "missing \"like\" in test \"$test\""; } @@ -5437,9 +5471,10 @@ next; } - # Run the test listed as a like, unless it is specifically noted - # as an unlike (generally due to an explicit exclusion or similar). - if ($tests{$test}->{like}->{$test_key} + # Run the test if all_runs is set or if listed as a like, unless it is + # specifically noted as an unlike (generally due to an explicit + # exclusion or similar). 
+ if (($tests{$test}->{like}->{$test_key} || $tests{$test}->{all_runs}) && !defined($tests{$test}->{unlike}->{$test_key})) { if (!ok($output_file =~ $tests{$test}->{regexp}, diff --git a/src/bin/pg_dump/t/003_pg_dump_with_server.pl b/src/bin/pg_dump/t/003_pg_dump_with_server.pl index 8dc014ed6ed34..c83d3899dfbf7 100644 --- a/src/bin/pg_dump/t/003_pg_dump_with_server.pl +++ b/src/bin/pg_dump/t/003_pg_dump_with_server.pl @@ -16,6 +16,22 @@ $node->init; $node->start; +######################################### +# pg_dumpall: newline in database name + +$node->safe_psql('postgres', qq{CREATE DATABASE "regress_\nattack"}); + +my (@cmd, $stdout, $stderr); +@cmd = ("pg_dumpall", '--port' => $port, '--exclude-database=postgres'); +print("# Running: " . join(" ", @cmd) . "\n"); +my $result = IPC::Run::run \@cmd, '>' => \$stdout, '2>' => \$stderr; +ok(!$result, "newline in dbname: exit code not 0"); +like( + $stderr, + qr/shell command argument contains a newline/, + "newline in dbname: stderr matches"); +unlike($stdout, qr/^attack/m, "newline in dbname: no comment escape"); + ######################################### # Verify that dumping foreign data includes only foreign tables of # matching servers diff --git a/src/bin/pg_dump/t/005_pg_dump_filterfile.pl b/src/bin/pg_dump/t/005_pg_dump_filterfile.pl index f05e8a20e0559..5c69ec31c393f 100644 --- a/src/bin/pg_dump/t/005_pg_dump_filterfile.pl +++ b/src/bin/pg_dump/t/005_pg_dump_filterfile.pl @@ -418,10 +418,16 @@ qr/invalid filter command/, "invalid syntax: incorrect filter command"); -# Test invalid object type +# Test invalid object type. +# +# This test also verifies that keywords are correctly recognized as strings of +# non-whitespace characters. If the parser incorrectly treats non-whitespace +# delimiters (like hyphens) as keyword boundaries, "table-data" might be +# misread as the valid object type "table". To catch such issues, +# "table-data" is used here as an intentionally invalid object type. open $inputfile, '>', "$tempdir/inputfile.txt" or die "unable to open filterfile for writing"; -print $inputfile "include xxx"; +print $inputfile "exclude table-data one"; close $inputfile; command_fails_like( @@ -432,8 +438,8 @@ '--filter' => "$tempdir/inputfile.txt", 'postgres' ], - qr/unsupported filter object type: "xxx"/, - "invalid syntax: invalid object type specified, should be table, schema, foreign_data or data" + qr/unsupported filter object type: "table-data"/, + "invalid syntax: invalid object type specified" ); # Test missing object identifier pattern diff --git a/src/bin/pg_dump/t/006_pg_dumpall.pl b/src/bin/pg_dump/t/006_pg_dumpall.pl deleted file mode 100644 index c274b777586ad..0000000000000 --- a/src/bin/pg_dump/t/006_pg_dumpall.pl +++ /dev/null @@ -1,400 +0,0 @@ -# Copyright (c) 2021-2025, PostgreSQL Global Development Group - -use strict; -use warnings FATAL => 'all'; - -use PostgreSQL::Test::Cluster; -use PostgreSQL::Test::Utils; -use Test::More; - -my $tempdir = PostgreSQL::Test::Utils::tempdir; -my $run_db = 'postgres'; -my $sep = $windows_os ? "\\" : "/"; - -# Tablespace locations used by "restore_tablespace" test case. -my $tablespace1 = "${tempdir}${sep}tbl1"; -my $tablespace2 = "${tempdir}${sep}tbl2"; -mkdir($tablespace1) || die "mkdir $tablespace1 $!"; -mkdir($tablespace2) || die "mkdir $tablespace2 $!"; - -# Scape tablespace locations on Windows. -$tablespace1 = $windows_os ? ($tablespace1 =~ s/\\/\\\\/gr) : $tablespace1; -$tablespace2 = $windows_os ? 
($tablespace2 =~ s/\\/\\\\/gr) : $tablespace2; - -# Where pg_dumpall will be executed. -my $node = PostgreSQL::Test::Cluster->new('node'); -$node->init; -$node->start; - - -############################################################### -# Definition of the pg_dumpall test cases to run. -# -# Each of these test cases are named and those names are used for fail -# reporting and also to save the dump and restore information needed for the -# test to assert. -# -# The "setup_sql" is a psql valid script that contains SQL commands to execute -# before of actually execute the tests. The setups are all executed before of -# any test execution. -# -# The "dump_cmd" and "restore_cmd" are the commands that will be executed. The -# "restore_cmd" must have the --file flag to save the restore output so that we -# can assert on it. -# -# The "like" and "unlike" is a regexp that is used to match the pg_restore -# output. It must have at least one of then filled per test cases but it also -# can have both. See "excluding_databases" test case for example. -my %pgdumpall_runs = ( - restore_roles => { - setup_sql => ' - CREATE ROLE dumpall WITH ENCRYPTED PASSWORD \'admin\' SUPERUSER; - CREATE ROLE dumpall2 WITH REPLICATION CONNECTION LIMIT 10;', - dump_cmd => [ - 'pg_dumpall', - '--format' => 'directory', - '--file' => "$tempdir/restore_roles", - ], - restore_cmd => [ - 'pg_restore', '-C', - '--format' => 'directory', - '--file' => "$tempdir/restore_roles.sql", - "$tempdir/restore_roles", - ], - like => qr/ - ^\s*\QCREATE ROLE dumpall;\E\s*\n - \s*\QALTER ROLE dumpall WITH SUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOLOGIN NOREPLICATION NOBYPASSRLS PASSWORD 'SCRAM-SHA-256\E - [^']+';\s*\n - \s*\QCREATE ROLE dumpall2;\E - \s*\QALTER ROLE dumpall2 WITH NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOLOGIN REPLICATION NOBYPASSRLS CONNECTION LIMIT 10;\E - /xm - }, - - restore_tablespace => { - setup_sql => " - CREATE ROLE tap; - CREATE TABLESPACE tbl1 OWNER tap LOCATION '$tablespace1'; - CREATE TABLESPACE tbl2 OWNER tap LOCATION '$tablespace2' WITH (seq_page_cost=1.0);", - dump_cmd => [ - 'pg_dumpall', - '--format' => 'directory', - '--file' => "$tempdir/restore_tablespace", - ], - restore_cmd => [ - 'pg_restore', '-C', - '--format' => 'directory', - '--file' => "$tempdir/restore_tablespace.sql", - "$tempdir/restore_tablespace", - ], - # Match "E" as optional since it is added on LOCATION when running on - # Windows. 
- like => qr/^ - \n\QCREATE TABLESPACE tbl1 OWNER tap LOCATION \E(?:E)?\Q'$tablespace1';\E - \n\QCREATE TABLESPACE tbl2 OWNER tap LOCATION \E(?:E)?\Q'$tablespace2';\E - \n\QALTER TABLESPACE tbl2 SET (seq_page_cost=1.0);\E - /xm, - }, - - restore_grants => { - setup_sql => " - CREATE DATABASE tapgrantsdb; - CREATE SCHEMA private; - CREATE SEQUENCE serial START 101; - CREATE FUNCTION fn() RETURNS void AS \$\$ - BEGIN - END; - \$\$ LANGUAGE plpgsql; - CREATE ROLE super; - CREATE ROLE grant1; - CREATE ROLE grant2; - CREATE ROLE grant3; - CREATE ROLE grant4; - CREATE ROLE grant5; - CREATE ROLE grant6; - CREATE ROLE grant7; - CREATE ROLE grant8; - - CREATE TABLE t (id int); - INSERT INTO t VALUES (1), (2), (3), (4); - - GRANT SELECT ON TABLE t TO grant1; - GRANT INSERT ON TABLE t TO grant2; - GRANT ALL PRIVILEGES ON TABLE t to grant3; - GRANT CONNECT, CREATE ON DATABASE tapgrantsdb TO grant4; - GRANT USAGE, CREATE ON SCHEMA private TO grant5; - GRANT USAGE, SELECT, UPDATE ON SEQUENCE serial TO grant6; - GRANT super TO grant7; - GRANT EXECUTE ON FUNCTION fn() TO grant8; - ", - dump_cmd => [ - 'pg_dumpall', - '--format' => 'directory', - '--file' => "$tempdir/restore_grants", - ], - restore_cmd => [ - 'pg_restore', '-C', - '--format' => 'directory', - '--file' => "$tempdir/restore_grants.sql", - "$tempdir/restore_grants", - ], - like => qr/^ - \n\QGRANT super TO grant7 WITH INHERIT TRUE GRANTED BY\E - (.*\n)* - \n\QGRANT ALL ON SCHEMA private TO grant5;\E - (.*\n)* - \n\QGRANT ALL ON FUNCTION public.fn() TO grant8;\E - (.*\n)* - \n\QGRANT ALL ON SEQUENCE public.serial TO grant6;\E - (.*\n)* - \n\QGRANT SELECT ON TABLE public.t TO grant1;\E - \n\QGRANT INSERT ON TABLE public.t TO grant2;\E - \n\QGRANT ALL ON TABLE public.t TO grant3;\E - (.*\n)* - \n\QGRANT CREATE,CONNECT ON DATABASE tapgrantsdb TO grant4;\E - /xm, - }, - - excluding_databases => { - setup_sql => 'CREATE DATABASE db1; - \c db1 - CREATE TABLE t1 (id int); - INSERT INTO t1 VALUES (1), (2), (3), (4); - CREATE TABLE t2 (id int); - INSERT INTO t2 VALUES (1), (2), (3), (4); - - CREATE DATABASE db2; - \c db2 - CREATE TABLE t3 (id int); - INSERT INTO t3 VALUES (1), (2), (3), (4); - CREATE TABLE t4 (id int); - INSERT INTO t4 VALUES (1), (2), (3), (4); - - CREATE DATABASE dbex3; - \c dbex3 - CREATE TABLE t5 (id int); - INSERT INTO t5 VALUES (1), (2), (3), (4); - CREATE TABLE t6 (id int); - INSERT INTO t6 VALUES (1), (2), (3), (4); - - CREATE DATABASE dbex4; - \c dbex4 - CREATE TABLE t7 (id int); - INSERT INTO t7 VALUES (1), (2), (3), (4); - CREATE TABLE t8 (id int); - INSERT INTO t8 VALUES (1), (2), (3), (4); - - CREATE DATABASE db5; - \c db5 - CREATE TABLE t9 (id int); - INSERT INTO t9 VALUES (1), (2), (3), (4); - CREATE TABLE t10 (id int); - INSERT INTO t10 VALUES (1), (2), (3), (4); - ', - dump_cmd => [ - 'pg_dumpall', - '--format' => 'directory', - '--file' => "$tempdir/excluding_databases", - '--exclude-database' => 'dbex*', - ], - restore_cmd => [ - 'pg_restore', '-C', - '--format' => 'directory', - '--file' => "$tempdir/excluding_databases.sql", - '--exclude-database' => 'db5', - "$tempdir/excluding_databases", - ], - like => qr/^ - \n\QCREATE DATABASE db1\E - (.*\n)* - \n\QCREATE TABLE public.t1 (\E - (.*\n)* - \n\QCREATE TABLE public.t2 (\E - (.*\n)* - \n\QCREATE DATABASE db2\E - (.*\n)* - \n\QCREATE TABLE public.t3 (\E - (.*\n)* - \n\QCREATE TABLE public.t4 (/xm, - unlike => qr/^ - \n\QCREATE DATABASE db3\E - (.*\n)* - \n\QCREATE TABLE public.t5 (\E - (.*\n)* - \n\QCREATE TABLE public.t6 (\E - (.*\n)* - \n\QCREATE DATABASE db4\E - 
(.*\n)* - \n\QCREATE TABLE public.t7 (\E - (.*\n)* - \n\QCREATE TABLE public.t8 (\E - \n\QCREATE DATABASE db5\E - (.*\n)* - \n\QCREATE TABLE public.t9 (\E - (.*\n)* - \n\QCREATE TABLE public.t10 (\E - /xm, - }, - - format_directory => { - setup_sql => "CREATE TABLE format_directory(a int, b boolean, c text); - INSERT INTO format_directory VALUES (1, true, 'name1'), (2, false, 'name2');", - dump_cmd => [ - 'pg_dumpall', - '--format' => 'directory', - '--file' => "$tempdir/format_directory", - ], - restore_cmd => [ - 'pg_restore', '-C', - '--format' => 'directory', - '--file' => "$tempdir/format_directory.sql", - "$tempdir/format_directory", - ], - like => qr/^\n\QCOPY public.format_directory (a, b, c) FROM stdin;/xm - }, - - format_tar => { - setup_sql => "CREATE TABLE format_tar(a int, b boolean, c text); - INSERT INTO format_tar VALUES (1, false, 'name3'), (2, true, 'name4');", - dump_cmd => [ - 'pg_dumpall', - '--format' => 'tar', - '--file' => "$tempdir/format_tar", - ], - restore_cmd => [ - 'pg_restore', '-C', - '--format' => 'tar', - '--file' => "$tempdir/format_tar.sql", - "$tempdir/format_tar", - ], - like => qr/^\n\QCOPY public.format_tar (a, b, c) FROM stdin;/xm - }, - - format_custom => { - setup_sql => "CREATE TABLE format_custom(a int, b boolean, c text); - INSERT INTO format_custom VALUES (1, false, 'name5'), (2, true, 'name6');", - dump_cmd => [ - 'pg_dumpall', - '--format' => 'custom', - '--file' => "$tempdir/format_custom", - ], - restore_cmd => [ - 'pg_restore', '-C', - '--format' => 'custom', - '--file' => "$tempdir/format_custom.sql", - "$tempdir/format_custom", - ], - like => qr/^ \n\QCOPY public.format_custom (a, b, c) FROM stdin;/xm - }, - - dump_globals_only => { - setup_sql => "CREATE TABLE format_dir(a int, b boolean, c text); - INSERT INTO format_dir VALUES (1, false, 'name5'), (2, true, 'name6');", - dump_cmd => [ - 'pg_dumpall', - '--format' => 'directory', - '--globals-only', - '--file' => "$tempdir/dump_globals_only", - ], - restore_cmd => [ - 'pg_restore', '-C', '--globals-only', - '--format' => 'directory', - '--file' => "$tempdir/dump_globals_only.sql", - "$tempdir/dump_globals_only", - ], - like => qr/ - ^\s*\QCREATE ROLE dumpall;\E\s*\n - /xm - },); - -# First execute the setup_sql -foreach my $run (sort keys %pgdumpall_runs) -{ - if ($pgdumpall_runs{$run}->{setup_sql}) - { - $node->safe_psql($run_db, $pgdumpall_runs{$run}->{setup_sql}); - } -} - -# Execute the tests -foreach my $run (sort keys %pgdumpall_runs) -{ - # Create a new target cluster to pg_restore each test case run so that we - # don't need to take care of the cleanup from the target cluster after each - # run. - my $target_node = PostgreSQL::Test::Cluster->new("target_$run"); - $target_node->init; - $target_node->start; - - # Dumpall from node cluster. - $node->command_ok(\@{ $pgdumpall_runs{$run}->{dump_cmd} }, - "$run: pg_dumpall runs"); - - # Restore the dump on "target_node" cluster. - my @restore_cmd = ( - @{ $pgdumpall_runs{$run}->{restore_cmd} }, - '--host', $target_node->host, '--port', $target_node->port); - - my ($stdout, $stderr) = run_command(\@restore_cmd); - - # pg_restore --file output file. 
- my $output_file = slurp_file("$tempdir/${run}.sql"); - - if ( !($pgdumpall_runs{$run}->{like}) - && !($pgdumpall_runs{$run}->{unlike})) - { - die "missing \"like\" or \"unlike\" in test \"$run\""; - } - - if ($pgdumpall_runs{$run}->{like}) - { - like($output_file, $pgdumpall_runs{$run}->{like}, "should dump $run"); - } - - if ($pgdumpall_runs{$run}->{unlike}) - { - unlike( - $output_file, - $pgdumpall_runs{$run}->{unlike}, - "should not dump $run"); - } -} - -# Some negative test case with dump of pg_dumpall and restore using pg_restore -# test case 1: when -C is not used in pg_restore with dump of pg_dumpall -$node->command_fails_like( - [ - 'pg_restore', - "$tempdir/format_custom", - '--format' => 'custom', - '--file' => "$tempdir/error_test.sql", - ], - qr/\Qpg_restore: error: option -C\/--create must be specified when restoring an archive created by pg_dumpall\E/, - 'When -C is not used in pg_restore with dump of pg_dumpall'); - -# test case 2: When --list option is used with dump of pg_dumpall -$node->command_fails_like( - [ - 'pg_restore', - "$tempdir/format_custom", '-C', - '--format' => 'custom', - '--list', - '--file' => "$tempdir/error_test.sql", - ], - qr/\Qpg_restore: error: option -l\/--list cannot be used when restoring an archive created by pg_dumpall\E/, - 'When --list is used in pg_restore with dump of pg_dumpall'); - -# test case 3: When non-exist database is given with -d option -$node->command_fails_like( - [ - 'pg_restore', - "$tempdir/format_custom", '-C', - '--format' => 'custom', - '-d' => 'dbpq', - ], - qr/\Qpg_restore: error: could not connect to database "dbpq"\E/, - 'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall' -); - -$node->stop('fast'); - -done_testing(); diff --git a/src/bin/pg_resetwal/pg_resetwal.c b/src/bin/pg_resetwal/pg_resetwal.c index e876f35f38ed4..7a4e4eb95706e 100644 --- a/src/bin/pg_resetwal/pg_resetwal.c +++ b/src/bin/pg_resetwal/pg_resetwal.c @@ -719,7 +719,7 @@ GuessControlValues(void) ControlFile.indexMaxKeys = INDEX_MAX_KEYS; ControlFile.toast_max_chunk_size = TOAST_MAX_CHUNK_SIZE; ControlFile.loblksize = LOBLKSIZE; - ControlFile.float8ByVal = FLOAT8PASSBYVAL; + ControlFile.float8ByVal = true; /* vestigial */ /* * XXX eventually, should try to grovel through old XLOG to develop more diff --git a/src/bin/pg_upgrade/Makefile b/src/bin/pg_upgrade/Makefile index f83d2b5d30955..69fcf593caec9 100644 --- a/src/bin/pg_upgrade/Makefile +++ b/src/bin/pg_upgrade/Makefile @@ -3,8 +3,7 @@ PGFILEDESC = "pg_upgrade - an in-place binary upgrade utility" PGAPPICON = win32 -# required for 003_upgrade_logical_replication_slots.pl -EXTRA_INSTALL=contrib/test_decoding +EXTRA_INSTALL=contrib/test_decoding src/test/modules/dummy_seclabel subdir = src/bin/pg_upgrade top_builddir = ../../.. 
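
The pg_upgrade changes below move tablespace bookkeeping from
os_info.old_tablespaces to per-cluster arrays and teach info.c about
in-place tablespaces.  A sketch of the path rule those hunks apply
(hypothetical helper name; the real code inlines this logic in info.c):

    /*
     * assumes postgres_fe.h; is_absolute_path(), psprintf() and
     * pg_strdup() are PostgreSQL frontend utilities
     */
    static char *
    sketch_resolve_tablespace_path(const char *pgdata, const char *spcloc)
    {
        /*
         * pg_tablespace_location() yields "" for the cluster default
         * tablespace and, for an in-place tablespace, a path relative to
         * the data directory; only an absolute path is usable as-is.
         */
        if (spcloc[0] != '\0' && !is_absolute_path(spcloc))
            return psprintf("%s/%s", pgdata, spcloc);   /* in-place */

        return pg_strdup(spcloc);
    }
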
diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c index 5e6403f07731b..1e17d64b3ec63 100644 --- a/src/bin/pg_upgrade/check.c +++ b/src/bin/pg_upgrade/check.c @@ -956,12 +956,12 @@ check_for_new_tablespace_dir(void) prep_status("Checking for new cluster tablespace directories"); - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) + for (tblnum = 0; tblnum < new_cluster.num_tablespaces; tblnum++) { struct stat statbuf; snprintf(new_tablespace_dir, MAXPGPATH, "%s%s", - os_info.old_tablespaces[tblnum], + new_cluster.tablespaces[tblnum], new_cluster.tablespace_suffix); if (stat(new_tablespace_dir, &statbuf) == 0 || errno != ENOENT) @@ -1013,17 +1013,17 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name) * directory. We can't create a proper old cluster delete script in that * case. */ - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) + for (tblnum = 0; tblnum < new_cluster.num_tablespaces; tblnum++) { - char old_tablespace_dir[MAXPGPATH]; + char new_tablespace_dir[MAXPGPATH]; - strlcpy(old_tablespace_dir, os_info.old_tablespaces[tblnum], MAXPGPATH); - canonicalize_path(old_tablespace_dir); - if (path_is_prefix_of_path(old_cluster_pgdata, old_tablespace_dir)) + strlcpy(new_tablespace_dir, new_cluster.tablespaces[tblnum], MAXPGPATH); + canonicalize_path(new_tablespace_dir); + if (path_is_prefix_of_path(old_cluster_pgdata, new_tablespace_dir)) { /* reproduce warning from CREATE TABLESPACE that is in the log */ pg_log(PG_WARNING, - "\nWARNING: user-defined tablespace locations should not be inside the data directory, i.e. %s", old_tablespace_dir); + "\nWARNING: user-defined tablespace locations should not be inside the data directory, i.e. %s", new_tablespace_dir); /* Unlink file in case it is left over from a previous run. 
*/ unlink(*deletion_script_file_name); @@ -1051,9 +1051,9 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name) /* delete old cluster's alternate tablespaces */ old_tblspc_suffix = pg_strdup(old_cluster.tablespace_suffix); fix_path_separator(old_tblspc_suffix); - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) + for (tblnum = 0; tblnum < old_cluster.num_tablespaces; tblnum++) fprintf(script, RMDIR_CMD " %c%s%s%c\n", PATH_QUOTE, - fix_path_separator(os_info.old_tablespaces[tblnum]), + fix_path_separator(old_cluster.tablespaces[tblnum]), old_tblspc_suffix, PATH_QUOTE); pfree(old_tblspc_suffix); @@ -1713,7 +1713,7 @@ check_for_not_null_inheritance(ClusterInfo *cluster) "If the parent column(s) are NOT NULL, then the child column must\n" "also be marked NOT NULL, or the upgrade will fail.\n" "You can fix this by running\n" - " ALTER TABLE tablename ALTER column SET NOT NULL;\n" + " ALTER TABLE tablename ALTER column SET NOT NULL;\n" "on each column listed in the file:\n" " %s", report.path); } @@ -1971,14 +1971,19 @@ check_for_unicode_update(ClusterInfo *cluster) " SELECT oper.oid, oper.oprcode, collid FROM pg_operator oper, collations " " WHERE oprname IN ('~', '~*', '!~', '!~*', '~~*', '!~~*') AND " " oprnamespace='pg_catalog'::regnamespace AND " - " oprright='text'::regtype " + " oprright='pg_catalog.text'::pg_catalog.regtype " "), " /* functions that use the input collation for character semantics */ "coll_functions(procid, collid) AS ( " " SELECT proc.oid, collid FROM pg_proc proc, collations " - " WHERE proname IN ('lower','initcap','upper') AND " - " pronamespace='pg_catalog'::regnamespace AND " - " proargtypes[0] = 'text'::regtype " + " WHERE pronamespace='pg_catalog'::regnamespace AND " + " ((proname IN ('lower','initcap','upper','casefold') AND " + " pronargs = 1 AND " + " proargtypes[0] = 'pg_catalog.text'::pg_catalog.regtype) OR " + " (proname = 'substring' AND pronargs = 2 AND " + " proargtypes[0] = 'pg_catalog.text'::pg_catalog.regtype AND " + " proargtypes[1] = 'pg_catalog.text'::pg_catalog.regtype) OR " + " proname LIKE 'regexp_%') " /* include functions behind the operators listed above */ " UNION " " SELECT procid, collid FROM coll_operators " diff --git a/src/bin/pg_upgrade/dump.c b/src/bin/pg_upgrade/dump.c index 183f08ce1e86f..55f6e7b4d9c3e 100644 --- a/src/bin/pg_upgrade/dump.c +++ b/src/bin/pg_upgrade/dump.c @@ -58,7 +58,7 @@ generate_old_dump(void) (user_opts.transfer_mode == TRANSFER_MODE_SWAP) ? "" : "--sequence-data", log_opts.verbose ? "--verbose" : "", - user_opts.do_statistics ? "--with-statistics" : "--no-statistics", + user_opts.do_statistics ? "--statistics" : "--no-statistics", log_opts.dumpdir, sql_file_name, escaped_connstr.data); diff --git a/src/bin/pg_upgrade/info.c b/src/bin/pg_upgrade/info.c index a437067cdca82..7ce0827016803 100644 --- a/src/bin/pg_upgrade/info.c +++ b/src/bin/pg_upgrade/info.c @@ -443,10 +443,26 @@ get_db_infos(ClusterInfo *cluster) for (tupnum = 0; tupnum < ntups; tupnum++) { + char *spcloc = PQgetvalue(res, tupnum, i_spclocation); + bool inplace = spcloc[0] && !is_absolute_path(spcloc); + dbinfos[tupnum].db_oid = atooid(PQgetvalue(res, tupnum, i_oid)); dbinfos[tupnum].db_name = pg_strdup(PQgetvalue(res, tupnum, i_datname)); - snprintf(dbinfos[tupnum].db_tablespace, sizeof(dbinfos[tupnum].db_tablespace), "%s", - PQgetvalue(res, tupnum, i_spclocation)); + + /* + * The tablespace location might be "", meaning the cluster default + * location, i.e. pg_default or pg_global. 
For in-place tablespaces, + * pg_tablespace_location() returns a path relative to the data + * directory. + */ + if (inplace) + snprintf(dbinfos[tupnum].db_tablespace, + sizeof(dbinfos[tupnum].db_tablespace), + "%s/%s", cluster->pgdata, spcloc); + else + snprintf(dbinfos[tupnum].db_tablespace, + sizeof(dbinfos[tupnum].db_tablespace), + "%s", spcloc); } PQclear(res); @@ -482,7 +498,10 @@ get_rel_infos_query(void) * * pg_largeobject contains user data that does not appear in pg_dump * output, so we have to copy that system table. It's easiest to do that - * by treating it as a user table. + * by treating it as a user table. We can do the same for + * pg_largeobject_metadata for upgrades from v16 and newer. pg_upgrade + * can't copy/link the files from older versions because aclitem (needed + * by pg_largeobject_metadata.lomacl) changed its storage format in v16. */ appendPQExpBuffer(&query, "WITH regular_heap (reloid, indtable, toastheap) AS ( " @@ -498,10 +517,12 @@ get_rel_infos_query(void) " 'binary_upgrade', 'pg_toast') AND " " c.oid >= %u::pg_catalog.oid) OR " " (n.nspname = 'pg_catalog' AND " - " relname IN ('pg_largeobject') ))), ", + " relname IN ('pg_largeobject'%s) ))), ", (user_opts.transfer_mode == TRANSFER_MODE_SWAP) ? ", " CppAsString2(RELKIND_SEQUENCE) : "", - FirstNormalObjectId); + FirstNormalObjectId, + (GET_MAJOR_VERSION(old_cluster.major_version) >= 1600) ? + ", 'pg_largeobject_metadata'" : ""); /* * Add a CTE that collects OIDs of toast tables belonging to the tables @@ -616,11 +637,21 @@ process_rel_infos(DbInfo *dbinfo, PGresult *res, void *arg) /* Is the tablespace oid non-default? */ if (atooid(PQgetvalue(res, relnum, i_reltablespace)) != 0) { + char *spcloc = PQgetvalue(res, relnum, i_spclocation); + bool inplace = spcloc[0] && !is_absolute_path(spcloc); + /* * The tablespace location might be "", meaning the cluster - * default location, i.e. pg_default or pg_global. + * default location, i.e. pg_default or pg_global. For in-place + * tablespaces, pg_tablespace_location() returns a path relative + * to the data directory. */ - tablespace = PQgetvalue(res, relnum, i_spclocation); + if (inplace) + tablespace = psprintf("%s/%s", + os_info.running_cluster->pgdata, + spcloc); + else + tablespace = spcloc; /* Can we reuse the previous string allocation? */ if (last_tablespace && strcmp(tablespace, last_tablespace) == 0) @@ -630,6 +661,10 @@ process_rel_infos(DbInfo *dbinfo, PGresult *res, void *arg) last_tablespace = curr->tablespace = pg_strdup(tablespace); curr->tblsp_alloc = true; } + + /* Free palloc'd string for in-place tablespaces. */ + if (inplace) + pfree(tablespace); } else /* A zero reltablespace oid indicates the database tablespace. 
*/ diff --git a/src/bin/pg_upgrade/parallel.c b/src/bin/pg_upgrade/parallel.c index 056aa2edaee3f..6d7941844a7c8 100644 --- a/src/bin/pg_upgrade/parallel.c +++ b/src/bin/pg_upgrade/parallel.c @@ -40,6 +40,7 @@ typedef struct char *old_pgdata; char *new_pgdata; char *old_tablespace; + char *new_tablespace; } transfer_thread_arg; static exec_thread_arg **exec_thread_args; @@ -171,7 +172,7 @@ win32_exec_prog(exec_thread_arg *args) void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata, - char *old_tablespace) + char *old_tablespace, char *new_tablespace) { #ifndef WIN32 pid_t child; @@ -181,7 +182,7 @@ parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, #endif if (user_opts.jobs <= 1) - transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, NULL); + transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, NULL, NULL); else { /* parallel */ @@ -225,7 +226,7 @@ parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, if (child == 0) { transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, - old_tablespace); + old_tablespace, new_tablespace); /* if we take another exit path, it will be non-zero */ /* use _exit to skip atexit() functions */ _exit(0); @@ -246,6 +247,7 @@ parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, new_arg->new_pgdata = pg_strdup(new_pgdata); pg_free(new_arg->old_tablespace); new_arg->old_tablespace = old_tablespace ? pg_strdup(old_tablespace) : NULL; + new_arg->new_tablespace = new_tablespace ? pg_strdup(new_tablespace) : NULL; child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_transfer_all_new_dbs, new_arg, 0, NULL); @@ -263,7 +265,8 @@ DWORD win32_transfer_all_new_dbs(transfer_thread_arg *args) { transfer_all_new_dbs(args->old_db_arr, args->new_db_arr, args->old_pgdata, - args->new_pgdata, args->old_tablespace); + args->new_pgdata, args->old_tablespace, + args->new_tablespace); /* terminates thread */ return 0; diff --git a/src/bin/pg_upgrade/pg_upgrade.c b/src/bin/pg_upgrade/pg_upgrade.c index d5cd5bf0b3a6b..490e98fa26f2a 100644 --- a/src/bin/pg_upgrade/pg_upgrade.c +++ b/src/bin/pg_upgrade/pg_upgrade.c @@ -29,9 +29,9 @@ * We control all assignments of pg_enum.oid because these oids are stored * in user tables as enum values. * - * We control all assignments of pg_authid.oid for historical reasons (the - * oids used to be stored in pg_largeobject_metadata, which is now copied via - * SQL commands), that might change at some point in the future. + * We control all assignments of pg_authid.oid because the oids are stored in + * pg_largeobject_metadata, which is copied via file transfer for upgrades + * from v16 and newer. * * We control all assignments of pg_database.oid because we want the directory * names to match between the old and new cluster. 
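The parallel.c changes above thread a new_tablespace argument alongside old_tablespace so that each worker handles one (old, new) tablespace pair. Here is a simplified sketch of that fork-per-pair dispatch pattern; the array names and the transfer_one helper are illustrative stand-ins, not pg_upgrade's API.

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Illustrative worker: process everything stored in one tablespace pair. */
static void
transfer_one(const char *old_tblspc, const char *new_tblspc)
{
	printf("child %d: %s -> %s\n", (int) getpid(), old_tblspc, new_tblspc);
}

int
main(void)
{
	/*
	 * Hypothetical parallel arrays, mirroring old_cluster.tablespaces and
	 * new_cluster.tablespaces in the patch.
	 */
	const char *old_ts[] = {"/old/ts1", "/old/ts2"};
	const char *new_ts[] = {"/new/ts1", "/new/ts2"};

	for (int i = 0; i < 2; i++)
	{
		pid_t		child = fork();

		if (child == 0)
		{
			transfer_one(old_ts[i], new_ts[i]);
			_exit(0);			/* skip atexit() callbacks, as the patch notes */
		}
		else if (child < 0)
		{
			perror("fork");
			return 1;
		}
	}

	/* Reap all children, like pg_upgrade's reap_child(true) loop. */
	while (wait(NULL) > 0)
		;
	return 0;
}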
diff --git a/src/bin/pg_upgrade/pg_upgrade.h b/src/bin/pg_upgrade/pg_upgrade.h index e9401430e697f..0ef47be0dc199 100644 --- a/src/bin/pg_upgrade/pg_upgrade.h +++ b/src/bin/pg_upgrade/pg_upgrade.h @@ -300,6 +300,8 @@ typedef struct uint32 major_version; /* PG_VERSION of cluster */ char major_version_str[64]; /* string PG_VERSION of cluster */ uint32 bin_version; /* version returned from pg_ctl */ + char **tablespaces; /* tablespace directories */ + int num_tablespaces; const char *tablespace_suffix; /* directory specification */ int nsubs; /* number of subscriptions */ bool sub_retain_dead_tuples; /* whether a subscription enables @@ -356,8 +358,6 @@ typedef struct const char *progname; /* complete pathname for this program */ char *user; /* username for clusters */ bool user_specified; /* user specified on command-line */ - char **old_tablespaces; /* tablespaces */ - int num_old_tablespaces; LibraryInfo *libraries; /* loadable libraries */ int num_libraries; ClusterInfo *running_cluster; @@ -457,7 +457,7 @@ void transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata); void transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata, - char *old_tablespace); + char *old_tablespace, char *new_tablespace); /* tablespace.c */ @@ -505,7 +505,7 @@ void parallel_exec_prog(const char *log_file, const char *opt_log_file, const char *fmt,...) pg_attribute_printf(3, 4); void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata, - char *old_tablespace); + char *old_tablespace, char *new_tablespace); bool reap_child(bool wait_for_child); /* task.c */ diff --git a/src/bin/pg_upgrade/relfilenumber.c b/src/bin/pg_upgrade/relfilenumber.c index 8d8e816a01fa4..38c17ceabf222 100644 --- a/src/bin/pg_upgrade/relfilenumber.c +++ b/src/bin/pg_upgrade/relfilenumber.c @@ -17,7 +17,7 @@ #include "common/logging.h" #include "pg_upgrade.h" -static void transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace); +static void transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace, char *new_tablespace); static void transfer_relfile(FileNameMap *map, const char *type_suffix, bool vm_must_add_frozenbit); /* @@ -136,21 +136,22 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, */ if (user_opts.jobs <= 1) parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, - new_pgdata, NULL); + new_pgdata, NULL, NULL); else { int tblnum; /* transfer default tablespace */ parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, - new_pgdata, old_pgdata); + new_pgdata, old_pgdata, new_pgdata); - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) + for (tblnum = 0; tblnum < old_cluster.num_tablespaces; tblnum++) parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, - os_info.old_tablespaces[tblnum]); + old_cluster.tablespaces[tblnum], + new_cluster.tablespaces[tblnum]); /* reap all children */ while (reap_child(true) == true) ; @@ -169,7 +170,8 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, */ void transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, - char *old_pgdata, char *new_pgdata, char *old_tablespace) + char *old_pgdata, char *new_pgdata, + char *old_tablespace, char *new_tablespace) { int old_dbnum, new_dbnum; @@ -204,7 +206,7 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, new_pgdata); if 
(n_maps) { - transfer_single_new_db(mappings, n_maps, old_tablespace); + transfer_single_new_db(mappings, n_maps, old_tablespace, new_tablespace); } /* We allocate something even for n_maps == 0 */ pg_free(mappings); @@ -234,10 +236,10 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, * moved_db_dir: Destination for the pg_restore-generated database directory. */ static bool -prepare_for_swap(const char *old_tablespace, Oid db_oid, - char *old_catalog_dir, char *new_db_dir, char *moved_db_dir) +prepare_for_swap(const char *old_tablespace, const char *new_tablespace, + Oid db_oid, char *old_catalog_dir, char *new_db_dir, + char *moved_db_dir) { - const char *new_tablespace; const char *old_tblspc_suffix; const char *new_tblspc_suffix; char old_tblspc[MAXPGPATH]; @@ -247,24 +249,14 @@ prepare_for_swap(const char *old_tablespace, Oid db_oid, struct stat st; if (strcmp(old_tablespace, old_cluster.pgdata) == 0) - { - new_tablespace = new_cluster.pgdata; - new_tblspc_suffix = "/base"; old_tblspc_suffix = "/base"; - } else - { - /* - * XXX: The below line is a hack to deal with the fact that we - * presently don't have an easy way to find the corresponding new - * tablespace's path. This will need to be fixed if/when we add - * pg_upgrade support for in-place tablespaces. - */ - new_tablespace = old_tablespace; + old_tblspc_suffix = old_cluster.tablespace_suffix; + if (strcmp(new_tablespace, new_cluster.pgdata) == 0) + new_tblspc_suffix = "/base"; + else new_tblspc_suffix = new_cluster.tablespace_suffix; - old_tblspc_suffix = old_cluster.tablespace_suffix; - } /* Old and new cluster paths. */ snprintf(old_tblspc, sizeof(old_tblspc), "%s%s", old_tablespace, old_tblspc_suffix); @@ -450,7 +442,7 @@ swap_catalog_files(FileNameMap *maps, int size, const char *old_catalog_dir, * during pg_restore. */ static void -do_swap(FileNameMap *maps, int size, char *old_tablespace) +do_swap(FileNameMap *maps, int size, char *old_tablespace, char *new_tablespace) { char old_catalog_dir[MAXPGPATH]; char new_db_dir[MAXPGPATH]; @@ -470,21 +462,23 @@ do_swap(FileNameMap *maps, int size, char *old_tablespace) */ if (old_tablespace) { - if (prepare_for_swap(old_tablespace, maps[0].db_oid, + if (prepare_for_swap(old_tablespace, new_tablespace, maps[0].db_oid, old_catalog_dir, new_db_dir, moved_db_dir)) swap_catalog_files(maps, size, old_catalog_dir, new_db_dir, moved_db_dir); } else { - if (prepare_for_swap(old_cluster.pgdata, maps[0].db_oid, + if (prepare_for_swap(old_cluster.pgdata, new_cluster.pgdata, maps[0].db_oid, old_catalog_dir, new_db_dir, moved_db_dir)) swap_catalog_files(maps, size, old_catalog_dir, new_db_dir, moved_db_dir); - for (int tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) + for (int tblnum = 0; tblnum < old_cluster.num_tablespaces; tblnum++) { - if (prepare_for_swap(os_info.old_tablespaces[tblnum], maps[0].db_oid, + if (prepare_for_swap(old_cluster.tablespaces[tblnum], + new_cluster.tablespaces[tblnum], + maps[0].db_oid, old_catalog_dir, new_db_dir, moved_db_dir)) swap_catalog_files(maps, size, old_catalog_dir, new_db_dir, moved_db_dir); @@ -498,7 +492,8 @@ do_swap(FileNameMap *maps, int size, char *old_tablespace) * create links for mappings stored in "maps" array. 
*/ static void -transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace) +transfer_single_new_db(FileNameMap *maps, int size, + char *old_tablespace, char *new_tablespace) { int mapnum; bool vm_must_add_frozenbit = false; @@ -520,7 +515,7 @@ transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace) */ Assert(!vm_must_add_frozenbit); - do_swap(maps, size, old_tablespace); + do_swap(maps, size, old_tablespace, new_tablespace); return; } diff --git a/src/bin/pg_upgrade/t/002_pg_upgrade.pl b/src/bin/pg_upgrade/t/002_pg_upgrade.pl index 7d82593879d57..823f41e754ced 100644 --- a/src/bin/pg_upgrade/t/002_pg_upgrade.pl +++ b/src/bin/pg_upgrade/t/002_pg_upgrade.pl @@ -86,6 +86,7 @@ sub get_dump_for_comparison $node->run_log( [ 'pg_dump', '--no-sync', + '--restrict-key' => 'test', '-d' => $node->connstr($db), '-f' => $dumpfile ]); @@ -375,6 +376,9 @@ sub get_dump_for_comparison { my $dstnode = PostgreSQL::Test::Cluster->new('dst_node'); + skip "regress_dump_restore not enabled in PG_TEST_EXTRA" + if (!$ENV{PG_TEST_EXTRA} + || $ENV{PG_TEST_EXTRA} !~ /\bregress_dump_restore\b/); skip "different Postgres versions" if ($oldnode->pg_version != $dstnode->pg_version); skip "source node not using default install" @@ -424,6 +428,7 @@ sub get_dump_for_comparison # that we need to use pg_dumpall from the new node here. my @dump_command = ( 'pg_dumpall', '--no-sync', + '--restrict-key' => 'test', '--dbname' => $oldnode->connstr('postgres'), '--file' => $dump1_file); # --extra-float-digits is needed when upgrading from a version older than 11. @@ -621,6 +626,7 @@ sub get_dump_for_comparison # Second dump from the upgraded instance. @dump_command = ( 'pg_dumpall', '--no-sync', + '--restrict-key' => 'test', '--dbname' => $newnode->connstr('postgres'), '--file' => $dump2_file); # --extra-float-digits is needed when upgrading from a version older than 11. diff --git a/src/bin/pg_upgrade/t/006_transfer_modes.pl b/src/bin/pg_upgrade/t/006_transfer_modes.pl index 58fe8a8c7dcea..2f68f0b56aa61 100644 --- a/src/bin/pg_upgrade/t/006_transfer_modes.pl +++ b/src/bin/pg_upgrade/t/006_transfer_modes.pl @@ -38,6 +38,29 @@ sub test_mode } $new->init(); + # allow_in_place_tablespaces is available as far back as v10. + if ($old->pg_version >= 10) + { + $new->append_conf('postgresql.conf', "allow_in_place_tablespaces = true"); + $old->append_conf('postgresql.conf', "allow_in_place_tablespaces = true"); + } + + # We can only test security labels if both the old and new installations + # have dummy_seclabel. + my $test_seclabel = 1; + $old->start; + if (!$old->check_extension('dummy_seclabel')) + { + $test_seclabel = 0; + } + $old->stop; + $new->start; + if (!$new->check_extension('dummy_seclabel')) + { + $test_seclabel = 0; + } + $new->stop; + # Create a small variety of simple test objects on the old cluster. We'll # check that these reach the new version after upgrading. $old->start; @@ -49,8 +72,7 @@ sub test_mode $old->safe_psql('testdb1', "VACUUM FULL test2"); $old->safe_psql('testdb1', "CREATE SEQUENCE testseq START 5432"); - # For cross-version tests, we can also check that pg_upgrade handles - # tablespaces. + # If an old installation is provided, we can test non-in-place tablespaces. if (defined($ENV{oldinstall})) { my $tblspc = PostgreSQL::Test::Utils::tempdir_short(); @@ -64,6 +86,42 @@ sub test_mode $old->safe_psql('testdb2', "CREATE TABLE test4 AS SELECT generate_series(400, 502)"); } + + # If the old cluster is >= v10, we can test in-place tablespaces. 
+ if ($old->pg_version >= 10) + { + $old->safe_psql('postgres', + "CREATE TABLESPACE inplc_tblspc LOCATION ''"); + $old->safe_psql('postgres', + "CREATE DATABASE testdb3 TABLESPACE inplc_tblspc"); + $old->safe_psql('postgres', + "CREATE TABLE test5 TABLESPACE inplc_tblspc AS SELECT generate_series(503, 606)"); + $old->safe_psql('testdb3', + "CREATE TABLE test6 AS SELECT generate_series(607, 711)"); + } + + # While we are here, test handling of large objects. + $old->safe_psql('postgres', q| + CREATE ROLE regress_lo_1; + CREATE ROLE regress_lo_2; + + SELECT lo_from_bytea(4532, '\xffffff00'); + COMMENT ON LARGE OBJECT 4532 IS 'test'; + + SELECT lo_from_bytea(4533, '\x0f0f0f0f'); + ALTER LARGE OBJECT 4533 OWNER TO regress_lo_1; + GRANT SELECT ON LARGE OBJECT 4533 TO regress_lo_2; + |); + + if ($test_seclabel) + { + $old->safe_psql('postgres', q| + CREATE EXTENSION dummy_seclabel; + + SELECT lo_from_bytea(4534, '\x00ffffff'); + SECURITY LABEL ON LARGE OBJECT 4534 IS 'classified'; + |); + } $old->stop; my $result = command_ok_or_fails_like( @@ -94,8 +152,7 @@ sub test_mode $result = $new->safe_psql('testdb1', "SELECT nextval('testseq')"); is($result, '5432', "sequence data after pg_upgrade $mode"); - # For cross-version tests, we should have some objects in a non-default - # tablespace. + # Tests for non-in-place tablespaces. if (defined($ENV{oldinstall})) { $result = @@ -105,6 +162,43 @@ sub test_mode $new->safe_psql('testdb2', "SELECT COUNT(*) FROM test4"); is($result, '103', "test4 data after pg_upgrade $mode"); } + + # Tests for in-place tablespaces. + if ($old->pg_version >= 10) + { + $result = $new->safe_psql('postgres', "SELECT COUNT(*) FROM test5"); + is($result, '104', "test5 data after pg_upgrade $mode"); + $result = $new->safe_psql('testdb3', "SELECT COUNT(*) FROM test6"); + is($result, '105', "test6 data after pg_upgrade $mode"); + } + + # Tests for large objects + $result = $new->safe_psql('postgres', "SELECT lo_get(4532)"); + is($result, '\xffffff00', "LO contents after upgrade"); + $result = $new->safe_psql('postgres', + "SELECT obj_description(4532, 'pg_largeobject')"); + is($result, 'test', "comment on LO after pg_upgrade"); + + $result = $new->safe_psql('postgres', "SELECT lo_get(4533)"); + is($result, '\x0f0f0f0f', "LO contents after upgrade"); + $result = $new->safe_psql('postgres', + "SELECT lomowner::regrole FROM pg_largeobject_metadata WHERE oid = 4533"); + is($result, 'regress_lo_1', "LO owner after upgrade"); + $result = $new->safe_psql('postgres', + "SELECT lomacl FROM pg_largeobject_metadata WHERE oid = 4533"); + is($result, '{regress_lo_1=rw/regress_lo_1,regress_lo_2=r/regress_lo_1}', + "LO ACL after upgrade"); + + if ($test_seclabel) + { + $result = $new->safe_psql('postgres', "SELECT lo_get(4534)"); + is($result, '\x00ffffff', "LO contents after upgrade"); + $result = $new->safe_psql('postgres', q| + SELECT label FROM pg_seclabel WHERE objoid = 4534 + AND classoid = 'pg_largeobject'::regclass + |); + is($result, 'classified', "seclabel on LO after pg_upgrade"); + } $new->stop; } diff --git a/src/bin/pg_upgrade/tablespace.c b/src/bin/pg_upgrade/tablespace.c index 3520a75ba317d..151d74e17349b 100644 --- a/src/bin/pg_upgrade/tablespace.c +++ b/src/bin/pg_upgrade/tablespace.c @@ -23,10 +23,20 @@ init_tablespaces(void) set_tablespace_directory_suffix(&old_cluster); set_tablespace_directory_suffix(&new_cluster); - if (os_info.num_old_tablespaces > 0 && + if (old_cluster.num_tablespaces > 0 && strcmp(old_cluster.tablespace_suffix, new_cluster.tablespace_suffix) == 0) - 
pg_fatal("Cannot upgrade to/from the same system catalog version when\n" - "using tablespaces."); + { + for (int i = 0; i < old_cluster.num_tablespaces; i++) + { + /* + * In-place tablespaces are okay for same-version upgrades because + * their paths will differ between clusters. + */ + if (strcmp(old_cluster.tablespaces[i], new_cluster.tablespaces[i]) == 0) + pg_fatal("Cannot upgrade to/from the same system catalog version when\n" + "using tablespaces."); + } + } } @@ -53,19 +63,48 @@ get_tablespace_paths(void) res = executeQueryOrDie(conn, "%s", query); - if ((os_info.num_old_tablespaces = PQntuples(res)) != 0) - os_info.old_tablespaces = - (char **) pg_malloc(os_info.num_old_tablespaces * sizeof(char *)); + old_cluster.num_tablespaces = PQntuples(res); + new_cluster.num_tablespaces = PQntuples(res); + + if (PQntuples(res) != 0) + { + old_cluster.tablespaces = + (char **) pg_malloc(old_cluster.num_tablespaces * sizeof(char *)); + new_cluster.tablespaces = + (char **) pg_malloc(new_cluster.num_tablespaces * sizeof(char *)); + } else - os_info.old_tablespaces = NULL; + { + old_cluster.tablespaces = NULL; + new_cluster.tablespaces = NULL; + } i_spclocation = PQfnumber(res, "spclocation"); - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) + for (tblnum = 0; tblnum < old_cluster.num_tablespaces; tblnum++) { struct stat statBuf; + char *spcloc = PQgetvalue(res, tblnum, i_spclocation); - os_info.old_tablespaces[tblnum] = pg_strdup(PQgetvalue(res, tblnum, i_spclocation)); + /* + * For now, we do not expect non-in-place tablespaces to move during + * upgrade. If that changes, it will likely become necessary to run + * the above query on the new cluster, too. + * + * pg_tablespace_location() returns absolute paths for non-in-place + * tablespaces and relative paths for in-place ones, so we use + * is_absolute_path() to distinguish between them. + */ + if (is_absolute_path(PQgetvalue(res, tblnum, i_spclocation))) + { + old_cluster.tablespaces[tblnum] = pg_strdup(spcloc); + new_cluster.tablespaces[tblnum] = old_cluster.tablespaces[tblnum]; + } + else + { + old_cluster.tablespaces[tblnum] = psprintf("%s/%s", old_cluster.pgdata, spcloc); + new_cluster.tablespaces[tblnum] = psprintf("%s/%s", new_cluster.pgdata, spcloc); + } /* * Check that the tablespace path exists and is a directory. @@ -76,21 +115,21 @@ get_tablespace_paths(void) * that contains user tablespaces is moved as part of pg_upgrade * preparation and the symbolic links are not updated. 
*/ - if (stat(os_info.old_tablespaces[tblnum], &statBuf) != 0) + if (stat(old_cluster.tablespaces[tblnum], &statBuf) != 0) { if (errno == ENOENT) report_status(PG_FATAL, "tablespace directory \"%s\" does not exist", - os_info.old_tablespaces[tblnum]); + old_cluster.tablespaces[tblnum]); else report_status(PG_FATAL, "could not stat tablespace directory \"%s\": %m", - os_info.old_tablespaces[tblnum]); + old_cluster.tablespaces[tblnum]); } if (!S_ISDIR(statBuf.st_mode)) report_status(PG_FATAL, "tablespace path \"%s\" is not a directory", - os_info.old_tablespaces[tblnum]); + old_cluster.tablespaces[tblnum]); } PQclear(res); diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c index 497a936c141f3..125f3c7bbbe5b 100644 --- a/src/bin/pgbench/pgbench.c +++ b/src/bin/pgbench/pgbench.c @@ -3495,6 +3495,8 @@ doRetry(CState *st, pg_time_usec_t *now) static int discardUntilSync(CState *st) { + bool received_sync = false; + /* send a sync */ if (!PQpipelineSync(st->con)) { @@ -3509,10 +3511,21 @@ discardUntilSync(CState *st) PGresult *res = PQgetResult(st->con); if (PQresultStatus(res) == PGRES_PIPELINE_SYNC) + received_sync = true; + else if (received_sync) { - PQclear(res); - res = PQgetResult(st->con); + /* + * PGRES_PIPELINE_SYNC must be followed by another + * PGRES_PIPELINE_SYNC or by NULL; anything else is an + * assertion failure. + */ Assert(res == NULL); + + /* + * Reset ongoing sync count to 0 since all PGRES_PIPELINE_SYNC + * results have been discarded. + */ + st->num_syncs = 0; + PQclear(res); break; } PQclear(res); diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c index 0e00d73487c33..cc602087db246 100644 --- a/src/bin/psql/command.c +++ b/src/bin/psql/command.c @@ -130,6 +130,8 @@ static backslashResult exec_command_pset(PsqlScanState scan_state, bool active_b static backslashResult exec_command_quit(PsqlScanState scan_state, bool active_branch); static backslashResult exec_command_reset(PsqlScanState scan_state, bool active_branch, PQExpBuffer query_buf); +static backslashResult exec_command_restrict(PsqlScanState scan_state, bool active_branch, + const char *cmd); static backslashResult exec_command_s(PsqlScanState scan_state, bool active_branch); static backslashResult exec_command_sendpipeline(PsqlScanState scan_state, bool active_branch); static backslashResult exec_command_set(PsqlScanState scan_state, bool active_branch); @@ -142,6 +144,8 @@ static backslashResult exec_command_syncpipeline(PsqlScanState scan_state, bool static backslashResult exec_command_t(PsqlScanState scan_state, bool active_branch); static backslashResult exec_command_T(PsqlScanState scan_state, bool active_branch); static backslashResult exec_command_timing(PsqlScanState scan_state, bool active_branch); +static backslashResult exec_command_unrestrict(PsqlScanState scan_state, bool active_branch, + const char *cmd); static backslashResult exec_command_unset(PsqlScanState scan_state, bool active_branch, const char *cmd); static backslashResult exec_command_write(PsqlScanState scan_state, bool active_branch, @@ -192,6 +196,8 @@ static char *pset_value_string(const char *param, printQueryOpt *popt); static void checkWin32Codepage(void); #endif +static bool restricted; +static char *restrict_key; /*---------- @@ -237,8 +243,19 @@ HandleSlashCmds(PsqlScanState scan_state, /* Parse off the command name */ cmd = psql_scan_slash_command(scan_state); - /* And try to execute it */ - status = exec_command(cmd, scan_state, cstack, query_buf, previous_buf); + /* + * And try to execute it. 
+ * + * If we are in "restricted" mode, the only allowable backslash command is + * \unrestrict (to exit restricted mode). + */ + if (restricted && strcmp(cmd, "unrestrict") != 0) + { + pg_log_error("backslash commands are restricted; only \\unrestrict is allowed"); + status = PSQL_CMD_ERROR; + } + else + status = exec_command(cmd, scan_state, cstack, query_buf, previous_buf); if (status == PSQL_CMD_UNKNOWN) { @@ -416,6 +433,8 @@ exec_command(const char *cmd, status = exec_command_quit(scan_state, active_branch); else if (strcmp(cmd, "r") == 0 || strcmp(cmd, "reset") == 0) status = exec_command_reset(scan_state, active_branch, query_buf); + else if (strcmp(cmd, "restrict") == 0) + status = exec_command_restrict(scan_state, active_branch, cmd); else if (strcmp(cmd, "s") == 0) status = exec_command_s(scan_state, active_branch); else if (strcmp(cmd, "sendpipeline") == 0) @@ -438,6 +457,8 @@ exec_command(const char *cmd, status = exec_command_T(scan_state, active_branch); else if (strcmp(cmd, "timing") == 0) status = exec_command_timing(scan_state, active_branch); + else if (strcmp(cmd, "unrestrict") == 0) + status = exec_command_unrestrict(scan_state, active_branch, cmd); else if (strcmp(cmd, "unset") == 0) status = exec_command_unset(scan_state, active_branch, cmd); else if (strcmp(cmd, "w") == 0 || strcmp(cmd, "write") == 0) @@ -2754,6 +2775,35 @@ exec_command_reset(PsqlScanState scan_state, bool active_branch, return PSQL_CMD_SKIP_LINE; } +/* + * \restrict -- enter "restricted mode" with the provided key + */ +static backslashResult +exec_command_restrict(PsqlScanState scan_state, bool active_branch, + const char *cmd) +{ + if (active_branch) + { + char *opt; + + Assert(!restricted); + + opt = psql_scan_slash_option(scan_state, OT_NORMAL, NULL, true); + if (opt == NULL || opt[0] == '\0') + { + pg_log_error("\\%s: missing required argument", cmd); + return PSQL_CMD_ERROR; + } + + restrict_key = pstrdup(opt); + restricted = true; + } + else + ignore_slash_options(scan_state); + + return PSQL_CMD_SKIP_LINE; +} + /* * \s -- save history in a file or show it on the screen */ @@ -3135,6 +3185,46 @@ exec_command_timing(PsqlScanState scan_state, bool active_branch) return success ? 
PSQL_CMD_SKIP_LINE : PSQL_CMD_ERROR; } +/* + * \unrestrict -- exit "restricted mode" if provided key matches + */ +static backslashResult +exec_command_unrestrict(PsqlScanState scan_state, bool active_branch, + const char *cmd) +{ + if (active_branch) + { + char *opt; + + opt = psql_scan_slash_option(scan_state, OT_NORMAL, NULL, true); + if (opt == NULL || opt[0] == '\0') + { + pg_log_error("\\%s: missing required argument", cmd); + return PSQL_CMD_ERROR; + } + + if (!restricted) + { + pg_log_error("\\%s: not currently in restricted mode", cmd); + return PSQL_CMD_ERROR; + } + else if (strcmp(opt, restrict_key) == 0) + { + pfree(restrict_key); + restricted = false; + } + else + { + pg_log_error("\\%s: wrong key", cmd); + return PSQL_CMD_ERROR; + } + } + else + ignore_slash_options(scan_state); + + return PSQL_CMD_SKIP_LINE; +} + /* * \unset -- unset variable */ diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c index 7a06af48842d8..4aa793d7de758 100644 --- a/src/bin/psql/describe.c +++ b/src/bin/psql/describe.c @@ -6746,7 +6746,7 @@ describeSubscriptions(const char *pattern, bool verbose) printQueryOpt myopt = pset.popt; static const bool translate_columns[] = {false, false, false, false, false, false, false, false, false, false, false, false, false, false, - false, false}; + false, false, false, false}; if (pset.sversion < 100000) { @@ -6815,10 +6815,20 @@ describeSubscriptions(const char *pattern, bool verbose) ", subfailover AS \"%s\"\n", gettext_noop("Failover")); if (pset.sversion >= 190000) + { appendPQExpBuffer(&buf, ", subretaindeadtuples AS \"%s\"\n", gettext_noop("Retain dead tuples")); + appendPQExpBuffer(&buf, + ", submaxretention AS \"%s\"\n", + gettext_noop("Max retention duration")); + + appendPQExpBuffer(&buf, + ", subretentionactive AS \"%s\"\n", + gettext_noop("Retention active")); + } + appendPQExpBuffer(&buf, ", subsynccommit AS \"%s\"\n" ", subconninfo AS \"%s\"\n", diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c index 8c62729a0d124..ed00c36695e85 100644 --- a/src/bin/psql/help.c +++ b/src/bin/psql/help.c @@ -171,6 +171,10 @@ slashUsage(unsigned short int pager) HELP0(" \\gset [PREFIX] execute query and store result in psql variables\n"); HELP0(" \\gx [(OPTIONS)] [FILE] as \\g, but forces expanded output mode\n"); HELP0(" \\q quit psql\n"); + HELP0(" \\restrict RESTRICT_KEY\n" + " enter restricted mode with provided key\n"); + HELP0(" \\unrestrict RESTRICT_KEY\n" + " exit restricted mode if key matches\n"); HELP0(" \\watch [[i=]SEC] [c=N] [m=MIN]\n" " execute query every SEC seconds, up to N times,\n" " stop if less than MIN rows are returned\n"); diff --git a/src/bin/psql/t/001_basic.pl b/src/bin/psql/t/001_basic.pl index f42c3961e09f7..cf07a9dbd5ed6 100644 --- a/src/bin/psql/t/001_basic.pl +++ b/src/bin/psql/t/001_basic.pl @@ -530,4 +530,11 @@ sub psql_fails_like qr/COPY in a pipeline is not supported, aborting connection/, '\copy to in pipeline: fails'); +psql_fails_like( + $node, + qq{\\restrict test +\\! 
should_fail}, + qr/backslash commands are restricted; only \\unrestrict is allowed/, + 'meta-command in restrict mode fails'); + done_testing(); diff --git a/src/bin/psql/tab-complete.in.c b/src/bin/psql/tab-complete.in.c index dbc586c5bc370..6b20a4404b213 100644 --- a/src/bin/psql/tab-complete.in.c +++ b/src/bin/psql/tab-complete.in.c @@ -1010,7 +1010,7 @@ static const SchemaQuery Query_for_trigger_of_table = { #define Query_for_list_of_database_vars \ "SELECT conf FROM ("\ -" SELECT setdatabase, pg_catalog.split_part(unnest(setconfig),'=',1) conf"\ +" SELECT setdatabase, pg_catalog.split_part(pg_catalog.unnest(setconfig),'=',1) conf"\ " FROM pg_db_role_setting "\ " ) s, pg_database d "\ " WHERE s.setdatabase = d.oid "\ @@ -1086,9 +1086,12 @@ Keywords_for_list_of_owner_roles, "PUBLIC" " WHERE usename LIKE '%s'" #define Query_for_list_of_user_vars \ -" SELECT pg_catalog.split_part(pg_catalog.unnest(rolconfig),'=',1) "\ -" FROM pg_catalog.pg_roles "\ -" WHERE rolname LIKE '%s'" +"SELECT conf FROM ("\ +" SELECT rolname, pg_catalog.split_part(pg_catalog.unnest(rolconfig),'=',1) conf"\ +" FROM pg_catalog.pg_roles"\ +" ) s"\ +" WHERE s.conf like '%s' "\ +" AND s.rolname LIKE '%s'" #define Query_for_list_of_access_methods \ " SELECT amname "\ @@ -1915,11 +1918,11 @@ psql_completion(const char *text, int start, int end) "\\out", "\\parse", "\\password", "\\print", "\\prompt", "\\pset", "\\qecho", "\\quit", - "\\reset", + "\\reset", "\\restrict", "\\s", "\\sendpipeline", "\\set", "\\setenv", "\\sf", "\\startpipeline", "\\sv", "\\syncpipeline", "\\t", "\\T", "\\timing", - "\\unset", + "\\unrestrict", "\\unset", "\\x", "\\warn", "\\watch", "\\write", "\\z", @@ -2318,7 +2321,8 @@ match_previous_words(int pattern_id, COMPLETE_WITH("(", "PUBLICATION"); /* ALTER SUBSCRIPTION SET ( */ else if (Matches("ALTER", "SUBSCRIPTION", MatchAny, MatchAnyN, "SET", "(")) - COMPLETE_WITH("binary", "disable_on_error", "failover", "origin", + COMPLETE_WITH("binary", "disable_on_error", "failover", + "max_retention_duration", "origin", "password_required", "retain_dead_tuples", "run_as_owner", "slot_name", "streaming", "synchronous_commit", "two_phase"); @@ -2517,7 +2521,10 @@ match_previous_words(int pattern_id, /* ALTER USER,ROLE RESET */ else if (Matches("ALTER", "USER|ROLE", MatchAny, "RESET")) + { + set_completion_reference(prev2_wd); COMPLETE_WITH_QUERY_PLUS(Query_for_list_of_user_vars, "ALL"); + } /* ALTER USER,ROLE WITH */ else if (Matches("ALTER", "USER|ROLE", MatchAny, "WITH")) @@ -3774,7 +3781,8 @@ match_previous_words(int pattern_id, /* Complete "CREATE SUBSCRIPTION ... 
WITH ( " */ else if (Matches("CREATE", "SUBSCRIPTION", MatchAnyN, "WITH", "(")) COMPLETE_WITH("binary", "connect", "copy_data", "create_slot", - "disable_on_error", "enabled", "failover", "origin", + "disable_on_error", "enabled", "failover", + "max_retention_duration", "origin", "password_required", "retain_dead_tuples", "run_as_owner", "slot_name", "streaming", "synchronous_commit", "two_phase"); @@ -5015,7 +5023,7 @@ match_previous_words(int pattern_id, /* Complete with a variable name */ else if (TailMatches("SET|RESET") && !TailMatches("UPDATE", MatchAny, "SET") && - !TailMatches("ALTER", "DATABASE", MatchAny, "RESET")) + !TailMatches("ALTER", "DATABASE|USER|ROLE", MatchAny, "RESET")) COMPLETE_WITH_QUERY_VERBATIM_PLUS(Query_for_list_of_set_vars, "CONSTRAINTS", "TRANSACTION", diff --git a/src/bin/scripts/t/100_vacuumdb.pl b/src/bin/scripts/t/100_vacuumdb.pl index ff56a13b46bbb..945c30df15600 100644 --- a/src/bin/scripts/t/100_vacuumdb.pl +++ b/src/bin/scripts/t/100_vacuumdb.pl @@ -237,9 +237,10 @@ qr/cannot vacuum all databases and a specific one at the same time/, 'cannot use option --all and a dbname as argument at the same time'); -$node->safe_psql('postgres', - 'CREATE TABLE regression_vacuumdb_test AS select generate_series(1, 10) a, generate_series(2, 11) b;' -); +$node->safe_psql('postgres', q| + CREATE TABLE regression_vacuumdb_test AS select generate_series(1, 10) a, generate_series(2, 11) b; + ALTER TABLE regression_vacuumdb_test ADD COLUMN c INT GENERATED ALWAYS AS (a + b); +|); $node->issues_sql_like( [ 'vacuumdb', '--analyze-only', @@ -340,4 +341,15 @@ qr/statement:\ ANALYZE/sx, '--missing-stats-only with no missing partition stats'); +$node->safe_psql('postgres', + "CREATE TABLE parent_table (a INT) PARTITION BY LIST (a);\n" + . "CREATE TABLE child_table PARTITION OF parent_table FOR VALUES IN (1);\n" + . "INSERT INTO parent_table VALUES (1);\n"); +$node->issues_sql_like( + [ + 'vacuumdb', '--analyze-only', 'postgres' + ], + qr/statement: ANALYZE public.parent_table/s, + '--analyze-only updates statistics for partitioned tables'); + done_testing(); diff --git a/src/bin/scripts/vacuumdb.c b/src/bin/scripts/vacuumdb.c index 79b1096eb08c4..fd236087e90ae 100644 --- a/src/bin/scripts/vacuumdb.c +++ b/src/bin/scripts/vacuumdb.c @@ -14,6 +14,7 @@ #include +#include "catalog/pg_attribute_d.h" #include "catalog/pg_class_d.h" #include "common.h" #include "common/connect.h" @@ -911,10 +912,26 @@ retrieve_objects(PGconn *conn, vacuumingOptions *vacopts, */ if ((objfilter & OBJFILTER_TABLE) == 0) { - appendPQExpBufferStr(&catalog_query, - " AND c.relkind OPERATOR(pg_catalog.=) ANY (array[" - CppAsString2(RELKIND_RELATION) ", " - CppAsString2(RELKIND_MATVIEW) "])\n"); + /* + * vacuumdb should generally follow the behavior of the underlying + * VACUUM and ANALYZE commands. If analyze_only is true, process + * regular tables, materialized views, and partitioned tables, just + * like ANALYZE (with no specific target tables) does. Otherwise, + * process only regular tables and materialized views, since VACUUM + * skips partitioned tables when no target tables are specified. 
+ */ + if (vacopts->analyze_only) + appendPQExpBufferStr(&catalog_query, + " AND c.relkind OPERATOR(pg_catalog.=) ANY (array[" + CppAsString2(RELKIND_RELATION) ", " + CppAsString2(RELKIND_MATVIEW) ", " + CppAsString2(RELKIND_PARTITIONED_TABLE) "])\n"); + else + appendPQExpBufferStr(&catalog_query, + " AND c.relkind OPERATOR(pg_catalog.=) ANY (array[" + CppAsString2(RELKIND_RELATION) ", " + CppAsString2(RELKIND_MATVIEW) "])\n"); + } /* @@ -957,6 +974,8 @@ retrieve_objects(PGconn *conn, vacuumingOptions *vacopts, " AND a.attnum OPERATOR(pg_catalog.>) 0::pg_catalog.int2\n" " AND NOT a.attisdropped\n" " AND a.attstattarget IS DISTINCT FROM 0::pg_catalog.int2\n" + " AND a.attgenerated OPERATOR(pg_catalog.<>) " + CppAsString2(ATTRIBUTE_GENERATED_VIRTUAL) "\n" " AND NOT EXISTS (SELECT NULL FROM pg_catalog.pg_statistic s\n" " WHERE s.starelid OPERATOR(pg_catalog.=) a.attrelid\n" " AND s.staattnum OPERATOR(pg_catalog.=) a.attnum\n" @@ -994,6 +1013,8 @@ retrieve_objects(PGconn *conn, vacuumingOptions *vacopts, " AND a.attnum OPERATOR(pg_catalog.>) 0::pg_catalog.int2\n" " AND NOT a.attisdropped\n" " AND a.attstattarget IS DISTINCT FROM 0::pg_catalog.int2\n" + " AND a.attgenerated OPERATOR(pg_catalog.<>) " + CppAsString2(ATTRIBUTE_GENERATED_VIRTUAL) "\n" " AND c.relhassubclass\n" " AND NOT p.inherited\n" " AND EXISTS (SELECT NULL FROM pg_catalog.pg_inherits h\n" diff --git a/src/common/Makefile b/src/common/Makefile index 1e2b91c83c4c4..2c720caa50972 100644 --- a/src/common/Makefile +++ b/src/common/Makefile @@ -163,7 +163,7 @@ libpgcommon_shlib.a: $(OBJS_SHLIB) # The JSON API normally exits on out-of-memory; disable that behavior for shared # library builds. This requires libpq's pqexpbuffer.h. jsonapi_shlib.o: override CPPFLAGS += -DJSONAPI_USE_PQEXPBUFFER -jsonapi_shlib.o: override CPPFLAGS += -I$(libpq_srcdir) +jsonapi_shlib.o: override CPPFLAGS := -I$(libpq_srcdir) $(CPPFLAGS) # Because this uses its own compilation rule, it doesn't use the # dependency tracking logic from Makefile.global. To make sure that diff --git a/src/fe_utils/meson.build b/src/fe_utils/meson.build index a18cbc939e412..5a9ddb73463e1 100644 --- a/src/fe_utils/meson.build +++ b/src/fe_utils/meson.build @@ -29,7 +29,7 @@ generated_sources += psqlscan fe_utils_sources += psqlscan fe_utils = static_library('libpgfeutils', - fe_utils_sources + generated_headers, + fe_utils_sources, c_pch: pch_postgres_fe_h, include_directories: [postgres_inc, libpq_inc], c_args: host_system == 'windows' ? ['-DFD_SETSIZE=1024'] : [], diff --git a/src/include/Makefile b/src/include/Makefile index 3f94543f3270b..24c5452de98fb 100644 --- a/src/include/Makefile +++ b/src/include/Makefile @@ -72,7 +72,7 @@ uninstall: clean: - rm -f utils/fmgroids.h utils/fmgrprotos.h utils/errcodes.h utils/header-stamp + rm -f utils/fmgroids.h utils/fmgrprotos.h utils/guc_tables.inc.c utils/errcodes.h utils/header-stamp rm -f storage/lwlocknames.h utils/probes.h utils/wait_event_types.h rm -f nodes/nodetags.h nodes/header-stamp $(MAKE) -C catalog clean diff --git a/src/include/access/gin_tuple.h b/src/include/access/gin_tuple.h index 702f7d12889d8..b4f103dec9afd 100644 --- a/src/include/access/gin_tuple.h +++ b/src/include/access/gin_tuple.h @@ -15,7 +15,9 @@ #include "utils/sortsupport.h" /* - * Data for one key in a GIN index. + * Data for one key in a GIN index. (This is not the permanent in-index + * representation, but just a convenient format to use during the tuplesort + * stage of building a new GIN index.) 
*/ typedef struct GinTuple { diff --git a/src/include/access/hash_xlog.h b/src/include/access/hash_xlog.h index 6fe97de4d66f1..5d4671dc4c128 100644 --- a/src/include/access/hash_xlog.h +++ b/src/include/access/hash_xlog.h @@ -129,7 +129,7 @@ typedef struct xl_hash_split_complete * * This data record is used for XLOG_HASH_MOVE_PAGE_CONTENTS * - * Backup Blk 0: bucket page + * Backup Blk 0: primary bucket page * Backup Blk 1: page containing moved tuples * Backup Blk 2: page from which tuples will be removed */ @@ -149,12 +149,13 @@ typedef struct xl_hash_move_page_contents * * This data record is used for XLOG_HASH_SQUEEZE_PAGE * - * Backup Blk 0: page containing tuples moved from freed overflow page - * Backup Blk 1: freed overflow page - * Backup Blk 2: page previous to the freed overflow page - * Backup Blk 3: page next to the freed overflow page - * Backup Blk 4: bitmap page containing info of freed overflow page - * Backup Blk 5: meta page + * Backup Blk 0: primary bucket page + * Backup Blk 1: page containing tuples moved from freed overflow page + * Backup Blk 2: freed overflow page + * Backup Blk 3: page previous to the freed overflow page + * Backup Blk 4: page next to the freed overflow page + * Backup Blk 5: bitmap page containing info of freed overflow page + * Backup Blk 6: meta page */ typedef struct xl_hash_squeeze_page { @@ -245,7 +246,7 @@ typedef struct xl_hash_init_bitmap_page * * This data record is used for XLOG_HASH_VACUUM_ONE_PAGE * - * Backup Blk 0: bucket page + * Backup Blk 0: primary bucket page * Backup Blk 1: meta page */ typedef struct xl_hash_vacuum_one_page diff --git a/src/include/access/heapam_xlog.h b/src/include/access/heapam_xlog.h index 277df6b3cf0b3..d4c0625b63228 100644 --- a/src/include/access/heapam_xlog.h +++ b/src/include/access/heapam_xlog.h @@ -284,7 +284,6 @@ typedef struct xl_heap_update */ typedef struct xl_heap_prune { - uint8 reason; uint8 flags; /* diff --git a/src/include/access/htup_details.h b/src/include/access/htup_details.h index aa957cf3b0165..ae813a790419e 100644 --- a/src/include/access/htup_details.h +++ b/src/include/access/htup_details.h @@ -884,7 +884,7 @@ fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull) if (att_isnull(attnum - 1, tup->t_data->t_bits)) { *isnull = true; - return (Datum) NULL; + return (Datum) 0; } else return nocachegetattr(tup, attnum, tupleDesc); diff --git a/src/include/access/itup.h b/src/include/access/itup.h index 7066c2a2868b3..338e90749bd1f 100644 --- a/src/include/access/itup.h +++ b/src/include/access/itup.h @@ -154,7 +154,7 @@ index_getattr(IndexTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull) if (att_isnull(attnum - 1, (bits8 *) tup + sizeof(IndexTupleData))) { *isnull = true; - return (Datum) NULL; + return (Datum) 0; } else return nocache_index_getattr(tup, attnum, tupleDesc); diff --git a/src/include/access/multixact.h b/src/include/access/multixact.h index b876e98f46ed7..82e4bb90dd581 100644 --- a/src/include/access/multixact.h +++ b/src/include/access/multixact.h @@ -111,6 +111,9 @@ extern bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly); extern void MultiXactIdSetOldestMember(void); extern int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool isLockOnly); +extern bool GetMultiXactInfo(uint32 *multixacts, MultiXactOffset *members, + MultiXactId *oldestMultiXactId, + MultiXactOffset *oldestOffset); extern bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2); extern bool 
MultiXactIdPrecedesOrEquals(MultiXactId multi1, MultiXactId multi2); @@ -158,5 +161,6 @@ extern void multixact_desc(StringInfo buf, XLogReaderState *record); extern const char *multixact_identify(uint8 info); extern char *mxid_to_string(MultiXactId multi, int nmembers, MultiXactMember *members); +extern char *mxstatus_to_string(MultiXactStatus status); #endif /* MULTIXACT_H */ diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h index e709d2e0afe94..9ab467cb8fdec 100644 --- a/src/include/access/nbtree.h +++ b/src/include/access/nbtree.h @@ -17,9 +17,8 @@ #include "access/amapi.h" #include "access/itup.h" #include "access/sdir.h" -#include "access/tableam.h" -#include "access/xlogreader.h" #include "catalog/pg_am_d.h" +#include "catalog/pg_class.h" #include "catalog/pg_index.h" #include "lib/stringinfo.h" #include "storage/bufmgr.h" @@ -1285,9 +1284,10 @@ extern void _bt_pageinit(Page page, Size size); extern void _bt_delitems_vacuum(Relation rel, Buffer buf, OffsetNumber *deletable, int ndeletable, BTVacuumPosting *updatable, int nupdatable); +struct TM_IndexDeleteOp; /* avoid including tableam.h here */ extern void _bt_delitems_delete_check(Relation rel, Buffer buf, Relation heapRel, - TM_IndexDeleteOp *delstate); + struct TM_IndexDeleteOp *delstate); extern void _bt_pagedel(Relation rel, Buffer leafbuf, BTVacState *vstate); extern void _bt_pendingfsm_init(Relation rel, BTVacState *vstate, bool cleanuponly); diff --git a/src/include/access/reloptions.h b/src/include/access/reloptions.h index dfbb4c854606c..a604a4702c37c 100644 --- a/src/include/access/reloptions.h +++ b/src/include/access/reloptions.h @@ -233,7 +233,7 @@ extern void add_local_string_reloption(local_relopts *relopts, const char *name, fill_string_relopt filler, int offset); extern Datum transformRelOptions(Datum oldOptions, List *defList, - const char *namspace, const char *const validnsps[], + const char *nameSpace, const char *const validnsps[], bool acceptOidsOff, bool isReset); extern List *untransformRelOptions(Datum options); extern bytea *extractRelOptions(HeapTuple tuple, TupleDesc tupdesc, diff --git a/src/include/access/slru.h b/src/include/access/slru.h index 20dbd1e0070be..8d57753ed01bd 100644 --- a/src/include/access/slru.h +++ b/src/include/access/slru.h @@ -55,7 +55,7 @@ typedef enum /* * Shared-memory state * - * ControlLock is used to protect access to the other fields, except + * SLRU bank locks are used to protect access to the other fields, except * latest_page_number, which uses atomics; see comment in slru.c. */ typedef struct SlruSharedData diff --git a/src/include/access/spgist_private.h b/src/include/access/spgist_private.h index cb43a278f4667..56ac64f0597eb 100644 --- a/src/include/access/spgist_private.h +++ b/src/include/access/spgist_private.h @@ -285,10 +285,12 @@ typedef struct SpGistCache * If the prefix datum is of a pass-by-value type, it is stored in its * Datum representation, that is its on-disk representation is of length * sizeof(Datum). This is a fairly unfortunate choice, because in no other - * place does Postgres use Datum as an on-disk representation; it creates - * an unnecessary incompatibility between 32-bit and 64-bit builds. But the - * compatibility loss is mostly theoretical since MAXIMUM_ALIGNOF typically - * differs between such builds, too. Anyway we're stuck with it now. + * place does Postgres use Datum as an on-disk representation. 
Formerly it + * meant an unnecessary incompatibility between 32-bit and 64-bit builds, and + * as of v19 it instead creates a hazard for binary upgrades on 32-bit builds. + * Fortunately, that hazard seems mostly theoretical for lack of affected + * opclasses. Going forward, we will be using a fixed size of Datum so that + * there's no longer any pressing reason to change this. */ typedef struct SpGistInnerTupleData { @@ -377,8 +379,8 @@ typedef SpGistNodeTupleData *SpGistNodeTuple; * * size must be a multiple of MAXALIGN; also, it must be at least SGDTSIZE * so that the tuple can be converted to REDIRECT status later. (This - * restriction only adds bytes for a NULL leaf datum stored on a 32-bit - * machine; otherwise alignment restrictions force it anyway.) + * restriction only adds bytes for a NULL leaf datum; otherwise alignment + * restrictions force it anyway.) */ typedef struct SpGistLeafTupleData { diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h index 1c9e802a6b128..b2ce35e2a3407 100644 --- a/src/include/access/tableam.h +++ b/src/include/access/tableam.h @@ -121,7 +121,9 @@ typedef enum TU_UpdateIndexes /* * When table_tuple_update, table_tuple_delete, or table_tuple_lock fail * because the target tuple is already outdated, they fill in this struct to - * provide information to the caller about what happened. + * provide information to the caller about what happened. When those functions + * succeed, the contents of this struct should not be relied upon, except for + * `traversed`, which may be set in both success and failure cases. * * ctid is the target's ctid link: it is the same as the target's TID if the * target was deleted, or the location of the replacement tuple if the target @@ -137,6 +139,9 @@ typedef enum TU_UpdateIndexes * tuple); otherwise cmax is zero. (We make this restriction because * HeapTupleHeaderGetCmax doesn't work for tuples outdated in other * transactions.) + * + * traversed indicates if an update chain was followed in order to try to lock + * the target tuple. (This may be set in both success and failure cases.) */ typedef struct TM_FailureData { @@ -1508,7 +1513,7 @@ table_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *slot, * * Input parameters: * relation: relation containing tuple (caller must hold suitable lock) - * tid: TID of tuple to lock + * tid: TID of tuple to lock (updated if an update chain was followed) * snapshot: snapshot to use for visibility determinations * cid: current command ID (used for visibility test, and stored into * tuple's cmax if lock is successful) @@ -1533,8 +1538,10 @@ table_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *slot, * TM_WouldBlock: lock couldn't be acquired and wait_policy is skip * * In the failure cases other than TM_Invisible and TM_Deleted, the routine - * fills *tmfd with the tuple's t_ctid, t_xmax, and, if possible, t_cmax. See - * comments for struct TM_FailureData for additional info. + * fills *tmfd with the tuple's t_ctid, t_xmax, and, if possible, t_cmax. + * Additionally, in both success and failure cases, tmfd->traversed is set if + * an update chain was followed. See comments for struct TM_FailureData for + * additional info. 
*/ static inline TM_Result table_tuple_lock(Relation rel, ItemPointer tid, Snapshot snapshot, diff --git a/src/include/access/tupmacs.h b/src/include/access/tupmacs.h index 6240ec930e7a9..84b3e7fd896e0 100644 --- a/src/include/access/tupmacs.h +++ b/src/include/access/tupmacs.h @@ -39,9 +39,6 @@ att_isnull(int ATT, const bits8 *BITS) * return the correct number of bytes fetched from the data area and extended * to Datum form. * - * On machines where Datum is 8 bytes, we support fetching 8-byte byval - * attributes; otherwise, only 1, 2, and 4-byte values are supported. - * * Note that T must already be properly aligned for this to work correctly. */ #define fetchatt(A,T) fetch_att(T, (A)->attbyval, (A)->attlen) @@ -62,10 +59,8 @@ fetch_att(const void *T, bool attbyval, int attlen) return Int16GetDatum(*((const int16 *) T)); case sizeof(int32): return Int32GetDatum(*((const int32 *) T)); -#if SIZEOF_DATUM == 8 - case sizeof(Datum): - return *((const Datum *) T); -#endif + case sizeof(int64): + return Int64GetDatum(*((const int64 *) T)); default: elog(ERROR, "unsupported byval length: %d", attlen); return 0; @@ -221,11 +216,9 @@ store_att_byval(void *T, Datum newdatum, int attlen) case sizeof(int32): *(int32 *) T = DatumGetInt32(newdatum); break; -#if SIZEOF_DATUM == 8 - case sizeof(Datum): - *(Datum *) T = newdatum; + case sizeof(int64): + *(int64 *) T = DatumGetInt64(newdatum); break; -#endif default: elog(ERROR, "unsupported byval length: %d", attlen); } diff --git a/src/include/access/twophase.h b/src/include/access/twophase.h index 509bdad9a5d55..64463e9f4afb4 100644 --- a/src/include/access/twophase.h +++ b/src/include/access/twophase.h @@ -68,4 +68,6 @@ extern void TwoPhaseTransactionGid(Oid subid, TransactionId xid, char *gid_res, int szgid); extern bool LookupGXactBySubid(Oid subid); +extern TransactionId TwoPhaseGetOldestXidInCommit(void); + #endif /* TWOPHASE_H */ diff --git a/src/include/access/xlogdefs.h b/src/include/access/xlogdefs.h index 514f03df0b69b..2397fb2411530 100644 --- a/src/include/access/xlogdefs.h +++ b/src/include/access/xlogdefs.h @@ -38,7 +38,7 @@ typedef uint64 XLogRecPtr; /* * Handy macro for printing XLogRecPtr in conventional format, e.g., * - * printf("%X/08X", LSN_FORMAT_ARGS(lsn)); + * printf("%X/%08X", LSN_FORMAT_ARGS(lsn)); * * To avoid breaking translatable messages, we're directly applying the * LSN format instead of using a macro. 
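The xlogdefs.h hunk above fixes the example format string ("%X/08X" was missing its second "%"). For reference, here is a self-contained sketch of how LSN_FORMAT_ARGS splits a 64-bit XLogRecPtr into the conventional high/low display halves; the definitions are copied in so it builds outside the tree.

#include <inttypes.h>
#include <stdio.h>

typedef uint64_t XLogRecPtr;

/* Same shape as the macro in xlogdefs.h: high and low 32-bit halves. */
#define LSN_FORMAT_ARGS(lsn) ((uint32_t) ((lsn) >> 32)), ((uint32_t) (lsn))

int
main(void)
{
	XLogRecPtr	lsn = UINT64_C(0x16B374D48);

	/* Prints "1/6B374D48", using the corrected "%X/%08X" format. */
	printf("%X/%08X\n", LSN_FORMAT_ARGS(lsn));
	return 0;
}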
diff --git a/src/include/backup/basebackup_sink.h b/src/include/backup/basebackup_sink.h index 8a5ee996a45ed..310d92b8b9d45 100644 --- a/src/include/backup/basebackup_sink.h +++ b/src/include/backup/basebackup_sink.h @@ -287,7 +287,8 @@ extern bbsink *bbsink_copystream_new(bool send_to_client); extern bbsink *bbsink_gzip_new(bbsink *next, pg_compress_specification *); extern bbsink *bbsink_lz4_new(bbsink *next, pg_compress_specification *); extern bbsink *bbsink_zstd_new(bbsink *next, pg_compress_specification *); -extern bbsink *bbsink_progress_new(bbsink *next, bool estimate_backup_size); +extern bbsink *bbsink_progress_new(bbsink *next, bool estimate_backup_size, + bool incremental); extern bbsink *bbsink_server_new(bbsink *next, char *pathname); extern bbsink *bbsink_throttle_new(bbsink *next, uint32 maxrate); diff --git a/src/include/c.h b/src/include/c.h index 6d4495bdd9f68..39022f8a9dd75 100644 --- a/src/include/c.h +++ b/src/include/c.h @@ -530,8 +530,6 @@ typedef uint32 bits32; /* >= 32 bits */ /* snprintf format strings to use for 64-bit integers */ #define INT64_FORMAT "%" PRId64 #define UINT64_FORMAT "%" PRIu64 -#define INT64_HEX_FORMAT "%" PRIx64 -#define UINT64_HEX_FORMAT "%" PRIx64 /* * 128-bit signed and unsigned integers @@ -611,11 +609,11 @@ typedef signed int Offset; typedef float float4; typedef double float8; -#ifdef USE_FLOAT8_BYVAL +/* + * float8, int8, and related datatypes are now always pass-by-value. + * We keep this symbol to avoid breaking extension code that may use it. + */ #define FLOAT8PASSBYVAL true -#else -#define FLOAT8PASSBYVAL false -#endif /* * Oid, RegProcedure, TransactionId, SubTransactionId, MultiXactId, diff --git a/src/include/catalog/README b/src/include/catalog/README new file mode 100644 index 0000000000000..7e0e5d7e70c87 --- /dev/null +++ b/src/include/catalog/README @@ -0,0 +1,2 @@ +See <https://www.postgresql.org/docs/devel/bki.html> about the +files in this directory. 
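With INT64_HEX_FORMAT and UINT64_HEX_FORMAT gone from c.h above, hexadecimal output of 64-bit values can spell out the <inttypes.h> PRIx64 macro directly, just as the retained INT64_FORMAT/UINT64_FORMAT already build on PRId64/PRIu64. A small sketch of the direct spelling (the variable name and value are illustrative):

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	uint64_t	queryid = UINT64_C(0xDEADBEEFCAFE);	/* illustrative value */

	/* Instead of UINT64_HEX_FORMAT, write the format with PRIx64 inline. */
	printf("query id: %" PRIx64 "\n", queryid);
	return 0;
}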
diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h index 5173d422d468a..ef0d0f92165eb 100644 --- a/src/include/catalog/catversion.h +++ b/src/include/catalog/catversion.h @@ -57,6 +57,6 @@ */ /* yyyymmddN */ -#define CATALOG_VERSION_NO 202507231 +#define CATALOG_VERSION_NO 202509091 #endif diff --git a/src/include/catalog/pg_database.h b/src/include/catalog/pg_database.h index 54f0d38c9c9e1..97bc18745084b 100644 --- a/src/include/catalog/pg_database.h +++ b/src/include/catalog/pg_database.h @@ -123,6 +123,7 @@ DECLARE_OID_DEFINING_MACRO(PostgresDbOid, 5); */ #define DATCONNLIMIT_INVALID_DB -2 +extern Oid get_database_oid(const char *dbname, bool missing_ok); extern bool database_is_invalid_form(Form_pg_database datform); extern bool database_is_invalid_oid(Oid dboid); diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat index 3ee8fed7e537f..03e82d28c8767 100644 --- a/src/include/catalog/pg_proc.dat +++ b/src/include/catalog/pg_proc.dat @@ -3503,6 +3503,18 @@ proname => 'random', provolatile => 'v', proparallel => 'r', prorettype => 'numeric', proargtypes => 'numeric numeric', proargnames => '{min,max}', prosrc => 'numeric_random' }, +{ oid => '6431', descr => 'random date in range', + proname => 'random', provolatile => 'v', proparallel => 'r', + prorettype => 'date', proargtypes => 'date date', + proargnames => '{min,max}', prosrc => 'date_random' }, +{ oid => '6432', descr => 'random timestamp in range', + proname => 'random', provolatile => 'v', proparallel => 'r', + prorettype => 'timestamp', proargtypes => 'timestamp timestamp', + proargnames => '{min,max}', prosrc => 'timestamp_random' }, +{ oid => '6433', descr => 'random timestamptz in range', + proname => 'random', provolatile => 'v', proparallel => 'r', + prorettype => 'timestamptz', proargtypes => 'timestamptz timestamptz', + proargnames => '{min,max}', prosrc => 'timestamptz_random' }, { oid => '1599', descr => 'set random seed', proname => 'setseed', provolatile => 'v', proparallel => 'r', prorettype => 'void', proargtypes => 'float8', prosrc => 'setseed' }, @@ -5688,9 +5700,9 @@ { oid => '6231', descr => 'statistics: information about subscription stats', proname => 'pg_stat_get_subscription_stats', provolatile => 's', proparallel => 'r', prorettype => 'record', proargtypes => 'oid', - proallargtypes => '{oid,oid,int8,int8,int8,int8,int8,int8,int8,int8,int8,timestamptz}', - proargmodes => '{i,o,o,o,o,o,o,o,o,o,o,o}', - proargnames => '{subid,subid,apply_error_count,sync_error_count,confl_insert_exists,confl_update_origin_differs,confl_update_exists,confl_update_missing,confl_delete_origin_differs,confl_delete_missing,confl_multiple_unique_conflicts,stats_reset}', + proallargtypes => '{oid,oid,int8,int8,int8,int8,int8,int8,int8,int8,int8,int8,timestamptz}', + proargmodes => '{i,o,o,o,o,o,o,o,o,o,o,o,o}', + proargnames => '{subid,subid,apply_error_count,sync_error_count,confl_insert_exists,confl_update_origin_differs,confl_update_exists,confl_update_deleted,confl_update_missing,confl_delete_origin_differs,confl_delete_missing,confl_multiple_unique_conflicts,stats_reset}', prosrc => 'pg_stat_get_subscription_stats' }, { oid => '6118', descr => 'statistics: information about subscription', proname => 'pg_stat_get_subscription', prorows => '10', proisstrict => 'f', diff --git a/src/include/catalog/pg_subscription.h b/src/include/catalog/pg_subscription.h index 231ef84ec9a6f..55cb9b1eefaeb 100644 --- a/src/include/catalog/pg_subscription.h +++ 
b/src/include/catalog/pg_subscription.h @@ -81,6 +81,15 @@ CATALOG(pg_subscription,6100,SubscriptionRelationId) BKI_SHARED_RELATION BKI_ROW bool subretaindeadtuples; /* True if dead tuples useful for * conflict detection are retained */ + int32 submaxretention; /* The maximum duration (in milliseconds) + * for which information useful for + * conflict detection can be retained */ + + bool subretentionactive; /* True if retain_dead_tuples is enabled + * and the retention duration has not + * exceeded max_retention_duration, when + * defined */ + #ifdef CATALOG_VARLEN /* variable-length fields start here */ /* Connection string to the publisher */ text subconninfo BKI_FORCE_NOT_NULL; @@ -136,6 +145,13 @@ typedef struct Subscription * to be synchronized to the standbys. */ bool retaindeadtuples; /* True if dead tuples useful for conflict * detection are retained */ + int32 maxretention; /* The maximum duration (in milliseconds) for + * which information useful for conflict + * detection can be retained */ + bool retentionactive; /* True if retain_dead_tuples is enabled + * and the retention duration has not + * exceeded max_retention_duration, when + * defined */ char *conninfo; /* Connection string to the publisher */ char *slotname; /* Name of the replication slot */ char *synccommit; /* Synchronous commit setting for worker */ diff --git a/src/include/catalog/pg_subscription_rel.h b/src/include/catalog/pg_subscription_rel.h index c91797c869c24..02f97a547dd58 100644 --- a/src/include/catalog/pg_subscription_rel.h +++ b/src/include/catalog/pg_subscription_rel.h @@ -85,11 +85,13 @@ typedef struct SubscriptionRelState extern void AddSubscriptionRelState(Oid subid, Oid relid, char state, XLogRecPtr sublsn, bool retain_lock); extern void UpdateSubscriptionRelState(Oid subid, Oid relid, char state, - XLogRecPtr sublsn); + XLogRecPtr sublsn, bool already_locked); extern char GetSubscriptionRelState(Oid subid, Oid relid, XLogRecPtr *sublsn); extern void RemoveSubscriptionRel(Oid subid, Oid relid); extern bool HasSubscriptionRelations(Oid subid); extern List *GetSubscriptionRelations(Oid subid, bool not_ready); +extern void UpdateDeadTupleRetentionStatus(Oid subid, bool active); + #endif /* PG_SUBSCRIPTION_REL_H */ diff --git a/src/include/catalog/pg_type.dat b/src/include/catalog/pg_type.dat index 29e4ffffc9806..cb730aeac8646 100644 --- a/src/include/catalog/pg_type.dat +++ b/src/include/catalog/pg_type.dat @@ -54,7 +54,7 @@ typcollation => 'C' }, { oid => '20', array_type_oid => '1016', descr => '~18 digit integer, 8-byte storage', - typname => 'int8', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typname => 'int8', typlen => '8', typbyval => 't', typcategory => 'N', typinput => 'int8in', typoutput => 'int8out', typreceive => 'int8recv', typsend => 'int8send', typalign => 'd' }, { oid => '21', array_type_oid => '1005', @@ -172,7 +172,7 @@ typoutput => 'pg_ddl_command_out', typreceive => 'pg_ddl_command_recv', typsend => 'pg_ddl_command_send', typalign => 'ALIGNOF_POINTER' }, { oid => '5069', array_type_oid => '271', descr => 'full transaction id', - typname => 'xid8', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typname => 'xid8', typlen => '8', typbyval => 't', typcategory => 'U', typinput => 'xid8in', typoutput => 'xid8out', typreceive => 'xid8recv', typsend => 'xid8send', typalign => 'd' }, @@ -222,7 +222,7 @@ typsend => 'float4send', typalign => 'i' }, { oid => '701', array_type_oid => '1022', descr => 'double-precision floating point number, 8-byte storage', - typname => 'float8', typlen 
=> '8', typbyval => 'FLOAT8PASSBYVAL', + typname => 'float8', typlen => '8', typbyval => 't', typcategory => 'N', typispreferred => 't', typinput => 'float8in', typoutput => 'float8out', typreceive => 'float8recv', typsend => 'float8send', typalign => 'd' }, @@ -237,7 +237,7 @@ typreceive => 'circle_recv', typsend => 'circle_send', typalign => 'd' }, { oid => '790', array_type_oid => '791', descr => 'monetary amounts, $d,ddd.cc', - typname => 'money', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typname => 'money', typlen => '8', typbyval => 't', typcategory => 'N', typinput => 'cash_in', typoutput => 'cash_out', typreceive => 'cash_recv', typsend => 'cash_send', typalign => 'd' }, @@ -290,7 +290,7 @@ typinput => 'date_in', typoutput => 'date_out', typreceive => 'date_recv', typsend => 'date_send', typalign => 'i' }, { oid => '1083', array_type_oid => '1183', descr => 'time of day', - typname => 'time', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typname => 'time', typlen => '8', typbyval => 't', typcategory => 'D', typinput => 'time_in', typoutput => 'time_out', typreceive => 'time_recv', typsend => 'time_send', typmodin => 'timetypmodin', typmodout => 'timetypmodout', typalign => 'd' }, @@ -298,14 +298,14 @@ # OIDS 1100 - 1199 { oid => '1114', array_type_oid => '1115', descr => 'date and time', - typname => 'timestamp', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typname => 'timestamp', typlen => '8', typbyval => 't', typcategory => 'D', typinput => 'timestamp_in', typoutput => 'timestamp_out', typreceive => 'timestamp_recv', typsend => 'timestamp_send', typmodin => 'timestamptypmodin', typmodout => 'timestamptypmodout', typalign => 'd' }, { oid => '1184', array_type_oid => '1185', descr => 'date and time with time zone', - typname => 'timestamptz', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typname => 'timestamptz', typlen => '8', typbyval => 't', typcategory => 'D', typispreferred => 't', typinput => 'timestamptz_in', typoutput => 'timestamptz_out', typreceive => 'timestamptz_recv', typsend => 'timestamptz_send', typmodin => 'timestamptztypmodin', @@ -413,7 +413,7 @@ # pg_lsn { oid => '3220', array_type_oid => '3221', descr => 'PostgreSQL LSN', - typname => 'pg_lsn', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typname => 'pg_lsn', typlen => '8', typbyval => 't', typcategory => 'U', typinput => 'pg_lsn_in', typoutput => 'pg_lsn_out', typreceive => 'pg_lsn_recv', typsend => 'pg_lsn_send', typalign => 'd' }, diff --git a/src/include/commands/dbcommands.h b/src/include/commands/dbcommands.h index 524ac6d97e898..d48ab6d7d7d38 100644 --- a/src/include/commands/dbcommands.h +++ b/src/include/commands/dbcommands.h @@ -14,9 +14,7 @@ #ifndef DBCOMMANDS_H #define DBCOMMANDS_H -#include "access/xlogreader.h" #include "catalog/objectaddress.h" -#include "lib/stringinfo.h" #include "parser/parse_node.h" extern Oid createdb(ParseState *pstate, const CreatedbStmt *stmt); @@ -28,8 +26,6 @@ extern ObjectAddress AlterDatabaseRefreshColl(AlterDatabaseRefreshCollStmt *stmt extern Oid AlterDatabaseSet(AlterDatabaseSetStmt *stmt); extern ObjectAddress AlterDatabaseOwner(const char *dbname, Oid newOwnerId); -extern Oid get_database_oid(const char *dbname, bool missing_ok); -extern char *get_database_name(Oid dbid); extern bool have_createdb_privilege(void); extern void check_encoding_locale_matches(int encoding, const char *collate, const char *ctype); diff --git a/src/include/commands/progress.h b/src/include/commands/progress.h index 7c736e7b03bcf..1cde4bd9bcf14 100644 --- 
a/src/include/commands/progress.h +++ b/src/include/commands/progress.h @@ -130,6 +130,7 @@ #define PROGRESS_BASEBACKUP_BACKUP_STREAMED 2 #define PROGRESS_BASEBACKUP_TBLSPC_TOTAL 3 #define PROGRESS_BASEBACKUP_TBLSPC_STREAMED 4 +#define PROGRESS_BASEBACKUP_BACKUP_TYPE 5 /* Phases of pg_basebackup (as advertised via PROGRESS_BASEBACKUP_PHASE) */ #define PROGRESS_BASEBACKUP_PHASE_WAIT_CHECKPOINT 1 @@ -138,6 +139,10 @@ #define PROGRESS_BASEBACKUP_PHASE_WAIT_WAL_ARCHIVE 4 #define PROGRESS_BASEBACKUP_PHASE_TRANSFER_WAL 5 +/* Types of pg_basebackup (as advertised via PROGRESS_BASEBACKUP_BACKUP_TYPE) */ +#define PROGRESS_BASEBACKUP_BACKUP_TYPE_FULL 1 +#define PROGRESS_BASEBACKUP_BACKUP_TYPE_INCREMENTAL 2 + /* Progress parameters for PROGRESS_COPY */ #define PROGRESS_COPY_BYTES_PROCESSED 0 #define PROGRESS_COPY_BYTES_TOTAL 1 diff --git a/src/include/commands/subscriptioncmds.h b/src/include/commands/subscriptioncmds.h index 9b288ad22a623..fb4e26a51a4d2 100644 --- a/src/include/commands/subscriptioncmds.h +++ b/src/include/commands/subscriptioncmds.h @@ -31,6 +31,9 @@ extern char defGetStreamingMode(DefElem *def); extern ObjectAddress AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt, bool isTopLevel); extern void CheckSubDeadTupleRetention(bool check_guc, bool sub_disabled, - int elevel_for_sub_disabled); + int elevel_for_sub_disabled, + bool retain_dead_tuples, + bool retention_active, + bool max_retention_set); #endif /* SUBSCRIPTIONCMDS_H */ diff --git a/src/include/common/int128.h b/src/include/common/int128.h index a50f5709c2988..62aae1bc6a7bb 100644 --- a/src/include/common/int128.h +++ b/src/include/common/int128.h @@ -6,7 +6,7 @@ * We make use of the native int128 type if there is one, otherwise * implement things the hard way based on two int64 halves. * - * See src/tools/testint128.c for a simple test harness for this file. + * See src/test/modules/test_int128 for a simple test harness for this file. * * Copyright (c) 2017-2025, PostgreSQL Global Development Group * @@ -29,146 +29,172 @@ #endif #endif - +/* + * If native int128 support is enabled, INT128 is just int128. Otherwise, it + * is a structure with separate 64-bit high and low parts. + * + * We lay out the INT128 structure with the same content and byte ordering + * that a native int128 type would (probably) have. This makes no difference + * for ordinary use of INT128, but allows union'ing INT128 with int128 for + * testing purposes. + * + * PG_INT128_HI_INT64 and PG_INT128_LO_UINT64 allow the (signed) high and + * (unsigned) low 64-bit integer parts to be extracted portably on all + * platforms. + */ #if USE_NATIVE_INT128 typedef int128 INT128; -/* - * Add an unsigned int64 value into an INT128 variable. - */ -static inline void -int128_add_uint64(INT128 *i128, uint64 v) +#define PG_INT128_HI_INT64(i128) ((int64) ((i128) >> 64)) +#define PG_INT128_LO_UINT64(i128) ((uint64) (i128)) + +#else + +typedef struct { - *i128 += v; -} +#ifdef WORDS_BIGENDIAN + int64 hi; /* most significant 64 bits, including sign */ + uint64 lo; /* least significant 64 bits, without sign */ +#else + uint64 lo; /* least significant 64 bits, without sign */ + int64 hi; /* most significant 64 bits, including sign */ +#endif +} INT128; + +#define PG_INT128_HI_INT64(i128) ((i128).hi) +#define PG_INT128_LO_UINT64(i128) ((i128).lo) + +#endif /* - * Add a signed int64 value into an INT128 variable. + * Construct an INT128 from (signed) high and (unsigned) low 64-bit integer + * parts. 
*/ -static inline void -int128_add_int64(INT128 *i128, int64 v) +static inline INT128 +make_int128(int64 hi, uint64 lo) { - *i128 += v; +#if USE_NATIVE_INT128 + return (((int128) hi) << 64) + lo; +#else + INT128 val; + + val.hi = hi; + val.lo = lo; + return val; +#endif } /* - * Add the 128-bit product of two int64 values into an INT128 variable. - * - * XXX with a stupid compiler, this could actually be less efficient than - * the other implementation; maybe we should do it by hand always? + * Add an unsigned int64 value into an INT128 variable. */ static inline void -int128_add_int64_mul_int64(INT128 *i128, int64 x, int64 y) +int128_add_uint64(INT128 *i128, uint64 v) { - *i128 += (int128) x * (int128) y; -} +#if USE_NATIVE_INT128 + *i128 += v; +#else + /* + * First add the value to the .lo part, then check to see if a carry needs + * to be propagated into the .hi part. Since this is unsigned integer + * arithmetic, which is just modular arithmetic, a carry is needed if the + * new .lo part is less than the old .lo part (i.e., if modular + * wrap-around occurred). Writing this in the form below, rather than + * using an "if" statement causes modern compilers to produce branchless + * machine code identical to the native code. + */ + uint64 oldlo = i128->lo; -/* - * Compare two INT128 values, return -1, 0, or +1. - */ -static inline int -int128_compare(INT128 x, INT128 y) -{ - if (x < y) - return -1; - if (x > y) - return 1; - return 0; + i128->lo += v; + i128->hi += (i128->lo < oldlo); +#endif } /* - * Widen int64 to INT128. + * Add a signed int64 value into an INT128 variable. */ -static inline INT128 -int64_to_int128(int64 v) +static inline void +int128_add_int64(INT128 *i128, int64 v) { - return (INT128) v; -} +#if USE_NATIVE_INT128 + *i128 += v; +#else + /* + * This is much like the above except that the carry logic differs for + * negative v -- we need to subtract 1 from the .hi part if the new .lo + * value is greater than the old .lo value. That can be achieved without + * any branching by adding the sign bit from v (v >> 63 = 0 or -1) to the + * previous result (for negative v, if the new .lo value is less than the + * old .lo value, the two terms cancel and we leave the .hi part + * unchanged, otherwise we subtract 1 from the .hi part). With modern + * compilers this often produces machine code identical to the native + * code. + */ + uint64 oldlo = i128->lo; -/* - * Convert INT128 to int64 (losing any high-order bits). - * This also works fine for casting down to uint64. - */ -static inline int64 -int128_to_int64(INT128 val) -{ - return (int64) val; + i128->lo += v; + i128->hi += (i128->lo < oldlo) + (v >> 63); +#endif } -#else /* !USE_NATIVE_INT128 */ - /* - * We lay out the INT128 structure with the same content and byte ordering - * that a native int128 type would (probably) have. This makes no difference - * for ordinary use of INT128, but allows union'ing INT128 with int128 for - * testing purposes. + * Add an INT128 value into an INT128 variable. */ -typedef struct +static inline void +int128_add_int128(INT128 *i128, INT128 v) { -#ifdef WORDS_BIGENDIAN - int64 hi; /* most significant 64 bits, including sign */ - uint64 lo; /* least significant 64 bits, without sign */ +#if USE_NATIVE_INT128 + *i128 += v; #else - uint64 lo; /* least significant 64 bits, without sign */ - int64 hi; /* most significant 64 bits, including sign */ + int128_add_uint64(i128, v.lo); + i128->hi += v.hi; #endif -} INT128; +} /* - * Add an unsigned int64 value into an INT128 variable. 
+ * Subtract an unsigned int64 value from an INT128 variable. */ static inline void -int128_add_uint64(INT128 *i128, uint64 v) +int128_sub_uint64(INT128 *i128, uint64 v) { +#if USE_NATIVE_INT128 + *i128 -= v; +#else /* - * First add the value to the .lo part, then check to see if a carry needs - * to be propagated into the .hi part. A carry is needed if both inputs - * have high bits set, or if just one input has high bit set while the new - * .lo part doesn't. Remember that .lo part is unsigned; we cast to - * signed here just as a cheap way to check the high bit. + * This is like int128_add_uint64(), except we must propagate a borrow to + * (subtract 1 from) the .hi part if the new .lo part is greater than the + * old .lo part. */ uint64 oldlo = i128->lo; - i128->lo += v; - if (((int64) v < 0 && (int64) oldlo < 0) || - (((int64) v < 0 || (int64) oldlo < 0) && (int64) i128->lo >= 0)) - i128->hi++; + i128->lo -= v; + i128->hi -= (i128->lo > oldlo); +#endif } /* - * Add a signed int64 value into an INT128 variable. + * Subtract a signed int64 value from an INT128 variable. */ static inline void -int128_add_int64(INT128 *i128, int64 v) +int128_sub_int64(INT128 *i128, int64 v) { - /* - * This is much like the above except that the carry logic differs for - * negative v. Ordinarily we'd need to subtract 1 from the .hi part - * (corresponding to adding the sign-extended bits of v to it); but if - * there is a carry out of the .lo part, that cancels and we do nothing. - */ +#if USE_NATIVE_INT128 + *i128 -= v; +#else + /* Like int128_add_int64() with the sign of v inverted */ uint64 oldlo = i128->lo; - i128->lo += v; - if (v >= 0) - { - if ((int64) oldlo < 0 && (int64) i128->lo >= 0) - i128->hi++; - } - else - { - if (!((int64) oldlo < 0 || (int64) i128->lo >= 0)) - i128->hi--; - } + i128->lo -= v; + i128->hi -= (i128->lo > oldlo) + (v >> 63); +#endif } /* - * INT64_AU32 extracts the most significant 32 bits of int64 as int64, while - * INT64_AL32 extracts the least significant 32 bits as uint64. + * INT64_HI_INT32 extracts the most significant 32 bits of int64 as int32. + * INT64_LO_UINT32 extracts the least significant 32 bits as uint32. */ -#define INT64_AU32(i64) ((i64) >> 32) -#define INT64_AL32(i64) ((i64) & UINT64CONST(0xFFFFFFFF)) +#define INT64_HI_INT32(i64) ((int32) ((i64) >> 32)) +#define INT64_LO_UINT32(i64) ((uint32) (i64)) /* * Add the 128-bit product of two int64 values into an INT128 variable. @@ -176,7 +202,14 @@ int128_add_int64(INT128 *i128, int64 v) static inline void int128_add_int64_mul_int64(INT128 *i128, int64 x, int64 y) { - /* INT64_AU32 must use arithmetic right shift */ +#if USE_NATIVE_INT128 + /* + * XXX with a stupid compiler, this could actually be less efficient than + * the non-native implementation; maybe we should do it by hand always? 
+ */ + *i128 += (int128) x * (int128) y; +#else + /* INT64_HI_INT32 must use arithmetic right shift */ StaticAssertDecl(((int64) -1 >> 1) == (int64) -1, "arithmetic right shift is needed"); @@ -201,34 +234,188 @@ int128_add_int64_mul_int64(INT128 *i128, int64 x, int64 y) /* No need to work hard if product must be zero */ if (x != 0 && y != 0) { - int64 x_u32 = INT64_AU32(x); - uint64 x_l32 = INT64_AL32(x); - int64 y_u32 = INT64_AU32(y); - uint64 y_l32 = INT64_AL32(y); + int32 x_hi = INT64_HI_INT32(x); + uint32 x_lo = INT64_LO_UINT32(x); + int32 y_hi = INT64_HI_INT32(y); + uint32 y_lo = INT64_LO_UINT32(y); int64 tmp; /* the first term */ - i128->hi += x_u32 * y_u32; - - /* the second term: sign-extend it only if x is negative */ - tmp = x_u32 * y_l32; - if (x < 0) - i128->hi += INT64_AU32(tmp); - else - i128->hi += ((uint64) tmp) >> 32; - int128_add_uint64(i128, ((uint64) INT64_AL32(tmp)) << 32); - - /* the third term: sign-extend it only if y is negative */ - tmp = x_l32 * y_u32; - if (y < 0) - i128->hi += INT64_AU32(tmp); - else - i128->hi += ((uint64) tmp) >> 32; - int128_add_uint64(i128, ((uint64) INT64_AL32(tmp)) << 32); + i128->hi += (int64) x_hi * (int64) y_hi; + + /* the second term: sign-extended with the sign of x */ + tmp = (int64) x_hi * (int64) y_lo; + i128->hi += INT64_HI_INT32(tmp); + int128_add_uint64(i128, ((uint64) INT64_LO_UINT32(tmp)) << 32); + + /* the third term: sign-extended with the sign of y */ + tmp = (int64) x_lo * (int64) y_hi; + i128->hi += INT64_HI_INT32(tmp); + int128_add_uint64(i128, ((uint64) INT64_LO_UINT32(tmp)) << 32); /* the fourth term: always unsigned */ - int128_add_uint64(i128, x_l32 * y_l32); + int128_add_uint64(i128, (uint64) x_lo * (uint64) y_lo); } +#endif +} + +/* + * Subtract the 128-bit product of two int64 values from an INT128 variable. + */ +static inline void +int128_sub_int64_mul_int64(INT128 *i128, int64 x, int64 y) +{ +#if USE_NATIVE_INT128 + *i128 -= (int128) x * (int128) y; +#else + /* As above, except subtract the 128-bit product */ + if (x != 0 && y != 0) + { + int32 x_hi = INT64_HI_INT32(x); + uint32 x_lo = INT64_LO_UINT32(x); + int32 y_hi = INT64_HI_INT32(y); + uint32 y_lo = INT64_LO_UINT32(y); + int64 tmp; + + /* the first term */ + i128->hi -= (int64) x_hi * (int64) y_hi; + + /* the second term: sign-extended with the sign of x */ + tmp = (int64) x_hi * (int64) y_lo; + i128->hi -= INT64_HI_INT32(tmp); + int128_sub_uint64(i128, ((uint64) INT64_LO_UINT32(tmp)) << 32); + + /* the third term: sign-extended with the sign of y */ + tmp = (int64) x_lo * (int64) y_hi; + i128->hi -= INT64_HI_INT32(tmp); + int128_sub_uint64(i128, ((uint64) INT64_LO_UINT32(tmp)) << 32); + + /* the fourth term: always unsigned */ + int128_sub_uint64(i128, (uint64) x_lo * (uint64) y_lo); + } +#endif +} + +/* + * Divide an INT128 variable by a signed int32 value, returning the quotient + * and remainder. The remainder will have the same sign as *i128. + * + * Note: This provides no protection against dividing by 0, or dividing + * INT128_MIN by -1, which overflows. It is the caller's responsibility to + * guard against those. + */ +static inline void +int128_div_mod_int32(INT128 *i128, int32 v, int32 *remainder) +{ +#if USE_NATIVE_INT128 + int128 old_i128 = *i128; + + *i128 /= v; + *remainder = (int32) (old_i128 - *i128 * v); +#else + /* + * To avoid any intermediate values overflowing (as happens if INT64_MIN + * is divided by -1), we first compute the quotient abs(*i128) / abs(v) + * using unsigned 64-bit arithmetic, and then fix the signs up at the end. 
+ * + * The quotient is computed using the short division algorithm described + * in Knuth volume 2, section 4.3.1 exercise 16 (cf. div_var_int() in + * numeric.c). Since the absolute value of the divisor is known to be at + * most 2^31, the remainder carried from one digit to the next is at most + * 2^31 - 1, and so there is no danger of overflow when this is combined + * with the next digit (a 32-bit unsigned integer). + */ + uint64 n_hi; + uint64 n_lo; + uint32 d; + uint64 q; + uint64 r; + uint64 tmp; + + /* numerator: absolute value of *i128 */ + if (i128->hi < 0) + { + n_hi = 0 - ((uint64) i128->hi); + n_lo = 0 - i128->lo; + if (n_lo != 0) + n_hi--; + } + else + { + n_hi = i128->hi; + n_lo = i128->lo; + } + + /* denominator: absolute value of v */ + d = abs(v); + + /* quotient and remainder of high 64 bits */ + q = n_hi / d; + r = n_hi % d; + n_hi = q; + + /* quotient and remainder of next 32 bits (upper half of n_lo) */ + tmp = (r << 32) + (n_lo >> 32); + q = tmp / d; + r = tmp % d; + + /* quotient and remainder of last 32 bits (lower half of n_lo) */ + tmp = (r << 32) + (uint32) n_lo; + n_lo = q << 32; + q = tmp / d; + r = tmp % d; + n_lo += q; + + /* final remainder should have the same sign as *i128 */ + *remainder = i128->hi < 0 ? (int32) (0 - r) : (int32) r; + + /* store the quotient in *i128, negating it if necessary */ + if ((i128->hi < 0) != (v < 0)) + { + n_hi = 0 - n_hi; + n_lo = 0 - n_lo; + if (n_lo != 0) + n_hi--; + } + i128->hi = (int64) n_hi; + i128->lo = n_lo; +#endif +} + +/* + * Test if an INT128 value is zero. + */ +static inline bool +int128_is_zero(INT128 x) +{ +#if USE_NATIVE_INT128 + return x == 0; +#else + return x.hi == 0 && x.lo == 0; +#endif +} + +/* + * Return the sign of an INT128 value (returns -1, 0, or +1). + */ +static inline int +int128_sign(INT128 x) +{ +#if USE_NATIVE_INT128 + if (x < 0) + return -1; + if (x > 0) + return 1; + return 0; +#else + if (x.hi < 0) + return -1; + if (x.hi > 0) + return 1; + if (x.lo > 0) + return 1; + return 0; +#endif } /* @@ -237,6 +424,13 @@ int128_add_int64_mul_int64(INT128 *i128, int64 x, int64 y) static inline int int128_compare(INT128 x, INT128 y) { +#if USE_NATIVE_INT128 + if (x < y) + return -1; + if (x > y) + return 1; + return 0; +#else if (x.hi < y.hi) return -1; if (x.hi > y.hi) @@ -246,6 +440,7 @@ int128_compare(INT128 x, INT128 y) if (x.lo > y.lo) return 1; return 0; +#endif } /* @@ -254,11 +449,15 @@ int128_compare(INT128 x, INT128 y) static inline INT128 int64_to_int128(int64 v) { +#if USE_NATIVE_INT128 + return (INT128) v; +#else INT128 val; val.lo = (uint64) v; val.hi = (v < 0) ?
-INT64CONST(1) : INT64CONST(0); return val; +#endif } /* @@ -268,9 +467,11 @@ int64_to_int128(int64 v) static inline int64 int128_to_int64(INT128 val) { +#if USE_NATIVE_INT128 + return (int64) val; +#else return (int64) val.lo; +#endif } -#endif /* USE_NATIVE_INT128 */ - #endif /* INT128_H */ diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h index 104b059544dd3..31133514e8438 100644 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -14,6 +14,7 @@ #ifndef EXECUTOR_H #define EXECUTOR_H +#include "datatype/timestamp.h" #include "executor/execdesc.h" #include "fmgr.h" #include "nodes/lockoptions.h" @@ -241,7 +242,9 @@ extern void standard_ExecutorEnd(QueryDesc *queryDesc); extern void ExecutorRewind(QueryDesc *queryDesc); extern bool ExecCheckPermissions(List *rangeTable, List *rteperminfos, bool ereport_on_violation); +extern bool ExecCheckOneRelPerms(RTEPermissionInfo *perminfo); extern void CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation, + OnConflictAction onConflictAction, List *mergeActions); extern void InitResultRelInfo(ResultRelInfo *resultRelInfo, Relation resultRelationDesc, @@ -759,7 +762,18 @@ extern bool RelationFindReplTupleByIndex(Relation rel, Oid idxoid, TupleTableSlot *outslot); extern bool RelationFindReplTupleSeq(Relation rel, LockTupleMode lockmode, TupleTableSlot *searchslot, TupleTableSlot *outslot); - +extern bool RelationFindDeletedTupleInfoSeq(Relation rel, + TupleTableSlot *searchslot, + TransactionId oldestxmin, + TransactionId *delete_xid, + RepOriginId *delete_origin, + TimestampTz *delete_time); +extern bool RelationFindDeletedTupleInfoByIndex(Relation rel, Oid idxoid, + TupleTableSlot *searchslot, + TransactionId oldestxmin, + TransactionId *delete_xid, + RepOriginId *delete_origin, + TimestampTz *delete_time); extern void ExecSimpleRelationInsert(ResultRelInfo *resultRelInfo, EState *estate, TupleTableSlot *slot); extern void ExecSimpleRelationUpdate(ResultRelInfo *resultRelInfo, diff --git a/src/include/fmgr.h b/src/include/fmgr.h index 0fe7b4ebc7719..c7236e4297242 100644 --- a/src/include/fmgr.h +++ b/src/include/fmgr.h @@ -469,7 +469,7 @@ typedef struct int funcmaxargs; /* FUNC_MAX_ARGS */ int indexmaxkeys; /* INDEX_MAX_KEYS */ int namedatalen; /* NAMEDATALEN */ - int float8byval; /* FLOAT8PASSBYVAL */ + int float8byval; /* FLOAT8PASSBYVAL (now vestigial) */ char abi_extra[32]; /* see pg_config_manual.h */ } Pg_abi_values; diff --git a/src/include/libpq/libpq.h b/src/include/libpq/libpq.h index aeb66ca40cf38..5af005ad779dc 100644 --- a/src/include/libpq/libpq.h +++ b/src/include/libpq/libpq.h @@ -118,6 +118,24 @@ extern PGDLLIMPORT bool SSLPreferServerCiphers; extern PGDLLIMPORT bool ssl_loaded_verify_locations; #endif +#ifdef USE_SSL +#define SSL_LIBRARY "OpenSSL" +#else +#define SSL_LIBRARY "" +#endif + +#ifdef USE_OPENSSL +#define DEFAULT_SSL_CIPHERS "HIGH:MEDIUM:+3DES:!aNULL" +#else +#define DEFAULT_SSL_CIPHERS "none" +#endif + +#ifdef USE_SSL +#define DEFAULT_SSL_GROUPS "X25519:prime256v1" +#else +#define DEFAULT_SSL_GROUPS "none" +#endif + /* * prototypes for functions in be-secure-gssapi.c */ diff --git a/src/include/libpq/protocol.h b/src/include/libpq/protocol.h index b0bcb3cdc26eb..7bf90053bcb6d 100644 --- a/src/include/libpq/protocol.h +++ b/src/include/libpq/protocol.h @@ -66,9 +66,31 @@ /* These are the codes sent by parallel workers to leader processes. */ + #define PqMsg_Progress 'P' +/* Replication codes sent by the primary (wrapped in CopyData messages). 
*/ + +#define PqReplMsg_Keepalive 'k' +#define PqReplMsg_PrimaryStatusUpdate 's' +#define PqReplMsg_WALData 'w' + + +/* Replication codes sent by the standby (wrapped in CopyData messages). */ + +#define PqReplMsg_HotStandbyFeedback 'h' +#define PqReplMsg_PrimaryStatusRequest 'p' +#define PqReplMsg_StandbyStatusUpdate 'r' + + +/* Codes used for backups via COPY OUT (wrapped in CopyData messages). */ + +#define PqBackupMsg_Manifest 'm' +#define PqBackupMsg_NewArchive 'n' +#define PqBackupMsg_ProgressReport 'p' + + /* These are the authentication request codes sent by the backend. */ #define AUTH_REQ_OK 0 /* User is authenticated */ diff --git a/src/include/meson.build b/src/include/meson.build index 2e4b7aa529e26..7cb3075da2a93 100644 --- a/src/include/meson.build +++ b/src/include/meson.build @@ -177,3 +177,24 @@ install_subdir('catalog', # autoconf generates the file there, ensure we get a conflict generated_sources_ac += {'src/include': ['stamp-h']} + + +# Instead of having targets depending directly on a list of all generated +# headers, have them depend on a stamp file for all of them. Dependencies on +# headers are implemented as order-only dependencies in meson (and then using +# compiler-generated dependencies during incremental rebuilds). The benefit +# of using a stamp file is that it makes build.ninja considerably smaller and +# meson setup faster, as otherwise the list of headers is repeated for every C +# file, bloating build.ninja by ~2x. +generated_headers_stamp = custom_target('generated-headers-stamp.h', + output: 'generated-headers-stamp.h', + input: generated_headers, + command: stamp_cmd, +) + +generated_backend_headers_stamp = custom_target('generated-backend-headers-stamp.h', + output: 'generated-backend-headers-stamp.h', + input: generated_backend_headers, + depends: generated_headers_stamp, + command: stamp_cmd, +) diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index e107d6e5f8174..71857feae4823 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -609,15 +609,13 @@ typedef struct ResultRelInfo bool ri_RootToChildMapValid; /* - * Information needed by tuple routing target relations + * Other information needed by child result relations * - * RootResultRelInfo gives the target relation mentioned in the query, if - * it's a partitioned table. It is not set if the target relation - * mentioned in the query is an inherited table, nor when tuple routing is - * not needed. + * ri_RootResultRelInfo gives the target relation mentioned in the query. + * Used as the root for tuple routing and/or transition capture. * - * PartitionTupleSlot is non-NULL if RootToChild conversion is needed and - * the relation is a partition. + * ri_PartitionTupleSlot is non-NULL if the relation is a partition to + * route tuples into and ri_RootToChildMap conversion is needed.
*/ struct ResultRelInfo *ri_RootResultRelInfo; TupleTableSlot *ri_PartitionTupleSlot; @@ -1022,7 +1020,6 @@ typedef struct SubPlanState bool havehashrows; /* true if hashtable is not empty */ bool havenullrows; /* true if hashnulls is not empty */ MemoryContext hashtablecxt; /* memory context containing hash tables */ - MemoryContext hashtempcxt; /* temp memory context for hash tables */ ExprContext *innerecontext; /* econtext for computing inner tuples */ int numCols; /* number of columns being hashed */ /* each of the remaining fields is an array of length numCols: */ diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h index fbe333d88fac9..fb3957e75e5f1 100644 --- a/src/include/nodes/nodes.h +++ b/src/include/nodes/nodes.h @@ -188,6 +188,8 @@ castNodeImpl(NodeTag type, void *ptr) * ---------------------------------------------------------------- */ +#ifndef FRONTEND + /* * nodes/{outfuncs.c,print.c} */ @@ -198,7 +200,7 @@ extern void outNode(struct StringInfoData *str, const void *obj); extern void outToken(struct StringInfoData *str, const char *s); extern void outBitmapset(struct StringInfoData *str, const struct Bitmapset *bms); -extern void outDatum(struct StringInfoData *str, uintptr_t value, +extern void outDatum(struct StringInfoData *str, Datum value, int typlen, bool typbyval); extern char *nodeToString(const void *obj); extern char *nodeToStringWithLocations(const void *obj); @@ -212,7 +214,7 @@ extern void *stringToNode(const char *str); extern void *stringToNodeWithLocations(const char *str); #endif extern struct Bitmapset *readBitmapset(void); -extern uintptr_t readDatum(bool typbyval); +extern Datum readDatum(bool typbyval); extern bool *readBoolCols(int numCols); extern int *readIntCols(int numCols); extern Oid *readOidCols(int numCols); @@ -235,6 +237,8 @@ extern void *copyObjectImpl(const void *from); */ extern bool equal(const void *a, const void *b); +#endif /* !FRONTEND */ + /* * Typedef for parse location. This is just an int, but this way @@ -319,8 +323,8 @@ typedef enum JoinType * These codes are used internally in the planner, but are not supported * by the executor (nor, indeed, by most of the planner). */ - JOIN_UNIQUE_OUTER, /* LHS path must be made unique */ - JOIN_UNIQUE_INNER, /* RHS path must be made unique */ + JOIN_UNIQUE_OUTER, /* LHS has to be made unique */ + JOIN_UNIQUE_INNER, /* RHS has to be made unique */ /* * We might need additional join types someday. diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h index e5dd15098f635..4a903d1ec1832 100644 --- a/src/include/nodes/pathnodes.h +++ b/src/include/nodes/pathnodes.h @@ -703,8 +703,6 @@ typedef struct PartitionSchemeData *PartitionScheme; * (regardless of ordering) among the unparameterized paths; * or if there is no unparameterized path, the path with lowest * total cost among the paths with minimum parameterization - * cheapest_unique_path - for caching cheapest path to produce unique - * (no duplicates) output from relation; NULL if not yet requested * cheapest_parameterized_paths - best paths for their parameterizations; * always includes cheapest_total_path, even if that's unparameterized * direct_lateral_relids - rels this rel has direct LATERAL references to @@ -770,6 +768,21 @@ typedef struct PartitionSchemeData *PartitionScheme; * other rels for which we have tried and failed to prove * this one unique * + * Three fields are used to cache information about unique-ification of this + * relation.
This is used to support semijoins where the relation appears on + * the RHS: the relation is first unique-ified, and then a regular join is + * performed: + * + * unique_rel - the unique-ified version of the relation, containing paths + * that produce unique (no duplicates) output from relation; + * NULL if not yet requested + * unique_pathkeys - pathkeys that represent the ordering requirements for + * the relation's output in sort-based unique-ification + * implementations + * unique_groupclause - a list of SortGroupClause nodes that represent the + * columns to be grouped on in hash-based unique-ification + * implementations + * * The presence of the following fields depends on the restrictions * and joins that the relation participates in: * @@ -930,7 +943,6 @@ typedef struct RelOptInfo List *partial_pathlist; /* partial Paths */ struct Path *cheapest_startup_path; struct Path *cheapest_total_path; - struct Path *cheapest_unique_path; List *cheapest_parameterized_paths; /* @@ -1004,6 +1016,16 @@ typedef struct RelOptInfo /* known not unique for these set(s) */ List *non_unique_for_rels; + /* + * information about unique-ification of this relation + */ + /* the unique-ified version of the relation */ + struct RelOptInfo *unique_rel; + /* pathkeys for sort-based unique-ification implementations */ + List *unique_pathkeys; + /* SortGroupClause nodes for hash-based unique-ification implementations */ + List *unique_groupclause; + /* * used by various scans and joins: */ @@ -1097,6 +1119,17 @@ typedef struct RelOptInfo ((rel)->part_scheme && (rel)->boundinfo && (rel)->nparts > 0 && \ (rel)->part_rels && (rel)->partexprs && (rel)->nullable_partexprs) +/* + * Is given relation unique-ified? + * + * When the nominal jointype is JOIN_INNER, sjinfo->jointype is JOIN_SEMI, and + * the given rel is exactly the RHS of the semijoin, it indicates that the rel + * has been unique-ified. + */ +#define RELATION_WAS_MADE_UNIQUE(rel, sjinfo, nominal_jointype) \ + ((nominal_jointype) == JOIN_INNER && (sjinfo)->jointype == JOIN_SEMI && \ + bms_equal((sjinfo)->syn_righthand, (rel)->relids)) + /* * IndexOptInfo * Per-index information for planning/optimization @@ -1741,8 +1774,8 @@ typedef struct ParamPathInfo * and the specified outer rel(s). * * "rows" is the same as parent->rows in simple paths, but in parameterized - * paths and UniquePaths it can be less than parent->rows, reflecting the - * fact that we've filtered by extra join conditions or removed duplicates. + * paths it can be less than parent->rows, reflecting the fact that we've + * filtered by extra join conditions. * * "pathkeys" is a List of PathKey nodes (see above), describing the sort * ordering of the path's output rows. @@ -2133,40 +2166,14 @@ typedef struct MemoizePath * complete after caching the first record. */ bool binary_mode; /* true when cache key should be compared bit * by bit, false when using hash equality ops */ - Cardinality calls; /* expected number of rescans */ uint32 est_entries; /* The maximum number of entries that the * planner expects will fit in the cache, or 0 * if unknown */ + Cardinality est_calls; /* expected number of rescans */ + Cardinality est_unique_keys; /* estimated unique keys, for EXPLAIN */ + double est_hit_ratio; /* estimated cache hit ratio, for EXPLAIN */ } MemoizePath; -/* - * UniquePath represents elimination of distinct rows from the output of - * its subpath. 
- * - * This can represent significantly different plans: either hash-based or - * sort-based implementation, or a no-op if the input path can be proven - * distinct already. The decision is sufficiently localized that it's not - * worth having separate Path node types. (Note: in the no-op case, we could - * eliminate the UniquePath node entirely and just return the subpath; but - * it's convenient to have a UniquePath in the path tree to signal upper-level - * routines that the input is known distinct.) - */ -typedef enum UniquePathMethod -{ - UNIQUE_PATH_NOOP, /* input is known unique already */ - UNIQUE_PATH_HASH, /* use hashing */ - UNIQUE_PATH_SORT, /* use sorting */ -} UniquePathMethod; - -typedef struct UniquePath -{ - Path path; - Path *subpath; - UniquePathMethod umethod; - List *in_operators; /* equality operators of the IN clause */ - List *uniq_exprs; /* expressions to be made unique */ -} UniquePath; - /* * GatherPath runs several copies of a plan in parallel and collects the * results. The parallel leader may also execute the plan, unless the @@ -2373,17 +2380,17 @@ typedef struct GroupPath } GroupPath; /* - * UpperUniquePath represents adjacent-duplicate removal (in presorted input) + * UniquePath represents adjacent-duplicate removal (in presorted input) * * The columns to be compared are the first numkeys columns of the path's * pathkeys. The input is presumed already sorted that way. */ -typedef struct UpperUniquePath +typedef struct UniquePath { Path path; Path *subpath; /* path representing input source */ int numkeys; /* number of pathkey columns to compare */ -} UpperUniquePath; +} UniquePath; /* * AggPath represents generic computation of aggregate functions diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h index 46e2e09ea35be..29d7732d6a031 100644 --- a/src/include/nodes/plannodes.h +++ b/src/include/nodes/plannodes.h @@ -29,18 +29,19 @@ */ /* ---------------- - * CachedPlanType + * PlannedStmtOrigin * - * CachedPlanType identifies whether a PlannedStmt is a cached plan, and if - * so, whether it is generic or custom. + * PlannedStmtOrigin identifies where a PlannedStmt comes from. * ---------------- */ -typedef enum CachedPlanType +typedef enum PlannedStmtOrigin { - PLAN_CACHE_NONE = 0, /* Not a cached plan */ - PLAN_CACHE_GENERIC, /* Generic cached plan */ - PLAN_CACHE_CUSTOM, /* Custom cached plan */ -} CachedPlanType; + PLAN_STMT_UNKNOWN = 0, /* plan origin is not yet known */ + PLAN_STMT_INTERNAL, /* generated internally by a query */ + PLAN_STMT_STANDARD, /* standard planned statement */ + PLAN_STMT_CACHE_GENERIC, /* generic cached plan */ + PLAN_STMT_CACHE_CUSTOM, /* custom cached plan */ +} PlannedStmtOrigin; /* ---------------- * PlannedStmt node @@ -72,8 +73,8 @@ typedef struct PlannedStmt /* plan identifier (can be set by plugins) */ int64 planId; - /* type of cached plan */ - CachedPlanType cached_plan_type; + /* origin of plan */ + PlannedStmtOrigin planOrigin; /* is it insert|update|delete|merge RETURNING?
*/ bool hasReturning; @@ -1073,6 +1074,16 @@ typedef struct Memoize /* paramids from param_exprs */ Bitmapset *keyparamids; + + /* Estimated number of rescans, for EXPLAIN */ + Cardinality est_calls; + + /* Estimated number of distinct lookup keys, for EXPLAIN */ + Cardinality est_unique_keys; + + /* Estimated cache hit ratio, for EXPLAIN */ + double est_hit_ratio; + } Memoize; /* ---------------- diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h index 60dcdb77e41be..763cd25bb3c9a 100644 --- a/src/include/optimizer/pathnode.h +++ b/src/include/optimizer/pathnode.h @@ -90,9 +90,7 @@ extern MemoizePath *create_memoize_path(PlannerInfo *root, List *hash_operators, bool singlerow, bool binary_mode, - double calls); -extern UniquePath *create_unique_path(PlannerInfo *root, RelOptInfo *rel, - Path *subpath, SpecialJoinInfo *sjinfo); + Cardinality est_calls); extern GatherPath *create_gather_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, Relids required_outer, double *rows); @@ -223,11 +221,11 @@ extern GroupPath *create_group_path(PlannerInfo *root, List *groupClause, List *qual, double numGroups); -extern UpperUniquePath *create_upper_unique_path(PlannerInfo *root, - RelOptInfo *rel, - Path *subpath, - int numCols, - double numGroups); +extern UniquePath *create_unique_path(PlannerInfo *root, + RelOptInfo *rel, + Path *subpath, + int numCols, + double numGroups); extern AggPath *create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, diff --git a/src/include/optimizer/paths.h b/src/include/optimizer/paths.h index 8410531f2d640..cbade77b717fb 100644 --- a/src/include/optimizer/paths.h +++ b/src/include/optimizer/paths.h @@ -71,10 +71,7 @@ extern void generate_partitionwise_join_paths(PlannerInfo *root, extern void create_index_paths(PlannerInfo *root, RelOptInfo *rel); extern bool relation_has_unique_index_for(PlannerInfo *root, RelOptInfo *rel, List *restrictlist, - List *exprlist, List *oprlist); -extern bool relation_has_unique_index_ext(PlannerInfo *root, RelOptInfo *rel, - List *restrictlist, List *exprlist, - List *oprlist, List **extra_clauses); + List **extra_clauses); extern bool indexcol_is_bool_constant_for_query(PlannerInfo *root, IndexOptInfo *index, int indexcol); diff --git a/src/include/optimizer/plancat.h b/src/include/optimizer/plancat.h index d6f6f4ad2d788..dd8f2cd157f6f 100644 --- a/src/include/optimizer/plancat.h +++ b/src/include/optimizer/plancat.h @@ -76,6 +76,8 @@ extern double get_function_rows(PlannerInfo *root, Oid funcid, Node *node); extern bool has_row_triggers(PlannerInfo *root, Index rti, CmdType event); +extern bool has_transition_tables(PlannerInfo *root, Index rti, CmdType event); + extern bool has_stored_generated_columns(PlannerInfo *root, Index rti); extern Bitmapset *get_dependent_generated_columns(PlannerInfo *root, Index rti, diff --git a/src/include/optimizer/planner.h b/src/include/optimizer/planner.h index 347c582a78927..f220e9a270d5c 100644 --- a/src/include/optimizer/planner.h +++ b/src/include/optimizer/planner.h @@ -59,4 +59,7 @@ extern Path *get_cheapest_fractional_path(RelOptInfo *rel, extern Expr *preprocess_phv_expression(PlannerInfo *root, Expr *expr); +extern RelOptInfo *create_unique_paths(PlannerInfo *root, RelOptInfo *rel, + SpecialJoinInfo *sjinfo); + #endif /* PLANNER_H */ diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h index 125d3eb5fff5e..7e1aa4223326e 100644 --- a/src/include/pg_config_manual.h +++ b/src/include/pg_config_manual.h @@ 
-74,17 +74,12 @@ #define PARTITION_MAX_KEYS 32 /* - * Decide whether built-in 8-byte types, including float8, int8, and - * timestamp, are passed by value. This is on by default if sizeof(Datum) >= - * 8 (that is, on 64-bit platforms). If sizeof(Datum) < 8 (32-bit platforms), - * this must be off. We keep this here as an option so that it is easy to - * test the pass-by-reference code paths on 64-bit platforms. - * - * Changing this requires an initdb. + * This symbol is now vestigial: built-in 8-byte types, including float8, + * int8, and timestamp, are always passed by value since we require Datum + * to be wide enough to permit that. We continue to define the symbol here + * so as not to unnecessarily break extension code. */ -#if SIZEOF_VOID_P >= 8 #define USE_FLOAT8_BYVAL 1 -#endif /* diff --git a/src/include/pgstat.h b/src/include/pgstat.h index 202bd2d5acedc..f402b17295c89 100644 --- a/src/include/pgstat.h +++ b/src/include/pgstat.h @@ -747,11 +747,11 @@ extern PgStat_StatReplSlotEntry *pgstat_fetch_replslot(NameData slotname); */ extern void pgstat_reset_slru(const char *); -extern void pgstat_count_slru_page_zeroed(int slru_idx); -extern void pgstat_count_slru_page_hit(int slru_idx); -extern void pgstat_count_slru_page_read(int slru_idx); -extern void pgstat_count_slru_page_written(int slru_idx); -extern void pgstat_count_slru_page_exists(int slru_idx); +extern void pgstat_count_slru_blocks_zeroed(int slru_idx); +extern void pgstat_count_slru_blocks_hit(int slru_idx); +extern void pgstat_count_slru_blocks_read(int slru_idx); +extern void pgstat_count_slru_blocks_written(int slru_idx); +extern void pgstat_count_slru_blocks_exists(int slru_idx); extern void pgstat_count_slru_flush(int slru_idx); extern void pgstat_count_slru_truncate(int slru_idx); extern const char *pgstat_get_slru_name(int slru_idx); diff --git a/src/include/port/pg_bswap.h b/src/include/port/pg_bswap.h index 33648433c6377..b15f6f6ac3819 100644 --- a/src/include/port/pg_bswap.h +++ b/src/include/port/pg_bswap.h @@ -130,8 +130,7 @@ pg_bswap64(uint64 x) /* * Rearrange the bytes of a Datum from big-endian order into the native byte - * order. On big-endian machines, this does nothing at all. Note that the C - * type Datum is an unsigned integer type on all platforms. + * order. On big-endian machines, this does nothing at all. * * One possible application of the DatumBigEndianToNative() macro is to make * bitwise comparisons cheaper. A simple 3-way comparison of Datums @@ -139,23 +138,11 @@ pg_bswap64(uint64 x) * the same result as a memcmp() of the corresponding original Datums, but can * be much cheaper. It's generally safe to do this on big-endian systems * without any special transformation occurring first. - * - * If SIZEOF_DATUM is not defined, then postgres.h wasn't included and these - * macros probably shouldn't be used, so we define nothing. Note that - * SIZEOF_DATUM == 8 would evaluate as 0 == 8 in that case, potentially - * leading to the wrong implementation being selected and confusing errors, so - * defining nothing is safest. 
*/ -#ifdef SIZEOF_DATUM #ifdef WORDS_BIGENDIAN #define DatumBigEndianToNative(x) (x) #else /* !WORDS_BIGENDIAN */ -#if SIZEOF_DATUM == 8 -#define DatumBigEndianToNative(x) pg_bswap64(x) -#else /* SIZEOF_DATUM != 8 */ -#define DatumBigEndianToNative(x) pg_bswap32(x) -#endif /* SIZEOF_DATUM == 8 */ +#define DatumBigEndianToNative(x) UInt64GetDatum(pg_bswap64(DatumGetUInt64(x))) #endif /* WORDS_BIGENDIAN */ -#endif /* SIZEOF_DATUM */ #endif /* PG_BSWAP_H */ diff --git a/src/include/postgres.h b/src/include/postgres.h index 8a41a6686877f..357cbd6fd961e 100644 --- a/src/include/postgres.h +++ b/src/include/postgres.h @@ -58,15 +58,22 @@ /* * A Datum contains either a value of a pass-by-value type or a pointer to a - * value of a pass-by-reference type. Therefore, we require: - * - * sizeof(Datum) == sizeof(void *) == 4 or 8 + * value of a pass-by-reference type. Therefore, we must have + * sizeof(Datum) >= sizeof(void *). No current or foreseeable Postgres + * platform has pointers wider than 8 bytes, and standardizing on Datum being + * exactly 8 bytes has advantages in reducing cross-platform differences. * * The functions below and the analogous functions for other types should be used to * convert between a Datum and the appropriate C type. */ -typedef uintptr_t Datum; +typedef uint64_t Datum; + +/* + * This symbol is now vestigial, but we continue to define it so as not to + * unnecessarily break extension code. + */ +#define SIZEOF_DATUM 8 /* * A NullableDatum is used in places where both a Datum and its nullness needs @@ -83,8 +90,6 @@ typedef struct NullableDatum /* due to alignment padding this could be used for flags for free */ } NullableDatum; -#define SIZEOF_DATUM SIZEOF_VOID_P - /* * DatumGetBool * Returns boolean value of a datum. @@ -316,7 +321,7 @@ CommandIdGetDatum(CommandId X) static inline Pointer DatumGetPointer(Datum X) { - return (Pointer) X; + return (Pointer) (uintptr_t) X; } /* @@ -326,7 +331,7 @@ DatumGetPointer(Datum X) static inline Datum PointerGetDatum(const void *X) { - return (Datum) X; + return (Datum) (uintptr_t) X; } /* @@ -383,68 +388,41 @@ NameGetDatum(const NameData *X) /* * DatumGetInt64 * Returns 64-bit integer value of a datum. - * - * Note: this function hides whether int64 is pass by value or by reference. */ static inline int64 DatumGetInt64(Datum X) { -#ifdef USE_FLOAT8_BYVAL return (int64) X; -#else - return *((int64 *) DatumGetPointer(X)); -#endif } /* * Int64GetDatum * Returns datum representation for a 64-bit integer. - * - * Note: if int64 is pass by reference, this function returns a reference - * to palloc'd space. */ -#ifdef USE_FLOAT8_BYVAL static inline Datum Int64GetDatum(int64 X) { return (Datum) X; } -#else -extern Datum Int64GetDatum(int64 X); -#endif - /* * DatumGetUInt64 * Returns 64-bit unsigned integer value of a datum. - * - * Note: this function hides whether int64 is pass by value or by reference. */ static inline uint64 DatumGetUInt64(Datum X) { -#ifdef USE_FLOAT8_BYVAL return (uint64) X; -#else - return *((uint64 *) DatumGetPointer(X)); -#endif } /* * UInt64GetDatum * Returns datum representation for a 64-bit unsigned integer. - * - * Note: if int64 is pass by reference, this function returns a reference - * to palloc'd space. */ static inline Datum UInt64GetDatum(uint64 X) { -#ifdef USE_FLOAT8_BYVAL return (Datum) X; -#else - return Int64GetDatum((int64) X); -#endif } /* @@ -492,13 +470,10 @@ Float4GetDatum(float4 X) /* * DatumGetFloat8 * Returns 8-byte floating point value of a datum. 
- * - * Note: this function hides whether float8 is pass by value or by reference. */ static inline float8 DatumGetFloat8(Datum X) { -#ifdef USE_FLOAT8_BYVAL union { int64 value; @@ -507,19 +482,12 @@ DatumGetFloat8(Datum X) myunion.value = DatumGetInt64(X); return myunion.retval; -#else - return *((float8 *) DatumGetPointer(X)); -#endif } /* * Float8GetDatum * Returns datum representation for an 8-byte floating point number. - * - * Note: if float8 is pass by reference, this function returns a reference - * to palloc'd space. */ -#ifdef USE_FLOAT8_BYVAL static inline Datum Float8GetDatum(float8 X) { @@ -532,35 +500,22 @@ Float8GetDatum(float8 X) myunion.value = X; return Int64GetDatum(myunion.retval); } -#else -extern Datum Float8GetDatum(float8 X); -#endif - /* * Int64GetDatumFast * Float8GetDatumFast * - * These macros are intended to allow writing code that does not depend on + * These macros were intended to allow writing code that does not depend on * whether int64 and float8 are pass-by-reference types, while not - * sacrificing performance when they are. The argument must be a variable - * that will exist and have the same value for as long as the Datum is needed. - * In the pass-by-ref case, the address of the variable is taken to use as - * the Datum. In the pass-by-val case, these are the same as the non-Fast - * functions, except for asserting that the variable is of the correct type. + * sacrificing performance when they are. They are no longer different + * from the regular functions, though we keep the assertions to protect + * code that might get back-patched into older branches. */ -#ifdef USE_FLOAT8_BYVAL #define Int64GetDatumFast(X) \ (AssertVariableIsOfTypeMacro(X, int64), Int64GetDatum(X)) #define Float8GetDatumFast(X) \ (AssertVariableIsOfTypeMacro(X, double), Float8GetDatum(X)) -#else -#define Int64GetDatumFast(X) \ - (AssertVariableIsOfTypeMacro(X, int64), PointerGetDatum(&(X))) -#define Float8GetDatumFast(X) \ - (AssertVariableIsOfTypeMacro(X, double), PointerGetDatum(&(X))) -#endif /* ---------------------------------------------------------------- diff --git a/src/include/replication/conflict.h b/src/include/replication/conflict.h index 6c59125f25657..e516caa5c73fc 100644 --- a/src/include/replication/conflict.h +++ b/src/include/replication/conflict.h @@ -32,6 +32,9 @@ typedef enum /* The updated row value violates unique constraint */ CT_UPDATE_EXISTS, + /* The row to be updated was concurrently deleted by a different origin */ + CT_UPDATE_DELETED, + /* The row to be updated is missing */ CT_UPDATE_MISSING, @@ -54,7 +57,7 @@ typedef enum #define CONFLICT_NUM_TYPES (CT_MULTIPLE_UNIQUE_CONFLICTS + 1) /* - * Information for the existing local tuple that caused the conflict. + * Information for the existing local row that caused the conflict. */ typedef struct ConflictTupleInfo { @@ -66,7 +69,7 @@ typedef struct ConflictTupleInfo * the conflict */ RepOriginId origin; /* origin identifier of the modification */ TimestampTz ts; /* timestamp of when the modification on the - * conflicting local tuple occurred */ + * conflicting local row occurred */ } ConflictTupleInfo; extern bool GetTupleTransactionInfo(TupleTableSlot *localslot, diff --git a/src/include/replication/slot.h b/src/include/replication/slot.h index e8fc342d1a96e..fe62162cde304 100644 --- a/src/include/replication/slot.h +++ b/src/include/replication/slot.h @@ -134,7 +134,7 @@ typedef struct ReplicationSlotPersistentData /* * Was this slot synchronized from the primary server? 
*/ - char synced; + bool synced; /* * Is this a failover slot (sync candidate for standbys)? Only relevant diff --git a/src/include/replication/worker_internal.h b/src/include/replication/worker_internal.h index 0c7b8440a61e3..de00380261279 100644 --- a/src/include/replication/worker_internal.h +++ b/src/include/replication/worker_internal.h @@ -87,12 +87,16 @@ typedef struct LogicalRepWorker bool parallel_apply; /* - * The changes made by this and later transactions must be retained to - * ensure reliable conflict detection during the apply phase. + * Changes made by this transaction and subsequent ones must be preserved. + * This ensures that update_deleted conflicts can be accurately detected + * during the apply phase of logical replication by this worker. * * The logical replication launcher manages an internal replication slot * named "pg_conflict_detection". It asynchronously collects this ID to * decide when to advance the xmin value of the slot. + * + * This ID is set to InvalidTransactionId when the apply worker stops + * retaining information needed for conflict detection. */ TransactionId oldest_nonremovable_xid; @@ -268,6 +272,7 @@ extern void ReplicationOriginNameForLogicalRep(Oid suboid, Oid relid, char *originname, Size szoriginname); extern bool AllTablesyncsReady(void); +extern bool HasSubscriptionRelationsCached(void); extern void UpdateTwoPhaseState(Oid suboid, char new_state); extern void process_syncing_tables(XLogRecPtr current_lsn); diff --git a/src/include/storage/aio_internal.h b/src/include/storage/aio_internal.h index 2d37a243abe52..b4de30f2ec149 100644 --- a/src/include/storage/aio_internal.h +++ b/src/include/storage/aio_internal.h @@ -92,17 +92,23 @@ typedef enum PgAioHandleState struct ResourceOwnerData; -/* typedef is in aio_types.h */ +/* + * Typedef is in aio_types.h + * + * We don't use the underlying enums for state, target and op to avoid wasting + * space. We tried using bitfields, but several compilers generate rather + * horrid code for that. + */ struct PgAioHandle { /* all state updates should go through pgaio_io_update_state() */ - PgAioHandleState state:8; + uint8 state; /* what are we operating on */ - PgAioTargetID target:8; + uint8 target; /* which IO operation */ - PgAioOp op:8; + uint8 op; /* bitfield of PgAioHandleFlags */ uint8 flags; diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h index 52a71b138f736..dfd614f7ca449 100644 --- a/src/include/storage/buf_internals.h +++ b/src/include/storage/buf_internals.h @@ -80,8 +80,8 @@ StaticAssertDecl(BUF_REFCOUNT_BITS + BUF_USAGECOUNT_BITS + BUF_FLAG_BITS == 32, * The maximum allowed value of usage_count represents a tradeoff between * accuracy and speed of the clock-sweep buffer management algorithm. A * large value (comparable to NBuffers) would approximate LRU semantics. - * But it can take as many as BM_MAX_USAGE_COUNT+1 complete cycles of - * clock sweeps to find a free buffer, so in practice we don't want the + * But it can take as many as BM_MAX_USAGE_COUNT+1 complete cycles of the + * clock-sweep hand to find a free buffer, so in practice we don't want the * value to be very large. */ #define BM_MAX_USAGE_COUNT 5 @@ -217,8 +217,7 @@ BufMappingPartitionLockByIndex(uint32 index) * single atomic variable. This layout allow us to do some operations in a * single atomic operation, without actually acquiring and releasing spinlock; * for instance, increase or decrease refcount. buf_id field never changes - * after initialization, so does not need locking. 
freeNext is protected by - * the buffer_strategy_lock not buffer header lock. The LWLock can take care + * after initialization, so does not need locking. The LWLock can take care * of itself. The buffer header lock is *not* used to control access to the * data in the buffer! * @@ -264,7 +263,6 @@ typedef struct BufferDesc pg_atomic_uint32 state; int wait_backend_pgprocno; /* backend of pin-count waiter */ - int freeNext; /* link in freelist chain */ PgAioWaitRef io_wref; /* set iff AIO is in progress */ LWLock content_lock; /* to lock access to buffer contents */ @@ -360,13 +358,6 @@ BufferDescriptorGetContentLock(const BufferDesc *bdesc) return (LWLock *) (&bdesc->content_lock); } -/* - * The freeNext field is either the index of the next freelist entry, - * or one of these special values: - */ -#define FREENEXT_END_OF_LIST (-1) -#define FREENEXT_NOT_IN_LIST (-2) - /* * Functions for acquiring/releasing a shared buffer header's spinlock. Do * not apply these to local buffers! @@ -444,7 +435,6 @@ extern void TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag extern IOContext IOContextForStrategy(BufferAccessStrategy strategy); extern BufferDesc *StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_ring); -extern void StrategyFreeBuffer(BufferDesc *buf); extern bool StrategyRejectBuffer(BufferAccessStrategy strategy, BufferDesc *buf, bool from_ring); @@ -453,7 +443,6 @@ extern void StrategyNotifyBgWriter(int bgwprocno); extern Size StrategyShmemSize(void); extern void StrategyInitialize(bool init); -extern bool have_free_buffer(void); /* buf_table.c */ extern Size BufTableShmemSize(int size); diff --git a/src/include/storage/lmgr.h b/src/include/storage/lmgr.h index 58eee4e0d54c2..b7abd18397d62 100644 --- a/src/include/storage/lmgr.h +++ b/src/include/storage/lmgr.h @@ -71,16 +71,16 @@ extern bool ConditionalLockPage(Relation relation, BlockNumber blkno, LOCKMODE l extern void UnlockPage(Relation relation, BlockNumber blkno, LOCKMODE lockmode); /* Lock a tuple (see heap_lock_tuple before assuming you understand this) */ -extern void LockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode); -extern bool ConditionalLockTuple(Relation relation, ItemPointer tid, +extern void LockTuple(Relation relation, const ItemPointerData *tid, LOCKMODE lockmode); +extern bool ConditionalLockTuple(Relation relation, const ItemPointerData *tid, LOCKMODE lockmode, bool logLockFailure); -extern void UnlockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode); +extern void UnlockTuple(Relation relation, const ItemPointerData *tid, LOCKMODE lockmode); /* Lock an XID (used to wait for a transaction to finish) */ extern void XactLockTableInsert(TransactionId xid); extern void XactLockTableDelete(TransactionId xid); extern void XactLockTableWait(TransactionId xid, Relation rel, - ItemPointer ctid, XLTW_Oper oper); + const ItemPointerData *ctid, XLTW_Oper oper); extern bool ConditionalXactLockTableWait(TransactionId xid, bool logLockFailure); diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h index 5e717765764f4..0e9cf81a4c766 100644 --- a/src/include/storage/lwlock.h +++ b/src/include/storage/lwlock.h @@ -73,15 +73,9 @@ typedef union LWLockPadded extern PGDLLIMPORT LWLockPadded *MainLWLockArray; -/* struct for storing named tranche information */ -typedef struct NamedLWLockTranche -{ - int trancheId; - char *trancheName; -} NamedLWLockTranche; - -extern PGDLLIMPORT NamedLWLockTranche *NamedLWLockTrancheArray; +extern PGDLLIMPORT char 
**LWLockTrancheNames; extern PGDLLIMPORT int NamedLWLockTrancheRequests; +extern PGDLLIMPORT int *LWLockCounter; /* * It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS @@ -157,19 +151,11 @@ extern LWLockPadded *GetNamedLWLockTranche(const char *tranche_name); /* * There is another, more flexible method of obtaining lwlocks. First, call - * LWLockNewTrancheId just once to obtain a tranche ID; this allocates from - * a shared counter. Next, each individual process using the tranche should - * call LWLockRegisterTranche() to associate that tranche ID with a name. - * Finally, LWLockInitialize should be called just once per lwlock, passing - * the tranche ID as an argument. - * - * It may seem strange that each process using the tranche must register it - * separately, but dynamic shared memory segments aren't guaranteed to be - * mapped at the same address in all coordinating backends, so storing the - * registration in the main shared memory segment wouldn't work for that case. + * LWLockNewTrancheId to obtain a tranche ID; this allocates from a shared + * counter. Second, LWLockInitialize should be called just once per lwlock, + * passing the tranche ID as an argument. */ -extern int LWLockNewTrancheId(void); -extern void LWLockRegisterTranche(int tranche_id, const char *tranche_name); +extern int LWLockNewTrancheId(const char *name); extern void LWLockInitialize(LWLock *lock, int tranche_id); /* diff --git a/src/include/storage/lwlocklist.h b/src/include/storage/lwlocklist.h index 208d2e3a8ed9e..06a1ffd4b08b0 100644 --- a/src/include/storage/lwlocklist.h +++ b/src/include/storage/lwlocklist.h @@ -38,7 +38,7 @@ PG_LWLOCK(3, XidGen) PG_LWLOCK(4, ProcArray) PG_LWLOCK(5, SInvalRead) PG_LWLOCK(6, SInvalWrite) -/* 7 was WALBufMapping */ +PG_LWLOCK(7, WALBufMapping) PG_LWLOCK(8, WALWrite) PG_LWLOCK(9, ControlFile) /* 10 was CheckpointLock */ diff --git a/src/include/storage/shmem.h b/src/include/storage/shmem.h index c1f668ded9523..8604feca93ba0 100644 --- a/src/include/storage/shmem.h +++ b/src/include/storage/shmem.h @@ -35,7 +35,7 @@ extern void *ShmemAllocNoError(Size size); extern void *ShmemAllocUnlocked(Size size); extern bool ShmemAddrIsValid(const void *addr); extern void InitShmemIndex(void); -extern HTAB *ShmemInitHash(const char *name, long init_size, long max_size, +extern HTAB *ShmemInitHash(const char *name, int64 init_size, int64 max_size, HASHCTL *infoP, int hash_flags); extern void *ShmemInitStruct(const char *name, Size size, bool *foundPtr); extern Size add_size(Size s1, Size s2); diff --git a/src/include/utils/.gitignore b/src/include/utils/.gitignore index c1b4c662139b1..30f921429c6f9 100644 --- a/src/include/utils/.gitignore +++ b/src/include/utils/.gitignore @@ -1,5 +1,6 @@ /fmgroids.h /fmgrprotos.h +/guc_tables.inc.c /probes.h /errcodes.h /header-stamp diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h index 277ec33c00bac..00808e23f49b8 100644 --- a/src/include/utils/catcache.h +++ b/src/include/utils/catcache.h @@ -87,6 +87,14 @@ typedef struct catcache typedef struct catctup { + /* + * Each tuple in a cache is a member of a dlist that stores the elements + * of its hash bucket. We keep each dlist in LRU order to speed repeated + * lookups. Keep the dlist_node field first so that Valgrind understands + * the struct is reachable. 
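The simplified tranche API above reduces custom-tranche setup to two calls. A minimal sketch under assumed conditions (the tranche name, lock variable, and its placement in ordinary static storage are illustrative only; real callers would put the lock in shared memory):

    static LWLock my_lock;      /* illustrative; normally lives in shared memory */

    void
    my_extension_init(void)
    {
        /* Allocate a tranche ID and register its name in one call; the
         * old per-backend LWLockRegisterTranche() step no longer exists. */
        int         tranche_id = LWLockNewTrancheId("my_extension");

        LWLockInitialize(&my_lock, tranche_id);
    }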
+ */ + dlist_node cache_elem; /* list member of per-bucket list */ + int ct_magic; /* for identifying CatCTup entries */ #define CT_MAGIC 0x57261502 @@ -98,13 +106,6 @@ typedef struct catctup */ Datum keys[CATCACHE_MAXKEYS]; - /* - * Each tuple in a cache is a member of a dlist that stores the elements - * of its hash bucket. We keep each dlist in LRU order to speed repeated - * lookups. - */ - dlist_node cache_elem; /* list member of per-bucket list */ - /* * A tuple marked "dead" must not be returned by subsequent searches. * However, it won't be physically deleted from the cache until its @@ -158,13 +159,17 @@ typedef struct catctup */ typedef struct catclist { + /* + * Keep the dlist_node field first so that Valgrind understands the struct + * is reachable. + */ + dlist_node cache_elem; /* list member of per-catcache list */ + int cl_magic; /* for identifying CatCList entries */ #define CL_MAGIC 0x52765103 uint32 hash_value; /* hash value for lookup keys */ - dlist_node cache_elem; /* list member of per-catcache list */ - /* * Lookup keys for the entry, with the first nkeys elements being valid. * All by-reference keys are separately allocated. diff --git a/src/include/utils/dsa.h b/src/include/utils/dsa.h index 0a6067be6288b..f2104dacbfcc5 100644 --- a/src/include/utils/dsa.h +++ b/src/include/utils/dsa.h @@ -114,13 +114,13 @@ typedef pg_atomic_uint64 dsa_pointer_atomic; dsa_allocate_extended(area, size, DSA_ALLOC_ZERO) /* Create dsa_area with default segment sizes */ -#define dsa_create(tranch_id) \ - dsa_create_ext(tranch_id, DSA_DEFAULT_INIT_SEGMENT_SIZE, \ +#define dsa_create(tranche_id) \ + dsa_create_ext(tranche_id, DSA_DEFAULT_INIT_SEGMENT_SIZE, \ DSA_MAX_SEGMENT_SIZE) /* Create dsa_area with default segment sizes in an existing shared memory space */ -#define dsa_create_in_place(place, size, tranch_id, segment) \ - dsa_create_in_place_ext(place, size, tranch_id, segment, \ +#define dsa_create_in_place(place, size, tranche_id, segment) \ + dsa_create_in_place_ext(place, size, tranche_id, segment, \ DSA_DEFAULT_INIT_SEGMENT_SIZE, \ DSA_MAX_SEGMENT_SIZE) diff --git a/src/include/utils/dynahash.h b/src/include/utils/dynahash.h deleted file mode 100644 index 8a31d9524e2a4..0000000000000 --- a/src/include/utils/dynahash.h +++ /dev/null @@ -1,20 +0,0 @@ -/*------------------------------------------------------------------------- - * - * dynahash.h - * POSTGRES dynahash.h file definitions - * - * - * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * IDENTIFICATION - * src/include/utils/dynahash.h - * - *------------------------------------------------------------------------- - */ -#ifndef DYNAHASH_H -#define DYNAHASH_H - -extern int my_log2(long num); - -#endif /* DYNAHASH_H */ diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h index f619100467df2..f21ec37da8933 100644 --- a/src/include/utils/guc.h +++ b/src/include/utils/guc.h @@ -106,7 +106,7 @@ typedef enum * will show as "default" in pg_settings. If there is a specific reason not * to want that, use source == PGC_S_OVERRIDE. * - * NB: see GucSource_Names in guc.c if you change this. + * NB: see GucSource_Names in guc_tables.c if you change this.
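The dsa.h spelling fix above also clarifies the parameter's meaning: the ID passed to dsa_create() is an LWLock tranche ID. A brief sketch of a hypothetical caller, building on the one-call tranche registration shown earlier:

    /* Create a DSA area whose internal locks report under our tranche name. */
    int         tranche_id = LWLockNewTrancheId("my_dsa_area");
    dsa_area   *area = dsa_create(tranche_id);

    /* Allocate 1kB from the area, as usual. */
    dsa_pointer chunk = dsa_allocate(area, 1024);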
*/ typedef enum { @@ -247,6 +247,7 @@ typedef enum /* GUC vars that are actually defined in guc_tables.c, rather than elsewhere */ extern PGDLLIMPORT bool Debug_print_plan; extern PGDLLIMPORT bool Debug_print_parse; +extern PGDLLIMPORT bool Debug_print_raw_parse; extern PGDLLIMPORT bool Debug_print_rewritten; extern PGDLLIMPORT bool Debug_pretty_print; @@ -254,8 +255,31 @@ extern PGDLLIMPORT bool Debug_pretty_print; extern PGDLLIMPORT bool Debug_copy_parse_plan_trees; extern PGDLLIMPORT bool Debug_write_read_parse_plan_trees; extern PGDLLIMPORT bool Debug_raw_expression_coverage_test; + +/* + * support for legacy compile-time settings + */ + +#ifdef COPY_PARSE_PLAN_TREES +#define DEFAULT_DEBUG_COPY_PARSE_PLAN_TREES true +#else +#define DEFAULT_DEBUG_COPY_PARSE_PLAN_TREES false +#endif + +#ifdef READ_WRITE_PARSE_PLAN_TREES +#define DEFAULT_DEBUG_READ_WRITE_PARSE_PLAN_TREES true +#else +#define DEFAULT_DEBUG_READ_WRITE_PARSE_PLAN_TREES false +#endif + +#ifdef RAW_EXPRESSION_COVERAGE_TEST +#define DEFAULT_DEBUG_RAW_EXPRESSION_COVERAGE_TEST true +#else +#define DEFAULT_DEBUG_RAW_EXPRESSION_COVERAGE_TEST false #endif +#endif /* DEBUG_NODE_TESTS_ENABLED */ + extern PGDLLIMPORT bool log_parser_stats; extern PGDLLIMPORT bool log_planner_stats; extern PGDLLIMPORT bool log_executor_stats; diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h index 932cc4f34d90d..cb09a4cbe8cbd 100644 --- a/src/include/utils/hsearch.h +++ b/src/include/utils/hsearch.h @@ -65,12 +65,12 @@ typedef struct HTAB HTAB; typedef struct HASHCTL { /* Used if HASH_PARTITION flag is set: */ - long num_partitions; /* # partitions (must be power of 2) */ + int64 num_partitions; /* # partitions (must be power of 2) */ /* Used if HASH_SEGMENT flag is set: */ - long ssize; /* segment size */ + int64 ssize; /* segment size */ /* Used if HASH_DIRSIZE flag is set: */ - long dsize; /* (initial) directory size */ - long max_dsize; /* limit to dsize if dir size is limited */ + int64 dsize; /* (initial) directory size */ + int64 max_dsize; /* limit to dsize if dir size is limited */ /* Used if HASH_ELEM flag is set (which is now required): */ Size keysize; /* hash key length in bytes */ Size entrysize; /* total user element size in bytes */ @@ -129,10 +129,10 @@ typedef struct /* * prototypes for functions in dynahash.c */ -extern HTAB *hash_create(const char *tabname, long nelem, +extern HTAB *hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags); extern void hash_destroy(HTAB *hashp); -extern void hash_stats(const char *where, HTAB *hashp); +extern void hash_stats(const char *caller, HTAB *hashp); extern void *hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr); extern uint32 get_hash_value(HTAB *hashp, const void *keyPtr); @@ -141,7 +141,7 @@ extern void *hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, bool *foundPtr); extern bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr); -extern long hash_get_num_entries(HTAB *hashp); +extern int64 hash_get_num_entries(HTAB *hashp); extern void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp); extern void hash_seq_init_with_hash_value(HASH_SEQ_STATUS *status, HTAB *hashp, @@ -149,8 +149,8 @@ extern void hash_seq_init_with_hash_value(HASH_SEQ_STATUS *status, extern void *hash_seq_search(HASH_SEQ_STATUS *status); extern void hash_seq_term(HASH_SEQ_STATUS *status); extern void hash_freeze(HTAB *hashp); -extern Size hash_estimate_size(long num_entries, Size entrysize); -extern long 
hash_select_dirsize(long num_entries); +extern Size hash_estimate_size(int64 num_entries, Size entrysize); +extern int64 hash_select_dirsize(int64 num_entries); extern Size hash_get_shared_size(HASHCTL *info, int flags); extern void AtEOXact_HashTables(bool isCommit); extern void AtEOSubXact_HashTables(bool isCommit, int nestDepth); diff --git a/src/include/utils/inval.h b/src/include/utils/inval.h index 9b871caef622f..af46625257899 100644 --- a/src/include/utils/inval.h +++ b/src/include/utils/inval.h @@ -20,6 +20,24 @@ extern PGDLLIMPORT int debug_discard_caches; +#define MIN_DEBUG_DISCARD_CACHES 0 + +#ifdef DISCARD_CACHES_ENABLED + /* Set default based on older compile-time-only cache clobber macros */ +#if defined(CLOBBER_CACHE_RECURSIVELY) +#define DEFAULT_DEBUG_DISCARD_CACHES 3 +#elif defined(CLOBBER_CACHE_ALWAYS) +#define DEFAULT_DEBUG_DISCARD_CACHES 1 +#else +#define DEFAULT_DEBUG_DISCARD_CACHES 0 +#endif +#define MAX_DEBUG_DISCARD_CACHES 5 +#else /* not DISCARD_CACHES_ENABLED */ +#define DEFAULT_DEBUG_DISCARD_CACHES 0 +#define MAX_DEBUG_DISCARD_CACHES 0 +#endif /* not DISCARD_CACHES_ENABLED */ + + typedef void (*SyscacheCallbackFunction) (Datum arg, int cacheid, uint32 hashvalue); typedef void (*RelcacheCallbackFunction) (Datum arg, Oid relid); typedef void (*RelSyncCallbackFunction) (Datum arg, Oid relid); diff --git a/src/include/utils/lsyscache.h b/src/include/utils/lsyscache.h index fa7c7e0323b10..c65cee4f24cd2 100644 --- a/src/include/utils/lsyscache.h +++ b/src/include/utils/lsyscache.h @@ -104,7 +104,7 @@ extern bool get_collation_isdeterministic(Oid colloid); extern char *get_constraint_name(Oid conoid); extern Oid get_constraint_index(Oid conoid); extern char get_constraint_type(Oid conoid); - +extern char *get_database_name(Oid dbid); extern char *get_language_name(Oid langoid, bool missing_ok); extern Oid get_opclass_family(Oid opclass); extern Oid get_opclass_input_type(Oid opclass); diff --git a/src/include/utils/memdebug.h b/src/include/utils/memdebug.h index 7309271834b9f..80692dcef9382 100644 --- a/src/include/utils/memdebug.h +++ b/src/include/utils/memdebug.h @@ -29,6 +29,7 @@ #define VALGRIND_MEMPOOL_ALLOC(context, addr, size) do {} while (0) #define VALGRIND_MEMPOOL_FREE(context, addr) do {} while (0) #define VALGRIND_MEMPOOL_CHANGE(context, optr, nptr, size) do {} while (0) +#define VALGRIND_MEMPOOL_TRIM(context, addr, size) do {} while (0) #endif diff --git a/src/include/utils/meson.build b/src/include/utils/meson.build index 78c6b9b0a232a..0a2ea8fa32cae 100644 --- a/src/include/utils/meson.build +++ b/src/include/utils/meson.build @@ -30,6 +30,13 @@ errcodes = custom_target('errcodes', ) generated_headers += errcodes +guc_tables = custom_target('guc_tables', + input: files('../../backend/utils/misc/guc_parameters.dat'), + output: ['guc_tables.inc.c'], + depend_files: catalog_pm, + command: [perl, files('../../backend/utils/misc/gen_guc_tables.pl'), '@INPUT@', '@OUTPUT@']) +generated_headers += guc_tables + if dtrace.found() probes_tmp = custom_target('probes.h.tmp', input: files('../../backend/utils/probes.d'), diff --git a/src/include/utils/numeric.h b/src/include/utils/numeric.h index 9e79fc376cbea..215f1ea4f53b4 100644 --- a/src/include/utils/numeric.h +++ b/src/include/utils/numeric.h @@ -17,6 +17,9 @@ #include "common/pg_prng.h" #include "fmgr.h" +/* forward declaration to avoid node.h include */ +typedef struct Node Node; + /* * Limits on the precision and scale specifiable in a NUMERIC typmod. 
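numeric.h now declares *_safe arithmetic functions (just below) that report failures through an escontext node rather than the old bool *have_error out-parameter. A hedged caller-side sketch, assuming the existing soft-error support in nodes/miscnodes.h (num1 and num2 are assumed inputs):

    ErrorSaveContext escontext = {T_ErrorSaveContext};
    Numeric     result;

    /* On failure (e.g. division by zero) the error is captured, not thrown. */
    result = numeric_div_safe(num1, num2, (Node *) &escontext);
    if (escontext.error_occurred)
        result = NULL;      /* caller-defined fallback */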
The * precision is strictly positive, but the scale may be positive or negative. @@ -91,18 +94,13 @@ extern char *numeric_normalize(Numeric num); extern Numeric int64_to_numeric(int64 val); extern Numeric int64_div_fast_to_numeric(int64 val1, int log10val2); -extern Numeric numeric_add_opt_error(Numeric num1, Numeric num2, - bool *have_error); -extern Numeric numeric_sub_opt_error(Numeric num1, Numeric num2, - bool *have_error); -extern Numeric numeric_mul_opt_error(Numeric num1, Numeric num2, - bool *have_error); -extern Numeric numeric_div_opt_error(Numeric num1, Numeric num2, - bool *have_error); -extern Numeric numeric_mod_opt_error(Numeric num1, Numeric num2, - bool *have_error); -extern int32 numeric_int4_opt_error(Numeric num, bool *have_error); -extern int64 numeric_int8_opt_error(Numeric num, bool *have_error); +extern Numeric numeric_add_safe(Numeric num1, Numeric num2, Node *escontext); +extern Numeric numeric_sub_safe(Numeric num1, Numeric num2, Node *escontext); +extern Numeric numeric_mul_safe(Numeric num1, Numeric num2, Node *escontext); +extern Numeric numeric_div_safe(Numeric num1, Numeric num2, Node *escontext); +extern Numeric numeric_mod_safe(Numeric num1, Numeric num2, Node *escontext); +extern int32 numeric_int4_safe(Numeric num, Node *escontext); +extern int64 numeric_int8_safe(Numeric num, Node *escontext); extern Numeric random_numeric(pg_prng_state *state, Numeric rmin, Numeric rmax); diff --git a/src/include/utils/pg_locale.h b/src/include/utils/pg_locale.h index 931f5b3b88068..2b072cafb4d48 100644 --- a/src/include/utils/pg_locale.h +++ b/src/include/utils/pg_locale.h @@ -18,6 +18,8 @@ /* only include the C APIs, to avoid errors in cpluspluscheck */ #undef U_SHOW_CPLUSPLUS_API #define U_SHOW_CPLUSPLUS_API 0 +#undef U_SHOW_CPLUSPLUS_HEADER_API +#define U_SHOW_CPLUSPLUS_HEADER_API 0 #include <unicode/ucol.h> #endif diff --git a/src/include/utils/pg_lsn.h b/src/include/utils/pg_lsn.h index ae198af745029..461a4fdcba954 100644 --- a/src/include/utils/pg_lsn.h +++ b/src/include/utils/pg_lsn.h @@ -18,6 +18,9 @@ #include "access/xlogdefs.h" #include "fmgr.h" +/* forward declaration to avoid node.h include */ +typedef struct Node Node; + static inline XLogRecPtr DatumGetLSN(Datum X) { @@ -33,6 +36,6 @@ LSNGetDatum(XLogRecPtr X) #define PG_GETARG_LSN(n) DatumGetLSN(PG_GETARG_DATUM(n)) #define PG_RETURN_LSN(x) return LSNGetDatum(x) -extern XLogRecPtr pg_lsn_in_internal(const char *str, bool *have_error); +extern XLogRecPtr pg_lsn_in_safe(const char *str, Node *escontext); #endif /* PG_LSN_H */ diff --git a/src/include/utils/selfuncs.h b/src/include/utils/selfuncs.h index 013049b3098ac..fb4fa53363d7c 100644 --- a/src/include/utils/selfuncs.h +++ b/src/include/utils/selfuncs.h @@ -96,7 +96,8 @@ typedef struct VariableStatData int32 atttypmod; /* actual typmod (after stripping relabel) */ bool isunique; /* matches unique index, DISTINCT or GROUP-BY * clause */ - bool acl_ok; /* result of ACL check on table or column */ + bool acl_ok; /* true if user has SELECT privilege on all + * rows from the table or column */ } VariableStatData; #define ReleaseVariableStats(vardata) \ @@ -153,6 +154,7 @@ extern PGDLLIMPORT get_index_stats_hook_type get_index_stats_hook; extern void examine_variable(PlannerInfo *root, Node *node, int varRelid, VariableStatData *vardata); +extern bool all_rows_selectable(PlannerInfo *root, Index varno, Bitmapset *varattnos); extern bool statistic_proc_security_check(VariableStatData *vardata, Oid func_oid); extern bool get_restriction_variable(PlannerInfo *root, List *args,
int varRelid, diff --git a/src/include/utils/snapmgr.h b/src/include/utils/snapmgr.h index d346be716423e..604c1f902169d 100644 --- a/src/include/utils/snapmgr.h +++ b/src/include/utils/snapmgr.h @@ -56,6 +56,9 @@ extern PGDLLIMPORT SnapshotData SnapshotToastData; ((snapshot)->snapshot_type == SNAPSHOT_MVCC || \ (snapshot)->snapshot_type == SNAPSHOT_HISTORIC_MVCC) +#define IsHistoricMVCCSnapshot(snapshot) \ + ((snapshot)->snapshot_type == SNAPSHOT_HISTORIC_MVCC) + extern Snapshot GetTransactionSnapshot(void); extern Snapshot GetLatestSnapshot(void); extern void SnapshotSetCommandId(CommandId curcid); diff --git a/src/include/utils/sortsupport.h b/src/include/utils/sortsupport.h index b7abaf7802de0..c64527e2ee907 100644 --- a/src/include/utils/sortsupport.h +++ b/src/include/utils/sortsupport.h @@ -262,7 +262,6 @@ ApplyUnsignedSortComparator(Datum datum1, bool isNull1, return compare; } -#if SIZEOF_DATUM >= 8 static inline int ApplySignedSortComparator(Datum datum1, bool isNull1, Datum datum2, bool isNull2, @@ -296,7 +295,6 @@ ApplySignedSortComparator(Datum datum1, bool isNull1, return compare; } -#endif static inline int ApplyInt32SortComparator(Datum datum1, bool isNull1, @@ -376,9 +374,7 @@ ApplySortAbbrevFullComparator(Datum datum1, bool isNull1, * are eligible for faster sorting. */ extern int ssup_datum_unsigned_cmp(Datum x, Datum y, SortSupport ssup); -#if SIZEOF_DATUM >= 8 extern int ssup_datum_signed_cmp(Datum x, Datum y, SortSupport ssup); -#endif extern int ssup_datum_int32_cmp(Datum x, Datum y, SortSupport ssup); /* Other functions in utils/sort/sortsupport.c */ diff --git a/src/include/varatt.h b/src/include/varatt.h index 2e8564d49980b..aeeabf9145b59 100644 --- a/src/include/varatt.h +++ b/src/include/varatt.h @@ -89,20 +89,35 @@ typedef enum vartag_external VARTAG_ONDISK = 18 } vartag_external; +/* Is a TOAST pointer either type of expanded-object pointer? */ /* this test relies on the specific tag values above */ -#define VARTAG_IS_EXPANDED(tag) \ - (((tag) & ~1) == VARTAG_EXPANDED_RO) +static inline bool +VARTAG_IS_EXPANDED(vartag_external tag) +{ + return ((tag & ~1) == VARTAG_EXPANDED_RO); +} -#define VARTAG_SIZE(tag) \ - ((tag) == VARTAG_INDIRECT ? sizeof(varatt_indirect) : \ - VARTAG_IS_EXPANDED(tag) ? sizeof(varatt_expanded) : \ - (tag) == VARTAG_ONDISK ? sizeof(varatt_external) : \ - (AssertMacro(false), 0)) +/* Size of the data part of a "TOAST pointer" datum */ +static inline Size +VARTAG_SIZE(vartag_external tag) +{ + if (tag == VARTAG_INDIRECT) + return sizeof(varatt_indirect); + else if (VARTAG_IS_EXPANDED(tag)) + return sizeof(varatt_expanded); + else if (tag == VARTAG_ONDISK) + return sizeof(varatt_external); + else + { + Assert(false); + return 0; + } +} /* * These structs describe the header of a varlena object that may have been * TOASTed. Generally, don't reference these structs directly, but use the - * macros below. + * functions and macros below. * * We use separate structs for the aligned and unaligned cases because the * compiler might otherwise think it could generate code that assumes @@ -166,7 +181,9 @@ typedef struct /* * Endian-dependent macros. These are considered internal --- use the - * external macros below instead of using these directly. + * external functions below instead of using these directly. All of these + * expect an argument that is a pointer, not a Datum. Some of them have + * multiple-evaluation hazards, too. * * Note: IS_1B is true for external toast records but VARSIZE_1B will return 0 * for such records. 
Hence you should usually check for IS_EXTERNAL before @@ -194,7 +211,7 @@ typedef struct #define VARSIZE_1B(PTR) \ (((varattrib_1b *) (PTR))->va_header & 0x7F) #define VARTAG_1B_E(PTR) \ - (((varattrib_1b_e *) (PTR))->va_tag) + ((vartag_external) ((varattrib_1b_e *) (PTR))->va_tag) #define SET_VARSIZE_4B(PTR,len) \ (((varattrib_4b *) (PTR))->va_4byte.va_header = (len) & 0x3FFFFFFF) @@ -227,7 +244,7 @@ typedef struct #define VARSIZE_1B(PTR) \ ((((varattrib_1b *) (PTR))->va_header >> 1) & 0x7F) #define VARTAG_1B_E(PTR) \ - (((varattrib_1b_e *) (PTR))->va_tag) + ((vartag_external) ((varattrib_1b_e *) (PTR))->va_tag) #define SET_VARSIZE_4B(PTR,len) \ (((varattrib_4b *) (PTR))->va_4byte.va_header = (((uint32) (len)) << 2)) @@ -247,19 +264,19 @@ typedef struct #define VARDATA_1B_E(PTR) (((varattrib_1b_e *) (PTR))->va_data) /* - * Externally visible TOAST macros begin here. + * Externally visible TOAST functions and macros begin here. All of these + * were originally macros, accounting for the upper-case naming. + * + * Most of these functions accept a pointer to a value of a toastable data + * type. The caller's variable might be declared "text *" or the like, + * so we use "void *" here. Callers that are working with a Datum variable + * must apply DatumGetPointer before calling these functions. */ #define VARHDRSZ_EXTERNAL offsetof(varattrib_1b_e, va_data) #define VARHDRSZ_COMPRESSED offsetof(varattrib_4b, va_compressed.va_data) #define VARHDRSZ_SHORT offsetof(varattrib_1b, va_data) - #define VARATT_SHORT_MAX 0x7F -#define VARATT_CAN_MAKE_SHORT(PTR) \ - (VARATT_IS_4B_U(PTR) && \ - (VARSIZE(PTR) - VARHDRSZ + VARHDRSZ_SHORT) <= VARATT_SHORT_MAX) -#define VARATT_CONVERTED_SHORT_SIZE(PTR) \ - (VARSIZE(PTR) - VARHDRSZ + VARHDRSZ_SHORT) /* * In consumers oblivious to data alignment, call PG_DETOAST_DATUM_PACKED(), @@ -272,70 +289,234 @@ typedef struct * Code assembling a new datum should call VARDATA() and SET_VARSIZE(). * (Datums begin life untoasted.) * - * Other macros here should usually be used only by tuple assembly/disassembly + * Other functions here should usually be used only by tuple assembly/disassembly * code and code that specifically wants to work with still-toasted Datums. 
*/ -#define VARDATA(PTR) VARDATA_4B(PTR) -#define VARSIZE(PTR) VARSIZE_4B(PTR) - -#define VARSIZE_SHORT(PTR) VARSIZE_1B(PTR) -#define VARDATA_SHORT(PTR) VARDATA_1B(PTR) - -#define VARTAG_EXTERNAL(PTR) VARTAG_1B_E(PTR) -#define VARSIZE_EXTERNAL(PTR) (VARHDRSZ_EXTERNAL + VARTAG_SIZE(VARTAG_EXTERNAL(PTR))) -#define VARDATA_EXTERNAL(PTR) VARDATA_1B_E(PTR) - -#define VARATT_IS_COMPRESSED(PTR) VARATT_IS_4B_C(PTR) -#define VARATT_IS_EXTERNAL(PTR) VARATT_IS_1B_E(PTR) -#define VARATT_IS_EXTERNAL_ONDISK(PTR) \ - (VARATT_IS_EXTERNAL(PTR) && VARTAG_EXTERNAL(PTR) == VARTAG_ONDISK) -#define VARATT_IS_EXTERNAL_INDIRECT(PTR) \ - (VARATT_IS_EXTERNAL(PTR) && VARTAG_EXTERNAL(PTR) == VARTAG_INDIRECT) -#define VARATT_IS_EXTERNAL_EXPANDED_RO(PTR) \ - (VARATT_IS_EXTERNAL(PTR) && VARTAG_EXTERNAL(PTR) == VARTAG_EXPANDED_RO) -#define VARATT_IS_EXTERNAL_EXPANDED_RW(PTR) \ - (VARATT_IS_EXTERNAL(PTR) && VARTAG_EXTERNAL(PTR) == VARTAG_EXPANDED_RW) -#define VARATT_IS_EXTERNAL_EXPANDED(PTR) \ - (VARATT_IS_EXTERNAL(PTR) && VARTAG_IS_EXPANDED(VARTAG_EXTERNAL(PTR))) -#define VARATT_IS_EXTERNAL_NON_EXPANDED(PTR) \ - (VARATT_IS_EXTERNAL(PTR) && !VARTAG_IS_EXPANDED(VARTAG_EXTERNAL(PTR))) -#define VARATT_IS_SHORT(PTR) VARATT_IS_1B(PTR) -#define VARATT_IS_EXTENDED(PTR) (!VARATT_IS_4B_U(PTR)) - -#define SET_VARSIZE(PTR, len) SET_VARSIZE_4B(PTR, len) -#define SET_VARSIZE_SHORT(PTR, len) SET_VARSIZE_1B(PTR, len) -#define SET_VARSIZE_COMPRESSED(PTR, len) SET_VARSIZE_4B_C(PTR, len) - -#define SET_VARTAG_EXTERNAL(PTR, tag) SET_VARTAG_1B_E(PTR, tag) - -#define VARSIZE_ANY(PTR) \ - (VARATT_IS_1B_E(PTR) ? VARSIZE_EXTERNAL(PTR) : \ - (VARATT_IS_1B(PTR) ? VARSIZE_1B(PTR) : \ - VARSIZE_4B(PTR))) - -/* Size of a varlena data, excluding header */ -#define VARSIZE_ANY_EXHDR(PTR) \ - (VARATT_IS_1B_E(PTR) ? VARSIZE_EXTERNAL(PTR)-VARHDRSZ_EXTERNAL : \ - (VARATT_IS_1B(PTR) ? VARSIZE_1B(PTR)-VARHDRSZ_SHORT : \ - VARSIZE_4B(PTR)-VARHDRSZ)) +/* Size of a known-not-toasted varlena datum, including header */ +static inline Size +VARSIZE(const void *PTR) +{ + return VARSIZE_4B(PTR); +} + +/* Start of data area of a known-not-toasted varlena datum */ +static inline char * +VARDATA(const void *PTR) +{ + return VARDATA_4B(PTR); +} + +/* Size of a known-short-header varlena datum, including header */ +static inline Size +VARSIZE_SHORT(const void *PTR) +{ + return VARSIZE_1B(PTR); +} + +/* Start of data area of a known-short-header varlena datum */ +static inline char * +VARDATA_SHORT(const void *PTR) +{ + return VARDATA_1B(PTR); +} + +/* Type tag of a "TOAST pointer" datum */ +static inline vartag_external +VARTAG_EXTERNAL(const void *PTR) +{ + return VARTAG_1B_E(PTR); +} + +/* Size of a "TOAST pointer" datum, including header */ +static inline Size +VARSIZE_EXTERNAL(const void *PTR) +{ + return VARHDRSZ_EXTERNAL + VARTAG_SIZE(VARTAG_EXTERNAL(PTR)); +} + +/* Start of data area of a "TOAST pointer" datum */ +static inline char * +VARDATA_EXTERNAL(const void *PTR) +{ + return VARDATA_1B_E(PTR); +} + +/* Is varlena datum in inline-compressed format? */ +static inline bool +VARATT_IS_COMPRESSED(const void *PTR) +{ + return VARATT_IS_4B_C(PTR); +} + +/* Is varlena datum a "TOAST pointer" datum? */ +static inline bool +VARATT_IS_EXTERNAL(const void *PTR) +{ + return VARATT_IS_1B_E(PTR); +} + +/* Is varlena datum a pointer to on-disk toasted data? */ +static inline bool +VARATT_IS_EXTERNAL_ONDISK(const void *PTR) +{ + return VARATT_IS_EXTERNAL(PTR) && VARTAG_EXTERNAL(PTR) == VARTAG_ONDISK; +} + +/* Is varlena datum an indirect pointer? 
*/ +static inline bool +VARATT_IS_EXTERNAL_INDIRECT(const void *PTR) +{ + return VARATT_IS_EXTERNAL(PTR) && VARTAG_EXTERNAL(PTR) == VARTAG_INDIRECT; +} + +/* Is varlena datum a read-only pointer to an expanded object? */ +static inline bool +VARATT_IS_EXTERNAL_EXPANDED_RO(const void *PTR) +{ + return VARATT_IS_EXTERNAL(PTR) && VARTAG_EXTERNAL(PTR) == VARTAG_EXPANDED_RO; +} + +/* Is varlena datum a read-write pointer to an expanded object? */ +static inline bool +VARATT_IS_EXTERNAL_EXPANDED_RW(const void *PTR) +{ + return VARATT_IS_EXTERNAL(PTR) && VARTAG_EXTERNAL(PTR) == VARTAG_EXPANDED_RW; +} + +/* Is varlena datum either type of pointer to an expanded object? */ +static inline bool +VARATT_IS_EXTERNAL_EXPANDED(const void *PTR) +{ + return VARATT_IS_EXTERNAL(PTR) && VARTAG_IS_EXPANDED(VARTAG_EXTERNAL(PTR)); +} + +/* Is varlena datum a "TOAST pointer", but not for an expanded object? */ +static inline bool +VARATT_IS_EXTERNAL_NON_EXPANDED(const void *PTR) +{ + return VARATT_IS_EXTERNAL(PTR) && !VARTAG_IS_EXPANDED(VARTAG_EXTERNAL(PTR)); +} + +/* Is varlena datum a short-header datum? */ +static inline bool +VARATT_IS_SHORT(const void *PTR) +{ + return VARATT_IS_1B(PTR); +} + +/* Is varlena datum not in traditional (4-byte-header, uncompressed) format? */ +static inline bool +VARATT_IS_EXTENDED(const void *PTR) +{ + return !VARATT_IS_4B_U(PTR); +} + +/* Is varlena datum short enough to convert to short-header format? */ +static inline bool +VARATT_CAN_MAKE_SHORT(const void *PTR) +{ + return VARATT_IS_4B_U(PTR) && + (VARSIZE(PTR) - VARHDRSZ + VARHDRSZ_SHORT) <= VARATT_SHORT_MAX; +} + +/* Size that datum will have in short-header format, including header */ +static inline Size +VARATT_CONVERTED_SHORT_SIZE(const void *PTR) +{ + return VARSIZE(PTR) - VARHDRSZ + VARHDRSZ_SHORT; +} + +/* Set the size (including header) of a 4-byte-header varlena datum */ +static inline void +SET_VARSIZE(void *PTR, Size len) +{ + SET_VARSIZE_4B(PTR, len); +} + +/* Set the size (including header) of a short-header varlena datum */ +static inline void +SET_VARSIZE_SHORT(void *PTR, Size len) +{ + SET_VARSIZE_1B(PTR, len); +} + +/* Set the size (including header) of an inline-compressed varlena datum */ +static inline void +SET_VARSIZE_COMPRESSED(void *PTR, Size len) +{ + SET_VARSIZE_4B_C(PTR, len); +} + +/* Set the type tag of a "TOAST pointer" datum */ +static inline void +SET_VARTAG_EXTERNAL(void *PTR, vartag_external tag) +{ + SET_VARTAG_1B_E(PTR, tag); +} + +/* Size of a varlena datum of any format, including header */ +static inline Size +VARSIZE_ANY(const void *PTR) +{ + if (VARATT_IS_1B_E(PTR)) + return VARSIZE_EXTERNAL(PTR); + else if (VARATT_IS_1B(PTR)) + return VARSIZE_1B(PTR); + else + return VARSIZE_4B(PTR); +} + +/* Size of a varlena datum of any format, excluding header */ +static inline Size +VARSIZE_ANY_EXHDR(const void *PTR) +{ + if (VARATT_IS_1B_E(PTR)) + return VARSIZE_EXTERNAL(PTR) - VARHDRSZ_EXTERNAL; + else if (VARATT_IS_1B(PTR)) + return VARSIZE_1B(PTR) - VARHDRSZ_SHORT; + else + return VARSIZE_4B(PTR) - VARHDRSZ; +} + +/* Start of data area of a plain or short-header varlena datum */ /* caution: this will not work on an external or compressed-in-line Datum */ /* caution: this will return a possibly unaligned pointer */ -#define VARDATA_ANY(PTR) \ - (VARATT_IS_1B(PTR) ? VARDATA_1B(PTR) : VARDATA_4B(PTR)) +static inline char * +VARDATA_ANY(const void *PTR) +{ + return VARATT_IS_1B(PTR) ? 
VARDATA_1B(PTR) : VARDATA_4B(PTR); +} -/* Decompressed size and compression method of a compressed-in-line Datum */ -#define VARDATA_COMPRESSED_GET_EXTSIZE(PTR) \ - (((varattrib_4b *) (PTR))->va_compressed.va_tcinfo & VARLENA_EXTSIZE_MASK) -#define VARDATA_COMPRESSED_GET_COMPRESS_METHOD(PTR) \ - (((varattrib_4b *) (PTR))->va_compressed.va_tcinfo >> VARLENA_EXTSIZE_BITS) +/* Decompressed size of a compressed-in-line varlena datum */ +static inline Size +VARDATA_COMPRESSED_GET_EXTSIZE(const void *PTR) +{ + return ((varattrib_4b *) PTR)->va_compressed.va_tcinfo & VARLENA_EXTSIZE_MASK; +} + +/* Compression method of a compressed-in-line varlena datum */ +static inline uint32 +VARDATA_COMPRESSED_GET_COMPRESS_METHOD(const void *PTR) +{ + return ((varattrib_4b *) PTR)->va_compressed.va_tcinfo >> VARLENA_EXTSIZE_BITS; +} /* Same for external Datums; but note argument is a struct varatt_external */ -#define VARATT_EXTERNAL_GET_EXTSIZE(toast_pointer) \ - ((toast_pointer).va_extinfo & VARLENA_EXTSIZE_MASK) -#define VARATT_EXTERNAL_GET_COMPRESS_METHOD(toast_pointer) \ - ((toast_pointer).va_extinfo >> VARLENA_EXTSIZE_BITS) +static inline Size +VARATT_EXTERNAL_GET_EXTSIZE(struct varatt_external toast_pointer) +{ + return toast_pointer.va_extinfo & VARLENA_EXTSIZE_MASK; +} +static inline uint32 +VARATT_EXTERNAL_GET_COMPRESS_METHOD(struct varatt_external toast_pointer) +{ + return toast_pointer.va_extinfo >> VARLENA_EXTSIZE_BITS; +} + +/* Set size and compress method of an externally-stored varlena datum */ +/* This has to remain a macro; beware multiple evaluations! */ #define VARATT_EXTERNAL_SET_SIZE_AND_COMPRESS_METHOD(toast_pointer, len, cm) \ do { \ Assert((cm) == TOAST_PGLZ_COMPRESSION_ID || \ @@ -351,8 +532,11 @@ typedef struct * VARHDRSZ overhead, the former doesn't. We never use compression unless it * actually saves space, so we expect either equality or less-than. */ -#define VARATT_EXTERNAL_IS_COMPRESSED(toast_pointer) \ - (VARATT_EXTERNAL_GET_EXTSIZE(toast_pointer) < \ - (toast_pointer).va_rawsize - VARHDRSZ) +static inline bool +VARATT_EXTERNAL_IS_COMPRESSED(struct varatt_external toast_pointer) +{ + return VARATT_EXTERNAL_GET_EXTSIZE(toast_pointer) < + (Size) (toast_pointer.va_rawsize - VARHDRSZ); +} #endif diff --git a/src/interfaces/ecpg/compatlib/informix.c b/src/interfaces/ecpg/compatlib/informix.c index e829d722f2228..ca11a81f1bcfe 100644 --- a/src/interfaces/ecpg/compatlib/informix.c +++ b/src/interfaces/ecpg/compatlib/informix.c @@ -807,8 +807,10 @@ rfmtlong(long lng_val, const char *fmt, char *outbuf) if (strchr(fmt, (int) '(') && strchr(fmt, (int) ')')) brackets_ok = 1; - /* get position of the right-most dot in the format-string */ - /* and fill the temp-string wit '0's up to there. */ + /* + * get position of the right-most dot in the format-string and fill the + * temp-string with '0's up to there. 
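Call syntax for the converted varatt.h functions above is unchanged from the macro era; only the type checking is stricter. A minimal usage sketch (d is an assumed Datum of a varlena type such as text; per the header comment, Datum callers apply DatumGetPointer or a detoasting helper first):

    text   *t = (text *) PG_DETOAST_DATUM_PACKED(d);    /* may keep short header */
    Size    len = VARSIZE_ANY_EXHDR(t);                 /* data length, header excluded */
    char   *p = VARDATA_ANY(t);                         /* caution: possibly unaligned */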
+ */ dotpos = getRightMostDot(fmt); /* start to parse the format-string */ diff --git a/src/interfaces/ecpg/test/expected/preproc-strings.c b/src/interfaces/ecpg/test/expected/preproc-strings.c index a26817968de79..06a86a2bfae4b 100644 --- a/src/interfaces/ecpg/test/expected/preproc-strings.c +++ b/src/interfaces/ecpg/test/expected/preproc-strings.c @@ -18,6 +18,16 @@ #line 3 "strings.pgc" /* exec sql begin declare section */ #line 1 "strings.h" +/* redundant declaration to silence -Wmissing-variable-declarations */ + + + + + + + + + @@ -29,7 +39,10 @@ #line 5 "strings.pgc" -#line 1 "strings.h" +#line 2 "strings.h" + extern char * s1 , * s2 , * s3 , * s4 , * s5 , * s6 , * s7 , * s8 ; + +#line 11 "strings.h" char * s1 , * s2 , * s3 , * s4 , * s5 , * s6 , * s7 , * s8 ; /* exec sql end declare section */ #line 5 "strings.pgc" diff --git a/src/interfaces/ecpg/test/preproc/strings.h b/src/interfaces/ecpg/test/preproc/strings.h index edb5be5339e6d..8932b4ea9ed11 100644 --- a/src/interfaces/ecpg/test/preproc/strings.h +++ b/src/interfaces/ecpg/test/preproc/strings.h @@ -1,3 +1,13 @@ +/* redundant declaration to silence -Wmissing-variable-declarations */ +extern char *s1, + *s2, + *s3, + *s4, + *s5, + *s6, + *s7, + *s8; + char *s1, *s2, *s3, diff --git a/src/interfaces/libpq-oauth/.gitignore b/src/interfaces/libpq-oauth/.gitignore index a4afe7c1c6858..eb5b98aea544c 100644 --- a/src/interfaces/libpq-oauth/.gitignore +++ b/src/interfaces/libpq-oauth/.gitignore @@ -1 +1,4 @@ /exports.list +/oauth_tests + +/tmp_check/ diff --git a/src/interfaces/libpq-oauth/Makefile b/src/interfaces/libpq-oauth/Makefile index 270fc0cf2d9d9..51145f085a8ac 100644 --- a/src/interfaces/libpq-oauth/Makefile +++ b/src/interfaces/libpq-oauth/Makefile @@ -24,7 +24,8 @@ NAME = pq-oauth-$(MAJORVERSION) override shlib := lib$(NAME)$(DLSUFFIX) override stlib := libpq-oauth.a -override CPPFLAGS := -I$(libpq_srcdir) -I$(top_builddir)/src/port $(LIBCURL_CPPFLAGS) $(CPPFLAGS) +override CPPFLAGS := -I$(libpq_srcdir) -I$(top_builddir)/src/port $(CPPFLAGS) $(LIBCURL_CPPFLAGS) +override CFLAGS += $(PTHREAD_CFLAGS) OBJS = \ $(WIN32RES) @@ -47,17 +48,13 @@ $(stlib): override OBJS += $(OBJS_STATIC) $(stlib): $(OBJS_STATIC) SHLIB_LINK_INTERNAL = $(libpq_pgport_shlib) -SHLIB_LINK = $(LIBCURL_LDFLAGS) $(LIBCURL_LDLIBS) $(filter -lintl, $(LIBS)) +SHLIB_LINK = $(LIBCURL_LDFLAGS) $(LIBCURL_LDLIBS) $(filter -lintl -lm $(PTHREAD_LIBS), $(LIBS)) SHLIB_PREREQS = submake-libpq SHLIB_EXPORTS = exports.txt # Disable -bundle_loader on macOS. BE_DLLLIBS = -# By default, a library without an SONAME doesn't get a static library, so we -# add it to the build explicitly. -all: all-lib all-static-lib - # Shared library stuff include $(top_srcdir)/src/Makefile.shlib @@ -66,6 +63,28 @@ include $(top_srcdir)/src/Makefile.shlib %_shlib.o: %.c %.o $(CC) $(CFLAGS) $(CFLAGS_SL) $(CPPFLAGS) $(CPPFLAGS_SHLIB) -c $< -o $@ +.PHONY: all-tests +all-tests: oauth_tests$(X) + +oauth_tests$(X): test-oauth-curl.o oauth-utils.o $(WIN32RES) | submake-libpgport submake-libpq + $(CC) $(CFLAGS) $^ $(LDFLAGS) $(LDFLAGS_EX) $(SHLIB_LINK) -o $@ + +# +# Top-Level Targets +# +# The existence of a t/ folder induces the buildfarm to run Make directly on +# this subdirectory, bypassing the recursion skip in src/interfaces/Makefile. +# Wrap the standard build targets in a with_libcurl conditional to avoid +# building OAuth code on platforms that haven't requested it. (The "clean"-style +# targets remain available.) 
+# + +ifeq ($(with_libcurl), yes) + +# By default, a library without an SONAME doesn't get a static library, so we +# add it to the build explicitly. +all: all-lib all-static-lib + # Ignore the standard rules for SONAME-less installation; we want both the # static and shared libraries to go into libdir. install: all installdirs $(stlib) $(shlib) @@ -75,9 +94,19 @@ install: all installdirs $(stlib) $(shlib) installdirs: $(MKDIR_P) '$(DESTDIR)$(libdir)' +check: all-tests + $(prove_check) + +installcheck: all-tests + $(prove_installcheck) + +endif # with_libcurl + uninstall: rm -f '$(DESTDIR)$(libdir)/$(stlib)' rm -f '$(DESTDIR)$(libdir)/$(shlib)' clean distclean: clean-lib rm -f $(OBJS) $(OBJS_STATIC) $(OBJS_SHLIB) + rm -f test-oauth-curl.o oauth_tests$(X) + rm -rf tmp_check diff --git a/src/interfaces/libpq-oauth/meson.build b/src/interfaces/libpq-oauth/meson.build index df064c59a4070..505e1671b8637 100644 --- a/src/interfaces/libpq-oauth/meson.build +++ b/src/interfaces/libpq-oauth/meson.build @@ -47,3 +47,38 @@ libpq_oauth_so = shared_module(libpq_oauth_name, link_args: export_fmt.format(export_file.full_path()), kwargs: default_lib_args, ) + +libpq_oauth_test_deps = [] + +oauth_test_sources = files('test-oauth-curl.c') + libpq_oauth_so_sources + +if host_system == 'windows' + oauth_test_sources += rc_bin_gen.process(win32ver_rc, extra_args: [ + '--NAME', 'oauth_tests', + '--FILEDESC', 'OAuth unit test program',]) +endif + +libpq_oauth_test_deps += executable('oauth_tests', + oauth_test_sources, + dependencies: [frontend_shlib_code, libpq, libpq_oauth_deps], + kwargs: default_bin_args + { + 'c_args': default_bin_args.get('c_args', []) + libpq_oauth_so_c_args, + 'c_pch': pch_postgres_fe_h, + 'include_directories': [libpq_inc, postgres_inc], + 'install': false, + } +) + +testprep_targets += libpq_oauth_test_deps + +tests += { + 'name': 'libpq-oauth', + 'sd': meson.current_source_dir(), + 'bd': meson.current_build_dir(), + 'tap': { + 'tests': [ + 't/001_oauth.pl', + ], + 'deps': libpq_oauth_test_deps, + }, +} diff --git a/src/interfaces/libpq-oauth/oauth-curl.c b/src/interfaces/libpq-oauth/oauth-curl.c index dba9a684fa8a5..aa50b00d05383 100644 --- a/src/interfaces/libpq-oauth/oauth-curl.c +++ b/src/interfaces/libpq-oauth/oauth-curl.c @@ -278,6 +278,7 @@ struct async_ctx bool user_prompted; /* have we already sent the authz prompt? */ bool used_basic_auth; /* did we send a client secret? */ bool debugging; /* can we give unsafe developer assistance? */ + int dbg_num_calls; /* (debug mode) how many times were we called? */ }; /* @@ -1291,22 +1292,31 @@ register_socket(CURL *curl, curl_socket_t socket, int what, void *ctx, return 0; #elif defined(HAVE_SYS_EVENT_H) - struct kevent ev[2] = {0}; + struct kevent ev[2]; struct kevent ev_out[2]; struct timespec timeout = {0}; int nev = 0; int res; + /* + * We don't know which of the events is currently registered, perhaps + * both, so we always try to remove unneeded events. This means we need to + * tolerate ENOENT below. 
+ */ switch (what) { case CURL_POLL_IN: EV_SET(&ev[nev], socket, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, 0); nev++; + EV_SET(&ev[nev], socket, EVFILT_WRITE, EV_DELETE | EV_RECEIPT, 0, 0, 0); + nev++; break; case CURL_POLL_OUT: EV_SET(&ev[nev], socket, EVFILT_WRITE, EV_ADD | EV_RECEIPT, 0, 0, 0); nev++; + EV_SET(&ev[nev], socket, EVFILT_READ, EV_DELETE | EV_RECEIPT, 0, 0, 0); + nev++; break; case CURL_POLL_INOUT: @@ -1317,12 +1327,6 @@ register_socket(CURL *curl, curl_socket_t socket, int what, void *ctx, break; case CURL_POLL_REMOVE: - - /* - * We don't know which of these is currently registered, perhaps - * both, so we try to remove both. This means we need to tolerate - * ENOENT below. - */ EV_SET(&ev[nev], socket, EVFILT_READ, EV_DELETE | EV_RECEIPT, 0, 0, 0); nev++; EV_SET(&ev[nev], socket, EVFILT_WRITE, EV_DELETE | EV_RECEIPT, 0, 0, 0); @@ -1334,7 +1338,10 @@ register_socket(CURL *curl, curl_socket_t socket, int what, void *ctx, return -1; } - res = kevent(actx->mux, ev, nev, ev_out, lengthof(ev_out), &timeout); + Assert(nev <= lengthof(ev)); + Assert(nev <= lengthof(ev_out)); + + res = kevent(actx->mux, ev, nev, ev_out, nev, &timeout); if (res < 0) { actx_error(actx, "could not modify kqueue: %m"); @@ -1376,6 +1383,53 @@ register_socket(CURL *curl, curl_socket_t socket, int what, void *ctx, #endif } +/* + * If there is no work to do on any of the descriptors in the multiplexer, then + * this function must ensure that the multiplexer is not readable. + * + * Unlike epoll descriptors, kqueue descriptors only transition from readable to + * unreadable when kevent() is called and finds nothing, after removing + * level-triggered conditions that have gone away. We therefore need a dummy + * kevent() call after operations might have been performed on the monitored + * sockets or timer_fd. Any event returned is ignored here, but it also remains + * queued (being level-triggered) and leaves the descriptor readable. This is a + * no-op for epoll descriptors. + */ +static bool +comb_multiplexer(struct async_ctx *actx) +{ +#if defined(HAVE_SYS_EPOLL_H) + /* The epoll implementation doesn't hold onto stale events. */ + return true; +#elif defined(HAVE_SYS_EVENT_H) + struct timespec timeout = {0}; + struct kevent ev; + + /* + * Try to read a single pending event. We can actually ignore the result: + * either we found an event to process, in which case the multiplexer is + * correctly readable for that event at minimum, and it doesn't matter if + * there are any stale events; or we didn't find any, in which case the + * kernel will have discarded any stale events as it traveled to the end + * of the queue. + * + * Note that this depends on our registrations being level-triggered -- + * even the timer, so we use a chained kqueue for that instead of an + * EVFILT_TIMER on the top-level mux. If we used edge-triggered events, + * this call would improperly discard them. + */ + if (kevent(actx->mux, NULL, 0, &ev, 1, &timeout) < 0) + { + actx_error(actx, "could not comb kqueue: %m"); + return false; + } + + return true; +#else +#error comb_multiplexer is not implemented on this platform +#endif +} + /* * Enables or disables the timer in the multiplexer set. The timeout value is * in milliseconds (negative values disable the timer). 
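A condensed sketch of the call sequence comb_multiplexer() exists to support (handle_events() is hypothetical; the real call sites follow in pg_fe_run_oauth_flow_impl() below):

    /* After Curl services its sockets, discard stale level-triggered
     * events so the mux signals only when real work remains. */
    if (!comb_multiplexer(actx))
        goto error_return;

    /* Conditions still present simply re-signal the multiplexer. */
    if (PQsocketPoll(actx->mux, 1 /* forRead */ , 0, 0) > 0)
        handle_events();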
@@ -1483,40 +1537,20 @@ set_timer(struct async_ctx *actx, long timeout) /* * Returns 1 if the timeout in the multiplexer set has expired since the last - * call to set_timer(), 0 if the timer is still running, or -1 (with an - * actx_error() report) if the timer cannot be queried. + * call to set_timer(), 0 if the timer is either still running or disarmed, or + * -1 (with an actx_error() report) if the timer cannot be queried. */ static int timer_expired(struct async_ctx *actx) { -#if defined(HAVE_SYS_EPOLL_H) - struct itimerspec spec = {0}; - - if (timerfd_gettime(actx->timerfd, &spec) < 0) - { - actx_error(actx, "getting timerfd value: %m"); - return -1; - } - - /* - * This implementation assumes we're using single-shot timers. If you - * change to using intervals, you'll need to reimplement this function - * too, possibly with the read() or select() interfaces for timerfd. - */ - Assert(spec.it_interval.tv_sec == 0 - && spec.it_interval.tv_nsec == 0); - - /* If the remaining time to expiration is zero, we're done. */ - return (spec.it_value.tv_sec == 0 - && spec.it_value.tv_nsec == 0); -#elif defined(HAVE_SYS_EVENT_H) +#if defined(HAVE_SYS_EPOLL_H) || defined(HAVE_SYS_EVENT_H) int res; - /* Is the timer queue ready? */ + /* Is the timer ready? */ res = PQsocketPoll(actx->timerfd, 1 /* forRead */ , 0, 0); if (res < 0) { - actx_error(actx, "checking kqueue for timeout: %m"); + actx_error(actx, "checking timer expiration: %m"); return -1; } @@ -1548,6 +1582,36 @@ register_timer(CURLM *curlm, long timeout, void *ctx) return 0; } +/* + * Removes any expired-timer event from the multiplexer. If was_expired is not + * NULL, it will contain whether or not the timer was expired at time of call. + */ +static bool +drain_timer_events(struct async_ctx *actx, bool *was_expired) +{ + int res; + + res = timer_expired(actx); + if (res < 0) + return false; + + if (res > 0) + { + /* + * Timer is expired. We could drain the event manually from the + * timerfd, but it's easier to simply disable it; that keeps the + * platform-specific code in set_timer(). + */ + if (!set_timer(actx, -1)) + return false; + } + + if (was_expired) + *was_expired = (res > 0); + + return true; +} + /* * Prints Curl request debugging information to stderr. * @@ -2751,38 +2815,64 @@ pg_fe_run_oauth_flow_impl(PGconn *conn) { PostgresPollingStatusType status; + /* + * Clear any expired timeout before calling back into + * Curl. Curl is not guaranteed to do this for us, because + * its API expects us to use single-shot (i.e. + * edge-triggered) timeouts, and ours are level-triggered + * via the mux. + * + * This can't be combined with the comb_multiplexer() call + * below: we might accidentally clear a short timeout that + * was both set and expired during the call to + * drive_request(). + */ + if (!drain_timer_events(actx, NULL)) + goto error_return; + + /* Move the request forward. */ status = drive_request(actx); if (status == PGRES_POLLING_FAILED) goto error_return; - else if (status != PGRES_POLLING_OK) - { - /* not done yet */ - return status; - } + else if (status == PGRES_POLLING_OK) + break; /* done! */ - break; + /* + * This request is still running. + * + * Make sure that stale events don't cause us to come back + * early. (Currently, this can occur only with kqueue.) If + * this is forgotten, the multiplexer can get stuck in a + * signaled state and we'll burn CPU cycles pointlessly. 
+ */ + if (!comb_multiplexer(actx)) + goto error_return; + + return status; } case OAUTH_STEP_WAIT_INTERVAL: - - /* - * The client application is supposed to wait until our timer - * expires before calling PQconnectPoll() again, but that - * might not happen. To avoid sending a token request early, - * check the timer before continuing. - */ - if (!timer_expired(actx)) { - set_conn_altsock(conn, actx->timerfd); - return PGRES_POLLING_READING; - } + bool expired; - /* Disable the expired timer. */ - if (!set_timer(actx, -1)) - goto error_return; + /* + * The client application is supposed to wait until our + * timer expires before calling PQconnectPoll() again, but + * that might not happen. To avoid sending a token request + * early, check the timer before continuing. + */ + if (!drain_timer_events(actx, &expired)) + goto error_return; - break; + if (!expired) + { + set_conn_altsock(conn, actx->timerfd); + return PGRES_POLLING_READING; + } + + break; + } } /* @@ -2932,6 +3022,8 @@ PostgresPollingStatusType pg_fe_run_oauth_flow(PGconn *conn) { PostgresPollingStatusType result; + fe_oauth_state *state = conn_sasl_state(conn); + struct async_ctx *actx; #ifndef WIN32 sigset_t osigset; bool sigpipe_pending; @@ -2960,6 +3052,25 @@ pg_fe_run_oauth_flow(PGconn *conn) result = pg_fe_run_oauth_flow_impl(conn); + /* + * To assist with finding bugs in comb_multiplexer() and + * drain_timer_events(), when we're in debug mode, track the total number + * of calls to this function and print that at the end of the flow. + * + * Be careful that state->async_ctx could be NULL if early initialization + * fails during the first call. + */ + actx = state->async_ctx; + Assert(actx || result == PGRES_POLLING_FAILED); + + if (actx && actx->debugging) + { + actx->dbg_num_calls++; + if (result == PGRES_POLLING_OK || result == PGRES_POLLING_FAILED) + fprintf(stderr, "[libpq] total number of polls: %d\n", + actx->dbg_num_calls); + } + #ifndef WIN32 if (masked) { diff --git a/src/interfaces/libpq-oauth/t/001_oauth.pl b/src/interfaces/libpq-oauth/t/001_oauth.pl new file mode 100644 index 0000000000000..6c972056bbd49 --- /dev/null +++ b/src/interfaces/libpq-oauth/t/001_oauth.pl @@ -0,0 +1,24 @@ +# Copyright (c) 2025, PostgreSQL Global Development Group +use strict; +use warnings FATAL => 'all'; + +use PostgreSQL::Test::Utils; +use Test::More; + +# Defer entirely to the oauth_tests executable. stdout/err is routed through +# Test::More so that our logging infrastructure can handle it correctly. Using +# IPC::Run::new_chunker seems to help interleave the two streams a little better +# than without. +# +# TODO: prove can also deal with native executables itself, which we could +# probably make use of via PROVE_TESTS on the Makefile side. But the Meson setup +# calls Perl directly, which would require more code to work around... and +# there's still the matter of logging. +my $builder = Test::More->builder; +my $out = $builder->output; +my $err = $builder->failure_output; + +IPC::Run::run ['oauth_tests'], + '>' => (IPC::Run::new_chunker, sub { $out->print($_[0]) }), + '2>' => (IPC::Run::new_chunker, sub { $err->print($_[0]) }) + or die "oauth_tests returned $?"; diff --git a/src/interfaces/libpq-oauth/test-oauth-curl.c b/src/interfaces/libpq-oauth/test-oauth-curl.c new file mode 100644 index 0000000000000..8263aff2f4ad4 --- /dev/null +++ b/src/interfaces/libpq-oauth/test-oauth-curl.c @@ -0,0 +1,527 @@ +/* + * test-oauth-curl.c + * + * A unit test driver for libpq-oauth. 
This #includes oauth-curl.c, which lets + * the tests reference static functions and other internals. + * + * USE_ASSERT_CHECKING is required, to make it easy for tests to wrap + * must-succeed code as part of test setup. + * + * Copyright (c) 2025, PostgreSQL Global Development Group + */ + +#include "oauth-curl.c" + +#include + +#ifdef USE_ASSERT_CHECKING + +/* + * TAP Helpers + */ + +static int num_tests = 0; + +/* + * Reports ok/not ok to the TAP stream on stdout. + */ +#define ok(OK, TEST) \ + ok_impl(OK, TEST, #OK, __FILE__, __LINE__) + +static bool +ok_impl(bool ok, const char *test, const char *teststr, const char *file, int line) +{ + printf("%sok %d - %s\n", ok ? "" : "not ", ++num_tests, test); + + if (!ok) + { + printf("# at %s:%d:\n", file, line); + printf("# expression is false: %s\n", teststr); + } + + return ok; +} + +/* + * Like ok(this == that), but with more diagnostics on failure. + * + * Only works on ints, but luckily that's all we need here. Note that the much + * simpler-looking macro implementation + * + * is_diag(ok(THIS == THAT, TEST), THIS, #THIS, THAT, #THAT) + * + * suffers from multiple evaluation of the macro arguments... + */ +#define is(THIS, THAT, TEST) \ + do { \ + int this_ = (THIS), \ + that_ = (THAT); \ + is_diag( \ + ok_impl(this_ == that_, TEST, #THIS " == " #THAT, __FILE__, __LINE__), \ + this_, #THIS, that_, #THAT \ + ); \ + } while (0) + +static bool +is_diag(bool ok, int this, const char *thisstr, int that, const char *thatstr) +{ + if (!ok) + printf("# %s = %d; %s = %d\n", thisstr, this, thatstr, that); + + return ok; +} + +/* + * Utilities + */ + +/* + * Creates a partially-initialized async_ctx for the purposes of testing. Free + * with free_test_actx(). + */ +static struct async_ctx * +init_test_actx(void) +{ + struct async_ctx *actx; + + actx = calloc(1, sizeof(*actx)); + Assert(actx); + + actx->mux = PGINVALID_SOCKET; + actx->timerfd = -1; + actx->debugging = true; + + initPQExpBuffer(&actx->errbuf); + + Assert(setup_multiplexer(actx)); + + return actx; +} + +static void +free_test_actx(struct async_ctx *actx) +{ + termPQExpBuffer(&actx->errbuf); + + if (actx->mux != PGINVALID_SOCKET) + close(actx->mux); + if (actx->timerfd >= 0) + close(actx->timerfd); + + free(actx); +} + +static char dummy_buf[4 * 1024]; /* for fill_pipe/drain_pipe */ + +/* + * Writes to the write side of a pipe until it won't take any more data. Returns + * the amount written. + */ +static ssize_t +fill_pipe(int fd) +{ + int mode; + ssize_t written = 0; + + /* Don't block. */ + Assert((mode = fcntl(fd, F_GETFL)) != -1); + Assert(fcntl(fd, F_SETFL, mode | O_NONBLOCK) == 0); + + while (true) + { + ssize_t w; + + w = write(fd, dummy_buf, sizeof(dummy_buf)); + if (w < 0) + { + if (errno != EAGAIN && errno != EWOULDBLOCK) + { + perror("write to pipe"); + written = -1; + } + break; + } + + written += w; + } + + /* Reset the descriptor flags. */ + Assert(fcntl(fd, F_SETFD, mode) == 0); + + return written; +} + +/* + * Drains the requested amount of data from the read side of a pipe. + */ +static bool +drain_pipe(int fd, ssize_t n) +{ + Assert(n > 0); + + while (n) + { + size_t to_read = (n <= sizeof(dummy_buf)) ? n : sizeof(dummy_buf); + ssize_t drained; + + drained = read(fd, dummy_buf, to_read); + if (drained < 0) + { + perror("read from pipe"); + return false; + } + + n -= drained; + } + + return true; +} + +/* + * Tests whether the multiplexer is marked ready by the deadline. This is a + * macro so that file/line information makes sense during failures. 
+ * + * NB: our current multiplexer implementations (epoll/kqueue) are *readable* + * when the underlying libcurl sockets are *writable*. This behavior is pinned + * here to record that expectation; PGRES_POLLING_READING is hardcoded + * throughout the flow and would need to be changed if a new multiplexer does + * something different. + */ +#define mux_is_ready(MUX, DEADLINE, TEST) \ + do { \ + int res_ = PQsocketPoll(MUX, 1, 0, DEADLINE); \ + Assert(res_ != -1); \ + ok(res_ > 0, "multiplexer is ready " TEST); \ + } while (0) + +/* + * The opposite of mux_is_ready(). + */ +#define mux_is_not_ready(MUX, TEST) \ + do { \ + int res_ = PQsocketPoll(MUX, 1, 0, 0); \ + Assert(res_ != -1); \ + is(res_, 0, "multiplexer is not ready " TEST); \ + } while (0) + +/* + * Test Suites + */ + +/* Per-suite timeout. Set via the PG_TEST_TIMEOUT_DEFAULT envvar. */ +static pg_usec_time_t timeout_us = 180 * 1000 * 1000; + +static void +test_set_timer(void) +{ + struct async_ctx *actx = init_test_actx(); + const pg_usec_time_t deadline = PQgetCurrentTimeUSec() + timeout_us; + + printf("# test_set_timer\n"); + + /* A zero-duration timer should result in a near-immediate ready signal. */ + Assert(set_timer(actx, 0)); + mux_is_ready(actx->mux, deadline, "when timer expires"); + is(timer_expired(actx), 1, "timer_expired() returns 1 when timer expires"); + + /* Resetting the timer far in the future should unset the ready signal. */ + Assert(set_timer(actx, INT_MAX)); + mux_is_not_ready(actx->mux, "when timer is reset to the future"); + is(timer_expired(actx), 0, "timer_expired() returns 0 with unexpired timer"); + + /* Setting another zero-duration timer should override the previous one. */ + Assert(set_timer(actx, 0)); + mux_is_ready(actx->mux, deadline, "when timer is re-expired"); + is(timer_expired(actx), 1, "timer_expired() returns 1 when timer is re-expired"); + + /* And disabling that timer should once again unset the ready signal. */ + Assert(set_timer(actx, -1)); + mux_is_not_ready(actx->mux, "when timer is unset"); + is(timer_expired(actx), 0, "timer_expired() returns 0 when timer is unset"); + + { + bool expired; + + /* Make sure drain_timer_events() functions correctly as well. */ + Assert(set_timer(actx, 0)); + mux_is_ready(actx->mux, deadline, "when timer is re-expired (drain_timer_events)"); + + Assert(drain_timer_events(actx, &expired)); + mux_is_not_ready(actx->mux, "when timer is drained after expiring"); + is(expired, 1, "drain_timer_events() reports expiration"); + is(timer_expired(actx), 0, "timer_expired() returns 0 after timer is drained"); + + /* A second drain should do nothing. */ + Assert(drain_timer_events(actx, &expired)); + mux_is_not_ready(actx->mux, "when timer is drained a second time"); + is(expired, 0, "drain_timer_events() reports no expiration"); + is(timer_expired(actx), 0, "timer_expired() still returns 0"); + } + + free_test_actx(actx); +} + +static void +test_register_socket(void) +{ + struct async_ctx *actx = init_test_actx(); + int pipefd[2]; + int rfd, + wfd; + bool bidirectional; + + /* Create a local pipe for communication. */ + Assert(pipe(pipefd) == 0); + rfd = pipefd[0]; + wfd = pipefd[1]; + + /* + * Some platforms (FreeBSD) implement bidirectional pipes, affecting the + * behavior of some of these tests. Store that knowledge for later. 
+ */ + bidirectional = PQsocketPoll(rfd /* read */ , 0, 1 /* write */ , 0) > 0; + + /* + * This suite runs twice -- once using CURL_POLL_IN/CURL_POLL_OUT for + * read/write operations, respectively, and once using CURL_POLL_INOUT for + * both sides. + */ + for (int inout = 0; inout < 2; inout++) + { + const int in_event = inout ? CURL_POLL_INOUT : CURL_POLL_IN; + const int out_event = inout ? CURL_POLL_INOUT : CURL_POLL_OUT; + const pg_usec_time_t deadline = PQgetCurrentTimeUSec() + timeout_us; + size_t bidi_pipe_size = 0; /* silence compiler warnings */ + + printf("# test_register_socket %s\n", inout ? "(INOUT)" : ""); + + /* + * At the start of the test, the read side should be blocked and the + * write side should be open. (There's a mistake at the end of this + * loop otherwise.) + */ + Assert(PQsocketPoll(rfd, 1, 0, 0) == 0); + Assert(PQsocketPoll(wfd, 0, 1, 0) > 0); + + /* + * For bidirectional systems, emulate unidirectional behavior here by + * filling up the "read side" of the pipe. + */ + if (bidirectional) + Assert((bidi_pipe_size = fill_pipe(rfd)) > 0); + + /* Listen on the read side. The multiplexer shouldn't be ready yet. */ + Assert(register_socket(NULL, rfd, in_event, actx, NULL) == 0); + mux_is_not_ready(actx->mux, "when fd is not readable"); + + /* Writing to the pipe should result in a read-ready multiplexer. */ + Assert(write(wfd, "x", 1) == 1); + mux_is_ready(actx->mux, deadline, "when fd is readable"); + + /* + * Update the registration to wait on write events instead. The + * multiplexer should be unset. + */ + Assert(register_socket(NULL, rfd, CURL_POLL_OUT, actx, NULL) == 0); + mux_is_not_ready(actx->mux, "when waiting for writes on readable fd"); + + /* Re-register for read events. */ + Assert(register_socket(NULL, rfd, in_event, actx, NULL) == 0); + mux_is_ready(actx->mux, deadline, "when waiting for reads again"); + + /* Stop listening. The multiplexer should be unset. */ + Assert(register_socket(NULL, rfd, CURL_POLL_REMOVE, actx, NULL) == 0); + mux_is_not_ready(actx->mux, "when readable fd is removed"); + + /* Listen again. */ + Assert(register_socket(NULL, rfd, in_event, actx, NULL) == 0); + mux_is_ready(actx->mux, deadline, "when readable fd is re-added"); + + /* + * Draining the pipe should unset the multiplexer again, once the old + * event is cleared. + */ + Assert(drain_pipe(rfd, 1)); + Assert(comb_multiplexer(actx)); + mux_is_not_ready(actx->mux, "when fd is drained"); + + /* Undo any unidirectional emulation. */ + if (bidirectional) + Assert(drain_pipe(wfd, bidi_pipe_size)); + + /* Listen on the write side. An empty buffer should be writable. */ + Assert(register_socket(NULL, rfd, CURL_POLL_REMOVE, actx, NULL) == 0); + Assert(register_socket(NULL, wfd, out_event, actx, NULL) == 0); + mux_is_ready(actx->mux, deadline, "when fd is writable"); + + /* As above, wait on read events instead. */ + Assert(register_socket(NULL, wfd, CURL_POLL_IN, actx, NULL) == 0); + mux_is_not_ready(actx->mux, "when waiting for reads on writable fd"); + + /* Re-register for write events. */ + Assert(register_socket(NULL, wfd, out_event, actx, NULL) == 0); + mux_is_ready(actx->mux, deadline, "when waiting for writes again"); + + { + ssize_t written; + + /* + * Fill the pipe. Once the old writable event is cleared, the mux + * should not be ready. + */ + Assert((written = fill_pipe(wfd)) > 0); + printf("# pipe buffer is full at %zd bytes\n", written); + + Assert(comb_multiplexer(actx)); + mux_is_not_ready(actx->mux, "when fd buffer is full"); + + /* Drain the pipe again. 
*/ + Assert(drain_pipe(rfd, written)); + mux_is_ready(actx->mux, deadline, "when fd buffer is drained"); + } + + /* Stop listening. */ + Assert(register_socket(NULL, wfd, CURL_POLL_REMOVE, actx, NULL) == 0); + mux_is_not_ready(actx->mux, "when fd is removed"); + + /* Make sure an expired timer doesn't interfere with event draining. */ + { + bool expired; + + /* Make the rfd appear unidirectional if necessary. */ + if (bidirectional) + Assert((bidi_pipe_size = fill_pipe(rfd)) > 0); + + /* Set the timer and wait for it to expire. */ + Assert(set_timer(actx, 0)); + Assert(PQsocketPoll(actx->timerfd, 1, 0, deadline) > 0); + is(timer_expired(actx), 1, "timer is expired"); + + /* Register for read events and make the fd readable. */ + Assert(register_socket(NULL, rfd, in_event, actx, NULL) == 0); + Assert(write(wfd, "x", 1) == 1); + mux_is_ready(actx->mux, deadline, "when fd is readable and timer expired"); + + /* + * Draining the pipe should unset the multiplexer again, once the + * old event is drained and the timer is reset. + * + * Order matters, since comb_multiplexer() doesn't have to remove + * stale events when active events exist. Follow the call sequence + * used in the code: drain the timer expiration, drain the pipe, + * then clear the stale events. + */ + Assert(drain_timer_events(actx, &expired)); + Assert(drain_pipe(rfd, 1)); + Assert(comb_multiplexer(actx)); + + is(expired, 1, "drain_timer_events() reports expiration"); + is(timer_expired(actx), 0, "timer is no longer expired"); + mux_is_not_ready(actx->mux, "when fd is drained and timer reset"); + + /* Stop listening. */ + Assert(register_socket(NULL, rfd, CURL_POLL_REMOVE, actx, NULL) == 0); + + /* Undo any unidirectional emulation. */ + if (bidirectional) + Assert(drain_pipe(wfd, bidi_pipe_size)); + } + + /* Ensure comb_multiplexer() can handle multiple stale events. */ + { + int rfd2, + wfd2; + + /* Create a second local pipe. */ + Assert(pipe(pipefd) == 0); + rfd2 = pipefd[0]; + wfd2 = pipefd[1]; + + /* Make both rfds appear unidirectional if necessary. */ + if (bidirectional) + { + Assert((bidi_pipe_size = fill_pipe(rfd)) > 0); + Assert(fill_pipe(rfd2) == bidi_pipe_size); + } + + /* Register for read events on both fds, and make them readable. */ + Assert(register_socket(NULL, rfd, in_event, actx, NULL) == 0); + Assert(register_socket(NULL, rfd2, in_event, actx, NULL) == 0); + + Assert(write(wfd, "x", 1) == 1); + Assert(write(wfd2, "x", 1) == 1); + + mux_is_ready(actx->mux, deadline, "when two fds are readable"); + + /* + * Drain both fds. comb_multiplexer() should then ensure that the + * mux is no longer readable. + */ + Assert(drain_pipe(rfd, 1)); + Assert(drain_pipe(rfd2, 1)); + Assert(comb_multiplexer(actx)); + mux_is_not_ready(actx->mux, "when two fds are drained"); + + /* Stop listening. */ + Assert(register_socket(NULL, rfd, CURL_POLL_REMOVE, actx, NULL) == 0); + Assert(register_socket(NULL, rfd2, CURL_POLL_REMOVE, actx, NULL) == 0); + + /* Undo any unidirectional emulation. */ + if (bidirectional) + { + Assert(drain_pipe(wfd, bidi_pipe_size)); + Assert(drain_pipe(wfd2, bidi_pipe_size)); + } + + close(rfd2); + close(wfd2); + } + } + + close(rfd); + close(wfd); + free_test_actx(actx); +} + +int +main(int argc, char *argv[]) +{ + const char *timeout; + + /* Grab the default timeout. 
*/
+    timeout = getenv("PG_TEST_TIMEOUT_DEFAULT");
+    if (timeout)
+    {
+        int timeout_s = atoi(timeout);
+
+        if (timeout_s > 0)
+            timeout_us = (pg_usec_time_t) timeout_s * 1000 * 1000;
+    }
+
+    /*
+     * Set up line buffering for our output, to let stderr interleave in the
+     * log files.
+     */
+    setvbuf(stdout, NULL, PG_IOLBF, 0);
+
+    test_set_timer();
+    test_register_socket();
+
+    printf("1..%d\n", num_tests);
+    return 0;
+}
+
+#else    /* !USE_ASSERT_CHECKING */
+
+/*
+ * Skip the test suite when we don't have assertions.
+ */
+int
+main(int argc, char *argv[])
+{
+    printf("1..0 # skip: cassert is not enabled\n");
+
+    return 0;
+}
+
+#endif   /* USE_ASSERT_CHECKING */
diff --git a/src/interfaces/libpq/Makefile b/src/interfaces/libpq/Makefile
index 47d6781150944..da6650066d46e 100644
--- a/src/interfaces/libpq/Makefile
+++ b/src/interfaces/libpq/Makefile
@@ -24,7 +24,7 @@ NAME= pq
 SO_MAJOR_VERSION= 5
 SO_MINOR_VERSION= $(MAJORVERSION)
 
-override CPPFLAGS := -I$(srcdir) $(CPPFLAGS) -I$(top_builddir)/src/port -I$(top_srcdir)/src/port
+override CPPFLAGS := -I$(srcdir) -I$(top_builddir)/src/port -I$(top_srcdir)/src/port $(CPPFLAGS)
 ifneq ($(PORTNAME), win32)
 override CFLAGS += $(PTHREAD_CFLAGS)
 endif
diff --git a/src/interfaces/libpq/fe-cancel.c b/src/interfaces/libpq/fe-cancel.c
index 65517c5703bca..c872a0267f089 100644
--- a/src/interfaces/libpq/fe-cancel.c
+++ b/src/interfaces/libpq/fe-cancel.c
@@ -379,7 +379,24 @@ PQgetCancel(PGconn *conn)
 
 	/* Check that we have received a cancellation key */
 	if (conn->be_cancel_key_len == 0)
-		return NULL;
+	{
+		/*
+		 * In case there is no cancel key, return an all-zero PGcancel object.
+		 * Actually calling PQcancel on this will fail, but we allow creating
+		 * the PGcancel object anyway. Arguably it would be better to return
+		 * NULL to indicate that cancellation is not possible, but there'd be
+		 * no way for the caller to distinguish "out of memory" from "server
+		 * did not send a cancel key". Also, this is how PQgetCancel() has
+		 * always behaved, and if we changed it, some clients would stop
+		 * working altogether with servers that don't support cancellation.
+		 * (The modern PQcancelCreate() function returns a failed connection
+		 * object instead.)
+		 *
+		 * The returned dummy object has cancel_pkt_len == 0; we check for
+		 * that in PQcancel() to identify it as a dummy.
+		 */
+		return calloc(1, sizeof(PGcancel));
+	}
 
 	cancel_req_len = offsetof(CancelRequestPacket, cancelAuthCode) + conn->be_cancel_key_len;
 	cancel = malloc(offsetof(PGcancel, cancel_req) + cancel_req_len);
@@ -544,6 +561,15 @@ PQcancel(PGcancel *cancel, char *errbuf, int errbufsize)
 		return false;
 	}
 
+	if (cancel->cancel_pkt_len == 0)
+	{
+		/* This is a dummy PGcancel object, see PQgetCancel */
+		strlcpy(errbuf, "PQcancel() -- no cancellation key received", errbufsize);
+		/* strlcpy probably doesn't change errno, but be paranoid */
+		SOCK_ERRNO_SET(save_errno);
+		return false;
+	}
+
 	/*
 	 * We need to open a temporary connection to the postmaster. Do this with
 	 * only kernel calls.
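To make the caller-side contract of the fe-cancel.c change above concrete: PQgetCancel() returning NULL is now reserved for out-of-memory, and the "no cancel key" case only surfaces later, when PQcancel() fails on the dummy object. The following is a minimal sketch, not part of the patch; the conninfo string is a placeholder.

/*
 * Sketch only -- not part of the patch. Shows how a client observes the new
 * PQgetCancel()/PQcancel() behavior against a server that never sent a
 * BackendKeyData message.
 */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    PGconn     *conn = PQconnectdb("dbname=postgres");  /* placeholder conninfo */
    PGcancel   *cancel;
    char        errbuf[256];

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    cancel = PQgetCancel(conn);
    if (cancel == NULL)
    {
        /* With this patch, NULL means out of memory only. */
        fprintf(stderr, "out of memory\n");
    }
    else if (!PQcancel(cancel, errbuf, sizeof(errbuf)))
    {
        /*
         * A dummy object (server sent no cancel key) fails here with
         * "PQcancel() -- no cancellation key received".
         */
        fprintf(stderr, "cancel failed: %s\n", errbuf);
    }

    PQfreeCancel(cancel);
    PQfinish(conn);
    return 0;
}
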
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c index afa85d9fca961..a3d12931fff30 100644 --- a/src/interfaces/libpq/fe-connect.c +++ b/src/interfaces/libpq/fe-connect.c @@ -5494,6 +5494,7 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options, *entry; struct berval **values; LDAP_TIMEVAL time = {PGLDAP_TIMEOUT, 0}; + int ldapversion = LDAP_VERSION3; if ((url = strdup(purl)) == NULL) { @@ -5625,6 +5626,15 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options, return 3; } + if ((rc = ldap_set_option(ld, LDAP_OPT_PROTOCOL_VERSION, &ldapversion)) != LDAP_SUCCESS) + { + libpq_append_error(errorMessage, "could not set LDAP protocol version: %s", + ldap_err2string(rc)); + free(url); + ldap_unbind(ld); + return 3; + } + /* * Perform an explicit anonymous bind. * diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c index 4256ae5c0cc5f..0b1e37ec30bbc 100644 --- a/src/interfaces/libpq/fe-exec.c +++ b/src/interfaces/libpq/fe-exec.c @@ -1076,8 +1076,12 @@ pqSaveMessageField(PGresult *res, char code, const char *value) /* * pqSaveParameterStatus - remember parameter status sent by backend + * + * Returns 1 on success, 0 on out-of-memory. (Note that on out-of-memory, we + * have already released the old value of the parameter, if any. The only + * really safe way to recover is to terminate the connection.) */ -void +int pqSaveParameterStatus(PGconn *conn, const char *name, const char *value) { pgParameterStatus *pstatus; @@ -1119,6 +1123,11 @@ pqSaveParameterStatus(PGconn *conn, const char *name, const char *value) pstatus->next = conn->pstatus; conn->pstatus = pstatus; } + else + { + /* out of memory */ + return 0; + } /* * Save values of settings that are of interest to libpq in fields of the @@ -1190,6 +1199,8 @@ pqSaveParameterStatus(PGconn *conn, const char *name, const char *value) { conn->scram_sha_256_iterations = atoi(value); } + + return 1; } diff --git a/src/interfaces/libpq/fe-protocol3.c b/src/interfaces/libpq/fe-protocol3.c index 1599de757d130..da7a8db68c80c 100644 --- a/src/interfaces/libpq/fe-protocol3.c +++ b/src/interfaces/libpq/fe-protocol3.c @@ -43,6 +43,7 @@ (id) == PqMsg_RowDescription) +static void handleFatalError(PGconn *conn); static void handleSyncLoss(PGconn *conn, char id, int msgLength); static int getRowDescriptions(PGconn *conn, int msgLength); static int getParamDescriptions(PGconn *conn, int msgLength); @@ -120,12 +121,12 @@ pqParseInput3(PGconn *conn) conn)) { /* - * XXX add some better recovery code... plan is to skip over - * the message using its length, then report an error. For the - * moment, just treat this like loss of sync (which indeed it - * might be!) + * Abandon the connection. There's not much else we can + * safely do; we can't just ignore the message or we could + * miss important changes to the connection state. + * pqCheckInBufferSpace() already reported the error. 
*/ - handleSyncLoss(conn, id, msgLength); + handleFatalError(conn); } return; } @@ -456,6 +457,11 @@ pqParseInput3(PGconn *conn) /* Normal case: parsing agrees with specified length */ pqParseDone(conn, conn->inCursor); } + else if (conn->error_result && conn->status == CONNECTION_BAD) + { + /* The connection was abandoned and we already reported it */ + return; + } else { /* Trouble --- report it */ @@ -470,15 +476,14 @@ pqParseInput3(PGconn *conn) } /* - * handleSyncLoss: clean up after loss of message-boundary sync + * handleFatalError: clean up after a nonrecoverable error * - * There isn't really a lot we can do here except abandon the connection. + * This is for errors where we need to abandon the connection. The caller has + * already saved the error message in conn->errorMessage. */ static void -handleSyncLoss(PGconn *conn, char id, int msgLength) +handleFatalError(PGconn *conn) { - libpq_append_conn_error(conn, "lost synchronization with server: got message type \"%c\", length %d", - id, msgLength); /* build an error result holding the error message */ pqSaveErrorResult(conn); conn->asyncStatus = PGASYNC_READY; /* drop out of PQgetResult wait loop */ @@ -487,6 +492,19 @@ handleSyncLoss(PGconn *conn, char id, int msgLength) conn->status = CONNECTION_BAD; /* No more connection to backend */ } +/* + * handleSyncLoss: clean up after loss of message-boundary sync + * + * There isn't really a lot we can do here except abandon the connection. + */ +static void +handleSyncLoss(PGconn *conn, char id, int msgLength) +{ + libpq_append_conn_error(conn, "lost synchronization with server: got message type \"%c\", length %d", + id, msgLength); + handleFatalError(conn); +} + /* * parseInput subroutine to read a 'T' (row descriptions) message. * We'll build a new PGresult structure (unless called for a Describe @@ -1519,7 +1537,11 @@ getParameterStatus(PGconn *conn) return EOF; } /* And save it */ - pqSaveParameterStatus(conn, conn->workBuffer.data, valueBuf.data); + if (!pqSaveParameterStatus(conn, conn->workBuffer.data, valueBuf.data)) + { + libpq_append_conn_error(conn, "out of memory"); + handleFatalError(conn); + } termPQExpBuffer(&valueBuf); return 0; } @@ -1547,12 +1569,33 @@ getBackendKeyData(PGconn *conn, int msgLength) cancel_key_len = 5 + msgLength - (conn->inCursor - conn->inStart); + if (cancel_key_len != 4 && conn->pversion == PG_PROTOCOL(3, 0)) + { + libpq_append_conn_error(conn, "received invalid BackendKeyData message: cancel key with length %d not allowed in protocol version 3.0 (must be 4 bytes)", cancel_key_len); + handleFatalError(conn); + return 0; + } + + if (cancel_key_len < 4) + { + libpq_append_conn_error(conn, "received invalid BackendKeyData message: cancel key with length %d is too short (minimum 4 bytes)", cancel_key_len); + handleFatalError(conn); + return 0; + } + + if (cancel_key_len > 256) + { + libpq_append_conn_error(conn, "received invalid BackendKeyData message: cancel key with length %d is too long (maximum 256 bytes)", cancel_key_len); + handleFatalError(conn); + return 0; + } + conn->be_cancel_key = malloc(cancel_key_len); if (conn->be_cancel_key == NULL) { libpq_append_conn_error(conn, "out of memory"); - /* discard the message */ - return EOF; + handleFatalError(conn); + return 0; } if (pqGetnchar(conn->be_cancel_key, cancel_key_len, conn)) { @@ -1589,7 +1632,17 @@ getNotify(PGconn *conn) /* must save name while getting extra string */ svname = strdup(conn->workBuffer.data); if (!svname) - return EOF; + { + /* + * Notify messages can arrive at any state, 
so we cannot associate the + * error with any particular query. There's no way to return back an + * "async error", so the best we can do is drop the connection. That + * seems better than silently ignoring the notification. + */ + libpq_append_conn_error(conn, "out of memory"); + handleFatalError(conn); + return 0; + } if (pqGets(&conn->workBuffer, conn)) { free(svname); @@ -1604,21 +1657,26 @@ getNotify(PGconn *conn) nmlen = strlen(svname); extralen = strlen(conn->workBuffer.data); newNotify = (PGnotify *) malloc(sizeof(PGnotify) + nmlen + extralen + 2); - if (newNotify) - { - newNotify->relname = (char *) newNotify + sizeof(PGnotify); - strcpy(newNotify->relname, svname); - newNotify->extra = newNotify->relname + nmlen + 1; - strcpy(newNotify->extra, conn->workBuffer.data); - newNotify->be_pid = be_pid; - newNotify->next = NULL; - if (conn->notifyTail) - conn->notifyTail->next = newNotify; - else - conn->notifyHead = newNotify; - conn->notifyTail = newNotify; + if (!newNotify) + { + free(svname); + libpq_append_conn_error(conn, "out of memory"); + handleFatalError(conn); + return 0; } + newNotify->relname = (char *) newNotify + sizeof(PGnotify); + strcpy(newNotify->relname, svname); + newNotify->extra = newNotify->relname + nmlen + 1; + strcpy(newNotify->extra, conn->workBuffer.data); + newNotify->be_pid = be_pid; + newNotify->next = NULL; + if (conn->notifyTail) + conn->notifyTail->next = newNotify; + else + conn->notifyHead = newNotify; + conn->notifyTail = newNotify; + free(svname); return 0; } @@ -1752,12 +1810,12 @@ getCopyDataMessage(PGconn *conn) conn)) { /* - * XXX add some better recovery code... plan is to skip over - * the message using its length, then report an error. For the - * moment, just treat this like loss of sync (which indeed it - * might be!) + * Abandon the connection. There's not much else we can + * safely do; we can't just ignore the message or we could + * miss important changes to the connection state. + * pqCheckInBufferSpace() already reported the error. */ - handleSyncLoss(conn, id, msgLength); + handleFatalError(conn); return -2; } return 0; @@ -2186,12 +2244,12 @@ pqFunctionCall3(PGconn *conn, Oid fnid, conn)) { /* - * XXX add some better recovery code... plan is to skip over - * the message using its length, then report an error. For the - * moment, just treat this like loss of sync (which indeed it - * might be!) + * Abandon the connection. There's not much else we can + * safely do; we can't just ignore the message or we could + * miss important changes to the connection state. + * pqCheckInBufferSpace() already reported the error. 
*/ - handleSyncLoss(conn, id, msgLength); + handleFatalError(conn); break; } continue; @@ -2204,7 +2262,7 @@ pqFunctionCall3(PGconn *conn, Oid fnid, */ switch (id) { - case 'V': /* function result */ + case PqMsg_FunctionCallResponse: if (pqGetInt(actual_result_len, 4, conn)) continue; if (*actual_result_len != -1) @@ -2225,22 +2283,22 @@ pqFunctionCall3(PGconn *conn, Oid fnid, /* correctly finished function result message */ status = PGRES_COMMAND_OK; break; - case 'E': /* error return */ + case PqMsg_ErrorResponse: if (pqGetErrorNotice3(conn, true)) continue; status = PGRES_FATAL_ERROR; break; - case 'A': /* notify message */ + case PqMsg_NotificationResponse: /* handle notify and go back to processing return values */ if (getNotify(conn)) continue; break; - case 'N': /* notice */ + case PqMsg_NoticeResponse: /* handle notice and go back to processing return values */ if (pqGetErrorNotice3(conn, false)) continue; break; - case 'Z': /* backend is ready for new query */ + case PqMsg_ReadyForQuery: if (getReadyForQuery(conn)) continue; @@ -2272,7 +2330,7 @@ pqFunctionCall3(PGconn *conn, Oid fnid, } /* and we're out */ return pqPrepareAsyncResult(conn); - case 'S': /* parameter status */ + case PqMsg_ParameterStatus: if (getParameterStatus(conn)) continue; break; diff --git a/src/interfaces/libpq/fe-trace.c b/src/interfaces/libpq/fe-trace.c index a45f0d855871b..fae5b47e55161 100644 --- a/src/interfaces/libpq/fe-trace.c +++ b/src/interfaces/libpq/fe-trace.c @@ -113,7 +113,7 @@ pqTraceOutputByte1(FILE *pfdebug, const char *data, int *cursor) * that completes ErrorResponse and NoticeResponse messages. */ if (!isprint((unsigned char) *v)) - fprintf(pfdebug, " \\x%02x", *v); + fprintf(pfdebug, " \\x%02x", (unsigned char) *v); else fprintf(pfdebug, " %c", *v); *cursor += 1; @@ -212,7 +212,7 @@ pqTraceOutputNchar(FILE *pfdebug, int len, const char *data, int *cursor, bool s else { fwrite(v + next, 1, i - next, pfdebug); - fprintf(pfdebug, "\\x%02x", v[i]); + fprintf(pfdebug, "\\x%02x", (unsigned char) v[i]); next = i + 1; } } diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h index a701c25038a75..02c114f140520 100644 --- a/src/interfaces/libpq/libpq-int.h +++ b/src/interfaces/libpq/libpq-int.h @@ -746,7 +746,7 @@ extern PGresult *pqPrepareAsyncResult(PGconn *conn); extern void pqInternalNotice(const PGNoticeHooks *hooks, const char *fmt,...) 
pg_attribute_printf(2, 3); extern void pqSaveMessageField(PGresult *res, char code, const char *value); -extern void pqSaveParameterStatus(PGconn *conn, const char *name, +extern int pqSaveParameterStatus(PGconn *conn, const char *name, const char *value); extern int pqRowProcessor(PGconn *conn, const char **errmsgp); extern void pqCommandQueueAdvance(PGconn *conn, bool isReadyForQuery, diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c index 29cb4d7e47f80..73ba1748fe0a8 100644 --- a/src/pl/plperl/plperl.c +++ b/src/pl/plperl/plperl.c @@ -1453,7 +1453,7 @@ plperl_sv_to_literal(SV *sv, char *fqtypename) check_spi_usage_allowed(); - typid = DirectFunctionCall1(regtypein, CStringGetDatum(fqtypename)); + typid = DatumGetObjectId(DirectFunctionCall1(regtypein, CStringGetDatum(fqtypename))); if (!OidIsValid(typid)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), @@ -2569,13 +2569,13 @@ plperl_trigger_handler(PG_FUNCTION_ARGS) TriggerData *trigdata = ((TriggerData *) fcinfo->context); if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event)) - retval = (Datum) trigdata->tg_trigtuple; + retval = PointerGetDatum(trigdata->tg_trigtuple); else if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event)) - retval = (Datum) trigdata->tg_newtuple; + retval = PointerGetDatum(trigdata->tg_newtuple); else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) - retval = (Datum) trigdata->tg_trigtuple; + retval = PointerGetDatum(trigdata->tg_trigtuple); else if (TRIGGER_FIRED_BY_TRUNCATE(trigdata->tg_event)) - retval = (Datum) trigdata->tg_trigtuple; + retval = PointerGetDatum(trigdata->tg_trigtuple); else retval = (Datum) 0; /* can this happen? */ } diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c index ee961425a5b7e..f6976689a6927 100644 --- a/src/pl/plpgsql/src/pl_comp.c +++ b/src/pl/plpgsql/src/pl_comp.c @@ -177,6 +177,7 @@ plpgsql_compile_callback(FunctionCallInfo fcinfo, yyscan_t scanner; Datum prosrcdatum; char *proc_source; + char *proc_signature; HeapTuple typeTup; Form_pg_type typeStruct; PLpgSQL_variable *var; @@ -223,6 +224,9 @@ plpgsql_compile_callback(FunctionCallInfo fcinfo, plpgsql_check_syntax = forValidator; plpgsql_curr_compile = function; + /* format_procedure leaks memory, so run it in temp context */ + proc_signature = format_procedure(fcinfo->flinfo->fn_oid); + /* * All the permanent output of compilation (e.g. parse tree) is kept in a * per-function memory context, so it can be reclaimed easily. @@ -237,7 +241,7 @@ plpgsql_compile_callback(FunctionCallInfo fcinfo, ALLOCSET_DEFAULT_SIZES); plpgsql_compile_tmp_cxt = MemoryContextSwitchTo(func_cxt); - function->fn_signature = format_procedure(fcinfo->flinfo->fn_oid); + function->fn_signature = pstrdup(proc_signature); MemoryContextSetIdentifier(func_cxt, function->fn_signature); function->fn_oid = fcinfo->flinfo->fn_oid; function->fn_input_collation = fcinfo->fncollation; @@ -1673,6 +1677,11 @@ plpgsql_parse_wordrowtype(char *ident) { Oid classOid; Oid typOid; + TypeName *typName; + MemoryContext oldCxt; + + /* Avoid memory leaks in long-term function context */ + oldCxt = MemoryContextSwitchTo(plpgsql_compile_tmp_cxt); /* * Look up the relation. 
Note that because relation rowtypes have the @@ -1695,9 +1704,12 @@ plpgsql_parse_wordrowtype(char *ident) errmsg("relation \"%s\" does not have a composite type", ident))); + typName = makeTypeName(ident); + + MemoryContextSwitchTo(oldCxt); + /* Build and return the row type struct */ - return plpgsql_build_datatype(typOid, -1, InvalidOid, - makeTypeName(ident)); + return plpgsql_build_datatype(typOid, -1, InvalidOid, typName); } /* ---------- @@ -1711,6 +1723,7 @@ plpgsql_parse_cwordrowtype(List *idents) Oid classOid; Oid typOid; RangeVar *relvar; + TypeName *typName; MemoryContext oldCxt; /* @@ -1733,11 +1746,12 @@ plpgsql_parse_cwordrowtype(List *idents) errmsg("relation \"%s\" does not have a composite type", relvar->relname))); + typName = makeTypeNameFromNameList(idents); + MemoryContextSwitchTo(oldCxt); /* Build and return the row type struct */ - return plpgsql_build_datatype(typOid, -1, InvalidOid, - makeTypeNameFromNameList(idents)); + return plpgsql_build_datatype(typOid, -1, InvalidOid, typName); } /* @@ -1952,6 +1966,8 @@ plpgsql_build_recfield(PLpgSQL_rec *rec, const char *fldname) * origtypname is the parsed form of what the user wrote as the type name. * It can be NULL if the type could not be a composite type, or if it was * identified by OID to begin with (e.g., it's a function argument type). + * origtypname is in short-lived storage and must be copied if we choose + * to incorporate it into the function's parse tree. */ PLpgSQL_type * plpgsql_build_datatype(Oid typeOid, int32 typmod, @@ -2070,7 +2086,7 @@ build_datatype(HeapTuple typeTup, int32 typmod, errmsg("type %s is not composite", format_type_be(typ->typoid)))); - typ->origtypname = origtypname; + typ->origtypname = copyObject(origtypname); typ->tcache = typentry; typ->tupdesc_id = typentry->tupDesc_identifier; } diff --git a/src/pl/plpgsql/src/pl_gram.y b/src/pl/plpgsql/src/pl_gram.y index 7b672ea5179a6..17568d82554d2 100644 --- a/src/pl/plpgsql/src/pl_gram.y +++ b/src/pl/plpgsql/src/pl_gram.y @@ -3853,6 +3853,7 @@ parse_datatype(const char *string, int location, yyscan_t yyscanner) int32 typmod; sql_error_callback_arg cbarg; ErrorContextCallback syntax_errcontext; + MemoryContext oldCxt; cbarg.location = location; cbarg.yyscanner = yyscanner; @@ -3862,9 +3863,14 @@ parse_datatype(const char *string, int location, yyscan_t yyscanner) syntax_errcontext.previous = error_context_stack; error_context_stack = &syntax_errcontext; - /* Let the main parser try to parse it under standard SQL rules */ + /* + * Let the main parser try to parse it under standard SQL rules. The + * parser leaks memory, so run it in temp context. + */ + oldCxt = MemoryContextSwitchTo(plpgsql_compile_tmp_cxt); typeName = typeStringToTypeName(string, NULL); typenameTypeIdAndMod(NULL, typeName, &type_id, &typmod); + MemoryContextSwitchTo(oldCxt); /* Restore former ereport callback */ error_context_stack = syntax_errcontext.previous; diff --git a/src/pl/plpython/Makefile b/src/pl/plpython/Makefile index f959083a0bdec..25f295c3709e2 100644 --- a/src/pl/plpython/Makefile +++ b/src/pl/plpython/Makefile @@ -11,7 +11,7 @@ ifeq ($(PORTNAME), win32) override python_libspec = endif -override CPPFLAGS := -I. -I$(srcdir) $(python_includespec) $(CPPFLAGS) +override CPPFLAGS := -I. 
-I$(srcdir) $(CPPFLAGS) $(python_includespec) rpathdir = $(python_libdir) diff --git a/src/pl/plpython/expected/plpython_trigger.out b/src/pl/plpython/expected/plpython_trigger.out index 64eab2fa3f4b5..bd35b220c5eda 100644 --- a/src/pl/plpython/expected/plpython_trigger.out +++ b/src/pl/plpython/expected/plpython_trigger.out @@ -646,3 +646,30 @@ SELECT * FROM recursive_trigger_test; 1 | 2 (2 rows) +-- event triggers +CREATE OR REPLACE FUNCTION pysnitch() RETURNS event_trigger +LANGUAGE plpython3u +AS $$ + plpy.notice("TD[event] => " + TD["event"] + " ; TD[tag] => " + TD["tag"]); +$$; +CREATE EVENT TRIGGER python_a_snitch ON ddl_command_start + EXECUTE PROCEDURE pysnitch(); +CREATE EVENT TRIGGER python_b_snitch ON ddl_command_end + EXECUTE PROCEDURE pysnitch(); +CREATE OR REPLACE FUNCTION foobar() RETURNS int LANGUAGE sql AS $$SELECT 1;$$; +NOTICE: TD[event] => ddl_command_start ; TD[tag] => CREATE FUNCTION +NOTICE: TD[event] => ddl_command_end ; TD[tag] => CREATE FUNCTION +ALTER FUNCTION foobar() COST 77; +NOTICE: TD[event] => ddl_command_start ; TD[tag] => ALTER FUNCTION +NOTICE: TD[event] => ddl_command_end ; TD[tag] => ALTER FUNCTION +DROP FUNCTION foobar(); +NOTICE: TD[event] => ddl_command_start ; TD[tag] => DROP FUNCTION +NOTICE: TD[event] => ddl_command_end ; TD[tag] => DROP FUNCTION +CREATE TABLE foo(); +NOTICE: TD[event] => ddl_command_start ; TD[tag] => CREATE TABLE +NOTICE: TD[event] => ddl_command_end ; TD[tag] => CREATE TABLE +DROP TABLE foo; +NOTICE: TD[event] => ddl_command_start ; TD[tag] => DROP TABLE +NOTICE: TD[event] => ddl_command_end ; TD[tag] => DROP TABLE +DROP EVENT TRIGGER python_a_snitch; +DROP EVENT TRIGGER python_b_snitch; diff --git a/src/pl/plpython/plpy_exec.c b/src/pl/plpython/plpy_exec.c index 28fbd443b98c9..fd06b9e0e4e98 100644 --- a/src/pl/plpython/plpy_exec.c +++ b/src/pl/plpython/plpy_exec.c @@ -9,6 +9,7 @@ #include "access/htup_details.h" #include "access/xact.h" #include "catalog/pg_type.h" +#include "commands/event_trigger.h" #include "commands/trigger.h" #include "executor/spi.h" #include "funcapi.h" @@ -427,6 +428,47 @@ PLy_exec_trigger(FunctionCallInfo fcinfo, PLyProcedure *proc) return rv; } +/* + * event trigger subhandler + */ +void +PLy_exec_event_trigger(FunctionCallInfo fcinfo, PLyProcedure *proc) +{ + EventTriggerData *tdata; + PyObject *volatile pltdata = NULL; + + Assert(CALLED_AS_EVENT_TRIGGER(fcinfo)); + tdata = (EventTriggerData *) fcinfo->context; + + PG_TRY(); + { + PyObject *pltevent, + *plttag; + + pltdata = PyDict_New(); + if (!pltdata) + PLy_elog(ERROR, NULL); + + pltevent = PLyUnicode_FromString(tdata->event); + PyDict_SetItemString(pltdata, "event", pltevent); + Py_DECREF(pltevent); + + plttag = PLyUnicode_FromString(GetCommandTagName(tdata->tag)); + PyDict_SetItemString(pltdata, "tag", plttag); + Py_DECREF(plttag); + + PLy_procedure_call(proc, "TD", pltdata); + + if (SPI_finish() != SPI_OK_FINISH) + elog(ERROR, "SPI_finish() failed"); + } + PG_FINALLY(); + { + Py_XDECREF(pltdata); + } + PG_END_TRY(); +} + /* helper functions for Python code execution */ static PyObject * @@ -509,7 +551,7 @@ PLy_function_save_args(PLyProcedure *proc) Py_XINCREF(result->args); /* If it's a trigger, also save "TD" */ - if (proc->is_trigger) + if (proc->is_trigger == PLPY_TRIGGER) { result->td = PyDict_GetItemString(proc->globals, "TD"); Py_XINCREF(result->td); diff --git a/src/pl/plpython/plpy_exec.h b/src/pl/plpython/plpy_exec.h index 68da1ffcb2ef1..f35eabbd8ee8e 100644 --- a/src/pl/plpython/plpy_exec.h +++ b/src/pl/plpython/plpy_exec.h @@ -9,5 
+9,6 @@ extern Datum PLy_exec_function(FunctionCallInfo fcinfo, PLyProcedure *proc); extern HeapTuple PLy_exec_trigger(FunctionCallInfo fcinfo, PLyProcedure *proc); +extern void PLy_exec_event_trigger(FunctionCallInfo fcinfo, PLyProcedure *proc); #endif /* PLPY_EXEC_H */ diff --git a/src/pl/plpython/plpy_main.c b/src/pl/plpython/plpy_main.c index f36eadbadc66d..70fc2c9257a89 100644 --- a/src/pl/plpython/plpy_main.c +++ b/src/pl/plpython/plpy_main.c @@ -9,6 +9,7 @@ #include "access/htup_details.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" +#include "commands/event_trigger.h" #include "commands/trigger.h" #include "executor/spi.h" #include "miscadmin.h" @@ -38,7 +39,7 @@ PG_FUNCTION_INFO_V1(plpython3_call_handler); PG_FUNCTION_INFO_V1(plpython3_inline_handler); -static bool PLy_procedure_is_trigger(Form_pg_proc procStruct); +static PLyTrigType PLy_procedure_is_trigger(Form_pg_proc procStruct); static void plpython_error_callback(void *arg); static void plpython_inline_error_callback(void *arg); static void PLy_init_interp(void); @@ -163,7 +164,7 @@ plpython3_validator(PG_FUNCTION_ARGS) Oid funcoid = PG_GETARG_OID(0); HeapTuple tuple; Form_pg_proc procStruct; - bool is_trigger; + PLyTrigType is_trigger; if (!CheckFunctionValidatorAccess(fcinfo->flinfo->fn_oid, funcoid)) PG_RETURN_VOID(); @@ -235,14 +236,21 @@ plpython3_call_handler(PG_FUNCTION_ARGS) Relation tgrel = ((TriggerData *) fcinfo->context)->tg_relation; HeapTuple trv; - proc = PLy_procedure_get(funcoid, RelationGetRelid(tgrel), true); + proc = PLy_procedure_get(funcoid, RelationGetRelid(tgrel), PLPY_TRIGGER); exec_ctx->curr_proc = proc; trv = PLy_exec_trigger(fcinfo, proc); retval = PointerGetDatum(trv); } + else if (CALLED_AS_EVENT_TRIGGER(fcinfo)) + { + proc = PLy_procedure_get(funcoid, InvalidOid, PLPY_EVENT_TRIGGER); + exec_ctx->curr_proc = proc; + PLy_exec_event_trigger(fcinfo, proc); + retval = (Datum) 0; + } else { - proc = PLy_procedure_get(funcoid, InvalidOid, false); + proc = PLy_procedure_get(funcoid, InvalidOid, PLPY_NOT_TRIGGER); exec_ctx->curr_proc = proc; retval = PLy_exec_function(fcinfo, proc); } @@ -336,10 +344,25 @@ plpython3_inline_handler(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } -static bool +static PLyTrigType PLy_procedure_is_trigger(Form_pg_proc procStruct) { - return (procStruct->prorettype == TRIGGEROID); + PLyTrigType ret; + + switch (procStruct->prorettype) + { + case TRIGGEROID: + ret = PLPY_TRIGGER; + break; + case EVENT_TRIGGEROID: + ret = PLPY_EVENT_TRIGGER; + break; + default: + ret = PLPY_NOT_TRIGGER; + break; + } + + return ret; } static void diff --git a/src/pl/plpython/plpy_procedure.c b/src/pl/plpython/plpy_procedure.c index c176d24e80118..22d9ef0fe0677 100644 --- a/src/pl/plpython/plpy_procedure.c +++ b/src/pl/plpython/plpy_procedure.c @@ -21,7 +21,7 @@ static HTAB *PLy_procedure_cache = NULL; -static PLyProcedure *PLy_procedure_create(HeapTuple procTup, Oid fn_oid, bool is_trigger); +static PLyProcedure *PLy_procedure_create(HeapTuple procTup, Oid fn_oid, PLyTrigType is_trigger); static bool PLy_procedure_valid(PLyProcedure *proc, HeapTuple procTup); static char *PLy_procedure_munge_source(const char *name, const char *src); @@ -63,15 +63,20 @@ PLy_procedure_name(PLyProcedure *proc) * be used with, so no sensible fn_rel can be passed. 
*/ PLyProcedure * -PLy_procedure_get(Oid fn_oid, Oid fn_rel, bool is_trigger) +PLy_procedure_get(Oid fn_oid, Oid fn_rel, PLyTrigType is_trigger) { - bool use_cache = !(is_trigger && fn_rel == InvalidOid); + bool use_cache; HeapTuple procTup; PLyProcedureKey key; PLyProcedureEntry *volatile entry = NULL; PLyProcedure *volatile proc = NULL; bool found = false; + if (is_trigger == PLPY_TRIGGER && fn_rel == InvalidOid) + use_cache = false; + else + use_cache = true; + procTup = SearchSysCache1(PROCOID, ObjectIdGetDatum(fn_oid)); if (!HeapTupleIsValid(procTup)) elog(ERROR, "cache lookup failed for function %u", fn_oid); @@ -127,7 +132,7 @@ PLy_procedure_get(Oid fn_oid, Oid fn_rel, bool is_trigger) * Create a new PLyProcedure structure */ static PLyProcedure * -PLy_procedure_create(HeapTuple procTup, Oid fn_oid, bool is_trigger) +PLy_procedure_create(HeapTuple procTup, Oid fn_oid, PLyTrigType is_trigger) { char procName[NAMEDATALEN + 256]; Form_pg_proc procStruct; @@ -200,7 +205,7 @@ PLy_procedure_create(HeapTuple procTup, Oid fn_oid, bool is_trigger) * get information required for output conversion of the return value, * but only if this isn't a trigger. */ - if (!is_trigger) + if (is_trigger == PLPY_NOT_TRIGGER) { Oid rettype = procStruct->prorettype; HeapTuple rvTypeTup; diff --git a/src/pl/plpython/plpy_procedure.h b/src/pl/plpython/plpy_procedure.h index 5db854fc8bd2d..3ef22844a9b71 100644 --- a/src/pl/plpython/plpy_procedure.h +++ b/src/pl/plpython/plpy_procedure.h @@ -11,6 +11,16 @@ extern void init_procedure_caches(void); +/* + * Trigger type + */ +typedef enum PLyTrigType +{ + PLPY_TRIGGER, + PLPY_EVENT_TRIGGER, + PLPY_NOT_TRIGGER, +} PLyTrigType; + /* saved arguments for outer recursion level or set-returning function */ typedef struct PLySavedArgs { @@ -33,7 +43,7 @@ typedef struct PLyProcedure bool fn_readonly; bool is_setof; /* true, if function returns result set */ bool is_procedure; - bool is_trigger; /* called as trigger? */ + PLyTrigType is_trigger; /* called as trigger? 
*/ PLyObToDatum result; /* Function result output conversion info */ PLyDatumToOb result_in; /* For converting input tuples in a trigger */ char *src; /* textual procedure code, after mangling */ @@ -65,7 +75,7 @@ typedef struct PLyProcedureEntry /* PLyProcedure manipulation */ extern char *PLy_procedure_name(PLyProcedure *proc); -extern PLyProcedure *PLy_procedure_get(Oid fn_oid, Oid fn_rel, bool is_trigger); +extern PLyProcedure *PLy_procedure_get(Oid fn_oid, Oid fn_rel, PLyTrigType is_trigger); extern void PLy_procedure_compile(PLyProcedure *proc, const char *src); extern void PLy_procedure_delete(PLyProcedure *proc); diff --git a/src/pl/plpython/sql/plpython_trigger.sql b/src/pl/plpython/sql/plpython_trigger.sql index 440549c0785da..e1a552e079fe8 100644 --- a/src/pl/plpython/sql/plpython_trigger.sql +++ b/src/pl/plpython/sql/plpython_trigger.sql @@ -492,3 +492,27 @@ CREATE TRIGGER recursive_trigger_trig INSERT INTO recursive_trigger_test VALUES (0, 0); UPDATE recursive_trigger_test SET a = 11 WHERE b = 0; SELECT * FROM recursive_trigger_test; + + +-- event triggers + +CREATE OR REPLACE FUNCTION pysnitch() RETURNS event_trigger +LANGUAGE plpython3u +AS $$ + plpy.notice("TD[event] => " + TD["event"] + " ; TD[tag] => " + TD["tag"]); +$$; + +CREATE EVENT TRIGGER python_a_snitch ON ddl_command_start + EXECUTE PROCEDURE pysnitch(); +CREATE EVENT TRIGGER python_b_snitch ON ddl_command_end + EXECUTE PROCEDURE pysnitch(); + +CREATE OR REPLACE FUNCTION foobar() RETURNS int LANGUAGE sql AS $$SELECT 1;$$; +ALTER FUNCTION foobar() COST 77; +DROP FUNCTION foobar(); + +CREATE TABLE foo(); +DROP TABLE foo; + +DROP EVENT TRIGGER python_a_snitch; +DROP EVENT TRIGGER python_b_snitch; diff --git a/src/pl/tcl/Makefile b/src/pl/tcl/Makefile index ea52a2efc229d..dd57f7d694c82 100644 --- a/src/pl/tcl/Makefile +++ b/src/pl/tcl/Makefile @@ -11,7 +11,7 @@ top_builddir = ../../.. include $(top_builddir)/src/Makefile.global -override CPPFLAGS := -I. -I$(srcdir) $(TCL_INCLUDE_SPEC) $(CPPFLAGS) +override CPPFLAGS := -I. -I$(srcdir) $(CPPFLAGS) $(TCL_INCLUDE_SPEC) # On Windows, we don't link directly with the Tcl library; see below ifneq ($(PORTNAME), win32) diff --git a/src/test/isolation/expected/cluster-toast-value-reuse.out b/src/test/isolation/expected/cluster-toast-value-reuse.out new file mode 100644 index 0000000000000..cb14ddcee34b8 --- /dev/null +++ b/src/test/isolation/expected/cluster-toast-value-reuse.out @@ -0,0 +1,29 @@ +Parsed test spec with 2 sessions + +starting permutation: s1_begin s1_update s2_store_chunk_ids s2_cluster s1_commit s2_verify_chunk_ids +step s1_begin: BEGIN; +step s1_update: UPDATE cluster_toast_value SET flag = 1 WHERE TRUE; +step s2_store_chunk_ids: + CREATE TABLE cluster_chunk_id AS + SELECT c.id, pg_column_toast_chunk_id(c.value) AS chunk_id + FROM cluster_toast_value c; + SELECT count(*) FROM cluster_chunk_id; + +count +----- + 1 +(1 row) + +step s2_cluster: CLUSTER cluster_toast_value; +step s1_commit: COMMIT; +step s2_cluster: <... 
completed> +step s2_verify_chunk_ids: + SELECT o.id AS chunk_ids_preserved + FROM cluster_chunk_id o + JOIN cluster_toast_value c ON o.id = c.id + WHERE o.chunk_id != pg_column_toast_chunk_id(c.value); + +chunk_ids_preserved +------------------- +(0 rows) + diff --git a/src/test/isolation/expected/index-killtuples.out b/src/test/isolation/expected/index-killtuples.out new file mode 100644 index 0000000000000..be7ddd756ef0e --- /dev/null +++ b/src/test/isolation/expected/index-killtuples.out @@ -0,0 +1,355 @@ +Parsed test spec with 1 sessions + +starting permutation: create_table fill_500 create_btree flush disable_seq disable_bitmap measure access flush result measure access flush result delete flush measure access flush result measure access flush result drop_table +step create_table: CREATE TEMPORARY TABLE kill_prior_tuple(key int not null, cat text not null); +step fill_500: INSERT INTO kill_prior_tuple(key, cat) SELECT g.i, 'a' FROM generate_series(1, 500) g(i); +step create_btree: CREATE INDEX kill_prior_tuple_btree ON kill_prior_tuple USING btree (key); +step flush: SELECT FROM pg_stat_force_next_flush(); +step disable_seq: SET enable_seqscan = false; +step disable_bitmap: SET enable_bitmapscan = false; +step measure: UPDATE counter SET heap_accesses = (SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN +-------------------------------------------------------------------------------------- +Index Scan using kill_prior_tuple_btree on kill_prior_tuple (actual rows=1.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(3 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); +step result: SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 1 +(1 row) + +step measure: UPDATE counter SET heap_accesses = (SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN +-------------------------------------------------------------------------------------- +Index Scan using kill_prior_tuple_btree on kill_prior_tuple (actual rows=1.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(3 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); +step result: SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 1 +(1 row) + +step delete: DELETE FROM kill_prior_tuple; +step flush: SELECT FROM pg_stat_force_next_flush(); +step measure: UPDATE counter SET heap_accesses = (SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN +-------------------------------------------------------------------------------------- +Index Scan using kill_prior_tuple_btree on kill_prior_tuple (actual rows=0.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(3 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); +step result: SELECT heap_blks_read + heap_blks_hit - 
counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 1 +(1 row) + +step measure: UPDATE counter SET heap_accesses = (SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN +-------------------------------------------------------------------------------------- +Index Scan using kill_prior_tuple_btree on kill_prior_tuple (actual rows=0.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(3 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); +step result: SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 0 +(1 row) + +step drop_table: DROP TABLE IF EXISTS kill_prior_tuple; + +starting permutation: create_table fill_500 create_ext_btree_gist create_gist flush disable_seq disable_bitmap measure access flush result measure access flush result delete flush measure access flush result measure access flush result drop_table drop_ext_btree_gist +step create_table: CREATE TEMPORARY TABLE kill_prior_tuple(key int not null, cat text not null); +step fill_500: INSERT INTO kill_prior_tuple(key, cat) SELECT g.i, 'a' FROM generate_series(1, 500) g(i); +step create_ext_btree_gist: CREATE EXTENSION btree_gist; +step create_gist: CREATE INDEX kill_prior_tuple_gist ON kill_prior_tuple USING gist (key); +step flush: SELECT FROM pg_stat_force_next_flush(); +step disable_seq: SET enable_seqscan = false; +step disable_bitmap: SET enable_bitmapscan = false; +step measure: UPDATE counter SET heap_accesses = (SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN +------------------------------------------------------------------------------------- +Index Scan using kill_prior_tuple_gist on kill_prior_tuple (actual rows=1.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(3 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); +step result: SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 1 +(1 row) + +step measure: UPDATE counter SET heap_accesses = (SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN +------------------------------------------------------------------------------------- +Index Scan using kill_prior_tuple_gist on kill_prior_tuple (actual rows=1.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(3 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); +step result: SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 1 +(1 row) + +step delete: DELETE FROM kill_prior_tuple; +step flush: SELECT FROM pg_stat_force_next_flush(); +step measure: UPDATE counter SET heap_accesses = (SELECT 
heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN +------------------------------------------------------------------------------------- +Index Scan using kill_prior_tuple_gist on kill_prior_tuple (actual rows=0.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(3 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); +step result: SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 1 +(1 row) + +step measure: UPDATE counter SET heap_accesses = (SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN +------------------------------------------------------------------------------------- +Index Scan using kill_prior_tuple_gist on kill_prior_tuple (actual rows=0.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(3 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); +step result: SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 0 +(1 row) + +step drop_table: DROP TABLE IF EXISTS kill_prior_tuple; +step drop_ext_btree_gist: DROP EXTENSION btree_gist; + +starting permutation: create_table fill_10 create_ext_btree_gist create_gist flush disable_seq disable_bitmap measure access flush result measure access flush result delete flush measure access flush result measure access flush result drop_table drop_ext_btree_gist +step create_table: CREATE TEMPORARY TABLE kill_prior_tuple(key int not null, cat text not null); +step fill_10: INSERT INTO kill_prior_tuple(key, cat) SELECT g.i, 'a' FROM generate_series(1, 10) g(i); +step create_ext_btree_gist: CREATE EXTENSION btree_gist; +step create_gist: CREATE INDEX kill_prior_tuple_gist ON kill_prior_tuple USING gist (key); +step flush: SELECT FROM pg_stat_force_next_flush(); +step disable_seq: SET enable_seqscan = false; +step disable_bitmap: SET enable_bitmapscan = false; +step measure: UPDATE counter SET heap_accesses = (SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN +------------------------------------------------------------------------------------- +Index Scan using kill_prior_tuple_gist on kill_prior_tuple (actual rows=1.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(3 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); +step result: SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 1 +(1 row) + +step measure: UPDATE counter SET heap_accesses = (SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN 
+------------------------------------------------------------------------------------- +Index Scan using kill_prior_tuple_gist on kill_prior_tuple (actual rows=1.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(3 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); +step result: SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 1 +(1 row) + +step delete: DELETE FROM kill_prior_tuple; +step flush: SELECT FROM pg_stat_force_next_flush(); +step measure: UPDATE counter SET heap_accesses = (SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN +------------------------------------------------------------------------------------- +Index Scan using kill_prior_tuple_gist on kill_prior_tuple (actual rows=0.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(3 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); +step result: SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 1 +(1 row) + +step measure: UPDATE counter SET heap_accesses = (SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN +------------------------------------------------------------------------------------- +Index Scan using kill_prior_tuple_gist on kill_prior_tuple (actual rows=0.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(3 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); +step result: SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 1 +(1 row) + +step drop_table: DROP TABLE IF EXISTS kill_prior_tuple; +step drop_ext_btree_gist: DROP EXTENSION btree_gist; + +starting permutation: create_table fill_500 create_hash flush disable_seq disable_bitmap measure access flush result measure access flush result delete flush measure access flush result measure access flush result drop_table +step create_table: CREATE TEMPORARY TABLE kill_prior_tuple(key int not null, cat text not null); +step fill_500: INSERT INTO kill_prior_tuple(key, cat) SELECT g.i, 'a' FROM generate_series(1, 500) g(i); +step create_hash: CREATE INDEX kill_prior_tuple_hash ON kill_prior_tuple USING hash (key); +step flush: SELECT FROM pg_stat_force_next_flush(); +step disable_seq: SET enable_seqscan = false; +step disable_bitmap: SET enable_bitmapscan = false; +step measure: UPDATE counter SET heap_accesses = (SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN +------------------------------------------------------------------------------------- +Index Scan using kill_prior_tuple_hash on kill_prior_tuple (actual rows=1.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(3 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); 
+step result: SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 1 +(1 row) + +step measure: UPDATE counter SET heap_accesses = (SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN +------------------------------------------------------------------------------------- +Index Scan using kill_prior_tuple_hash on kill_prior_tuple (actual rows=1.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(3 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); +step result: SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 1 +(1 row) + +step delete: DELETE FROM kill_prior_tuple; +step flush: SELECT FROM pg_stat_force_next_flush(); +step measure: UPDATE counter SET heap_accesses = (SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN +------------------------------------------------------------------------------------- +Index Scan using kill_prior_tuple_hash on kill_prior_tuple (actual rows=0.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(3 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); +step result: SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 1 +(1 row) + +step measure: UPDATE counter SET heap_accesses = (SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN +------------------------------------------------------------------------------------- +Index Scan using kill_prior_tuple_hash on kill_prior_tuple (actual rows=0.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(3 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); +step result: SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 0 +(1 row) + +step drop_table: DROP TABLE IF EXISTS kill_prior_tuple; + +starting permutation: create_table fill_500 create_ext_btree_gin create_gin flush disable_seq delete flush measure access flush result measure access flush result drop_table drop_ext_btree_gin +step create_table: CREATE TEMPORARY TABLE kill_prior_tuple(key int not null, cat text not null); +step fill_500: INSERT INTO kill_prior_tuple(key, cat) SELECT g.i, 'a' FROM generate_series(1, 500) g(i); +step create_ext_btree_gin: CREATE EXTENSION btree_gin; +step create_gin: CREATE INDEX kill_prior_tuple_gin ON kill_prior_tuple USING gin (key); +step flush: SELECT FROM pg_stat_force_next_flush(); +step disable_seq: SET enable_seqscan = false; +step delete: DELETE FROM kill_prior_tuple; +step flush: SELECT FROM pg_stat_force_next_flush(); +step measure: UPDATE counter SET heap_accesses = 
(SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN +-------------------------------------------------------------------------- +Bitmap Heap Scan on kill_prior_tuple (actual rows=0.00 loops=1) + Recheck Cond: (key = 1) + Heap Blocks: exact=1 + -> Bitmap Index Scan on kill_prior_tuple_gin (actual rows=1.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(6 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); +step result: SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 1 +(1 row) + +step measure: UPDATE counter SET heap_accesses = (SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); +step access: EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; +QUERY PLAN +-------------------------------------------------------------------------- +Bitmap Heap Scan on kill_prior_tuple (actual rows=0.00 loops=1) + Recheck Cond: (key = 1) + Heap Blocks: exact=1 + -> Bitmap Index Scan on kill_prior_tuple_gin (actual rows=1.00 loops=1) + Index Cond: (key = 1) + Index Searches: 1 +(6 rows) + +step flush: SELECT FROM pg_stat_force_next_flush(); +step result: SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; +new_heap_accesses +----------------- + 1 +(1 row) + +step drop_table: DROP TABLE IF EXISTS kill_prior_tuple; +step drop_ext_btree_gin: DROP EXTENSION btree_gin; diff --git a/src/test/isolation/expected/intra-grant-inplace.out b/src/test/isolation/expected/intra-grant-inplace.out index 1aa9da622da05..23c34d0ca0935 100644 --- a/src/test/isolation/expected/intra-grant-inplace.out +++ b/src/test/isolation/expected/intra-grant-inplace.out @@ -226,7 +226,7 @@ step revoke4: <... completed> starting permutation: b1 drop1 b3 sfu3 revoke4 c1 r3 step b1: BEGIN; step drop1: - DROP TABLE intra_grant_inplace; + DELETE FROM pg_class WHERE relname = 'intra_grant_inplace'; step b3: BEGIN ISOLATION LEVEL READ COMMITTED; step sfu3: @@ -248,6 +248,6 @@ relhasindex ----------- (0 rows) -s4: WARNING: got: relation "intra_grant_inplace" does not exist +s4: WARNING: got: cache lookup failed for relation REDACTED step revoke4: <... 
completed> step r3: ROLLBACK; diff --git a/src/test/isolation/expected/merge-match-recheck.out b/src/test/isolation/expected/merge-match-recheck.out index 90300f1db5ab3..4250b85af2d3c 100644 --- a/src/test/isolation/expected/merge-match-recheck.out +++ b/src/test/isolation/expected/merge-match-recheck.out @@ -271,6 +271,151 @@ key|balance|status|val step c1: COMMIT; +starting permutation: update1 update6 merge_bal c2 select1 c1 +step update1: UPDATE target t SET balance = balance + 10, val = t.val || ' updated by update1' WHERE t.key = 1; +step update6: UPDATE target t SET balance = balance - 100, val = t.val || ' updated by update6' WHERE t.key = 1; +step merge_bal: + MERGE INTO target t + USING (SELECT 1 as key) s + ON s.key = t.key + WHEN MATCHED AND balance < 100 THEN + UPDATE SET balance = balance * 2, val = t.val || ' when1' + WHEN MATCHED AND balance < 200 THEN + UPDATE SET balance = balance * 4, val = t.val || ' when2' + WHEN MATCHED AND balance < 300 THEN + UPDATE SET balance = balance * 8, val = t.val || ' when3'; + +step c2: COMMIT; +step merge_bal: <... completed> +step select1: SELECT * FROM target; +key|balance|status|val +---+-------+------+------------------------------------------------- + 1| 140|s1 |setup updated by update1 updated by update6 when1 +(1 row) + +step c1: COMMIT; + +starting permutation: update1_pa update6_pa merge_bal_pa c2 select1_pa c1 +step update1_pa: UPDATE target_pa t SET balance = balance + 10, val = t.val || ' updated by update1_pa' WHERE t.key = 1; +step update6_pa: UPDATE target_pa t SET balance = balance - 100, val = t.val || ' updated by update6_pa' WHERE t.key = 1; +step merge_bal_pa: + MERGE INTO target_pa t + USING (SELECT 1 as key) s + ON s.key = t.key + WHEN MATCHED AND balance < 100 THEN + UPDATE SET balance = balance * 2, val = t.val || ' when1' + WHEN MATCHED AND balance < 200 THEN + UPDATE SET balance = balance * 4, val = t.val || ' when2' + WHEN MATCHED AND balance < 300 THEN + UPDATE SET balance = balance * 8, val = t.val || ' when3'; + +step c2: COMMIT; +step merge_bal_pa: <... completed> +step select1_pa: SELECT * FROM target_pa; +key|balance|status|val +---+-------+------+------------------------------------------------------- + 1| 140|s1 |setup updated by update1_pa updated by update6_pa when1 +(1 row) + +step c1: COMMIT; + +starting permutation: update1_tg update6_tg merge_bal_tg c2 select1_tg c1 +s2: NOTICE: Update: (1,160,s1,setup) -> (1,170,s1,"setup updated by update1_tg") +step update1_tg: UPDATE target_tg t SET balance = balance + 10, val = t.val || ' updated by update1_tg' WHERE t.key = 1; +s2: NOTICE: Update: (1,170,s1,"setup updated by update1_tg") -> (1,70,s1,"setup updated by update1_tg updated by update6_tg") +step update6_tg: UPDATE target_tg t SET balance = balance - 100, val = t.val || ' updated by update6_tg' WHERE t.key = 1; +step merge_bal_tg: + WITH t AS ( + MERGE INTO target_tg t + USING (SELECT 1 as key) s + ON s.key = t.key + WHEN MATCHED AND balance < 100 THEN + UPDATE SET balance = balance * 2, val = t.val || ' when1' + WHEN MATCHED AND balance < 200 THEN + UPDATE SET balance = balance * 4, val = t.val || ' when2' + WHEN MATCHED AND balance < 300 THEN + UPDATE SET balance = balance * 8, val = t.val || ' when3' + RETURNING t.* + ) + SELECT * FROM t; + +step c2: COMMIT; +s1: NOTICE: Update: (1,70,s1,"setup updated by update1_tg updated by update6_tg") -> (1,140,s1,"setup updated by update1_tg updated by update6_tg when1") +step merge_bal_tg: <... 
completed> +key|balance|status|val +---+-------+------+------------------------------------------------------- + 1| 140|s1 |setup updated by update1_tg updated by update6_tg when1 +(1 row) + +step select1_tg: SELECT * FROM target_tg; +key|balance|status|val +---+-------+------+------------------------------------------------------- + 1| 140|s1 |setup updated by update1_tg updated by update6_tg when1 +(1 row) + +step c1: COMMIT; + +starting permutation: update7 update6 merge_bal c2 select1 c1 +step update7: UPDATE target t SET balance = 350, val = t.val || ' updated by update7' WHERE t.key = 1; +step update6: UPDATE target t SET balance = balance - 100, val = t.val || ' updated by update6' WHERE t.key = 1; +step merge_bal: + MERGE INTO target t + USING (SELECT 1 as key) s + ON s.key = t.key + WHEN MATCHED AND balance < 100 THEN + UPDATE SET balance = balance * 2, val = t.val || ' when1' + WHEN MATCHED AND balance < 200 THEN + UPDATE SET balance = balance * 4, val = t.val || ' when2' + WHEN MATCHED AND balance < 300 THEN + UPDATE SET balance = balance * 8, val = t.val || ' when3'; + +step c2: COMMIT; +step merge_bal: <... completed> +step select1: SELECT * FROM target; +key|balance|status|val +---+-------+------+------------------------------------------------- + 1| 2000|s1 |setup updated by update7 updated by update6 when3 +(1 row) + +step c1: COMMIT; + +starting permutation: update1_pa_move merge_bal_pa c2 c1 +step update1_pa_move: UPDATE target_pa t SET balance = 210, val = t.val || ' updated by update1_pa_move' WHERE t.key = 1; +step merge_bal_pa: + MERGE INTO target_pa t + USING (SELECT 1 as key) s + ON s.key = t.key + WHEN MATCHED AND balance < 100 THEN + UPDATE SET balance = balance * 2, val = t.val || ' when1' + WHEN MATCHED AND balance < 200 THEN + UPDATE SET balance = balance * 4, val = t.val || ' when2' + WHEN MATCHED AND balance < 300 THEN + UPDATE SET balance = balance * 8, val = t.val || ' when3'; + +step c2: COMMIT; +step merge_bal_pa: <... completed> +ERROR: tuple to be locked was already moved to another partition due to concurrent update +step c1: COMMIT; + +starting permutation: update1_pa update1_pa_move merge_bal_pa c2 c1 +step update1_pa: UPDATE target_pa t SET balance = balance + 10, val = t.val || ' updated by update1_pa' WHERE t.key = 1; +step update1_pa_move: UPDATE target_pa t SET balance = 210, val = t.val || ' updated by update1_pa_move' WHERE t.key = 1; +step merge_bal_pa: + MERGE INTO target_pa t + USING (SELECT 1 as key) s + ON s.key = t.key + WHEN MATCHED AND balance < 100 THEN + UPDATE SET balance = balance * 2, val = t.val || ' when1' + WHEN MATCHED AND balance < 200 THEN + UPDATE SET balance = balance * 4, val = t.val || ' when2' + WHEN MATCHED AND balance < 300 THEN + UPDATE SET balance = balance * 8, val = t.val || ' when3'; + +step c2: COMMIT; +step merge_bal_pa: <... 
completed>
+ERROR: tuple to be locked was already moved to another partition due to concurrent update
+step c1: COMMIT;
+
 starting permutation: update1 merge_delete c2 select1 c1
 step update1: UPDATE target t SET balance = balance + 10, val = t.val || ' updated by update1' WHERE t.key = 1;
 step merge_delete: 
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index e3c669a29c7aa..9f1e997d81b00 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -16,6 +16,7 @@ test: ri-trigger
 test: partial-index
 test: two-ids
 test: multiple-row-versions
+test: index-killtuples
 test: index-only-scan
 test: index-only-bitmapscan
 test: predicate-lock-hot-tuple
@@ -110,6 +111,7 @@ test: partition-key-update-4
 test: plpgsql-toast
 test: cluster-conflict
 test: cluster-conflict-partition
+test: cluster-toast-value-reuse
 test: truncate-conflict
 test: serializable-parallel
 test: serializable-parallel-2
diff --git a/src/test/isolation/specs/cluster-toast-value-reuse.spec b/src/test/isolation/specs/cluster-toast-value-reuse.spec
new file mode 100644
index 0000000000000..9a2d10600b39f
--- /dev/null
+++ b/src/test/isolation/specs/cluster-toast-value-reuse.spec
@@ -0,0 +1,69 @@
+# Tests with CLUSTER for toast values
+
+# This test does a relation rewrite, with toast values reused to make the
+# rewrite cheaper (see the data_todo = 0 case in toast_save_datum()).
+#
+# A first session updates a non-toasted attribute of the table.  CLUSTER
+# is then executed in a second session.  The comparison of the values
+# allocated for the toasted column is done using a CTAS: the allocated
+# chunk_ids are saved before the rewrite, and compared after the rewrite.
+
+# ---------- global setup ----------
+setup
+{
+	DROP TABLE IF EXISTS cluster_toast_value CASCADE;
+	DROP TABLE IF EXISTS cluster_chunk_id CASCADE;
+
+	CREATE TABLE cluster_toast_value (
+	  id serial PRIMARY KEY,
+	  flag integer,
+	  value text);
+
+	-- Make sure 'value' is large enough to be toasted.
+	ALTER TABLE cluster_toast_value ALTER COLUMN value SET STORAGE EXTERNAL;
+
+	-- Clustering index.
+	CLUSTER cluster_toast_value_pkey ON cluster_toast_value;
+
+	-- Seed data: one row with a big string to force a TOAST tuple and trigger the todo=0 code path.
+	INSERT INTO cluster_toast_value(flag, value)
+	  VALUES (0, repeat(encode(sha256('1'), 'hex'), 120) || repeat('x', 8000));
+
+	CLUSTER cluster_toast_value;
+}
+
+teardown
+{
+	DROP TABLE IF EXISTS cluster_toast_value;
+	DROP TABLE IF EXISTS cluster_chunk_id;
+}
+
+session s1
+step s1_begin { BEGIN; }
+step s1_update { UPDATE cluster_toast_value SET flag = 1 WHERE TRUE; }
+step s1_commit { COMMIT; }
+
+session s2
+# Store the primary key values and their associated chunk IDs.  This makes
+# sure that some data is captured.
+step s2_store_chunk_ids {
+	CREATE TABLE cluster_chunk_id AS
+	  SELECT c.id, pg_column_toast_chunk_id(c.value) AS chunk_id
+	    FROM cluster_toast_value c;
+	SELECT count(*) FROM cluster_chunk_id;
+}
+step s2_cluster { CLUSTER cluster_toast_value; }
+
+# Verify that the allocated toast values are the same, indicating reuse.
+# This query reports the tuples with toast values that do not match.
+step s2_verify_chunk_ids {
+	SELECT o.id AS chunk_ids_preserved
+	  FROM cluster_chunk_id o
+	  JOIN cluster_toast_value c ON o.id = c.id
+	  WHERE o.chunk_id != pg_column_toast_chunk_id(c.value);
+}
+
+# Run UPDATE with its transaction still open, then store the chunk IDs.
+# CLUSTER will wait until the first transaction commits.  Finally, the chunk
+# IDs are compared.
+permutation s1_begin s1_update s2_store_chunk_ids s2_cluster s1_commit s2_verify_chunk_ids
diff --git a/src/test/isolation/specs/eval-plan-qual-trigger.spec b/src/test/isolation/specs/eval-plan-qual-trigger.spec
index b512edd28798a..232b3e27652a1 100644
--- a/src/test/isolation/specs/eval-plan-qual-trigger.spec
+++ b/src/test/isolation/specs/eval-plan-qual-trigger.spec
@@ -336,7 +336,7 @@ permutation s1_trig_rep_b_u s1_trig_rep_a_u
   s1_ins_a s1_ins_c s1_b_rc s2_b_rc
   s1_upd_a_tob s2_upd_all_data s1_c s2_c s0_rep
 
-# s1 deletes, s2 updates, s1 committs, EPQ failure should lead to no update
+# s1 deletes, s2 updates, s1 commits, EPQ failure should lead to no update
 permutation s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_d s1_trig_rep_a_u
   s1_ins_a s1_ins_c s1_b_rc s2_b_rc
   s1_del_a s2_upd_a_data s1_c s2_c
@@ -346,7 +346,7 @@ permutation s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_d s1_trig_rep_a_u
   s1_ins_a s1_ins_c s1_b_rc s2_b_rc
   s1_del_a s2_upd_a_data s1_r s2_c s0_rep
 
-# s1 deletes, s2 deletes, s1 committs, EPQ failure should lead to no delete
+# s1 deletes, s2 deletes, s1 commits, EPQ failure should lead to no delete
 permutation s1_trig_rep_b_d s1_trig_rep_a_d
   s1_ins_a s1_ins_c s1_b_rc s2_b_rc
   s1_del_a s2_del_a s1_c s2_c
diff --git a/src/test/isolation/specs/index-killtuples.spec b/src/test/isolation/specs/index-killtuples.spec
new file mode 100644
index 0000000000000..77fe8c689a793
--- /dev/null
+++ b/src/test/isolation/specs/index-killtuples.spec
@@ -0,0 +1,127 @@
+# Basic testing of killtuples / kill_prior_tuples / all_dead handling
+# for various index AMs
+#
+# This tests just enough to ensure that the kill* routines are actually
+# executed and do something approximately reasonable. It's *not* sufficient
+# testing for adding killitems support to a new AM!
+#
+# This doesn't really need to be an isolation test; it could be written as a
+# regular regression test. However, writing it as an isolation test ends up a
+# *lot* less verbose.
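+#
+# The permutations below share a measurement pattern built from the steps
+# defined later in this file (a sketch, using this spec's own step names):
+#
+#   measure: UPDATE counter SET heap_accesses =
+#       (SELECT heap_blks_read + heap_blks_hit
+#          FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple');
+#   access:  EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF)
+#       SELECT * FROM kill_prior_tuple WHERE key = 1;
+#   flush:   SELECT FROM pg_stat_force_next_flush();
+#   result:  SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses
+#       AS new_heap_accesses
+#       FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple';
+#
+# After a DELETE, the first access still reads the heap; if a repeated
+# access then reports 0 new_heap_accesses, the AM's kill* callback marked
+# the dead index entries during the previous scan.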
+ +setup +{ + CREATE TABLE counter(heap_accesses int); + INSERT INTO counter(heap_accesses) VALUES (0); +} + +teardown +{ + DROP TABLE counter; +} + +session s1 +# to ensure GUCs are reset +setup { RESET ALL; } + +step disable_seq { SET enable_seqscan = false; } + +step disable_bitmap { SET enable_bitmapscan = false; } + +# use a temporary table to make sure no other session can interfere with +# visibility determinations +step create_table { CREATE TEMPORARY TABLE kill_prior_tuple(key int not null, cat text not null); } + +step fill_10 { INSERT INTO kill_prior_tuple(key, cat) SELECT g.i, 'a' FROM generate_series(1, 10) g(i); } + +step fill_500 { INSERT INTO kill_prior_tuple(key, cat) SELECT g.i, 'a' FROM generate_series(1, 500) g(i); } + +# column-less select to make output easier to read +step flush { SELECT FROM pg_stat_force_next_flush(); } + +step measure { UPDATE counter SET heap_accesses = (SELECT heap_blks_read + heap_blks_hit FROM pg_statio_all_tables WHERE relname = 'kill_prior_tuple'); } + +step result { SELECT heap_blks_read + heap_blks_hit - counter.heap_accesses AS new_heap_accesses FROM counter, pg_statio_all_tables WHERE relname = 'kill_prior_tuple'; } + +step access { EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM kill_prior_tuple WHERE key = 1; } + +step delete { DELETE FROM kill_prior_tuple; } + +step drop_table { DROP TABLE IF EXISTS kill_prior_tuple; } + +### steps for testing btree indexes ### +step create_btree { CREATE INDEX kill_prior_tuple_btree ON kill_prior_tuple USING btree (key); } + +### steps for testing gist indexes ### +# Creating the extensions takes time, so we don't want to do so when testing +# other AMs +step create_ext_btree_gist { CREATE EXTENSION btree_gist; } +step drop_ext_btree_gist { DROP EXTENSION btree_gist; } +step create_gist { CREATE INDEX kill_prior_tuple_gist ON kill_prior_tuple USING gist (key); } + +### steps for testing gin indexes ### +# See create_ext_btree_gist +step create_ext_btree_gin { CREATE EXTENSION btree_gin; } +step drop_ext_btree_gin { DROP EXTENSION btree_gin; } +step create_gin { CREATE INDEX kill_prior_tuple_gin ON kill_prior_tuple USING gin (key); } + +### steps for testing hash indexes ### +step create_hash { CREATE INDEX kill_prior_tuple_hash ON kill_prior_tuple USING hash (key); } + + +# test killtuples with btree index +permutation + create_table fill_500 create_btree flush + disable_seq disable_bitmap + # show each access to non-deleted tuple increments heap_blks_* + measure access flush result + measure access flush result + delete flush + # first access after accessing deleted tuple still needs to access heap + measure access flush result + # but after kill_prior_tuple did its thing, we shouldn't access heap anymore + measure access flush result + drop_table + +# Same as first permutation, except testing gist +permutation + create_table fill_500 create_ext_btree_gist create_gist flush + disable_seq disable_bitmap + measure access flush result + measure access flush result + delete flush + measure access flush result + measure access flush result + drop_table drop_ext_btree_gist + +# Test gist, but with fewer rows - shows that killitems doesn't work anymore! 
+permutation
+  create_table fill_10 create_ext_btree_gist create_gist flush
+  disable_seq disable_bitmap
+  measure access flush result
+  measure access flush result
+  delete flush
+  measure access flush result
+  measure access flush result
+  drop_table drop_ext_btree_gist
+
+# Same as first permutation, except testing hash
+permutation
+  create_table fill_500 create_hash flush
+  disable_seq disable_bitmap
+  measure access flush result
+  measure access flush result
+  delete flush
+  measure access flush result
+  measure access flush result
+  drop_table
+
+# Similar to first permutation, except that gin does not have killtuples support
+permutation
+  create_table fill_500 create_ext_btree_gin create_gin flush
+  disable_seq
+  delete flush
+  measure access flush result
+  # will still fetch from heap
+  measure access flush result
+  drop_table drop_ext_btree_gin
diff --git a/src/test/isolation/specs/intra-grant-inplace.spec b/src/test/isolation/specs/intra-grant-inplace.spec
index 9936d389359e5..e9c7848624cd7 100644
--- a/src/test/isolation/specs/intra-grant-inplace.spec
+++ b/src/test/isolation/specs/intra-grant-inplace.spec
@@ -20,7 +20,7 @@ step grant1	{
 	GRANT SELECT ON intra_grant_inplace TO PUBLIC;
 }
 step drop1	{
-	DROP TABLE intra_grant_inplace;
+	DELETE FROM pg_class WHERE relname = 'intra_grant_inplace';
 }
 step c1	{ COMMIT; }
diff --git a/src/test/isolation/specs/merge-match-recheck.spec b/src/test/isolation/specs/merge-match-recheck.spec
index 15226e40c9efc..6e7a776d17e5a 100644
--- a/src/test/isolation/specs/merge-match-recheck.spec
+++ b/src/test/isolation/specs/merge-match-recheck.spec
@@ -146,6 +146,8 @@ setup
 	BEGIN ISOLATION LEVEL READ COMMITTED;
 }
 step "update1" { UPDATE target t SET balance = balance + 10, val = t.val || ' updated by update1' WHERE t.key = 1; }
+step "update1_pa" { UPDATE target_pa t SET balance = balance + 10, val = t.val || ' updated by update1_pa' WHERE t.key = 1; }
+step "update1_pa_move" { UPDATE target_pa t SET balance = 210, val = t.val || ' updated by update1_pa_move' WHERE t.key = 1; }
 step "update1_tg" { UPDATE target_tg t SET balance = balance + 10, val = t.val || ' updated by update1_tg' WHERE t.key = 1; }
 step "update2" { UPDATE target t SET status = 's2', val = t.val || ' updated by update2' WHERE t.key = 1; }
 step "update2_tg" { UPDATE target_tg t SET status = 's2', val = t.val || ' updated by update2_tg' WHERE t.key = 1; }
@@ -153,6 +155,10 @@ step "update3" { UPDATE target t SET status = 's3', val = t.val || ' updated by
 step "update3_tg" { UPDATE target_tg t SET status = 's3', val = t.val || ' updated by update3_tg' WHERE t.key = 1; }
 step "update5" { UPDATE target t SET status = 's5', val = t.val || ' updated by update5' WHERE t.key = 1; }
 step "update5_tg" { UPDATE target_tg t SET status = 's5', val = t.val || ' updated by update5_tg' WHERE t.key = 1; }
+step "update6" { UPDATE target t SET balance = balance - 100, val = t.val || ' updated by update6' WHERE t.key = 1; }
+step "update6_pa" { UPDATE target_pa t SET balance = balance - 100, val = t.val || ' updated by update6_pa' WHERE t.key = 1; }
+step "update6_tg" { UPDATE target_tg t SET balance = balance - 100, val = t.val || ' updated by update6_tg' WHERE t.key = 1; }
+step "update7" { UPDATE target t SET balance = 350, val = t.val || ' updated by update7' WHERE t.key = 1; }
 step "update_bal1" { UPDATE target t SET balance = 50, val = t.val || ' updated by update_bal1' WHERE t.key = 1; }
 step "update_bal1_pa" { UPDATE target_pa t SET balance = 50, val = t.val || ' updated by update_bal1_pa' WHERE t.key = 1; }
 step "update_bal1_tg" { UPDATE target_tg t SET balance = 50, val = t.val || ' updated by update_bal1_tg' WHERE t.key = 1; }
@@ -179,6 +185,18 @@ permutation "update_bal1" "merge_bal" "c2" "select1" "c1"
 permutation "update_bal1_pa" "merge_bal_pa" "c2" "select1_pa" "c1"
 permutation "update_bal1_tg" "merge_bal_tg" "c2" "select1_tg" "c1"
 
+# merge_bal sees the row concurrently updated twice and rechecks the WHEN conditions; a different check passes, so final balance = 140
+permutation "update1" "update6" "merge_bal" "c2" "select1" "c1"
+permutation "update1_pa" "update6_pa" "merge_bal_pa" "c2" "select1_pa" "c1"
+permutation "update1_tg" "update6_tg" "merge_bal_tg" "c2" "select1_tg" "c1"
+
+# merge_bal sees the row concurrently updated twice; the first update would cause all checks to fail, but the second causes a different check to pass, so final balance = 2000
+permutation "update7" "update6" "merge_bal" "c2" "select1" "c1"
+
+# merge_bal sees a concurrently updated row moved to a new partition, so it fails
+permutation "update1_pa_move" "merge_bal_pa" "c2" "c1"
+permutation "update1_pa" "update1_pa_move" "merge_bal_pa" "c2" "c1"
+
 # merge_delete sees concurrently updated row and rechecks WHEN conditions, but recheck passes and row is deleted
 permutation "update1" "merge_delete" "c2" "select1" "c1"
 permutation "update1_tg" "merge_delete_tg" "c2" "select1_tg" "c1"
diff --git a/src/test/ldap/meson.build b/src/test/ldap/meson.build
index 7eaa393212ad0..04c738d27580c 100644
--- a/src/test/ldap/meson.build
+++ b/src/test/ldap/meson.build
@@ -8,6 +8,7 @@ tests += {
     'tests': [
       't/001_auth.pl',
       't/002_bindpasswd.pl',
+      't/003_ldap_connection_param_lookup.pl',
     ],
     'env': {
       'with_ldap': ldap.found() ? 'yes' : 'no',
diff --git a/src/test/ldap/t/003_ldap_connection_param_lookup.pl b/src/test/ldap/t/003_ldap_connection_param_lookup.pl
new file mode 100644
index 0000000000000..8c1e1caf992a4
--- /dev/null
+++ b/src/test/ldap/t/003_ldap_connection_param_lookup.pl
@@ -0,0 +1,216 @@
+
+# Copyright (c) 2025, PostgreSQL Global Development Group
+
+use strict;
+use warnings FATAL => 'all';
+
+use FindBin;
+use lib "$FindBin::RealBin/..";
+
+use File::Copy;
+use LdapServer;
+use PostgreSQL::Test::Utils;
+use PostgreSQL::Test::Cluster;
+use Test::More;
+
+if ($ENV{with_ldap} ne 'yes')
+{
+	plan skip_all => 'LDAP not supported by this build';
+}
+elsif (!$ENV{PG_TEST_EXTRA} || $ENV{PG_TEST_EXTRA} !~ /\bldap\b/)
+{
+	plan skip_all =>
+	  'Potentially unsafe test LDAP not enabled in PG_TEST_EXTRA';
+}
+elsif (!$LdapServer::setup)
+{
+	plan skip_all => $LdapServer::setup_error;
+}
+
+# This tests scenarios related to the service name and the service file
+# connection options and their environment variables, with the connection
+# parameters looked up from an LDAP server.
+my $dummy_node = PostgreSQL::Test::Cluster->new('dummy_node');
+$dummy_node->init;
+
+my $node = PostgreSQL::Test::Cluster->new('node');
+$node->init;
+$node->start;
+
+note "setting up LDAP server";
+
+my $ldap_rootpw = 'secret';
+my $ldap = LdapServer->new($ldap_rootpw, 'anonymous');    # use anonymous auth
+$ldap->ldapadd_file('authdata.ldif');
+$ldap->ldapsetpw('uid=test1,dc=example,dc=net', 'secret1');
+$ldap->ldapsetpw('uid=test2,dc=example,dc=net', 'secret2');
+
+# Windows vs non-Windows: CRLF vs LF for the file's newline, relying on
+# the fact that libpq uses fgets() when reading the lines of a service file.
+my $newline = $windows_os ? "\r\n" : "\n";
+
+my $td = PostgreSQL::Test::Utils::tempdir;
+
+# Create an LDIF file based on the Postgres connection info.
+my $ldif_valid = "$td/connection_params.ldif";
+append_to_file($ldif_valid, "version:1");
+append_to_file($ldif_valid, $newline);
+append_to_file($ldif_valid, "dn:cn=mydatabase,dc=example,dc=net");
+append_to_file($ldif_valid, $newline);
+append_to_file($ldif_valid, "changetype:add");
+append_to_file($ldif_valid, $newline);
+append_to_file($ldif_valid, "objectclass:top");
+append_to_file($ldif_valid, $newline);
+append_to_file($ldif_valid, "objectclass:device");
+append_to_file($ldif_valid, $newline);
+append_to_file($ldif_valid, "cn:mydatabase");
+append_to_file($ldif_valid, $newline);
+append_to_file($ldif_valid, "description:host=");
+append_to_file($ldif_valid, $node->host);
+append_to_file($ldif_valid, $newline);
+append_to_file($ldif_valid, "description:port=");
+append_to_file($ldif_valid, $node->port);
+
+$ldap->ldapadd_file($ldif_valid);
+
+my ($ldap_server, $ldap_port, $ldaps_port, $ldap_url,
+	$ldaps_url, $ldap_basedn, $ldap_rootdn
+) = $ldap->prop(qw(server port s_port url s_url basedn rootdn));
+
+# don't bother to check the server's cert (though perhaps we should)
+$ENV{'LDAPTLS_REQCERT'} = "never";
+
+note "setting up PostgreSQL instance";
+
+# Create the set of service files used in the tests.
+
+# File that includes a valid service name, whose contents are an LDAP URL
+# used to look up the connection parameters on the LDAP server.
+my $srvfile_valid = "$td/pg_service_valid.conf";
+append_to_file($srvfile_valid, "[my_srv]");
+append_to_file($srvfile_valid, $newline);
+append_to_file($srvfile_valid, "ldap://localhost:");
+append_to_file($srvfile_valid, $ldap_port);
+append_to_file($srvfile_valid,
+	"/dc=example,dc=net?description?one?(cn=mydatabase)");
+
+# File defined with no contents, used as default value for
+# PGSERVICEFILE, so that no lookup is attempted in the user's home
+# directory.
+my $srvfile_empty = "$td/pg_service_empty.conf";
+append_to_file($srvfile_empty, '');
+
+# Default service file in PGSYSCONFDIR.
+my $srvfile_default = "$td/pg_service.conf";
+
+# Missing service file.
+my $srvfile_missing = "$td/pg_service_missing.conf";
+
+# Set the fallback directory lookup of the service file to the
+# temporary directory of this test.  PGSYSCONFDIR is used if the
+# service file defined in PGSERVICEFILE cannot be found, or when a
+# service file is found but does not include the service name.
+local $ENV{PGSYSCONFDIR} = $td;
+
+# Force PGSERVICEFILE to a default location, so that this test never
+# tries to look at a home directory.  This value needs to remain at
+# the top of this script before running any tests, and should never be
+# changed.
+local $ENV{PGSERVICEFILE} = "$srvfile_empty";
+
+# Checks combinations of service name and a valid service file.
+{
+	local $ENV{PGSERVICEFILE} = $srvfile_valid;
+
+	$dummy_node->connect_ok(
+		'service=my_srv',
+		'connection with correct "service" string and PGSERVICEFILE',
+		sql => "SELECT 'connect1_1'",
+		expected_stdout => qr/connect1_1/);
+
+	$dummy_node->connect_ok(
+		'postgres://?service=my_srv',
+		'connection with correct "service" URI and PGSERVICEFILE',
+		sql => "SELECT 'connect1_2'",
+		expected_stdout => qr/connect1_2/);
+
+	$dummy_node->connect_fails(
+		'service=undefined-service',
+		'connection with incorrect "service" string and PGSERVICEFILE',
+		expected_stderr =>
+		  qr/definition of service "undefined-service" not found/);
+
+	local $ENV{PGSERVICE} = 'my_srv';
+
+	$dummy_node->connect_ok(
+		'',
+		'connection with correct PGSERVICE and PGSERVICEFILE',
+		sql => "SELECT 'connect1_3'",
+		expected_stdout => qr/connect1_3/);
+
+	local $ENV{PGSERVICE} = 'undefined-service';
+
+	$dummy_node->connect_fails(
+		'',
+		'connection with incorrect PGSERVICE and PGSERVICEFILE',
+		expected_stderr =>
+		  qr/definition of service "undefined-service" not found/);
+}
+
+# Checks case of incorrect service file.
+{
+	local $ENV{PGSERVICEFILE} = $srvfile_missing;
+
+	$dummy_node->connect_fails(
+		'service=my_srv',
+		'connection with correct "service" string and incorrect PGSERVICEFILE',
+		expected_stderr =>
+		  qr/service file ".*pg_service_missing.conf" not found/);
+}
+
+# Checks case of service file named "pg_service.conf" in PGSYSCONFDIR.
+{
+	# Create a copy of the valid file.
+	my $srvfile_default = "$td/pg_service.conf";
+	copy($srvfile_valid, $srvfile_default);
+
+	$dummy_node->connect_ok(
+		'service=my_srv',
+		'connection with correct "service" string and pg_service.conf',
+		sql => "SELECT 'connect2_1'",
+		expected_stdout => qr/connect2_1/);
+
+	$dummy_node->connect_ok(
+		'postgres://?service=my_srv',
+		'connection with correct "service" URI and default pg_service.conf',
+		sql => "SELECT 'connect2_2'",
+		expected_stdout => qr/connect2_2/);
+
+	$dummy_node->connect_fails(
+		'service=undefined-service',
+		'connection with incorrect "service" string and default pg_service.conf',
+		expected_stderr =>
+		  qr/definition of service "undefined-service" not found/);
+
+	local $ENV{PGSERVICE} = 'my_srv';
+
+	$dummy_node->connect_ok(
+		'',
+		'connection with correct PGSERVICE and default pg_service.conf',
+		sql => "SELECT 'connect2_3'",
+		expected_stdout => qr/connect2_3/);
+
+	local $ENV{PGSERVICE} = 'undefined-service';
+
+	$dummy_node->connect_fails(
+		'',
+		'connection with incorrect PGSERVICE and default pg_service.conf',
+		expected_stderr =>
+		  qr/definition of service "undefined-service" not found/);
+
+	# Remove default pg_service.conf.
+	unlink($srvfile_default);
+}
+
+$node->teardown_node;
+
+done_testing();
diff --git a/src/test/modules/Makefile b/src/test/modules/Makefile
index 7d3d3d52b45e9..903a8ac151aa1 100644
--- a/src/test/modules/Makefile
+++ b/src/test/modules/Makefile
@@ -25,6 +25,7 @@ SUBDIRS = \
 	test_escape \
 	test_extensions \
 	test_ginpostinglist \
+	test_int128 \
 	test_integerset \
 	test_json_parser \
 	test_lfind \
diff --git a/src/test/modules/libpq_pipeline/libpq_pipeline.c b/src/test/modules/libpq_pipeline/libpq_pipeline.c
index 9a3c0236325c6..b3af70fa09bf8 100644
--- a/src/test/modules/libpq_pipeline/libpq_pipeline.c
+++ b/src/test/modules/libpq_pipeline/libpq_pipeline.c
@@ -88,20 +88,67 @@ pg_fatal_impl(int line, const char *fmt,...)
 }
 
 /*
- * Check that the query on the given connection got canceled.
+ * Check that the next PGresult returned by libpq has the specified status;
+ * return the PGresult so that the caller can perform additional checks.
  */
-#define confirm_query_canceled(conn) confirm_query_canceled_impl(__LINE__, conn)
-static void
-confirm_query_canceled_impl(int line, PGconn *conn)
+#define confirm_result_status(conn, status) confirm_result_status_impl(__LINE__, conn, status)
+static PGresult *
+confirm_result_status_impl(int line, PGconn *conn, ExecStatusType status)
 {
-	PGresult   *res = NULL;
+	PGresult   *res;
 
 	res = PQgetResult(conn);
 	if (res == NULL)
-		pg_fatal_impl(line, "PQgetResult returned null: %s",
+		pg_fatal_impl(line, "PQgetResult returned null unexpectedly: %s",
 					  PQerrorMessage(conn));
-	if (PQresultStatus(res) != PGRES_FATAL_ERROR)
-		pg_fatal_impl(line, "query did not fail when it was expected");
+	if (PQresultStatus(res) != status)
+		pg_fatal_impl(line, "PQgetResult returned status %s, expected %s: %s",
+					  PQresStatus(PQresultStatus(res)),
+					  PQresStatus(status),
+					  PQerrorMessage(conn));
+	return res;
+}
+
+/*
+ * Check that the next PGresult returned by libpq has the specified status,
+ * then free the PGresult.
+ */
+#define consume_result_status(conn, status) consume_result_status_impl(__LINE__, conn, status)
+static void
+consume_result_status_impl(int line, PGconn *conn, ExecStatusType status)
+{
+	PGresult   *res;
+
+	res = confirm_result_status_impl(line, conn, status);
+	PQclear(res);
+}
+
+/*
+ * Check that the next PGresult returned by libpq is null.
+ */
+#define consume_null_result(conn) consume_null_result_impl(__LINE__, conn)
+static void
+consume_null_result_impl(int line, PGconn *conn)
+{
+	PGresult   *res;
+
+	res = PQgetResult(conn);
+	if (res != NULL)
+		pg_fatal_impl(line, "expected NULL PGresult, got %s: %s",
+					  PQresStatus(PQresultStatus(res)),
+					  PQerrorMessage(conn));
+}
+
+/*
+ * Check that the query on the given connection got canceled.
+ */ +#define consume_query_cancel(conn) consume_query_cancel_impl(__LINE__, conn) +static void +consume_query_cancel_impl(int line, PGconn *conn) +{ + PGresult *res; + + res = confirm_result_status_impl(line, conn, PGRES_FATAL_ERROR); if (strcmp(PQresultErrorField(res, PG_DIAG_SQLSTATE), "57014") != 0) pg_fatal_impl(line, "query failed with a different error than cancellation: %s", PQerrorMessage(conn)); @@ -234,6 +281,10 @@ copy_connection(PGconn *conn) pg_fatal("Connection to database failed: %s", PQerrorMessage(copyConn)); + pfree(keywords); + pfree(vals); + PQconninfoFree(opts); + return copyConn; } @@ -265,13 +316,13 @@ test_cancel(PGconn *conn) cancel = PQgetCancel(conn); if (!PQcancel(cancel, errorbuf, sizeof(errorbuf))) pg_fatal("failed to run PQcancel: %s", errorbuf); - confirm_query_canceled(conn); + consume_query_cancel(conn); /* PGcancel object can be reused for the next query */ send_cancellable_query(conn, monitorConn); if (!PQcancel(cancel, errorbuf, sizeof(errorbuf))) pg_fatal("failed to run PQcancel: %s", errorbuf); - confirm_query_canceled(conn); + consume_query_cancel(conn); PQfreeCancel(cancel); @@ -279,14 +330,14 @@ test_cancel(PGconn *conn) send_cancellable_query(conn, monitorConn); if (!PQrequestCancel(conn)) pg_fatal("failed to run PQrequestCancel: %s", PQerrorMessage(conn)); - confirm_query_canceled(conn); + consume_query_cancel(conn); /* test PQcancelBlocking */ send_cancellable_query(conn, monitorConn); cancelConn = PQcancelCreate(conn); if (!PQcancelBlocking(cancelConn)) pg_fatal("failed to run PQcancelBlocking: %s", PQcancelErrorMessage(cancelConn)); - confirm_query_canceled(conn); + consume_query_cancel(conn); PQcancelFinish(cancelConn); /* test PQcancelCreate and then polling with PQcancelPoll */ @@ -340,7 +391,7 @@ test_cancel(PGconn *conn) } if (PQcancelStatus(cancelConn) != CONNECTION_OK) pg_fatal("unexpected cancel connection status: %s", PQcancelErrorMessage(cancelConn)); - confirm_query_canceled(conn); + consume_query_cancel(conn); /* * test PQcancelReset works on the cancel connection and it can be reused @@ -397,9 +448,10 @@ test_cancel(PGconn *conn) } if (PQcancelStatus(cancelConn) != CONNECTION_OK) pg_fatal("unexpected cancel connection status: %s", PQcancelErrorMessage(cancelConn)); - confirm_query_canceled(conn); + consume_query_cancel(conn); PQcancelFinish(cancelConn); + PQfinish(monitorConn); fprintf(stderr, "ok\n"); } @@ -428,6 +480,7 @@ test_disallowed_in_pipeline(PGconn *conn) "synchronous command execution functions are not allowed in pipeline mode\n") != 0) pg_fatal("did not get expected error message; got: \"%s\"", PQerrorMessage(conn)); + PQclear(res); /* PQsendQuery should fail in pipeline mode */ if (PQsendQuery(conn, "SELECT 1") != 0) @@ -460,6 +513,7 @@ test_disallowed_in_pipeline(PGconn *conn) if (PQresultStatus(res) != PGRES_TUPLES_OK) pg_fatal("PQexec should succeed after exiting pipeline mode but failed with: %s", PQerrorMessage(conn)); + PQclear(res); fprintf(stderr, "ok\n"); } @@ -467,7 +521,6 @@ test_disallowed_in_pipeline(PGconn *conn) static void test_multi_pipelines(PGconn *conn) { - PGresult *res = NULL; const char *dummy_params[1] = {"1"}; Oid dummy_param_oids[1] = {INT4OID}; @@ -508,87 +561,31 @@ test_multi_pipelines(PGconn *conn) /* OK, start processing the results */ /* first pipeline */ + consume_result_status(conn, PGRES_TUPLES_OK); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("PQgetResult returned null when there's a pipeline item: %s", - PQerrorMessage(conn)); - - if (PQresultStatus(res) != 
PGRES_TUPLES_OK) - pg_fatal("Unexpected result code %s from first pipeline item", - PQresStatus(PQresultStatus(res))); - PQclear(res); - res = NULL; - - if (PQgetResult(conn) != NULL) - pg_fatal("PQgetResult returned something extra after first result"); + consume_null_result(conn); if (PQexitPipelineMode(conn) != 0) pg_fatal("exiting pipeline mode after query but before sync succeeded incorrectly"); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("PQgetResult returned null when sync result expected: %s", - PQerrorMessage(conn)); - - if (PQresultStatus(res) != PGRES_PIPELINE_SYNC) - pg_fatal("Unexpected result code %s instead of sync result, error: %s", - PQresStatus(PQresultStatus(res)), PQerrorMessage(conn)); - PQclear(res); + consume_result_status(conn, PGRES_PIPELINE_SYNC); /* second pipeline */ + consume_result_status(conn, PGRES_TUPLES_OK); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("PQgetResult returned null when there's a pipeline item: %s", - PQerrorMessage(conn)); - - if (PQresultStatus(res) != PGRES_TUPLES_OK) - pg_fatal("Unexpected result code %s from second pipeline item", - PQresStatus(PQresultStatus(res))); - PQclear(res); - res = NULL; - - if (PQgetResult(conn) != NULL) - pg_fatal("PQgetResult returned something extra after first result"); + consume_null_result(conn); if (PQexitPipelineMode(conn) != 0) pg_fatal("exiting pipeline mode after query but before sync succeeded incorrectly"); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("PQgetResult returned null when sync result expected: %s", - PQerrorMessage(conn)); - - if (PQresultStatus(res) != PGRES_PIPELINE_SYNC) - pg_fatal("Unexpected result code %s instead of sync result, error: %s", - PQresStatus(PQresultStatus(res)), PQerrorMessage(conn)); - PQclear(res); + consume_result_status(conn, PGRES_PIPELINE_SYNC); /* third pipeline */ + consume_result_status(conn, PGRES_TUPLES_OK); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("PQgetResult returned null when there's a pipeline item: %s", - PQerrorMessage(conn)); - - if (PQresultStatus(res) != PGRES_TUPLES_OK) - pg_fatal("Unexpected result code %s from third pipeline item", - PQresStatus(PQresultStatus(res))); - - res = PQgetResult(conn); - if (res != NULL) - pg_fatal("Expected null result, got %s", - PQresStatus(PQresultStatus(res))); + consume_null_result(conn); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("PQgetResult returned null when there's a pipeline item: %s", - PQerrorMessage(conn)); - - if (PQresultStatus(res) != PGRES_PIPELINE_SYNC) - pg_fatal("Unexpected result code %s from second pipeline sync", - PQresStatus(PQresultStatus(res))); + consume_result_status(conn, PGRES_PIPELINE_SYNC); /* We're still in pipeline mode ... 
*/ if (PQpipelineStatus(conn) == PQ_PIPELINE_OFF) @@ -657,36 +654,17 @@ test_nosync(PGconn *conn) /* Now read all results */ for (;;) { - PGresult *res; - - res = PQgetResult(conn); - - /* NULL results are only expected after TUPLES_OK */ - if (res == NULL) - pg_fatal("got unexpected NULL result after %d results", results); - /* We expect exactly one TUPLES_OK result for each query we sent */ - if (PQresultStatus(res) == PGRES_TUPLES_OK) - { - PGresult *res2; - - /* and one NULL result should follow each */ - res2 = PQgetResult(conn); - if (res2 != NULL) - pg_fatal("expected NULL, got %s", - PQresStatus(PQresultStatus(res2))); - PQclear(res); - results++; + consume_result_status(conn, PGRES_TUPLES_OK); - /* if we're done, we're done */ - if (results == numqueries) - break; + /* and one NULL result should follow each */ + consume_null_result(conn); - continue; - } + results++; - /* anything else is unexpected */ - pg_fatal("got unexpected %s\n", PQresStatus(PQresultStatus(res))); + /* if we're done, we're done */ + if (results == numqueries) + break; } fprintf(stderr, "ok\n"); @@ -716,10 +694,12 @@ test_pipeline_abort(PGconn *conn) res = PQexec(conn, drop_table_sql); if (PQresultStatus(res) != PGRES_COMMAND_OK) pg_fatal("dispatching DROP TABLE failed: %s", PQerrorMessage(conn)); + PQclear(res); res = PQexec(conn, create_table_sql); if (PQresultStatus(res) != PGRES_COMMAND_OK) pg_fatal("dispatching CREATE TABLE failed: %s", PQerrorMessage(conn)); + PQclear(res); /* * Queue up a couple of small pipelines and process each without returning @@ -763,33 +743,16 @@ test_pipeline_abort(PGconn *conn) * a pipeline aborted message for the second insert, a pipeline-end, then * a command-ok and a pipeline-ok for the second pipeline operation. */ - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("Unexpected NULL result: %s", PQerrorMessage(conn)); - if (PQresultStatus(res) != PGRES_COMMAND_OK) - pg_fatal("Unexpected result status %s: %s", - PQresStatus(PQresultStatus(res)), - PQresultErrorMessage(res)); - PQclear(res); + consume_result_status(conn, PGRES_COMMAND_OK); /* NULL result to signal end-of-results for this command */ - if ((res = PQgetResult(conn)) != NULL) - pg_fatal("Expected null result, got %s", - PQresStatus(PQresultStatus(res))); + consume_null_result(conn); /* Second query caused error, so we expect an error next */ - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("Unexpected NULL result: %s", PQerrorMessage(conn)); - if (PQresultStatus(res) != PGRES_FATAL_ERROR) - pg_fatal("Unexpected result code -- expected PGRES_FATAL_ERROR, got %s", - PQresStatus(PQresultStatus(res))); - PQclear(res); + consume_result_status(conn, PGRES_FATAL_ERROR); /* NULL result to signal end-of-results for this command */ - if ((res = PQgetResult(conn)) != NULL) - pg_fatal("Expected null result, got %s", - PQresStatus(PQresultStatus(res))); + consume_null_result(conn); /* * pipeline should now be aborted. 
@@ -802,17 +765,10 @@ test_pipeline_abort(PGconn *conn) pg_fatal("pipeline should be flagged as aborted but isn't"); /* third query in pipeline, the second insert */ - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("Unexpected NULL result: %s", PQerrorMessage(conn)); - if (PQresultStatus(res) != PGRES_PIPELINE_ABORTED) - pg_fatal("Unexpected result code -- expected PGRES_PIPELINE_ABORTED, got %s", - PQresStatus(PQresultStatus(res))); - PQclear(res); + consume_result_status(conn, PGRES_PIPELINE_ABORTED); /* NULL result to signal end-of-results for this command */ - if ((res = PQgetResult(conn)) != NULL) - pg_fatal("Expected null result, got %s", PQresStatus(PQresultStatus(res))); + consume_null_result(conn); if (PQpipelineStatus(conn) != PQ_PIPELINE_ABORTED) pg_fatal("pipeline should be flagged as aborted but isn't"); @@ -827,14 +783,7 @@ test_pipeline_abort(PGconn *conn) * (This is so clients know to start processing results normally again and * can tell the difference between skipped commands and the sync.) */ - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("Unexpected NULL result: %s", PQerrorMessage(conn)); - if (PQresultStatus(res) != PGRES_PIPELINE_SYNC) - pg_fatal("Unexpected result code from first pipeline sync\n" - "Expected PGRES_PIPELINE_SYNC, got %s", - PQresStatus(PQresultStatus(res))); - PQclear(res); + consume_result_status(conn, PGRES_PIPELINE_SYNC); if (PQpipelineStatus(conn) == PQ_PIPELINE_ABORTED) pg_fatal("sync should've cleared the aborted flag but didn't"); @@ -844,30 +793,16 @@ test_pipeline_abort(PGconn *conn) pg_fatal("Fell out of pipeline mode somehow"); /* the insert from the second pipeline */ - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("Unexpected NULL result: %s", PQerrorMessage(conn)); - if (PQresultStatus(res) != PGRES_COMMAND_OK) - pg_fatal("Unexpected result code %s from first item in second pipeline", - PQresStatus(PQresultStatus(res))); - PQclear(res); + consume_result_status(conn, PGRES_COMMAND_OK); /* Read the NULL result at the end of the command */ - if ((res = PQgetResult(conn)) != NULL) - pg_fatal("Expected null result, got %s", PQresStatus(PQresultStatus(res))); + consume_null_result(conn); /* the second pipeline sync */ - if ((res = PQgetResult(conn)) == NULL) - pg_fatal("Unexpected NULL result: %s", PQerrorMessage(conn)); - if (PQresultStatus(res) != PGRES_PIPELINE_SYNC) - pg_fatal("Unexpected result code %s from second pipeline sync", - PQresStatus(PQresultStatus(res))); - PQclear(res); + consume_result_status(conn, PGRES_PIPELINE_SYNC); - if ((res = PQgetResult(conn)) != NULL) - pg_fatal("Expected null result, got %s: %s", - PQresStatus(PQresultStatus(res)), - PQerrorMessage(conn)); + /* Read the NULL result at the end of the command */ + consume_null_result(conn); /* Try to send two queries in one command */ if (PQsendQueryParams(conn, "SELECT 1; SELECT 2", 0, NULL, NULL, NULL, NULL, 0) != 1) @@ -890,15 +825,14 @@ test_pipeline_abort(PGconn *conn) pg_fatal("got unexpected status %s", PQresStatus(PQresultStatus(res))); break; } + PQclear(res); } if (!goterror) pg_fatal("did not get cannot-insert-multiple-commands error"); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("got NULL result"); - if (PQresultStatus(res) != PGRES_PIPELINE_SYNC) - pg_fatal("Unexpected result code %s from pipeline sync", - PQresStatus(PQresultStatus(res))); + + /* the second pipeline sync */ + consume_result_status(conn, PGRES_PIPELINE_SYNC); + fprintf(stderr, "ok\n"); /* Test single-row mode with an error partways */ @@ -935,13 
+869,9 @@ test_pipeline_abort(PGconn *conn) pg_fatal("did not get division-by-zero error"); if (gotrows != 3) pg_fatal("did not get three rows"); + /* the third pipeline sync */ - if ((res = PQgetResult(conn)) == NULL) - pg_fatal("Unexpected NULL result: %s", PQerrorMessage(conn)); - if (PQresultStatus(res) != PGRES_PIPELINE_SYNC) - pg_fatal("Unexpected result code %s from third pipeline sync", - PQresStatus(PQresultStatus(res))); - PQclear(res); + consume_result_status(conn, PGRES_PIPELINE_SYNC); /* We're still in pipeline mode... */ if (PQpipelineStatus(conn) == PQ_PIPELINE_OFF) @@ -1274,21 +1204,11 @@ test_prepared(PGconn *conn) if (PQpipelineSync(conn) != 1) pg_fatal("pipeline sync failed: %s", PQerrorMessage(conn)); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("PQgetResult returned null"); - if (PQresultStatus(res) != PGRES_COMMAND_OK) - pg_fatal("expected COMMAND_OK, got %s", PQresStatus(PQresultStatus(res))); - PQclear(res); - res = PQgetResult(conn); - if (res != NULL) - pg_fatal("expected NULL result"); + consume_result_status(conn, PGRES_COMMAND_OK); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("PQgetResult returned NULL"); - if (PQresultStatus(res) != PGRES_COMMAND_OK) - pg_fatal("expected COMMAND_OK, got %s", PQresStatus(PQresultStatus(res))); + consume_null_result(conn); + + res = confirm_result_status(conn, PGRES_COMMAND_OK); if (PQnfields(res) != lengthof(expected_oids)) pg_fatal("expected %zu columns, got %d", lengthof(expected_oids), PQnfields(res)); @@ -1300,13 +1220,10 @@ test_prepared(PGconn *conn) i, expected_oids[i], typ); } PQclear(res); - res = PQgetResult(conn); - if (res != NULL) - pg_fatal("expected NULL result"); - res = PQgetResult(conn); - if (PQresultStatus(res) != PGRES_PIPELINE_SYNC) - pg_fatal("expected PGRES_PIPELINE_SYNC, got %s", PQresStatus(PQresultStatus(res))); + consume_null_result(conn); + + consume_result_status(conn, PGRES_PIPELINE_SYNC); fprintf(stderr, "closing statement.."); if (PQsendClosePrepared(conn, "select_one") != 1) @@ -1314,18 +1231,11 @@ test_prepared(PGconn *conn) if (PQpipelineSync(conn) != 1) pg_fatal("pipeline sync failed: %s", PQerrorMessage(conn)); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("expected non-NULL result"); - if (PQresultStatus(res) != PGRES_COMMAND_OK) - pg_fatal("expected COMMAND_OK, got %s", PQresStatus(PQresultStatus(res))); - PQclear(res); - res = PQgetResult(conn); - if (res != NULL) - pg_fatal("expected NULL result"); - res = PQgetResult(conn); - if (PQresultStatus(res) != PGRES_PIPELINE_SYNC) - pg_fatal("expected PGRES_PIPELINE_SYNC, got %s", PQresStatus(PQresultStatus(res))); + consume_result_status(conn, PGRES_COMMAND_OK); + + consume_null_result(conn); + + consume_result_status(conn, PGRES_PIPELINE_SYNC); if (PQexitPipelineMode(conn) != 1) pg_fatal("could not exit pipeline mode: %s", PQerrorMessage(conn)); @@ -1334,6 +1244,7 @@ test_prepared(PGconn *conn) res = PQdescribePrepared(conn, "select_one"); if (PQresultStatus(res) != PGRES_FATAL_ERROR) pg_fatal("expected FATAL_ERROR, got %s", PQresStatus(PQresultStatus(res))); + PQclear(res); /* * Also test the blocking close, this should not fail since closing a @@ -1342,32 +1253,36 @@ test_prepared(PGconn *conn) res = PQclosePrepared(conn, "select_one"); if (PQresultStatus(res) != PGRES_COMMAND_OK) pg_fatal("expected COMMAND_OK, got %s", PQresStatus(PQresultStatus(res))); + PQclear(res); fprintf(stderr, "creating portal... 
"); - PQexec(conn, "BEGIN"); - PQexec(conn, "DECLARE cursor_one CURSOR FOR SELECT 1"); + + res = PQexec(conn, "BEGIN"); + if (PQresultStatus(res) != PGRES_COMMAND_OK) + pg_fatal("BEGIN failed: %s", PQerrorMessage(conn)); + PQclear(res); + + res = PQexec(conn, "DECLARE cursor_one CURSOR FOR SELECT 1"); + if (PQresultStatus(res) != PGRES_COMMAND_OK) + pg_fatal("DECLARE CURSOR failed: %s", PQerrorMessage(conn)); + PQclear(res); + PQenterPipelineMode(conn); if (PQsendDescribePortal(conn, "cursor_one") != 1) pg_fatal("PQsendDescribePortal failed: %s", PQerrorMessage(conn)); if (PQpipelineSync(conn) != 1) pg_fatal("pipeline sync failed: %s", PQerrorMessage(conn)); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("PQgetResult returned null"); - if (PQresultStatus(res) != PGRES_COMMAND_OK) - pg_fatal("expected COMMAND_OK, got %s", PQresStatus(PQresultStatus(res))); + res = confirm_result_status(conn, PGRES_COMMAND_OK); typ = PQftype(res, 0); if (typ != INT4OID) pg_fatal("portal: expected type %u, got %u", INT4OID, typ); PQclear(res); - res = PQgetResult(conn); - if (res != NULL) - pg_fatal("expected NULL result"); - res = PQgetResult(conn); - if (PQresultStatus(res) != PGRES_PIPELINE_SYNC) - pg_fatal("expected PGRES_PIPELINE_SYNC, got %s", PQresStatus(PQresultStatus(res))); + + consume_null_result(conn); + + consume_result_status(conn, PGRES_PIPELINE_SYNC); fprintf(stderr, "closing portal... "); if (PQsendClosePortal(conn, "cursor_one") != 1) @@ -1375,18 +1290,11 @@ test_prepared(PGconn *conn) if (PQpipelineSync(conn) != 1) pg_fatal("pipeline sync failed: %s", PQerrorMessage(conn)); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("expected non-NULL result"); - if (PQresultStatus(res) != PGRES_COMMAND_OK) - pg_fatal("expected COMMAND_OK, got %s", PQresStatus(PQresultStatus(res))); - PQclear(res); - res = PQgetResult(conn); - if (res != NULL) - pg_fatal("expected NULL result"); - res = PQgetResult(conn); - if (PQresultStatus(res) != PGRES_PIPELINE_SYNC) - pg_fatal("expected PGRES_PIPELINE_SYNC, got %s", PQresStatus(PQresultStatus(res))); + consume_result_status(conn, PGRES_COMMAND_OK); + + consume_null_result(conn); + + consume_result_status(conn, PGRES_PIPELINE_SYNC); if (PQexitPipelineMode(conn) != 1) pg_fatal("could not exit pipeline mode: %s", PQerrorMessage(conn)); @@ -1395,6 +1303,7 @@ test_prepared(PGconn *conn) res = PQdescribePortal(conn, "cursor_one"); if (PQresultStatus(res) != PGRES_FATAL_ERROR) pg_fatal("expected FATAL_ERROR, got %s", PQresStatus(PQresultStatus(res))); + PQclear(res); /* * Also test the blocking close, this should not fail since closing a @@ -1403,6 +1312,7 @@ test_prepared(PGconn *conn) res = PQclosePortal(conn, "cursor_one"); if (PQresultStatus(res) != PGRES_COMMAND_OK) pg_fatal("expected COMMAND_OK, got %s", PQresStatus(PQresultStatus(res))); + PQclear(res); fprintf(stderr, "ok\n"); } @@ -1509,6 +1419,10 @@ test_protocol_version(PGconn *conn) pg_fatal("expected 30002, got %d", protocol_version); PQfinish(conn); + + pfree(keywords); + pfree(vals); + PQconninfoFree(opts); } /* Notice processor: print notices, and count how many we got */ @@ -1525,7 +1439,6 @@ notice_processor(void *arg, const char *message) static void test_pipeline_idle(PGconn *conn) { - PGresult *res; int n_notices = 0; fprintf(stderr, "\npipeline idle...\n"); @@ -1538,17 +1451,11 @@ test_pipeline_idle(PGconn *conn) if (PQsendQueryParams(conn, "SELECT 1", 0, NULL, NULL, NULL, NULL, 0) != 1) pg_fatal("failed to send query: %s", PQerrorMessage(conn)); PQsendFlushRequest(conn); - res = 
PQgetResult(conn); - if (res == NULL) - pg_fatal("PQgetResult returned null when there's a pipeline item: %s", - PQerrorMessage(conn)); - if (PQresultStatus(res) != PGRES_TUPLES_OK) - pg_fatal("unexpected result code %s from first pipeline item", - PQresStatus(PQresultStatus(res))); - PQclear(res); - res = PQgetResult(conn); - if (res != NULL) - pg_fatal("did not receive terminating NULL"); + + consume_result_status(conn, PGRES_TUPLES_OK); + + consume_null_result(conn); + if (PQsendQueryParams(conn, "SELECT 2", 0, NULL, NULL, NULL, NULL, 0) != 1) pg_fatal("failed to send query: %s", PQerrorMessage(conn)); if (PQexitPipelineMode(conn) == 1) @@ -1558,14 +1465,11 @@ test_pipeline_idle(PGconn *conn) pg_fatal("did not get expected error; got: %s", PQerrorMessage(conn)); PQsendFlushRequest(conn); - res = PQgetResult(conn); - if (PQresultStatus(res) != PGRES_TUPLES_OK) - pg_fatal("unexpected result code %s from second pipeline item", - PQresStatus(PQresultStatus(res))); - PQclear(res); - res = PQgetResult(conn); - if (res != NULL) - pg_fatal("did not receive terminating NULL"); + + consume_result_status(conn, PGRES_TUPLES_OK); + + consume_null_result(conn); + if (PQexitPipelineMode(conn) != 1) pg_fatal("exiting pipeline failed: %s", PQerrorMessage(conn)); @@ -1579,11 +1483,9 @@ test_pipeline_idle(PGconn *conn) if (PQsendQueryParams(conn, "SELECT pg_catalog.pg_advisory_unlock(1,1)", 0, NULL, NULL, NULL, NULL, 0) != 1) pg_fatal("failed to send query: %s", PQerrorMessage(conn)); PQsendFlushRequest(conn); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("unexpected NULL result received"); - if (PQresultStatus(res) != PGRES_TUPLES_OK) - pg_fatal("unexpected result code %s", PQresStatus(PQresultStatus(res))); + + consume_result_status(conn, PGRES_TUPLES_OK); + if (PQexitPipelineMode(conn) != 1) pg_fatal("failed to exit pipeline mode: %s", PQerrorMessage(conn)); fprintf(stderr, "ok - 2\n"); @@ -1592,7 +1494,6 @@ test_pipeline_idle(PGconn *conn) static void test_simple_pipeline(PGconn *conn) { - PGresult *res = NULL; const char *dummy_params[1] = {"1"}; Oid dummy_param_oids[1] = {INT4OID}; @@ -1623,20 +1524,9 @@ test_simple_pipeline(PGconn *conn) if (PQpipelineSync(conn) != 1) pg_fatal("pipeline sync failed: %s", PQerrorMessage(conn)); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("PQgetResult returned null when there's a pipeline item: %s", - PQerrorMessage(conn)); - - if (PQresultStatus(res) != PGRES_TUPLES_OK) - pg_fatal("Unexpected result code %s from first pipeline item", - PQresStatus(PQresultStatus(res))); - - PQclear(res); - res = NULL; + consume_result_status(conn, PGRES_TUPLES_OK); - if (PQgetResult(conn) != NULL) - pg_fatal("PQgetResult returned something extra after first query result."); + consume_null_result(conn); /* * Even though we've processed the result there's still a sync to come and @@ -1645,21 +1535,9 @@ test_simple_pipeline(PGconn *conn) if (PQexitPipelineMode(conn) != 0) pg_fatal("exiting pipeline mode after query but before sync succeeded incorrectly"); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("PQgetResult returned null when sync result PGRES_PIPELINE_SYNC expected: %s", - PQerrorMessage(conn)); - - if (PQresultStatus(res) != PGRES_PIPELINE_SYNC) - pg_fatal("Unexpected result code %s instead of PGRES_PIPELINE_SYNC, error: %s", - PQresStatus(PQresultStatus(res)), PQerrorMessage(conn)); - - PQclear(res); - res = NULL; + consume_result_status(conn, PGRES_PIPELINE_SYNC); - if (PQgetResult(conn) != NULL) - pg_fatal("PQgetResult returned something 
extra after pipeline end: %s", - PQresStatus(PQresultStatus(res))); + consume_null_result(conn); /* We're still in pipeline mode... */ if (PQpipelineStatus(conn) == PQ_PIPELINE_OFF) @@ -1792,20 +1670,12 @@ test_singlerowmode(PGconn *conn) pg_fatal("failed to send flush request"); if (PQsetSingleRowMode(conn) != 1) pg_fatal("PQsetSingleRowMode() failed"); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("unexpected NULL"); - if (PQresultStatus(res) != PGRES_SINGLE_TUPLE) - pg_fatal("Expected PGRES_SINGLE_TUPLE, got %s", - PQresStatus(PQresultStatus(res))); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("unexpected NULL"); - if (PQresultStatus(res) != PGRES_TUPLES_OK) - pg_fatal("Expected PGRES_TUPLES_OK, got %s", - PQresStatus(PQresultStatus(res))); - if (PQgetResult(conn) != NULL) - pg_fatal("expected NULL result"); + + consume_result_status(conn, PGRES_SINGLE_TUPLE); + + consume_result_status(conn, PGRES_TUPLES_OK); + + consume_null_result(conn); if (PQsendQueryParams(conn, "SELECT 1", 0, NULL, NULL, NULL, NULL, 0) != 1) @@ -1813,14 +1683,10 @@ test_singlerowmode(PGconn *conn) PQerrorMessage(conn)); if (PQsendFlushRequest(conn) != 1) pg_fatal("failed to send flush request"); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("unexpected NULL"); - if (PQresultStatus(res) != PGRES_TUPLES_OK) - pg_fatal("Expected PGRES_TUPLES_OK, got %s", - PQresStatus(PQresultStatus(res))); - if (PQgetResult(conn) != NULL) - pg_fatal("expected NULL result"); + + consume_result_status(conn, PGRES_TUPLES_OK); + + consume_null_result(conn); /* * Try chunked mode as well; make sure that it correctly delivers a @@ -1834,33 +1700,23 @@ test_singlerowmode(PGconn *conn) pg_fatal("failed to send flush request"); if (PQsetChunkedRowsMode(conn, 3) != 1) pg_fatal("PQsetChunkedRowsMode() failed"); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("unexpected NULL"); - if (PQresultStatus(res) != PGRES_TUPLES_CHUNK) - pg_fatal("Expected PGRES_TUPLES_CHUNK, got %s: %s", - PQresStatus(PQresultStatus(res)), - PQerrorMessage(conn)); + + res = confirm_result_status(conn, PGRES_TUPLES_CHUNK); if (PQntuples(res) != 3) pg_fatal("Expected 3 rows, got %d", PQntuples(res)); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("unexpected NULL"); - if (PQresultStatus(res) != PGRES_TUPLES_CHUNK) - pg_fatal("Expected PGRES_TUPLES_CHUNK, got %s", - PQresStatus(PQresultStatus(res))); + PQclear(res); + + res = confirm_result_status(conn, PGRES_TUPLES_CHUNK); if (PQntuples(res) != 2) pg_fatal("Expected 2 rows, got %d", PQntuples(res)); - res = PQgetResult(conn); - if (res == NULL) - pg_fatal("unexpected NULL"); - if (PQresultStatus(res) != PGRES_TUPLES_OK) - pg_fatal("Expected PGRES_TUPLES_OK, got %s", - PQresStatus(PQresultStatus(res))); + PQclear(res); + + res = confirm_result_status(conn, PGRES_TUPLES_OK); if (PQntuples(res) != 0) pg_fatal("Expected 0 rows, got %d", PQntuples(res)); - if (PQgetResult(conn) != NULL) - pg_fatal("expected NULL result"); + PQclear(res); + + consume_null_result(conn); if (PQexitPipelineMode(conn) != 1) pg_fatal("failed to end pipeline mode: %s", PQerrorMessage(conn)); @@ -1995,9 +1851,8 @@ test_transaction(PGconn *conn) if (num_syncs <= 0) break; } - if (PQgetResult(conn) != NULL) - pg_fatal("returned something extra after all the syncs: %s", - PQresStatus(PQresultStatus(res))); + + consume_null_result(conn); if (PQexitPipelineMode(conn) != 1) pg_fatal("failed to end pipeline mode: %s", PQerrorMessage(conn)); @@ -2053,16 +1908,19 @@ test_uniqviol(PGconn *conn) "create table 
ppln_uniqviol(id bigint primary key, idata bigint)"); if (PQresultStatus(res) != PGRES_COMMAND_OK) pg_fatal("failed to create table: %s", PQerrorMessage(conn)); + PQclear(res); res = PQexec(conn, "begin"); if (PQresultStatus(res) != PGRES_COMMAND_OK) pg_fatal("failed to begin transaction: %s", PQerrorMessage(conn)); + PQclear(res); res = PQprepare(conn, "insertion", "insert into ppln_uniqviol values ($1, $2) returning id", 2, paramTypes); - if (res == NULL || PQresultStatus(res) != PGRES_COMMAND_OK) + if (PQresultStatus(res) != PGRES_COMMAND_OK) pg_fatal("failed to prepare query: %s", PQerrorMessage(conn)); + PQclear(res); if (PQenterPipelineMode(conn) != 1) pg_fatal("failed to enter pipeline mode"); @@ -2191,7 +2049,6 @@ test_uniqviol(PGconn *conn) static bool process_result(PGconn *conn, PGresult *res, int results, int numsent) { - PGresult *res2; bool got_error = false; if (res == NULL) @@ -2203,29 +2060,19 @@ process_result(PGconn *conn, PGresult *res, int results, int numsent) got_error = true; fprintf(stderr, "result %d/%d (error): %s\n", results, numsent, PQerrorMessage(conn)); PQclear(res); - - res2 = PQgetResult(conn); - if (res2 != NULL) - pg_fatal("expected NULL, got %s", - PQresStatus(PQresultStatus(res2))); + consume_null_result(conn); break; case PGRES_TUPLES_OK: fprintf(stderr, "result %d/%d: %s\n", results, numsent, PQgetvalue(res, 0, 0)); PQclear(res); - - res2 = PQgetResult(conn); - if (res2 != NULL) - pg_fatal("expected NULL, got %s", - PQresStatus(PQresultStatus(res2))); + consume_null_result(conn); break; case PGRES_PIPELINE_ABORTED: fprintf(stderr, "result %d/%d: pipeline aborted\n", results, numsent); - res2 = PQgetResult(conn); - if (res2 != NULL) - pg_fatal("expected NULL, got %s", - PQresStatus(PQresultStatus(res2))); + PQclear(res); + consume_null_result(conn); break; default: @@ -2271,7 +2118,7 @@ main(int argc, char **argv) { const char *conninfo = ""; PGconn *conn; - FILE *trace; + FILE *trace = NULL; char *testname; int numrows = 10000; PGresult *res; @@ -2332,9 +2179,11 @@ main(int argc, char **argv) res = PQexec(conn, "SET lc_messages TO \"C\""); if (PQresultStatus(res) != PGRES_COMMAND_OK) pg_fatal("failed to set \"lc_messages\": %s", PQerrorMessage(conn)); + PQclear(res); res = PQexec(conn, "SET debug_parallel_query = off"); if (PQresultStatus(res) != PGRES_COMMAND_OK) pg_fatal("failed to set \"debug_parallel_query\": %s", PQerrorMessage(conn)); + PQclear(res); /* Set the trace file, if requested */ if (tracefile != NULL) @@ -2388,5 +2237,9 @@ main(int argc, char **argv) /* close the connection to the database and cleanup */ PQfinish(conn); + + if (trace && trace != stdout) + fclose(trace); + return 0; } diff --git a/src/test/modules/meson.build b/src/test/modules/meson.build index dd5cd065ba10c..93be0f57289a8 100644 --- a/src/test/modules/meson.build +++ b/src/test/modules/meson.build @@ -24,6 +24,7 @@ subdir('test_dsm_registry') subdir('test_escape') subdir('test_extensions') subdir('test_ginpostinglist') +subdir('test_int128') subdir('test_integerset') subdir('test_json_parser') subdir('test_lfind') diff --git a/src/test/modules/oauth_validator/t/001_server.pl b/src/test/modules/oauth_validator/t/001_server.pl index 41672ebd5c6dc..c0dafb8be7642 100644 --- a/src/test/modules/oauth_validator/t/001_server.pl +++ b/src/test/modules/oauth_validator/t/001_server.pl @@ -418,6 +418,35 @@ sub connstr qr/failed to obtain access token: mutual TLS required for client \(invalid_client\)/ ); +# Count the number of calls to the internal flow when multiple retries are 
+# triggered. The exact number depends on many things -- the TCP stack, the +# version of Curl in use, random chance -- but a ridiculously high number +# suggests something is wrong with our ability to clear multiplexer events after +# they're no longer applicable. +my ($ret, $stdout, $stderr) = $node->psql( + 'postgres', + "SELECT 'connected for call count'", + extra_params => ['-w'], + connstr => connstr(stage => 'token', retries => 2), + on_error_stop => 0); + +is($ret, 0, "call count connection succeeds"); +like( + $stderr, + qr@Visit https://example\.com/ and enter the code: postgresuser@, + "call count: stderr matches"); + +my $count_pattern = qr/\[libpq\] total number of polls: (\d+)/; +if (like($stderr, $count_pattern, "call count: count is printed")) +{ + # For reference, a typical flow with two retries might take between 5-15 + # calls to the client implementation. And while this will probably continue + # to change across OSes and Curl updates, we're likely in trouble if we see + # hundreds or thousands of calls. + $stderr =~ $count_pattern; + cmp_ok($1, '<', 100, "call count is reasonably small"); +} + # Stress test: make sure our builtin flow operates correctly even if the client # application isn't respecting PGRES_POLLING_READING/WRITING signals returned # from PQconnectPoll(). @@ -428,7 +457,7 @@ sub connstr connstr(stage => 'all', retries => 1, interval => 1)); note "running '" . join("' '", @cmd) . "'"; -my ($stdout, $stderr) = run_command(\@cmd); +($stdout, $stderr) = run_command(\@cmd); like($stdout, qr/connection succeeded/, "stress-async: stdout matches"); unlike( diff --git a/src/test/modules/test_aio/t/001_aio.pl b/src/test/modules/test_aio/t/001_aio.pl index 82ffffc058f75..3f0453619e896 100644 --- a/src/test/modules/test_aio/t/001_aio.pl +++ b/src/test/modules/test_aio/t/001_aio.pl @@ -396,8 +396,8 @@ sub test_io_error { my $invalid_page_re = $tblname eq 'tbl_corr' - ? qr/invalid page in block 1 of relation base\/\d+\/\d+/ - : qr/invalid page in block 1 of relation base\/\d+\/t\d+_\d+/; + ? 
qr/invalid page in block 1 of relation "base\/\d+\/\d+/ + : qr/invalid page in block 1 of relation "base\/\d+\/t\d+_\d+/; # verify the error is reported in custom C code psql_like( @@ -798,7 +798,7 @@ sub test_inject "shortened multi-block read detects invalid page", qq(SELECT count(*) FROM tbl_corr WHERE ctid < '(2, 1)'), qr/^$/, - qr/ERROR:.*invalid page in block 1 of relation base\/.*/); + qr/ERROR:.*invalid page in block 1 of relation "base\/.*/); # trigger a hard error, should error out $psql->query_safe( @@ -985,7 +985,7 @@ sub test_zero qq( SELECT read_rel_block_ll('tbl_zero', 0, zero_on_error=>false)), qr/^$/, - qr/^psql::\d+: ERROR: invalid page in block 0 of relation base\/.*\/.*$/ + qr/^psql::\d+: ERROR: invalid page in block 0 of relation "base\/.*\/.*$/ ); # Check that page validity errors are zeroed @@ -996,7 +996,7 @@ sub test_zero qq( SELECT read_rel_block_ll('tbl_zero', 0, zero_on_error=>true)), qr/^$/, - qr/^psql::\d+: WARNING: invalid page in block 0 of relation base\/.*\/.*; zeroing out page$/ + qr/^psql::\d+: WARNING: invalid page in block 0 of relation "base\/.*\/.*"; zeroing out page$/ ); # And that once the corruption is fixed, we can read again @@ -1027,7 +1027,7 @@ sub test_zero "$persistency: test zeroing of invalid block 3", qq(SELECT read_rel_block_ll('tbl_zero', 3, zero_on_error=>true);), qr/^$/, - qr/^psql::\d+: WARNING: invalid page in block 3 of relation base\/.*\/.*; zeroing out page$/ + qr/^psql::\d+: WARNING: invalid page in block 3 of relation "base\/.*\/.*"; zeroing out page$/ ); @@ -1044,7 +1044,7 @@ sub test_zero "$persistency: test reading of invalid block 2,3 in larger read", qq(SELECT read_rel_block_ll('tbl_zero', 1, nblocks=>4, zero_on_error=>false)), qr/^$/, - qr/^psql::\d+: ERROR: 2 invalid pages among blocks 1..4 of relation base\/.*\/.*\nDETAIL: Block 2 held first invalid page\.\nHINT:[^\n]+$/ + qr/^psql::\d+: ERROR: 2 invalid pages among blocks 1..4 of relation "base\/.*\/.*\nDETAIL: Block 2 held the first invalid page\.\nHINT:[^\n]+$/ ); # Then test zeroing via ZERO_ON_ERROR flag @@ -1054,7 +1054,7 @@ sub test_zero "$persistency: test zeroing of invalid block 2,3 in larger read, ZERO_ON_ERROR", qq(SELECT read_rel_block_ll('tbl_zero', 1, nblocks=>4, zero_on_error=>true)), qr/^$/, - qr/^psql::\d+: WARNING: zeroing out 2 invalid pages among blocks 1..4 of relation base\/.*\/.*\nDETAIL: Block 2 held first zeroed page\.\nHINT:[^\n]+$/ + qr/^psql::\d+: WARNING: zeroing out 2 invalid pages among blocks 1..4 of relation "base\/.*\/.*\nDETAIL: Block 2 held the first zeroed page\.\nHINT:[^\n]+$/ ); # Then test zeroing via zero_damaged_pages @@ -1069,7 +1069,7 @@ sub test_zero COMMIT; ), qr/^$/, - qr/^psql::\d+: WARNING: zeroing out 2 invalid pages among blocks 1..4 of relation base\/.*\/.*\nDETAIL: Block 2 held first zeroed page\.\nHINT:[^\n]+$/ + qr/^psql::\d+: WARNING: zeroing out 2 invalid pages among blocks 1..4 of relation "base\/.*\/.*\nDETAIL: Block 2 held the first zeroed page\.\nHINT:[^\n]+$/ ); $psql_a->query_safe(qq(COMMIT)); @@ -1091,7 +1091,7 @@ sub test_zero qq( SELECT count(*) FROM tbl_zero), qr/^$/, - qr/^psql::\d+: ERROR: invalid page in block 2 of relation base\/.*\/.*$/ + qr/^psql::\d+: ERROR: invalid page in block 2 of relation "base\/.*\/.*$/ ); # Verify that bufmgr.c IO zeroes out pages with page validity errors @@ -1106,7 +1106,7 @@ sub test_zero COMMIT; ), qr/^\d+$/, - qr/^psql::\d+: WARNING: invalid page in block 2 of relation base\/.*\/.*$/ + qr/^psql::\d+: WARNING: invalid page in block 2 of relation "base\/.*\/.*$/ ); # 
Check that warnings/errors about page validity in an IO started by @@ -1192,7 +1192,7 @@ sub test_checksum qq( SELECT read_rel_block_ll('tbl_normal', 3, nblocks=>1, zero_on_error=>false);), qr/^$/, - qr/^psql::\d+: ERROR: invalid page in block 3 of relation base\/\d+\/\d+$/ + qr/^psql::\d+: ERROR: invalid page in block 3 of relation "base\/\d+\/\d+"$/ ); my ($cs_count_after, $cs_ts_after) = @@ -1214,7 +1214,7 @@ sub test_checksum qq( SELECT read_rel_block_ll('tbl_temp', 4, nblocks=>2, zero_on_error=>false);), qr/^$/, - qr/^psql::\d+: ERROR: invalid page in block 4 of relation base\/\d+\/t\d+_\d+$/ + qr/^psql::\d+: ERROR: invalid page in block 4 of relation "base\/\d+\/t\d+_\d+"$/ ); ($cs_count_after, $cs_ts_after) = checksum_failures($psql_a, 'postgres'); @@ -1235,7 +1235,7 @@ sub test_checksum qq( SELECT read_rel_block_ll('pg_shseclabel', 2, nblocks=>2, zero_on_error=>false);), qr/^$/, - qr/^psql::\d+: ERROR: 2 invalid pages among blocks 2..3 of relation global\/\d+\nDETAIL: Block 2 held first invalid page\.\nHINT:[^\n]+$/ + qr/^psql::\d+: ERROR: 2 invalid pages among blocks 2..3 of relation "global\/\d+"\nDETAIL: Block 2 held the first invalid page\.\nHINT:[^\n]+$/ ); ($cs_count_after, $cs_ts_after) = checksum_failures($psql_a); @@ -1300,7 +1300,7 @@ sub test_checksum_createdb "create database w/ wal strategy, invalid source", $createdb_sql, qr/^$/, - qr/psql::\d+: ERROR: invalid page in block 1 of relation base\/\d+\/\d+$/ + qr/psql::\d+: ERROR: invalid page in block 1 of relation "base\/\d+\/\d+"$/ ); my ($cs_count_after, $cs_ts_after) = checksum_failures($psql, 'regression_createdb_source'); @@ -1409,7 +1409,7 @@ sub test_ignore_checksum qq( SELECT read_rel_block_ll('tbl_cs_fail', 2, nblocks=>3, zero_on_error=>false);), qr/^$/, - qr/^psql::\d+: ERROR: invalid page in block 4 of relation base\/\d+\/\d+$/ + qr/^psql::\d+: ERROR: invalid page in block 4 of relation "base\/\d+\/\d+"$/ ); # Test multi-block read with different problems in different blocks @@ -1431,7 +1431,7 @@ sub test_ignore_checksum qq( SELECT read_rel_block_ll('tbl_cs_fail', 1, nblocks=>5, zero_on_error=>true);), qr/^$/, - qr/^psql::\d+: WARNING: zeroing 3 page\(s\) and ignoring 2 checksum failure\(s\) among blocks 1..5 of relation/ + qr/^psql::\d+: WARNING: zeroing 3 page\(s\) and ignoring 2 checksum failure\(s\) among blocks 1..5 of relation "/ ); @@ -1444,17 +1444,17 @@ sub test_ignore_checksum ok(1, "$io_method: found information about checksum failure in block 2"); $node->wait_for_log( - qr/LOG: invalid page in block 3 of relation base.*; zeroing out page/, + qr/LOG: invalid page in block 3 of relation "base.*"; zeroing out page/, $log_location); ok(1, "$io_method: found information about invalid page in block 3"); $node->wait_for_log( - qr/LOG: invalid page in block 4 of relation base.*; zeroing out page/, + qr/LOG: invalid page in block 4 of relation "base.*"; zeroing out page/, $log_location); ok(1, "$io_method: found information about checksum failure in block 4"); $node->wait_for_log( - qr/LOG: invalid page in block 5 of relation base.*; zeroing out page/, + qr/LOG: invalid page in block 5 of relation "base.*"; zeroing out page/, $log_location); ok(1, "$io_method: found information about checksum failure in block 5"); @@ -1473,7 +1473,7 @@ sub test_ignore_checksum qq( SELECT read_rel_block_ll('tbl_cs_fail', 3, nblocks=>1, zero_on_error=>false);), qr/^$/, - qr/^psql::\d+: ERROR: invalid page in block 3 of relation/); + qr/^psql::\d+: ERROR: invalid page in block 3 of relation "/); psql_like( $io_method, @@ 
-1482,7 +1482,7 @@ sub test_ignore_checksum qq( SELECT read_rel_block_ll('tbl_cs_fail', 3, nblocks=>1, zero_on_error=>true);), qr/^$/, - qr/^psql::\d+: WARNING: invalid page in block 3 of relation base\/.*; zeroing out page/ + qr/^psql::\d+: WARNING: invalid page in block 3 of relation "base\/.*"; zeroing out page/ ); diff --git a/src/test/modules/test_ddl_deparse/Makefile b/src/test/modules/test_ddl_deparse/Makefile index 3a57a95c84969..6a9c133ebe970 100644 --- a/src/test/modules/test_ddl_deparse/Makefile +++ b/src/test/modules/test_ddl_deparse/Makefile @@ -13,7 +13,7 @@ REGRESS = test_ddl_deparse \ create_type \ create_conversion \ create_domain \ - create_sequence_1 \ + create_sequence \ create_table \ create_transform \ alter_table \ diff --git a/src/test/modules/test_ddl_deparse/expected/create_sequence_1.out b/src/test/modules/test_ddl_deparse/expected/create_sequence.out similarity index 100% rename from src/test/modules/test_ddl_deparse/expected/create_sequence_1.out rename to src/test/modules/test_ddl_deparse/expected/create_sequence.out diff --git a/src/test/modules/test_ddl_deparse/meson.build b/src/test/modules/test_ddl_deparse/meson.build index bff65ba6333d8..e60aee3b1d3b6 100644 --- a/src/test/modules/test_ddl_deparse/meson.build +++ b/src/test/modules/test_ddl_deparse/meson.build @@ -33,7 +33,7 @@ tests += { 'create_type', 'create_conversion', 'create_domain', - 'create_sequence_1', + 'create_sequence', 'create_table', 'create_transform', 'alter_table', diff --git a/src/test/modules/test_ddl_deparse/sql/create_sequence_1.sql b/src/test/modules/test_ddl_deparse/sql/create_sequence.sql similarity index 100% rename from src/test/modules/test_ddl_deparse/sql/create_sequence_1.sql rename to src/test/modules/test_ddl_deparse/sql/create_sequence.sql diff --git a/src/test/modules/test_dsa/test_dsa.c b/src/test/modules/test_dsa/test_dsa.c index cd24d0f48736d..01d5c6fa67f0e 100644 --- a/src/test/modules/test_dsa/test_dsa.c +++ b/src/test/modules/test_dsa/test_dsa.c @@ -29,8 +29,7 @@ test_dsa_basic(PG_FUNCTION_ARGS) dsa_pointer p[100]; /* XXX: this tranche is leaked */ - tranche_id = LWLockNewTrancheId(); - LWLockRegisterTranche(tranche_id, "test_dsa"); + tranche_id = LWLockNewTrancheId("test_dsa"); a = dsa_create(tranche_id); for (int i = 0; i < 100; i++) @@ -70,8 +69,7 @@ test_dsa_resowners(PG_FUNCTION_ARGS) ResourceOwner childowner; /* XXX: this tranche is leaked */ - tranche_id = LWLockNewTrancheId(); - LWLockRegisterTranche(tranche_id, "test_dsa"); + tranche_id = LWLockNewTrancheId("test_dsa"); /* Create DSA in parent resource owner */ a = dsa_create(tranche_id); diff --git a/src/test/modules/test_dsm_registry/test_dsm_registry.c b/src/test/modules/test_dsm_registry/test_dsm_registry.c index 141c8ed1b34e3..4cc2ccdac3f11 100644 --- a/src/test/modules/test_dsm_registry/test_dsm_registry.c +++ b/src/test/modules/test_dsm_registry/test_dsm_registry.c @@ -48,7 +48,7 @@ init_tdr_dsm(void *ptr) { TestDSMRegistryStruct *dsm = (TestDSMRegistryStruct *) ptr; - LWLockInitialize(&dsm->lck, LWLockNewTrancheId()); + LWLockInitialize(&dsm->lck, LWLockNewTrancheId("test_dsm_registry")); dsm->val = 0; } @@ -61,7 +61,6 @@ tdr_attach_shmem(void) sizeof(TestDSMRegistryStruct), init_tdr_dsm, &found); - LWLockRegisterTranche(tdr_dsm->lck.tranche, "test_dsm_registry"); if (tdr_dsa == NULL) tdr_dsa = GetNamedDSA("test_dsm_registry_dsa", &found); diff --git a/src/test/modules/test_int128/.gitignore b/src/test/modules/test_int128/.gitignore new file mode 100644 index 0000000000000..277fec6ed2cd6 --- 
/dev/null +++ b/src/test/modules/test_int128/.gitignore @@ -0,0 +1,2 @@ +/tmp_check/ +/test_int128 diff --git a/src/test/modules/test_int128/Makefile b/src/test/modules/test_int128/Makefile new file mode 100644 index 0000000000000..2e86ee93a9d7c --- /dev/null +++ b/src/test/modules/test_int128/Makefile @@ -0,0 +1,23 @@ +# src/test/modules/test_int128/Makefile + +PGFILEDESC = "test_int128 - test 128-bit integer arithmetic" + +PROGRAM = test_int128 +OBJS = $(WIN32RES) test_int128.o + +PG_CPPFLAGS = -I$(libpq_srcdir) +PG_LIBS_INTERNAL += $(libpq_pgport) + +NO_INSTALL = 1 +TAP_TESTS = 1 + +ifdef USE_PGXS +PG_CONFIG = pg_config +PGXS := $(shell $(PG_CONFIG) --pgxs) +include $(PGXS) +else +subdir = src/test/modules/test_int128 +top_builddir = ../../../.. +include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/contrib/contrib-global.mk +endif diff --git a/src/test/modules/test_int128/meson.build b/src/test/modules/test_int128/meson.build new file mode 100644 index 0000000000000..4c2be7a0326f7 --- /dev/null +++ b/src/test/modules/test_int128/meson.build @@ -0,0 +1,33 @@ +# Copyright (c) 2025, PostgreSQL Global Development Group + +test_int128_sources = files( + 'test_int128.c', +) + +if host_system == 'windows' + test_int128_sources += rc_bin_gen.process(win32ver_rc, extra_args: [ + '--NAME', 'test_int128', + '--FILEDESC', 'test int128 program',]) +endif + +test_int128 = executable('test_int128', + test_int128_sources, + dependencies: [frontend_code, libpq], + kwargs: default_bin_args + { + 'install': false, + }, +) +testprep_targets += test_int128 + + +tests += { + 'name': 'test_int128', + 'sd': meson.current_source_dir(), + 'bd': meson.current_build_dir(), + 'tap': { + 'tests': [ + 't/001_test_int128.pl', + ], + 'deps': [test_int128], + }, +} diff --git a/src/test/modules/test_int128/t/001_test_int128.pl b/src/test/modules/test_int128/t/001_test_int128.pl new file mode 100644 index 0000000000000..0c683869f34e7 --- /dev/null +++ b/src/test/modules/test_int128/t/001_test_int128.pl @@ -0,0 +1,27 @@ +# Copyright (c) 2025, PostgreSQL Global Development Group + +# Test 128-bit integer arithmetic code in int128.h + +use strict; +use warnings FATAL => 'all'; + +use PostgreSQL::Test::Utils; +use Test::More; + +# Run the test program with 1M iterations +my $exe = "test_int128"; +my $size = 1_000_000; + +note "testing executable $exe"; + +my ($stdout, $stderr) = run_command([ $exe, $size ]); + +SKIP: +{ + skip "no native int128 type", 2 if $stdout =~ /skipping tests/; + + is($stdout, "", "test_int128: no stdout"); + is($stderr, "", "test_int128: no stderr"); +} + +done_testing(); diff --git a/src/test/modules/test_int128/test_int128.c b/src/test/modules/test_int128/test_int128.c new file mode 100644 index 0000000000000..c9c17a73a4e85 --- /dev/null +++ b/src/test/modules/test_int128/test_int128.c @@ -0,0 +1,281 @@ +/*------------------------------------------------------------------------- + * + * test_int128.c + * Testbed for roll-our-own 128-bit integer arithmetic. + * + * This is a standalone test program that compares the behavior of an + * implementation in int128.h to an (assumed correct) int128 native type. 
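+ * Any mismatch between the two is reported on stdout and terminates the + * program with exit status 1, so a successful run produces no output.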
+ * + * Copyright (c) 2017-2025, PostgreSQL Global Development Group + * + * + * IDENTIFICATION + * src/test/modules/test_int128/test_int128.c + * + *------------------------------------------------------------------------- + */ + +#include "postgres_fe.h" + +#include <time.h> + +/* Require a native int128 type */ +#ifdef HAVE_INT128 + +/* + * By default, we test the non-native implementation in int128.h; but + * by predefining USE_NATIVE_INT128 to 1, you can test the native + * implementation, just to be sure. + */ +#ifndef USE_NATIVE_INT128 +#define USE_NATIVE_INT128 0 +#endif + +#include "common/int128.h" +#include "common/pg_prng.h" + +/* + * We assume the parts of this union are laid out compatibly. + */ +typedef union +{ + int128 i128; + INT128 I128; + struct + { +#ifdef WORDS_BIGENDIAN + int64 hi; + uint64 lo; +#else + uint64 lo; + int64 hi; +#endif + } hl; +} test128; + +#define INT128_HEX_FORMAT "%016" PRIx64 "%016" PRIx64 + +/* + * Control version of comparator. + */ +static inline int +my_int128_compare(int128 x, int128 y) +{ + if (x < y) + return -1; + if (x > y) + return 1; + return 0; +} + +/* + * Main program. + * + * Generates a lot of random numbers and tests the implementation for each. + * The results will vary from run to run, since the PRNG is seeded from + * the current time. + * + * You can give a loop count if you don't like the default 1B iterations. + */ +int +main(int argc, char **argv) +{ + long count; + + pg_prng_seed(&pg_global_prng_state, (uint64) time(NULL)); + + if (argc >= 2) + count = strtol(argv[1], NULL, 0); + else + count = 1000000000; + + while (count-- > 0) + { + int64 x = pg_prng_uint64(&pg_global_prng_state); + int64 y = pg_prng_uint64(&pg_global_prng_state); + int64 z = pg_prng_uint64(&pg_global_prng_state); + int64 w = pg_prng_uint64(&pg_global_prng_state); + int32 z32 = (int32) z; + test128 t1; + test128 t2; + test128 t3; + int32 r1; + int32 r2; + + /* check unsigned addition */ + t1.hl.hi = x; + t1.hl.lo = y; + t2 = t1; + t1.i128 += (int128) (uint64) z; + int128_add_uint64(&t2.I128, (uint64) z); + + if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo) + { + printf(INT128_HEX_FORMAT " + unsigned %016" PRIx64 "\n", x, y, z); + printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo); + printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo); + return 1; + } + + /* check signed addition */ + t1.hl.hi = x; + t1.hl.lo = y; + t2 = t1; + t1.i128 += (int128) z; + int128_add_int64(&t2.I128, z); + + if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo) + { + printf(INT128_HEX_FORMAT " + signed %016" PRIx64 "\n", x, y, z); + printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo); + printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo); + return 1; + } + + /* check 128-bit signed addition */ + t1.hl.hi = x; + t1.hl.lo = y; + t2 = t1; + t3.hl.hi = z; + t3.hl.lo = w; + t1.i128 += t3.i128; + int128_add_int128(&t2.I128, t3.I128); + + if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo) + { + printf(INT128_HEX_FORMAT " + " INT128_HEX_FORMAT "\n", x, y, z, w); + printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo); + printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo); + return 1; + } + + /* check unsigned subtraction */ + t1.hl.hi = x; + t1.hl.lo = y; + t2 = t1; + t1.i128 -= (int128) (uint64) z; + int128_sub_uint64(&t2.I128, (uint64) z); + + if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo) + { + printf(INT128_HEX_FORMAT " - unsigned %016" PRIx64 "\n", x, y, z); + printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo); + printf("result = " 
INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo); + return 1; + } + + /* check signed subtraction */ + t1.hl.hi = x; + t1.hl.lo = y; + t2 = t1; + t1.i128 -= (int128) z; + int128_sub_int64(&t2.I128, z); + + if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo) + { + printf(INT128_HEX_FORMAT " - signed %016" PRIx64 "\n", x, y, z); + printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo); + printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo); + return 1; + } + + /* check 64x64-bit multiply-add */ + t1.hl.hi = x; + t1.hl.lo = y; + t2 = t1; + t1.i128 += (int128) z * (int128) w; + int128_add_int64_mul_int64(&t2.I128, z, w); + + if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo) + { + printf(INT128_HEX_FORMAT " + %016" PRIx64 " * %016" PRIx64 "\n", x, y, z, w); + printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo); + printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo); + return 1; + } + + /* check 64x64-bit multiply-subtract */ + t1.hl.hi = x; + t1.hl.lo = y; + t2 = t1; + t1.i128 -= (int128) z * (int128) w; + int128_sub_int64_mul_int64(&t2.I128, z, w); + + if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo) + { + printf(INT128_HEX_FORMAT " - %016" PRIx64 " * %016" PRIx64 "\n", x, y, z, w); + printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo); + printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo); + return 1; + } + + /* check 128/32-bit division */ + t3.hl.hi = x; + t3.hl.lo = y; + t1.i128 = t3.i128 / z32; + r1 = (int32) (t3.i128 % z32); + t2 = t3; + int128_div_mod_int32(&t2.I128, z32, &r2); + + if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo) + { + printf(INT128_HEX_FORMAT " / signed %08X\n", t3.hl.hi, t3.hl.lo, z32); + printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo); + printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo); + return 1; + } + if (r1 != r2) + { + printf(INT128_HEX_FORMAT " %% signed %08X\n", t3.hl.hi, t3.hl.lo, z32); + printf("native = %08X\n", r1); + printf("result = %08X\n", r2); + return 1; + } + + /* check comparison */ + t1.hl.hi = x; + t1.hl.lo = y; + t2.hl.hi = z; + t2.hl.lo = w; + + if (my_int128_compare(t1.i128, t2.i128) != + int128_compare(t1.I128, t2.I128)) + { + printf("comparison failure: %d vs %d\n", + my_int128_compare(t1.i128, t2.i128), + int128_compare(t1.I128, t2.I128)); + printf("arg1 = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo); + printf("arg2 = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo); + return 1; + } + + /* check case with identical hi parts; above will hardly ever hit it */ + t2.hl.hi = x; + + if (my_int128_compare(t1.i128, t2.i128) != + int128_compare(t1.I128, t2.I128)) + { + printf("comparison failure: %d vs %d\n", + my_int128_compare(t1.i128, t2.i128), + int128_compare(t1.I128, t2.I128)); + printf("arg1 = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo); + printf("arg2 = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo); + return 1; + } + } + + return 0; +} + +#else /* ! HAVE_INT128 */ + +/* + * For now, do nothing if we don't have a native int128 type. 
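+ * (The TAP test looks for the "skipping tests" message printed below and + * skips its checks when it appears.)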
+ */ +int +main(int argc, char **argv) +{ + printf("skipping tests: no native int128 type\n"); + return 0; +} + +#endif diff --git a/src/test/modules/test_misc/Makefile b/src/test/modules/test_misc/Makefile index 919a25fc67fd3..399b9094a3880 100644 --- a/src/test/modules/test_misc/Makefile +++ b/src/test/modules/test_misc/Makefile @@ -2,7 +2,8 @@ TAP_TESTS = 1 -EXTRA_INSTALL=src/test/modules/injection_points +EXTRA_INSTALL=src/test/modules/injection_points \ + contrib/test_decoding export enable_injection_points diff --git a/src/test/modules/test_misc/meson.build b/src/test/modules/test_misc/meson.build index 9c50de7efb0f7..6b1e730bf46d0 100644 --- a/src/test/modules/test_misc/meson.build +++ b/src/test/modules/test_misc/meson.build @@ -16,6 +16,7 @@ tests += { 't/005_timeouts.pl', 't/006_signal_autovacuum.pl', 't/007_catcache_inval.pl', + 't/008_replslot_single_user.pl', ], }, } diff --git a/src/test/modules/test_misc/t/008_replslot_single_user.pl b/src/test/modules/test_misc/t/008_replslot_single_user.pl new file mode 100644 index 0000000000000..796700d621f88 --- /dev/null +++ b/src/test/modules/test_misc/t/008_replslot_single_user.pl @@ -0,0 +1,95 @@ +# Copyright (c) 2025, PostgreSQL Global Development Group + +# Test manipulation of replication slots in single-user mode. + +use strict; +use warnings FATAL => 'all'; +use PostgreSQL::Test::Cluster; +use PostgreSQL::Test::Utils; +use Test::More; + +# Skip the tests on Windows, as single-user mode would fail with a +# permission failure under privileged accounts. +if ($windows_os) +{ + plan skip_all => 'this test is not supported by this platform'; +} + +# Run a set of queries in single-user mode. +sub test_single_mode +{ + my ($node, $queries, $testname) = @_; + + my $result = run_log( + [ + 'postgres', '--single', '-F', + '-c' => 'exit_on_error=true', + '-D' => $node->data_dir, + 'postgres' + ], + '<' => \$queries); + + ok($result, $testname); +} + +my $slot_logical = 'slot_logical'; +my $slot_physical = 'slot_physical'; + +# Initialize a node +my $node = PostgreSQL::Test::Cluster->new('node'); +$node->init(allows_streaming => "logical"); +$node->start; + +# Define initial table +$node->safe_psql('postgres', "CREATE TABLE foo (id int)"); + +$node->stop; + +test_single_mode( + $node, + "SELECT pg_create_logical_replication_slot('$slot_logical', 'test_decoding')", + "logical slot creation"); +test_single_mode( + $node, + "SELECT pg_create_physical_replication_slot('$slot_physical', true)", + "physical slot creation"); +test_single_mode( + $node, + "SELECT pg_create_physical_replication_slot('slot_tmp', true, true)", + "temporary physical slot creation"); + +test_single_mode( + $node, qq( +INSERT INTO foo VALUES (1); +SELECT pg_logical_slot_get_changes('$slot_logical', NULL, NULL); +), + "logical decoding"); + +test_single_mode( + $node, + "SELECT pg_replication_slot_advance('$slot_logical', pg_current_wal_lsn())", + "logical slot advance"); +test_single_mode( + $node, + "SELECT pg_replication_slot_advance('$slot_physical', pg_current_wal_lsn())", + "physical slot advance"); + +test_single_mode( + $node, + "SELECT pg_copy_logical_replication_slot('$slot_logical', 'slot_log_copy')", + "logical slot copy"); +test_single_mode( + $node, + "SELECT pg_copy_physical_replication_slot('$slot_physical', 'slot_phy_copy')", + "physical slot copy"); + +test_single_mode( + $node, + "SELECT pg_drop_replication_slot('$slot_logical')", + "logical slot drop"); +test_single_mode( + $node, + "SELECT pg_drop_replication_slot('$slot_physical')", + "physical slot drop"); + 
+done_testing(); diff --git a/src/test/modules/test_radixtree/test_radixtree.c b/src/test/modules/test_radixtree/test_radixtree.c index 32de6a3123e4d..787162c879330 100644 --- a/src/test/modules/test_radixtree/test_radixtree.c +++ b/src/test/modules/test_radixtree/test_radixtree.c @@ -44,7 +44,7 @@ uint64 _expected = (expected_expr); \ if (_result != _expected) \ elog(ERROR, \ - "%s yielded " UINT64_HEX_FORMAT ", expected " UINT64_HEX_FORMAT " (%s) in file \"%s\" line %u", \ + "%s yielded %" PRIx64 ", expected %" PRIx64 " (%s) in file \"%s\" line %u", \ #result_expr, _result, _expected, #expected_expr, __FILE__, __LINE__); \ } while (0) @@ -124,10 +124,9 @@ test_empty(void) rt_iter *iter; uint64 key; #ifdef TEST_SHARED_RT - int tranche_id = LWLockNewTrancheId(); + int tranche_id = LWLockNewTrancheId("test_radix_tree"); dsa_area *dsa; - LWLockRegisterTranche(tranche_id, "test_radix_tree"); dsa = dsa_create(tranche_id); radixtree = rt_create(dsa, tranche_id); #else @@ -167,10 +166,9 @@ test_basic(rt_node_class_test_elem *test_info, int shift, bool asc) uint64 *keys; int children = test_info->nkeys; #ifdef TEST_SHARED_RT - int tranche_id = LWLockNewTrancheId(); + int tranche_id = LWLockNewTrancheId("test_radix_tree"); dsa_area *dsa; - LWLockRegisterTranche(tranche_id, "test_radix_tree"); dsa = dsa_create(tranche_id); radixtree = rt_create(dsa, tranche_id); #else @@ -304,10 +302,9 @@ test_random(void) int num_keys = 100000; uint64 *keys; #ifdef TEST_SHARED_RT - int tranche_id = LWLockNewTrancheId(); + int tranche_id = LWLockNewTrancheId("test_radix_tree"); dsa_area *dsa; - LWLockRegisterTranche(tranche_id, "test_radix_tree"); dsa = dsa_create(tranche_id); radixtree = rt_create(dsa, tranche_id); #else diff --git a/src/test/modules/test_slru/test_slru.c b/src/test/modules/test_slru/test_slru.c index 32750930e433d..e963466aef1cd 100644 --- a/src/test/modules/test_slru/test_slru.c +++ b/src/test/modules/test_slru/test_slru.c @@ -219,8 +219,8 @@ test_slru_shmem_startup(void) */ const bool long_segment_names = true; const char slru_dir_name[] = "pg_test_slru"; - int test_tranche_id; - int test_buffer_tranche_id; + int test_tranche_id = -1; + int test_buffer_tranche_id = -1; if (prev_shmem_startup_hook) prev_shmem_startup_hook(); @@ -231,12 +231,18 @@ test_slru_shmem_startup(void) */ (void) MakePGDirectory(slru_dir_name); - /* initialize the SLRU facility */ - test_tranche_id = LWLockNewTrancheId(); - LWLockRegisterTranche(test_tranche_id, "test_slru_tranche"); - - test_buffer_tranche_id = LWLockNewTrancheId(); - LWLockRegisterTranche(test_tranche_id, "test_buffer_tranche"); + /* + * Initialize the SLRU facility. In EXEC_BACKEND builds, the + * shmem_startup_hook is called in the postmaster and in each backend, but + * we only need to generate the LWLock tranches once. Note that these + * tranche ID variables are not used by SimpleLruInit() when + * IsUnderPostmaster is true. 
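+ * In that case SimpleLruInit() just attaches to the SLRU state, LWLocks + * included, that was already created in shared memory.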
+ */ + if (!IsUnderPostmaster) + { + test_tranche_id = LWLockNewTrancheId("test_slru_tranche"); + test_buffer_tranche_id = LWLockNewTrancheId("test_buffer_tranche"); + } TestSlruCtl->PagePrecedes = test_slru_page_precedes_logically; SimpleLruInit(TestSlruCtl, "TestSLRU", diff --git a/src/test/modules/test_tidstore/test_tidstore.c b/src/test/modules/test_tidstore/test_tidstore.c index eb16e0fbfa647..0c8f43867e55e 100644 --- a/src/test/modules/test_tidstore/test_tidstore.c +++ b/src/test/modules/test_tidstore/test_tidstore.c @@ -103,8 +103,7 @@ test_create(PG_FUNCTION_ARGS) { int tranche_id; - tranche_id = LWLockNewTrancheId(); - LWLockRegisterTranche(tranche_id, "test_tidstore"); + tranche_id = LWLockNewTrancheId("test_tidstore"); tidstore = TidStoreCreateShared(tidstore_max_size, tranche_id); diff --git a/src/test/modules/worker_spi/worker_spi.c b/src/test/modules/worker_spi/worker_spi.c index 9c53d896b6ae5..bea8339f46469 100644 --- a/src/test/modules/worker_spi/worker_spi.c +++ b/src/test/modules/worker_spi/worker_spi.c @@ -30,7 +30,7 @@ /* these headers are used by this particular worker's code */ #include "access/xact.h" -#include "commands/dbcommands.h" +#include "catalog/pg_database.h" #include "executor/spi.h" #include "fmgr.h" #include "lib/stringinfo.h" diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm index 61f68e0cc2e51..35413f140198b 100644 --- a/src/test/perl/PostgreSQL/Test/Cluster.pm +++ b/src/test/perl/PostgreSQL/Test/Cluster.pm @@ -304,6 +304,7 @@ sub is_alive my $ret = PostgreSQL::Test::Utils::system_log( 'pg_isready', + '--timeout' => $PostgreSQL::Test::Utils::timeout_default, '--host' => $self->host, '--port' => $self->port); diff --git a/src/test/perl/PostgreSQL/Test/Kerberos.pm b/src/test/perl/PostgreSQL/Test/Kerberos.pm index b72dd2fbaf414..07a1ea899d031 100644 --- a/src/test/perl/PostgreSQL/Test/Kerberos.pm +++ b/src/test/perl/PostgreSQL/Test/Kerberos.pm @@ -9,6 +9,7 @@ package PostgreSQL::Test::Kerberos; use strict; use warnings FATAL => 'all'; use PostgreSQL::Test::Utils; +use Test::More; our ( $krb5_bin_dir, $krb5_sbin_dir, $krb5_config, $kinit, diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl index debfa635c36fe..4c5af018ee44e 100644 --- a/src/test/recovery/t/013_crash_restart.pl +++ b/src/test/recovery/t/013_crash_restart.pl @@ -228,6 +228,13 @@ 'before-orderly-restart', 'can still write after crash restart'); +# Confirm that the logical replication launcher, a background worker +# without the never-restart flag, has also restarted successfully. +is($node->poll_query_until('postgres', + "SELECT count(*) = 1 FROM pg_stat_activity WHERE backend_type = 'logical replication launcher'"), + '1', + 'logical replication launcher restarted after crash'); + # Just to be sure, check that an orderly restart now still works $node->restart(); diff --git a/src/test/recovery/t/027_stream_regress.pl b/src/test/recovery/t/027_stream_regress.pl index 5d2c06ba06e73..589c79d97d3a9 100644 --- a/src/test/recovery/t/027_stream_regress.pl +++ b/src/test/recovery/t/027_stream_regress.pl @@ -117,6 +117,7 @@ 'pg_dumpall', '--file' => $outputdir . '/primary.dump', '--no-sync', '--no-statistics', + '--restrict-key' => 'test', '--port' => $node_primary->port, '--no-unlogged-table-data', # if unlogged, standby has schema only ], @@ -126,6 +127,7 @@ 'pg_dumpall', '--file' => $outputdir . 
'/standby.dump', '--no-sync', '--no-statistics', + '--restrict-key' => 'test', '--port' => $node_standby_1->port, ], 'dump standby server'); @@ -145,6 +147,7 @@ '--schema' => 'pg_catalog', '--file' => $outputdir . '/catalogs_primary.dump', '--no-sync', + '--restrict-key' => 'test', '--port', $node_primary->port, '--no-unlogged-table-data', 'regression', @@ -156,6 +159,7 @@ '--schema' => 'pg_catalog', '--file' => $outputdir . '/catalogs_standby.dump', '--no-sync', + '--restrict-key' => 'test', '--port' => $node_standby_1->port, 'regression', ], diff --git a/src/test/recovery/t/035_standby_logical_decoding.pl b/src/test/recovery/t/035_standby_logical_decoding.pl index 921813483e37c..c9c182892cf84 100644 --- a/src/test/recovery/t/035_standby_logical_decoding.pl +++ b/src/test/recovery/t/035_standby_logical_decoding.pl @@ -8,6 +8,7 @@ use PostgreSQL::Test::Cluster; use PostgreSQL::Test::Utils; +use Time::HiRes qw(usleep); use Test::More; if ($ENV{enable_injection_points} ne 'yes') @@ -623,7 +624,7 @@ sub wait_until_vacuum_can_remove /ERROR: cannot copy invalidated replication slot "vacuum_full_inactiveslot"/, "invalidated slot cannot be copied"); -# Turn hot_standby_feedback back on +# Set hot_standby_feedback to on change_hot_standby_feedback_and_wait_for_xmins(1, 1); ################################################## @@ -754,12 +755,12 @@ sub wait_until_vacuum_can_remove # message should not be issued ok( !$node_standby->log_contains( - "invalidating obsolete slot \"no_conflict_inactiveslot\"", $logstart), + "invalidating obsolete replication slot \"no_conflict_inactiveslot\"", $logstart), 'inactiveslot slot invalidation is not logged with vacuum on conflict_test' ); ok( !$node_standby->log_contains( - "invalidating obsolete slot \"no_conflict_activeslot\"", $logstart), + "invalidating obsolete replication slot \"no_conflict_activeslot\"", $logstart), 'activeslot slot invalidation is not logged with vacuum on conflict_test' ); diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out index 1f1ce2380af68..c35288eecde37 100644 --- a/src/test/regress/expected/aggregates.out +++ b/src/test/regress/expected/aggregates.out @@ -680,6 +680,25 @@ SELECT sum2(q1,q2) FROM int8_tbl; 18271560493827981 (1 row) +-- sanity checks +SELECT sum(q1+q2), sum(q1)+sum(q2) FROM int8_tbl; + sum | ?column? +-------------------+------------------- + 18271560493827981 | 18271560493827981 +(1 row) + +SELECT sum(q1-q2), sum(q2-q1), sum(q1)-sum(q2) FROM int8_tbl; + sum | sum | ?column? +------------------+-------------------+------------------ + 9135780246913245 | -9135780246913245 | 9135780246913245 +(1 row) + +SELECT sum(q1*2000), sum(-q1*2000), 2000*sum(q1) FROM int8_tbl; + sum | sum | ?column? 
+----------------------+-----------------------+---------------------- + 27407340740741226000 | -27407340740741226000 | 27407340740741226000 +(1 row) + -- test for outer-level aggregates -- this should work select ten, sum(distinct four) from onek a @@ -3379,26 +3398,6 @@ select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*) ba | 0 | 1 (2 rows) --- Make sure that generation of HashAggregate for uniqification purposes --- does not lead to array overflow due to unexpected duplicate hash keys --- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com -set enable_memoize to off; -explain (costs off) - select 1 from tenk1 - where (hundred, thousand) in (select twothousand, twothousand from onek); - QUERY PLAN -------------------------------------------------------------- - Hash Join - Hash Cond: (tenk1.hundred = onek.twothousand) - -> Seq Scan on tenk1 - Filter: (hundred = thousand) - -> Hash - -> HashAggregate - Group Key: onek.twothousand, onek.twothousand - -> Seq Scan on onek -(8 rows) - -reset enable_memoize; -- -- Hash Aggregation Spill tests -- diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out index 08984dd98f168..b33e06a0d3d51 100644 --- a/src/test/regress/expected/alter_table.out +++ b/src/test/regress/expected/alter_table.out @@ -3567,12 +3567,15 @@ SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment F -- filenode function call can return NULL for a relation dropped concurrently -- with the call's surrounding query, so ignore a NULL mapped_oid for -- relations that no longer exist after all calls finish. +-- Temporary relations are ignored, as not supported by pg_filenode_relation(). CREATE TEMP TABLE filenode_mapping AS SELECT oid, mapped_oid, reltablespace, relfilenode, relname FROM pg_class, pg_filenode_relation(reltablespace, pg_relation_filenode(oid)) AS mapped_oid -WHERE relkind IN ('r', 'i', 'S', 't', 'm') AND mapped_oid IS DISTINCT FROM oid; +WHERE relkind IN ('r', 'i', 'S', 't', 'm') + AND relpersistence != 't' + AND mapped_oid IS DISTINCT FROM oid; SELECT m.* FROM filenode_mapping m LEFT JOIN pg_class c ON c.oid = m.oid WHERE c.oid IS NOT NULL OR m.mapped_oid IS NOT NULL; oid | mapped_oid | reltablespace | relfilenode | relname diff --git a/src/test/regress/expected/copy2.out b/src/test/regress/expected/copy2.out index caa3c44f0d0ca..f3fdce23459ac 100644 --- a/src/test/regress/expected/copy2.out +++ b/src/test/regress/expected/copy2.out @@ -163,6 +163,7 @@ COPY x TO stdout WHERE a = 1; ERROR: WHERE clause not allowed with COPY TO LINE 1: COPY x TO stdout WHERE a = 1; ^ +HINT: Try the COPY (SELECT ... WHERE ...) TO variant. COPY x from stdin WHERE a = 50004; COPY x from stdin WHERE a > 60003; COPY x from stdin WHERE f > 60003; diff --git a/src/test/regress/expected/create_function_sql.out b/src/test/regress/expected/create_function_sql.out index 963b6f863ff95..da112608d6619 100644 --- a/src/test/regress/expected/create_function_sql.out +++ b/src/test/regress/expected/create_function_sql.out @@ -733,6 +733,22 @@ SELECT double_append(array_append(ARRAY[q1], q2), q3) {4,5,6,4,5,6} (2 rows) +-- Check that we can re-use a SQLFunctionCache after a run-time error. +-- This function will fail with zero-divide at run time (not plan time). +CREATE FUNCTION part_hashint4_error(value int4, seed int8) RETURNS int8 +LANGUAGE SQL STRICT IMMUTABLE PARALLEL SAFE AS +$$ SELECT value + seed + random()::int/0 $$; +-- Put it into an operator class so that FmgrInfo will be cached in relcache. 
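+-- The second failing INSERT below then re-uses the SQLFunctionCache hung +-- off that cached FmgrInfo, which is the re-use case being tested.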
+CREATE OPERATOR CLASS part_test_int4_ops_bad FOR TYPE int4 USING hash AS + FUNCTION 2 part_hashint4_error(int4, int8); +CREATE TABLE pt(i int) PARTITION BY hash (i part_test_int4_ops_bad); +CREATE TABLE p1 PARTITION OF pt FOR VALUES WITH (modulus 4, remainder 0); +INSERT INTO pt VALUES (1); +ERROR: division by zero +CONTEXT: SQL function "part_hashint4_error" statement 1 +INSERT INTO pt VALUES (1); +ERROR: division by zero +CONTEXT: SQL function "part_hashint4_error" statement 1 -- Things that shouldn't work: CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL AS 'SELECT ''not an integer'';'; @@ -773,7 +789,7 @@ CONTEXT: SQL function "test1" during startup RESET check_function_bodies; -- Cleanup DROP SCHEMA temp_func_test CASCADE; -NOTICE: drop cascades to 35 other objects +NOTICE: drop cascades to 38 other objects DETAIL: drop cascades to function functest_a_1(text,date) drop cascades to function functest_a_2(text[]) drop cascades to function functest_a_3() @@ -808,6 +824,9 @@ drop cascades to function create_and_insert() drop cascades to table ddl_test drop cascades to function alter_and_insert() drop cascades to function double_append(anyarray,anyelement) +drop cascades to function part_hashint4_error(integer,bigint) +drop cascades to operator family part_test_int4_ops_bad for access method hash +drop cascades to table pt drop cascades to function test1(anyelement) DROP USER regress_unpriv_user; RESET search_path; diff --git a/src/test/regress/expected/create_index.out b/src/test/regress/expected/create_index.out index 9ade7b835e69f..98e68e972be05 100644 --- a/src/test/regress/expected/create_index.out +++ b/src/test/regress/expected/create_index.out @@ -1624,8 +1624,8 @@ DROP TABLE cwi_test; -- CREATE TABLE syscol_table (a INT); -- System columns cannot be indexed -CREATE INDEX ON syscolcol_table (ctid); -ERROR: relation "syscolcol_table" does not exist +CREATE INDEX ON syscol_table (ctid); +ERROR: index creation on system columns is not supported -- nor used in expressions CREATE INDEX ON syscol_table ((ctid >= '(1000,0)')); ERROR: index creation on system columns is not supported diff --git a/src/test/regress/expected/create_table.out b/src/test/regress/expected/create_table.out index 76604705a93cc..029beb20aaee3 100644 --- a/src/test/regress/expected/create_table.out +++ b/src/test/regress/expected/create_table.out @@ -102,6 +102,18 @@ ERROR: tables declared WITH OIDS are not supported -- but explicitly not adding oids is still supported CREATE TEMP TABLE withoutoid() WITHOUT OIDS; DROP TABLE withoutoid; CREATE TEMP TABLE withoutoid() WITH (oids = false); DROP TABLE withoutoid; +-- temporary tables are ignored by pg_filenode_relation(). 
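+-- (The pg_filenode_relation() call below should therefore return NULL.)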
+CREATE TEMP TABLE relation_filenode_check(c1 int); +SELECT relpersistence, + pg_filenode_relation (reltablespace, pg_relation_filenode(oid)) + FROM pg_class + WHERE relname = 'relation_filenode_check'; + relpersistence | pg_filenode_relation +----------------+---------------------- + t | +(1 row) + +DROP TABLE relation_filenode_check; -- check restriction with default expressions -- invalid use of column reference in default expressions CREATE TABLE default_expr_column (id int DEFAULT (id)); diff --git a/src/test/regress/expected/create_table_like.out b/src/test/regress/expected/create_table_like.out index 29a779c2e9072..d3c35c148475d 100644 --- a/src/test/regress/expected/create_table_like.out +++ b/src/test/regress/expected/create_table_like.out @@ -320,6 +320,7 @@ DROP TABLE inhz; -- including storage and comments CREATE TABLE ctlt1 (a text CHECK (length(a) > 2) ENFORCED PRIMARY KEY, b text CHECK (length(b) > 100) NOT ENFORCED); +ALTER TABLE ctlt1 ADD CONSTRAINT cc CHECK (length(b) > 100) NOT VALID; CREATE INDEX ctlt1_b_key ON ctlt1 (b); CREATE INDEX ctlt1_fnidx ON ctlt1 ((a || b)); CREATE STATISTICS ctlt1_a_b_stat ON a,b FROM ctlt1; @@ -378,6 +379,7 @@ SELECT conname, description FROM pg_description, pg_constraint c WHERE classoid CREATE TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (ctlt1); NOTICE: merging column "a" with inherited definition NOTICE: merging column "b" with inherited definition +NOTICE: merging constraint "cc" with inherited definition NOTICE: merging constraint "ctlt1_a_check" with inherited definition NOTICE: merging constraint "ctlt1_b_check" with inherited definition \d+ ctlt1_inh @@ -387,6 +389,7 @@ NOTICE: merging constraint "ctlt1_b_check" with inherited definition a | text | | not null | | main | | A b | text | | | | extended | | B Check constraints: + "cc" CHECK (length(b) > 100) "ctlt1_a_check" CHECK (length(a) > 2) "ctlt1_b_check" CHECK (length(b) > 100) NOT ENFORCED Not-null constraints: @@ -409,6 +412,7 @@ NOTICE: merging multiple inherited definitions of column "a" b | text | | | | extended | | c | text | | | | external | | Check constraints: + "cc" CHECK (length(b) > 100) "ctlt1_a_check" CHECK (length(a) > 2) "ctlt1_b_check" CHECK (length(b) > 100) NOT ENFORCED "ctlt3_a_check" CHECK (length(a) < 5) @@ -430,6 +434,7 @@ NOTICE: merging column "a" with inherited definition Indexes: "ctlt13_like_expr_idx" btree ((a || c)) Check constraints: + "cc" CHECK (length(b) > 100) "ctlt1_a_check" CHECK (length(a) > 2) "ctlt1_b_check" CHECK (length(b) > 100) NOT ENFORCED "ctlt3_a_check" CHECK (length(a) < 5) @@ -456,6 +461,7 @@ Indexes: "ctlt_all_b_idx" btree (b) "ctlt_all_expr_idx" btree ((a || b)) Check constraints: + "cc" CHECK (length(b) > 100) "ctlt1_a_check" CHECK (length(a) > 2) "ctlt1_b_check" CHECK (length(b) > 100) NOT ENFORCED Statistics objects: @@ -499,6 +505,7 @@ Indexes: "pg_attrdef_b_idx" btree (b) "pg_attrdef_expr_idx" btree ((a || b)) Check constraints: + "cc" CHECK (length(b) > 100) "ctlt1_a_check" CHECK (length(a) > 2) "ctlt1_b_check" CHECK (length(b) > 100) NOT ENFORCED Statistics objects: @@ -524,6 +531,7 @@ Indexes: "ctlt1_b_idx" btree (b) "ctlt1_expr_idx" btree ((a || b)) Check constraints: + "cc" CHECK (length(b) > 100) "ctlt1_a_check" CHECK (length(a) > 2) "ctlt1_b_check" CHECK (length(b) > 100) NOT ENFORCED Statistics objects: diff --git a/src/test/regress/expected/enum.out b/src/test/regress/expected/enum.out index 4d9f36d0d3677..990ce66c7bb17 100644 --- a/src/test/regress/expected/enum.out +++ 
b/src/test/regress/expected/enum.out @@ -52,6 +52,9 @@ hint | sql_error_code | 22P02 \x +-- check for duplicate enum entries +CREATE TYPE dup_enum AS ENUM ('foo','bar','foo'); +ERROR: enum label "foo" used more than once -- -- adding new values -- diff --git a/src/test/regress/expected/foreign_key.out b/src/test/regress/expected/foreign_key.out index f9bd252444f53..dc541d61adfa5 100644 --- a/src/test/regress/expected/foreign_key.out +++ b/src/test/regress/expected/foreign_key.out @@ -1750,7 +1750,7 @@ Indexes: Referenced by: TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) --- Check the exsting FK trigger +-- Check the existing FK trigger SELECT conname, tgrelid::regclass as tgrel, regexp_replace(tgname, '[0-9]+', 'N') as tgname, tgtype FROM pg_trigger t JOIN pg_constraint c ON (t.tgconstraint = c.oid) WHERE tgrelid IN (SELECT relid FROM pg_partition_tree('fk_partitioned_fk'::regclass) diff --git a/src/test/regress/expected/indexing.out b/src/test/regress/expected/indexing.out index bcf1db11d731d..4d29fb85293e0 100644 --- a/src/test/regress/expected/indexing.out +++ b/src/test/regress/expected/indexing.out @@ -248,7 +248,7 @@ alter index idxpart_a_b_idx attach partition idxpart1_a_b_idx; -- quiet create index idxpart1_2_a_b on idxpart1 (a, b); alter index idxpart_a_b_idx attach partition idxpart1_2_a_b; ERROR: cannot attach index "idxpart1_2_a_b" as a partition of index "idxpart_a_b_idx" -DETAIL: Another index is already attached for partition "idxpart1". +DETAIL: Another index "idxpart1_a_b_idx" is already attached for partition "idxpart1". drop table idxpart; -- make sure everything's gone select indexrelid::regclass, indrelid::regclass diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out index 4d5d35d07270d..04079268b9861 100644 --- a/src/test/regress/expected/join.out +++ b/src/test/regress/expected/join.out @@ -3222,6 +3222,24 @@ where b.unique2 is null; -> Index Only Scan using tenk1_unique2 on tenk1 b (5 rows) +-- check that we avoid de-duplicating columns redundantly +set enable_memoize to off; +explain (costs off) +select 1 from tenk1 +where (hundred, thousand) in (select twothousand, twothousand from onek); + QUERY PLAN +------------------------------------------------- + Hash Join + Hash Cond: (tenk1.hundred = onek.twothousand) + -> Seq Scan on tenk1 + Filter: (hundred = thousand) + -> Hash + -> HashAggregate + Group Key: onek.twothousand + -> Seq Scan on onek +(8 rows) + +reset enable_memoize; -- -- regression test for bogus RTE_GROUP entries -- @@ -6500,6 +6518,128 @@ where t1.a = s.c; ---------- (0 rows) +rollback; +-- check handling of semijoins after join removal: we must suppress +-- unique-ification of known-constant values +begin; +create temp table t (a int unique, b int); +insert into t values (1, 2); +explain (verbose, costs off) +select t1.a from t t1 + left join t t2 on t1.a = t2.a + join t t3 on true +where exists (select 1 from t t4 + join t t5 on t4.b = t5.b + join t t6 on t5.b = t6.b + where t1.a = t4.a and t3.a = t5.a and t4.a = 1); + QUERY PLAN +------------------------------------------------------------------------------------ + Nested Loop + Output: t1.a + Inner Unique: true + -> Nested Loop + Output: t1.a, t5.a + -> Index Only Scan using t_a_key on pg_temp.t t1 + Output: t1.a + Index Cond: (t1.a = 1) + -> HashAggregate + Output: t5.a + Group Key: t5.a + -> Hash Join + Output: t5.a + Hash Cond: (t6.b = t4.b) + -> Seq Scan on pg_temp.t t6 + Output: t6.a, 
t6.b + -> Hash + Output: t4.b, t5.b, t5.a + -> Hash Join + Output: t4.b, t5.b, t5.a + Inner Unique: true + Hash Cond: (t5.b = t4.b) + -> Seq Scan on pg_temp.t t5 + Output: t5.a, t5.b + -> Hash + Output: t4.b, t4.a + -> Index Scan using t_a_key on pg_temp.t t4 + Output: t4.b, t4.a + Index Cond: (t4.a = 1) + -> Index Only Scan using t_a_key on pg_temp.t t3 + Output: t3.a + Index Cond: (t3.a = t5.a) +(32 rows) + +select t1.a from t t1 + left join t t2 on t1.a = t2.a + join t t3 on true +where exists (select 1 from t t4 + join t t5 on t4.b = t5.b + join t t6 on t5.b = t6.b + where t1.a = t4.a and t3.a = t5.a and t4.a = 1); + a +--- + 1 +(1 row) + +rollback; +-- check handling of semijoins if all RHS columns are equated to constants: we +-- should suppress unique-ification in this case. +begin; +create temp table t (a int, b int); +insert into t values (1, 2); +explain (costs off) +select * from t t1, t t2 where exists + (select 1 from t t3 where t1.a = t3.a and t2.b = t3.b and t3.a = 1 and t3.b = 2); + QUERY PLAN +--------------------------------------------- + Nested Loop Semi Join + -> Nested Loop + -> Seq Scan on t t1 + Filter: (a = 1) + -> Materialize + -> Seq Scan on t t2 + Filter: (b = 2) + -> Materialize + -> Seq Scan on t t3 + Filter: ((a = 1) AND (b = 2)) +(10 rows) + +select * from t t1, t t2 where exists + (select 1 from t t3 where t1.a = t3.a and t2.b = t3.b and t3.a = 1 and t3.b = 2); + a | b | a | b +---+---+---+--- + 1 | 2 | 1 | 2 +(1 row) + +rollback; +-- check handling of semijoin unique-ification for child relations if all RHS +-- columns are equated to constants. +begin; +create temp table p (a int, b int) partition by range (a); +create temp table p1 partition of p for values from (0) to (10); +create temp table p2 partition of p for values from (10) to (20); +insert into p values (1, 2); +insert into p values (10, 20); +set enable_partitionwise_join to on; +explain (costs off) +select * from p t1 where exists + (select 1 from p t2 where t1.a = t2.a and t1.a = 1); + QUERY PLAN +------------------------------- + Nested Loop Semi Join + -> Seq Scan on p1 t1 + Filter: (a = 1) + -> Materialize + -> Seq Scan on p1 t2 + Filter: (a = 1) +(6 rows) + +select * from p t1 where exists + (select 1 from p t2 where t1.a = t2.a and t1.a = 1); + a | b +---+--- + 1 | 2 +(1 row) + rollback; -- test cases where we can remove a join, but not a PHV computed at it begin; @@ -9468,23 +9608,20 @@ where exists (select 1 from tenk1 t3 --------------------------------------------------------------------------------- Nested Loop Output: t1.unique1, t2.hundred - -> Hash Join + -> Merge Join Output: t1.unique1, t3.tenthous - Hash Cond: (t3.thousand = t1.unique1) - -> HashAggregate + Merge Cond: (t3.thousand = t1.unique1) + -> Unique Output: t3.thousand, t3.tenthous - Group Key: t3.thousand, t3.tenthous -> Index Only Scan using tenk1_thous_tenthous on public.tenk1 t3 Output: t3.thousand, t3.tenthous - -> Hash + -> Index Only Scan using onek_unique1 on public.onek t1 Output: t1.unique1 - -> Index Only Scan using onek_unique1 on public.onek t1 - Output: t1.unique1 - Index Cond: (t1.unique1 < 1) + Index Cond: (t1.unique1 < 1) -> Index Only Scan using tenk1_hundred on public.tenk1 t2 Output: t2.hundred Index Cond: (t2.hundred = t3.tenthous) -(18 rows) +(15 rows) -- ... 
unless it actually is unique create table j3 as select unique1, tenthous from onek; diff --git a/src/test/regress/expected/memoize.out b/src/test/regress/expected/memoize.out index 150dc1b44cf62..fbcaf113266c5 100644 --- a/src/test/regress/expected/memoize.out +++ b/src/test/regress/expected/memoize.out @@ -545,15 +545,15 @@ EXPLAIN (COSTS OFF) SELECT * FROM tab_anti t1 WHERE t1.a IN (SELECT a FROM tab_anti t2 WHERE t2.b IN (SELECT t1.b FROM tab_anti t3 WHERE t2.a > 1 OFFSET 0)); - QUERY PLAN -------------------------------------------------- + QUERY PLAN +--------------------------------------------------- Nested Loop Semi Join -> Seq Scan on tab_anti t1 -> Nested Loop Semi Join Join Filter: (t1.a = t2.a) -> Seq Scan on tab_anti t2 - -> Subquery Scan on "ANY_subquery" - Filter: (t2.b = "ANY_subquery".b) + -> Subquery Scan on unnamed_subquery + Filter: (t2.b = unnamed_subquery.b) -> Result One-Time Filter: (t2.a > 1) -> Seq Scan on tab_anti t3 diff --git a/src/test/regress/expected/partition_join.out b/src/test/regress/expected/partition_join.out index d5368186caa9f..24e06845f921e 100644 --- a/src/test/regress/expected/partition_join.out +++ b/src/test/regress/expected/partition_join.out @@ -1134,48 +1134,50 @@ EXPLAIN (COSTS OFF) SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1, prt1_e t2 WHERE t1.a = 0 AND t1.b = (t2.a + t2.b)/2) AND t1.b = 0 ORDER BY t1.a; QUERY PLAN --------------------------------------------------------------------------------- - Sort + Merge Append Sort Key: t1.a - -> Append - -> Nested Loop - Join Filter: (t1_2.a = t1_5.b) - -> HashAggregate - Group Key: t1_5.b + -> Nested Loop + Join Filter: (t1_2.a = t1_5.b) + -> Unique + -> Sort + Sort Key: t1_5.b -> Hash Join Hash Cond: (((t2_1.a + t2_1.b) / 2) = t1_5.b) -> Seq Scan on prt1_e_p1 t2_1 -> Hash -> Seq Scan on prt2_p1 t1_5 Filter: (a = 0) - -> Index Scan using iprt1_p1_a on prt1_p1 t1_2 - Index Cond: (a = ((t2_1.a + t2_1.b) / 2)) - Filter: (b = 0) - -> Nested Loop - Join Filter: (t1_3.a = t1_6.b) - -> HashAggregate - Group Key: t1_6.b + -> Index Scan using iprt1_p1_a on prt1_p1 t1_2 + Index Cond: (a = ((t2_1.a + t2_1.b) / 2)) + Filter: (b = 0) + -> Nested Loop + Join Filter: (t1_3.a = t1_6.b) + -> Unique + -> Sort + Sort Key: t1_6.b -> Hash Join Hash Cond: (((t2_2.a + t2_2.b) / 2) = t1_6.b) -> Seq Scan on prt1_e_p2 t2_2 -> Hash -> Seq Scan on prt2_p2 t1_6 Filter: (a = 0) - -> Index Scan using iprt1_p2_a on prt1_p2 t1_3 - Index Cond: (a = ((t2_2.a + t2_2.b) / 2)) - Filter: (b = 0) - -> Nested Loop - Join Filter: (t1_4.a = t1_7.b) - -> HashAggregate - Group Key: t1_7.b + -> Index Scan using iprt1_p2_a on prt1_p2 t1_3 + Index Cond: (a = ((t2_2.a + t2_2.b) / 2)) + Filter: (b = 0) + -> Nested Loop + Join Filter: (t1_4.a = t1_7.b) + -> Unique + -> Sort + Sort Key: t1_7.b -> Nested Loop -> Seq Scan on prt2_p3 t1_7 Filter: (a = 0) -> Index Scan using iprt1_e_p3_ab2 on prt1_e_p3 t2_3 Index Cond: (((a + b) / 2) = t1_7.b) - -> Index Scan using iprt1_p3_a on prt1_p3 t1_4 - Index Cond: (a = ((t2_3.a + t2_3.b) / 2)) - Filter: (b = 0) -(41 rows) + -> Index Scan using iprt1_p3_a on prt1_p3 t1_4 + Index Cond: (a = ((t2_3.a + t2_3.b) / 2)) + Filter: (b = 0) +(43 rows) SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1, prt1_e t2 WHERE t1.a = 0 AND t1.b = (t2.a + t2.b)/2) AND t1.b = 0 ORDER BY t1.a; a | b | c @@ -1190,46 +1192,48 @@ EXPLAIN (COSTS OFF) SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER 
BY t1.a; QUERY PLAN --------------------------------------------------------------------------- - Sort + Merge Append Sort Key: t1.a - -> Append - -> Nested Loop - -> HashAggregate - Group Key: t1_6.b + -> Nested Loop + -> Unique + -> Sort + Sort Key: t1_6.b -> Hash Semi Join Hash Cond: (t1_6.b = ((t1_9.a + t1_9.b) / 2)) -> Seq Scan on prt2_p1 t1_6 -> Hash -> Seq Scan on prt1_e_p1 t1_9 Filter: (c = 0) - -> Index Scan using iprt1_p1_a on prt1_p1 t1_3 - Index Cond: (a = t1_6.b) - Filter: (b = 0) - -> Nested Loop - -> HashAggregate - Group Key: t1_7.b + -> Index Scan using iprt1_p1_a on prt1_p1 t1_3 + Index Cond: (a = t1_6.b) + Filter: (b = 0) + -> Nested Loop + -> Unique + -> Sort + Sort Key: t1_7.b -> Hash Semi Join Hash Cond: (t1_7.b = ((t1_10.a + t1_10.b) / 2)) -> Seq Scan on prt2_p2 t1_7 -> Hash -> Seq Scan on prt1_e_p2 t1_10 Filter: (c = 0) - -> Index Scan using iprt1_p2_a on prt1_p2 t1_4 - Index Cond: (a = t1_7.b) - Filter: (b = 0) - -> Nested Loop - -> HashAggregate - Group Key: t1_8.b + -> Index Scan using iprt1_p2_a on prt1_p2 t1_4 + Index Cond: (a = t1_7.b) + Filter: (b = 0) + -> Nested Loop + -> Unique + -> Sort + Sort Key: t1_8.b -> Hash Semi Join Hash Cond: (t1_8.b = ((t1_11.a + t1_11.b) / 2)) -> Seq Scan on prt2_p3 t1_8 -> Hash -> Seq Scan on prt1_e_p3 t1_11 Filter: (c = 0) - -> Index Scan using iprt1_p3_a on prt1_p3 t1_5 - Index Cond: (a = t1_8.b) - Filter: (b = 0) -(39 rows) + -> Index Scan using iprt1_p3_a on prt1_p3 t1_5 + Index Cond: (a = t1_8.b) + Filter: (b = 0) +(41 rows) SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a; a | b | c diff --git a/src/test/regress/expected/partition_prune.out b/src/test/regress/expected/partition_prune.out index d1966cd7d829f..68ecd95180920 100644 --- a/src/test/regress/expected/partition_prune.out +++ b/src/test/regress/expected/partition_prune.out @@ -4763,7 +4763,7 @@ select min(a) over (partition by a order by a) from part_abc where a >= stable_o QUERY PLAN ---------------------------------------------------------------------------------------------- Append - -> Subquery Scan on "*SELECT* 1_1" + -> Subquery Scan on unnamed_subquery_2 -> WindowAgg Window: w1 AS (PARTITION BY part_abc.a ORDER BY part_abc.a) -> Append @@ -4780,7 +4780,7 @@ select min(a) over (partition by a order by a) from part_abc where a >= stable_o -> Index Scan using part_abc_3_2_a_idx on part_abc_3_2 part_abc_4 Index Cond: (a >= (stable_one() + 1)) Filter: (d <= stable_one()) - -> Subquery Scan on "*SELECT* 2" + -> Subquery Scan on unnamed_subquery_1 -> WindowAgg Window: w1 AS (PARTITION BY part_abc_5.a ORDER BY part_abc_5.a) -> Append diff --git a/src/test/regress/expected/predicate.out b/src/test/regress/expected/predicate.out index 59bfe33bb1ce1..1aff0b59ff874 100644 --- a/src/test/regress/expected/predicate.out +++ b/src/test/regress/expected/predicate.out @@ -409,3 +409,30 @@ SELECT * FROM pred_tab t1 DROP TABLE pred_tab; DROP TABLE pred_tab_notnull; +-- Validate that NullTest quals in constraint expressions are reduced correctly +CREATE TABLE pred_tab1 (a int NOT NULL, b int, + CONSTRAINT check_tab1 CHECK (a IS NULL OR b > 2)); +CREATE TABLE pred_tab2 (a int, b int, + CONSTRAINT check_a CHECK (a IS NOT NULL)); +SET constraint_exclusion TO ON; +-- Ensure that we get a dummy plan +EXPLAIN (COSTS OFF) +SELECT * FROM pred_tab1, pred_tab2 WHERE pred_tab2.a IS NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +-- 
Ensure that we get a dummy plan +EXPLAIN (COSTS OFF) +SELECT * FROM pred_tab2, pred_tab1 WHERE pred_tab1.a IS NULL OR pred_tab1.b < 2; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +RESET constraint_exclusion; +DROP TABLE pred_tab1; +DROP TABLE pred_tab2; diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out index 602a6b255bc08..6dcc95ede502c 100644 --- a/src/test/regress/expected/privileges.out +++ b/src/test/regress/expected/privileges.out @@ -513,8 +513,6 @@ CREATE VIEW atest12v AS SELECT * FROM atest12 WHERE b <<< 5; CREATE VIEW atest12sbv WITH (security_barrier=true) AS SELECT * FROM atest12 WHERE b <<< 5; -GRANT SELECT ON atest12v TO PUBLIC; -GRANT SELECT ON atest12sbv TO PUBLIC; -- This plan should use nestloop, knowing that few rows will be selected. EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b; QUERY PLAN @@ -560,9 +558,18 @@ CREATE FUNCTION leak2(integer,integer) RETURNS boolean LANGUAGE plpgsql immutable; CREATE OPERATOR >>> (procedure = leak2, leftarg = integer, rightarg = integer, restrict = scalargtsel); --- This should not show any "leak" notices before failing. +-- These should not show any "leak" notices before failing. EXPLAIN (COSTS OFF) SELECT * FROM atest12 WHERE a >>> 0; ERROR: permission denied for table atest12 +EXPLAIN (COSTS OFF) SELECT * FROM atest12v WHERE a >>> 0; +ERROR: permission denied for view atest12v +EXPLAIN (COSTS OFF) SELECT * FROM atest12sbv WHERE a >>> 0; +ERROR: permission denied for view atest12sbv +-- Now regress_priv_user1 grants access to regress_priv_user2 via the views. +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT SELECT ON atest12v TO PUBLIC; +GRANT SELECT ON atest12sbv TO PUBLIC; +SET SESSION AUTHORIZATION regress_priv_user2; -- These plans should continue to use a nestloop, since they execute with the -- privileges of the view owner. EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b; @@ -3146,6 +3153,11 @@ DROP USER regress_priv_user6; DROP USER regress_priv_user7; DROP USER regress_priv_user8; -- does not exist ERROR: role "regress_priv_user8" does not exist +-- leave some default ACLs for pg_upgrade's dump-restore test input. +ALTER DEFAULT PRIVILEGES FOR ROLE pg_signal_backend + REVOKE USAGE ON TYPES FROM pg_signal_backend; +ALTER DEFAULT PRIVILEGES FOR ROLE pg_read_all_settings + REVOKE USAGE ON TYPES FROM pg_read_all_settings; -- permissions with LOCK TABLE CREATE USER regress_locktable_user; CREATE TABLE lock_table (a int); diff --git a/src/test/regress/expected/psql.out b/src/test/regress/expected/psql.out index 236eba2540e9d..a79325e8a2f79 100644 --- a/src/test/regress/expected/psql.out +++ b/src/test/regress/expected/psql.out @@ -4705,6 +4705,7 @@ invalid command \lo \pset arg1 arg2 \q \reset + \restrict test \s arg1 \sendpipeline \set arg1 arg2 arg3 arg4 arg5 arg6 arg7 @@ -4716,6 +4717,7 @@ invalid command \lo \t arg1 \T arg1 \timing arg1 + \unrestrict not_valid \unset arg1 \w arg1 \watch arg1 arg2 diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out index 3a2eacd793f70..895ca87a0dfeb 100644 --- a/src/test/regress/expected/publication.out +++ b/src/test/regress/expected/publication.out @@ -36,6 +36,9 @@ LINE 1: ...pub_xxx WITH (publish_generated_columns = stored, publish_ge... 
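For context on this publication.out hunk: the patch removes the value-less form of publish_generated_columns, which previously behaved like a boolean and defaulted to 'stored'; naming the parameter without a value is now rejected, as the added test just below shows. A minimal sketch of the forms that remain valid (publication names here are hypothetical, not from the patch):

-- explicit values are still accepted
CREATE PUBLICATION demo_pub_stored FOR ALL TABLES
    WITH (publish_generated_columns = stored);
CREATE PUBLICATION demo_pub_none FOR ALL TABLES
    WITH (publish_generated_columns = none);
-- the bare, boolean-style shorthand now fails:
-- CREATE PUBLICATION demo_pub_bare FOR ALL TABLES
--     WITH (publish_generated_columns);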
CREATE PUBLICATION testpub_xxx WITH (publish_generated_columns = foo); ERROR: invalid value for publication parameter "publish_generated_columns": "foo" DETAIL: Valid values are "none" and "stored". +CREATE PUBLICATION testpub_xxx WITH (publish_generated_columns); +ERROR: invalid value for publication parameter "publish_generated_columns": "" +DETAIL: Valid values are "none" and "stored". \dRp List of publications Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root @@ -1844,8 +1847,7 @@ DROP SCHEMA sch1 cascade; DROP SCHEMA sch2 cascade; -- ====================================================== -- Test the 'publish_generated_columns' parameter with the following values: --- 'stored', 'none', and the default (no value specified), which defaults to --- 'stored'. +-- 'stored', 'none'. SET client_min_messages = 'ERROR'; CREATE PUBLICATION pub1 FOR ALL TABLES WITH (publish_generated_columns = stored); \dRp+ pub1 @@ -1863,17 +1865,8 @@ CREATE PUBLICATION pub2 FOR ALL TABLES WITH (publish_generated_columns = none); regress_publication_user | t | t | t | t | t | none | f (1 row) -CREATE PUBLICATION pub3 FOR ALL TABLES WITH (publish_generated_columns); -\dRp+ pub3 - Publication pub3 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | t | t | t | t | t | stored | f -(1 row) - DROP PUBLICATION pub1; DROP PUBLICATION pub2; -DROP PUBLICATION pub3; -- Test the 'publish_generated_columns' parameter as 'none' and 'stored' for -- different scenarios with/without generated columns in column lists. CREATE TABLE gencols (a int, gen1 int GENERATED ALWAYS AS (a * 2) STORED); @@ -1931,6 +1924,78 @@ DROP PUBLICATION pub1; DROP PUBLICATION pub2; DROP TABLE gencols; RESET client_min_messages; +-- Test that the INSERT ON CONFLICT command correctly checks REPLICA IDENTITY +-- when the target table is published. +CREATE TABLE testpub_insert_onconfl_no_ri (a int unique, b int); +CREATE TABLE testpub_insert_onconfl_parted (a int unique, b int) PARTITION by RANGE (a); +CREATE TABLE testpub_insert_onconfl_part_no_ri PARTITION OF testpub_insert_onconfl_parted FOR VALUES FROM (1) TO (10); +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION pub1 FOR ALL TABLES; +RESET client_min_messages; +-- fail - missing REPLICA IDENTITY +INSERT INTO testpub_insert_onconfl_no_ri VALUES (1, 1) ON CONFLICT (a) DO UPDATE SET b = 2; +ERROR: cannot update table "testpub_insert_onconfl_no_ri" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. +-- ok - no updates +INSERT INTO testpub_insert_onconfl_no_ri VALUES (1, 1) ON CONFLICT DO NOTHING; +-- fail - missing REPLICA IDENTITY in partition testpub_insert_onconfl_no_ri +INSERT INTO testpub_insert_onconfl_parted VALUES (1, 1) ON CONFLICT (a) DO UPDATE SET b = 2; +ERROR: cannot update table "testpub_insert_onconfl_part_no_ri" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. +-- ok - no updates +INSERT INTO testpub_insert_onconfl_parted VALUES (1, 1) ON CONFLICT DO NOTHING; +DROP PUBLICATION pub1; +DROP TABLE testpub_insert_onconfl_no_ri; +DROP TABLE testpub_insert_onconfl_parted; +-- Test that the MERGE command correctly checks REPLICA IDENTITY when the +-- target table is published. 
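Both the ON CONFLICT tests above and the MERGE tests that follow exercise the same rule: once a table is published, any statement that can produce a published UPDATE or DELETE requires a replica identity, while plain inserts and DO NOTHING paths are exempt. The usual remedy is to give the table one; a minimal sketch, with a hypothetical table name and assuming a FOR ALL TABLES publication like pub1 above is active:

CREATE TABLE ri_demo (a int UNIQUE, b int);
-- ON CONFLICT ... DO UPDATE would fail here: no replica identity,
-- and the DO UPDATE arm publishes an update
ALTER TABLE ri_demo REPLICA IDENTITY FULL;  -- or add a PRIMARY KEY
INSERT INTO ri_demo VALUES (1, 1)
    ON CONFLICT (a) DO UPDATE SET b = 2;    -- now permitted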
+CREATE TABLE testpub_merge_no_ri (a int, b int); +CREATE TABLE testpub_merge_pk (a int primary key, b int); +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION pub1 FOR ALL TABLES; +RESET client_min_messages; +-- fail - missing REPLICA IDENTITY +MERGE INTO testpub_merge_no_ri USING testpub_merge_pk s ON s.a >= 1 + WHEN MATCHED THEN UPDATE SET b = s.b; +ERROR: cannot update table "testpub_merge_no_ri" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. +-- fail - missing REPLICA IDENTITY +MERGE INTO testpub_merge_no_ri USING testpub_merge_pk s ON s.a >= 1 + WHEN MATCHED THEN DELETE; +ERROR: cannot delete from table "testpub_merge_no_ri" because it does not have a replica identity and publishes deletes +HINT: To enable deleting from the table, set REPLICA IDENTITY using ALTER TABLE. +-- ok - insert and do nothing are not restricted +MERGE INTO testpub_merge_no_ri USING testpub_merge_pk s ON s.a >= 1 + WHEN MATCHED THEN DO NOTHING + WHEN NOT MATCHED THEN INSERT (a, b) VALUES (0, 0); +-- ok - REPLICA IDENTITY is DEFAULT and table has a PK +MERGE INTO testpub_merge_pk USING testpub_merge_no_ri s ON s.a >= 1 + WHEN MATCHED AND s.a > 0 THEN UPDATE SET b = s.b + WHEN MATCHED THEN DELETE; +DROP PUBLICATION pub1; +DROP TABLE testpub_merge_no_ri; +DROP TABLE testpub_merge_pk; RESET SESSION AUTHORIZATION; DROP ROLE regress_publication_user, regress_publication_user2; DROP ROLE regress_publication_user_dummy; +-- stage objects for pg_dump tests +CREATE SCHEMA pubme CREATE TABLE t0 (c int, d int) CREATE TABLE t1 (c int); +CREATE SCHEMA pubme2 CREATE TABLE t0 (c int, d int); +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION dump_pub_qual_1ct FOR + TABLE ONLY pubme.t0 (c, d) WHERE (c > 0); +CREATE PUBLICATION dump_pub_qual_2ct FOR + TABLE ONLY pubme.t0 (c) WHERE (c > 0), + TABLE ONLY pubme.t1 (c); +CREATE PUBLICATION dump_pub_nsp_1ct FOR + TABLES IN SCHEMA pubme; +CREATE PUBLICATION dump_pub_nsp_2ct FOR + TABLES IN SCHEMA pubme, + TABLES IN SCHEMA pubme2; +CREATE PUBLICATION dump_pub_all FOR + TABLE ONLY pubme.t0, + TABLE ONLY pubme.t1 WHERE (c < 0), + TABLES IN SCHEMA pubme, + TABLES IN SCHEMA pubme2 + WITH (publish_via_partition_root = true); +RESET client_min_messages; diff --git a/src/test/regress/expected/random.out b/src/test/regress/expected/random.out index 43cf88a36341b..7f17b2a1b12f8 100644 --- a/src/test/regress/expected/random.out +++ b/src/test/regress/expected/random.out @@ -536,3 +536,90 @@ SELECT n, random(0, trim_scale(abs(1 - 10.0^(-n)))) FROM generate_series(-20, 20 20 | 0.60795101234744211935 (41 rows) +-- random dates +SELECT random('1979-02-08'::date,'2025-07-03'::date) AS random_date_multiple_years; + random_date_multiple_years +---------------------------- + 04-09-1986 +(1 row) + +SELECT random('4714-11-24 BC'::date,'5874897-12-31 AD'::date) AS random_date_maximum_range; + random_date_maximum_range +--------------------------- + 10-02-2898131 +(1 row) + +SELECT random('1979-02-08'::date,'1979-02-08'::date) AS random_date_empty_range; + random_date_empty_range +------------------------- + 02-08-1979 +(1 row) + +SELECT random('2024-12-31'::date, '2024-01-01'::date); -- fail +ERROR: lower bound must be less than or equal to upper bound +SELECT random('-infinity'::date, '2024-01-01'::date); -- fail +ERROR: lower and upper bounds must be finite +SELECT random('2024-12-31'::date, 'infinity'::date); -- fail +ERROR: lower and upper bounds must be finite +-- random timestamps +SELECT 
random('1979-02-08'::timestamp,'2025-07-03'::timestamp) AS random_timestamp_multiple_years; + random_timestamp_multiple_years +--------------------------------- + Fri Jan 27 18:52:05.366009 2017 +(1 row) + +SELECT random('4714-11-24 BC'::timestamp,'294276-12-31 23:59:59.999999'::timestamp) AS random_timestamp_maximum_range; + random_timestamp_maximum_range +----------------------------------- + Wed Mar 28 00:45:36.180395 226694 +(1 row) + +SELECT random('2024-07-01 12:00:00.000001'::timestamp, '2024-07-01 12:00:00.999999'::timestamp) AS random_narrow_range; + random_narrow_range +--------------------------------- + Mon Jul 01 12:00:00.999286 2024 +(1 row) + +SELECT random('1979-02-08'::timestamp,'1979-02-08'::timestamp) AS random_timestamp_empty_range; + random_timestamp_empty_range +------------------------------ + Thu Feb 08 00:00:00 1979 +(1 row) + +SELECT random('2024-12-31'::timestamp, '2024-01-01'::timestamp); -- fail +ERROR: lower bound must be less than or equal to upper bound +SELECT random('-infinity'::timestamp, '2024-01-01'::timestamp); -- fail +ERROR: lower and upper bounds must be finite +SELECT random('2024-12-31'::timestamp, 'infinity'::timestamp); -- fail +ERROR: lower and upper bounds must be finite +-- random timestamps with timezone +SELECT random('1979-02-08 +01'::timestamptz,'2025-07-03 +02'::timestamptz) AS random_timestamptz_multiple_years; + random_timestamptz_multiple_years +------------------------------------- + Tue Jun 14 04:41:16.652896 2016 PDT +(1 row) + +SELECT random('4714-11-24 BC +00'::timestamptz,'294276-12-31 23:59:59.999999 +00'::timestamptz) AS random_timestamptz_maximum_range; + random_timestamptz_maximum_range +-------------------------------------- + Wed Mar 26 14:07:16.980265 31603 PDT +(1 row) + +SELECT random('2024-07-01 12:00:00.000001 +04'::timestamptz, '2024-07-01 12:00:00.999999 +04'::timestamptz) AS random_timestamptz_narrow_range; + random_timestamptz_narrow_range +------------------------------------- + Mon Jul 01 01:00:00.835808 2024 PDT +(1 row) + +SELECT random('1979-02-08 +05'::timestamptz,'1979-02-08 +05'::timestamptz) AS random_timestamptz_empty_range; + random_timestamptz_empty_range +-------------------------------- + Wed Feb 07 11:00:00 1979 PST +(1 row) + +SELECT random('2024-01-01 +06'::timestamptz, '2024-01-01 +07'::timestamptz); -- fail +ERROR: lower bound must be less than or equal to upper bound +SELECT random('-infinity'::timestamptz, '2024-01-01 +07'::timestamptz); -- fail +ERROR: lower and upper bounds must be finite +SELECT random('2024-01-01 +06'::timestamptz, 'infinity'::timestamptz); -- fail +ERROR: lower and upper bounds must be finite diff --git a/src/test/regress/expected/rangefuncs.out b/src/test/regress/expected/rangefuncs.out index c21be83aa4aaf..30241e22da270 100644 --- a/src/test/regress/expected/rangefuncs.out +++ b/src/test/regress/expected/rangefuncs.out @@ -2130,10 +2130,10 @@ select testrngfunc(); explain (verbose, costs off) select * from testrngfunc(); - QUERY PLAN ----------------------------------------------------------- - Subquery Scan on "*SELECT*" - Output: "*SELECT*"."?column?", "*SELECT*"."?column?_1" + QUERY PLAN +---------------------------------------------------------------------- + Subquery Scan on unnamed_subquery + Output: unnamed_subquery."?column?", unnamed_subquery."?column?_1" -> Unique Output: (1), (2) -> Sort diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out index 1c4e37d22493d..8c87950931315 100644 --- 
a/src/test/regress/expected/rowsecurity.out +++ b/src/test/regress/expected/rowsecurity.out @@ -4506,7 +4506,7 @@ RESET SESSION AUTHORIZATION; DROP VIEW rls_view; DROP TABLE rls_tbl; DROP TABLE ref_tbl; --- Leaky operator test +-- Leaky operator tests CREATE TABLE rls_tbl (a int); INSERT INTO rls_tbl SELECT x/10 FROM generate_series(1, 100) x; ANALYZE rls_tbl; @@ -4530,9 +4530,80 @@ EXPLAIN (COSTS OFF) SELECT * FROM rls_tbl WHERE a <<< 1000 or a <<< 900; One-Time Filter: false (2 rows) +RESET SESSION AUTHORIZATION; +CREATE TABLE rls_child_tbl () INHERITS (rls_tbl); +INSERT INTO rls_child_tbl SELECT x/10 FROM generate_series(1, 100) x; +ANALYZE rls_child_tbl; +CREATE TABLE rls_ptbl (a int) PARTITION BY RANGE (a); +CREATE TABLE rls_part PARTITION OF rls_ptbl FOR VALUES FROM (-100) TO (100); +INSERT INTO rls_ptbl SELECT x/10 FROM generate_series(1, 100) x; +ANALYZE rls_ptbl, rls_part; +ALTER TABLE rls_ptbl ENABLE ROW LEVEL SECURITY; +ALTER TABLE rls_part ENABLE ROW LEVEL SECURITY; +GRANT SELECT ON rls_ptbl TO regress_rls_alice; +GRANT SELECT ON rls_part TO regress_rls_alice; +CREATE POLICY p1 ON rls_tbl USING (a < 0); +CREATE POLICY p2 ON rls_ptbl USING (a < 0); +CREATE POLICY p3 ON rls_part USING (a < 0); +SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM rls_tbl WHERE a <<< 1000; + a +--- +(0 rows) + +SELECT * FROM rls_child_tbl WHERE a <<< 1000; +ERROR: permission denied for table rls_child_tbl +SELECT * FROM rls_ptbl WHERE a <<< 1000; + a +--- +(0 rows) + +SELECT * FROM rls_part WHERE a <<< 1000; + a +--- +(0 rows) + +SELECT * FROM (SELECT * FROM rls_tbl UNION ALL + SELECT * FROM rls_tbl) t WHERE a <<< 1000; + a +--- +(0 rows) + +SELECT * FROM (SELECT * FROM rls_child_tbl UNION ALL + SELECT * FROM rls_child_tbl) t WHERE a <<< 1000; +ERROR: permission denied for table rls_child_tbl +RESET SESSION AUTHORIZATION; +REVOKE SELECT ON rls_tbl FROM regress_rls_alice; +CREATE VIEW rls_tbl_view AS SELECT * FROM rls_tbl; +ALTER TABLE rls_child_tbl ENABLE ROW LEVEL SECURITY; +GRANT SELECT ON rls_child_tbl TO regress_rls_alice; +CREATE POLICY p4 ON rls_child_tbl USING (a < 0); +SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM rls_tbl WHERE a <<< 1000; +ERROR: permission denied for table rls_tbl +SELECT * FROM rls_tbl_view WHERE a <<< 1000; +ERROR: permission denied for view rls_tbl_view +SELECT * FROM rls_child_tbl WHERE a <<< 1000; + a +--- +(0 rows) + +SELECT * FROM (SELECT * FROM rls_tbl UNION ALL + SELECT * FROM rls_tbl) t WHERE a <<< 1000; +ERROR: permission denied for table rls_tbl +SELECT * FROM (SELECT * FROM rls_child_tbl UNION ALL + SELECT * FROM rls_child_tbl) t WHERE a <<< 1000; + a +--- +(0 rows) + DROP OPERATOR <<< (int, int); DROP FUNCTION op_leak(int, int); RESET SESSION AUTHORIZATION; +DROP TABLE rls_part; +DROP TABLE rls_ptbl; +DROP TABLE rls_child_tbl; +DROP VIEW rls_tbl_view; DROP TABLE rls_tbl; -- Bug #16006: whole-row Vars in a policy don't play nice with sub-selects SET SESSION AUTHORIZATION regress_rls_alice; diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out index dce8c672b40fe..35e8aad7701be 100644 --- a/src/test/regress/expected/rules.out +++ b/src/test/regress/expected/rules.out @@ -1977,7 +1977,12 @@ pg_stat_progress_basebackup| SELECT pid, END AS backup_total, param3 AS backup_streamed, param4 AS tablespaces_total, - param5 AS tablespaces_streamed + param5 AS tablespaces_streamed, + CASE param6 + WHEN 1 THEN 'full'::text + WHEN 2 THEN 'incremental'::text + ELSE NULL::text + END AS backup_type FROM 
pg_stat_get_progress_info('BASEBACKUP'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20); pg_stat_progress_cluster| SELECT s.pid, s.datid, @@ -2179,13 +2184,14 @@ pg_stat_subscription_stats| SELECT ss.subid, ss.confl_insert_exists, ss.confl_update_origin_differs, ss.confl_update_exists, + ss.confl_update_deleted, ss.confl_update_missing, ss.confl_delete_origin_differs, ss.confl_delete_missing, ss.confl_multiple_unique_conflicts, ss.stats_reset FROM pg_subscription s, - LATERAL pg_stat_get_subscription_stats(s.oid) ss(subid, apply_error_count, sync_error_count, confl_insert_exists, confl_update_origin_differs, confl_update_exists, confl_update_missing, confl_delete_origin_differs, confl_delete_missing, confl_multiple_unique_conflicts, stats_reset); + LATERAL pg_stat_get_subscription_stats(s.oid) ss(subid, apply_error_count, sync_error_count, confl_insert_exists, confl_update_origin_differs, confl_update_exists, confl_update_deleted, confl_update_missing, confl_delete_origin_differs, confl_delete_missing, confl_multiple_unique_conflicts, stats_reset); pg_stat_sys_indexes| SELECT relid, indexrelid, schemaname, diff --git a/src/test/regress/expected/stats_ext.out b/src/test/regress/expected/stats_ext.out index 6359e5fb689cb..a1f83b58b2398 100644 --- a/src/test/regress/expected/stats_ext.out +++ b/src/test/regress/expected/stats_ext.out @@ -54,6 +54,32 @@ CREATE STATISTICS tst ON (x || 'x'), (x || 'x'), y FROM ext_stats_test; ERROR: duplicate expression in statistics definition CREATE STATISTICS tst (unrecognized) ON x, y FROM ext_stats_test; ERROR: unrecognized statistics kind "unrecognized" +-- unsupported targets +CREATE STATISTICS tst ON a FROM (VALUES (x)) AS foo; +ERROR: cannot create statistics on the specified relation +DETAIL: CREATE STATISTICS only supports tables, foreign tables and materialized views. +CREATE STATISTICS tst ON a FROM foo NATURAL JOIN bar; +ERROR: cannot create statistics on the specified relation +DETAIL: CREATE STATISTICS only supports tables, foreign tables and materialized views. +CREATE STATISTICS tst ON a FROM (SELECT * FROM ext_stats_test) AS foo; +ERROR: cannot create statistics on the specified relation +DETAIL: CREATE STATISTICS only supports tables, foreign tables and materialized views. +CREATE STATISTICS tst ON a FROM ext_stats_test s TABLESAMPLE system (x); +ERROR: cannot create statistics on the specified relation +DETAIL: CREATE STATISTICS only supports tables, foreign tables and materialized views. +CREATE STATISTICS tst ON a FROM XMLTABLE('foo' PASSING 'bar' COLUMNS a text); +ERROR: cannot create statistics on the specified relation +DETAIL: CREATE STATISTICS only supports tables, foreign tables and materialized views. +CREATE STATISTICS tst ON a FROM JSON_TABLE(jsonb '123', '$' COLUMNS (item int)); +ERROR: cannot create statistics on the specified relation +DETAIL: CREATE STATISTICS only supports tables, foreign tables and materialized views. +CREATE FUNCTION tftest(int) returns table(a int, b int) as $$ +SELECT $1, $1+i FROM generate_series(1,5) g(i); +$$ LANGUAGE sql IMMUTABLE STRICT; +CREATE STATISTICS alt_stat2 ON a FROM tftest(1); +ERROR: cannot create statistics on the specified relation +DETAIL: CREATE STATISTICS only supports tables, foreign tables and materialized views. 
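All of the rejected targets above are derived relations; per the DETAIL text, extended statistics can be defined only on tables, foreign tables, and materialized views. For contrast, a minimal sketch of a supported case (object names hypothetical):

CREATE TABLE ext_demo (x int, y int);
INSERT INTO ext_demo SELECT i % 10, i % 10 FROM generate_series(1, 1000) i;
CREATE STATISTICS ext_demo_stat (dependencies) ON x, y FROM ext_demo;
ANALYZE ext_demo;  -- ANALYZE is what actually populates the extended statistics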
+DROP FUNCTION tftest; -- incorrect expressions CREATE STATISTICS tst ON (y) FROM ext_stats_test; -- single column reference ERROR: extended statistics require at least 2 columns @@ -3275,9 +3301,17 @@ CREATE FUNCTION op_leak(int, int) RETURNS bool LANGUAGE plpgsql; CREATE OPERATOR <<< (procedure = op_leak, leftarg = int, rightarg = int, restrict = scalarltsel); +CREATE FUNCTION op_leak(record, record) RETURNS bool + AS 'BEGIN RAISE NOTICE ''op_leak => %, %'', $1, $2; RETURN $1 < $2; END' + LANGUAGE plpgsql; +CREATE OPERATOR <<< (procedure = op_leak, leftarg = record, rightarg = record, + restrict = scalarltsel); SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied ERROR: permission denied for table priv_test_tbl -SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; +SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; -- Permission denied +ERROR: permission denied for table priv_test_tbl +SELECT * FROM tststats.priv_test_tbl t + WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Permission denied ERROR: permission denied for table priv_test_tbl DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied ERROR: permission denied for table priv_test_tbl @@ -3298,10 +3332,17 @@ SELECT * FROM tststats.priv_test_view WHERE a <<< 0 OR b <<< 0; -- Should not le ---+--- (0 rows) +SELECT * FROM tststats.priv_test_view t + WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Should not leak + a | b +---+--- +(0 rows) + DELETE FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak -- Grant table access, but hide all data with RLS RESET SESSION AUTHORIZATION; ALTER TABLE tststats.priv_test_tbl ENABLE ROW LEVEL SECURITY; +CREATE POLICY priv_test_tbl_pol ON tststats.priv_test_tbl USING (2 * a < 0); GRANT SELECT, DELETE ON tststats.priv_test_tbl TO regress_stats_user1; -- Should now have direct table access, but see nothing and leak nothing SET SESSION AUTHORIZATION regress_stats_user1; @@ -3310,12 +3351,57 @@ SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not le ---+--- (0 rows) -SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; +SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; -- Should not leak + a | b +---+--- +(0 rows) + +SELECT * FROM tststats.priv_test_tbl t + WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Should not leak a | b ---+--- (0 rows) DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak +-- Create plain inheritance parent table with no access permissions +RESET SESSION AUTHORIZATION; +CREATE TABLE tststats.priv_test_parent_tbl (a int, b int); +ALTER TABLE tststats.priv_test_tbl INHERIT tststats.priv_test_parent_tbl; +-- Should not have access to parent, and should leak nothing +SET SESSION AUTHORIZATION regress_stats_user1; +SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied +ERROR: permission denied for table priv_test_parent_tbl +SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 OR b <<< 0; -- Permission denied +ERROR: permission denied for table priv_test_parent_tbl +SELECT * FROM tststats.priv_test_parent_tbl t + WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Permission denied +ERROR: permission denied for table priv_test_parent_tbl +DELETE FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied +ERROR: permission denied for table priv_test_parent_tbl +-- Grant table access to parent, 
but hide all data with RLS +RESET SESSION AUTHORIZATION; +ALTER TABLE tststats.priv_test_parent_tbl ENABLE ROW LEVEL SECURITY; +CREATE POLICY priv_test_parent_tbl_pol ON tststats.priv_test_parent_tbl USING (2 * a < 0); +GRANT SELECT, DELETE ON tststats.priv_test_parent_tbl TO regress_stats_user1; +-- Should now have direct table access to parent, but see nothing and leak nothing +SET SESSION AUTHORIZATION regress_stats_user1; +SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak + a | b +---+--- +(0 rows) + +SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 OR b <<< 0; -- Should not leak + a | b +---+--- +(0 rows) + +SELECT * FROM tststats.priv_test_parent_tbl t + WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Should not leak + a | b +---+--- +(0 rows) + +DELETE FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak -- privilege checks for pg_stats_ext and pg_stats_ext_exprs RESET SESSION AUTHORIZATION; CREATE TABLE stats_ext_tbl (id INT PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, col TEXT); @@ -3361,11 +3447,14 @@ SELECT statistics_name, most_common_vals FROM pg_stats_ext_exprs x -- Tidy up DROP OPERATOR <<< (int, int); DROP FUNCTION op_leak(int, int); +DROP OPERATOR <<< (record, record); +DROP FUNCTION op_leak(record, record); RESET SESSION AUTHORIZATION; DROP TABLE stats_ext_tbl; DROP SCHEMA tststats CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table tststats.priv_test_tbl +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to table tststats.priv_test_parent_tbl +drop cascades to table tststats.priv_test_tbl drop cascades to view tststats.priv_test_view DROP USER regress_stats_user1; CREATE TABLE grouping_unique (x integer); diff --git a/src/test/regress/expected/stats_import.out b/src/test/regress/expected/stats_import.out index 48d6392b4ad4f..9e615ccd0af97 100644 --- a/src/test/regress/expected/stats_import.out +++ b/src/test/regress/expected/stats_import.out @@ -50,26 +50,26 @@ SELECT pg_clear_relation_stats('stats_import', 'test'); SELECT pg_catalog.pg_restore_relation_stats( 'relname', 'test', 'relpages', 17::integer); -ERROR: "schemaname" cannot be NULL +ERROR: argument "schemaname" must not be null -- error: relname missing SELECT pg_catalog.pg_restore_relation_stats( 'schemaname', 'stats_import', 'relpages', 17::integer); -ERROR: "relname" cannot be NULL +ERROR: argument "relname" must not be null --- error: schemaname is wrong type SELECT pg_catalog.pg_restore_relation_stats( 'schemaname', 3.6::float, 'relname', 'test', 'relpages', 17::integer); -WARNING: argument "schemaname" has type "double precision", expected type "text" -ERROR: "schemaname" cannot be NULL +WARNING: argument "schemaname" has type double precision, expected type text +ERROR: argument "schemaname" must not be null --- error: relname is wrong type SELECT pg_catalog.pg_restore_relation_stats( 'schemaname', 'stats_import', 'relname', 0::oid, 'relpages', 17::integer); -WARNING: argument "relname" has type "oid", expected type "text" -ERROR: "relname" cannot be NULL +WARNING: argument "relname" has type oid, expected type text +ERROR: argument "relname" must not be null -- error: relation not found SELECT pg_catalog.pg_restore_relation_stats( 'schemaname', 'stats_import', @@ -88,7 +88,7 @@ SELECT pg_restore_relation_stats( 'schemaname', 'stats_import', 'relname', 'test', NULL, '17'::integer); -ERROR: name at variadic position 5 is NULL +ERROR: name at variadic position 5 is null -- 
starting stats SELECT relpages, reltuples, relallvisible, relallfrozen FROM pg_class @@ -286,7 +286,7 @@ SELECT pg_restore_relation_stats( 'reltuples', 400.0::real, 'relallvisible', 4::integer, 'relallfrozen', 3::integer); -WARNING: argument "relpages" has type "text", expected type "integer" +WARNING: argument "relpages" has type text, expected type integer pg_restore_relation_stats --------------------------- f @@ -358,7 +358,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'attname', 'id', 'inherited', false::boolean, 'null_frac', 0.1::real); -ERROR: "schemaname" cannot be NULL +ERROR: argument "schemaname" must not be null -- error: schema does not exist SELECT pg_catalog.pg_restore_attribute_stats( 'schemaname', 'nope', @@ -373,7 +373,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'attname', 'id', 'inherited', false::boolean, 'null_frac', 0.1::real); -ERROR: "relname" cannot be NULL +ERROR: argument "relname" must not be null -- error: relname does not exist SELECT pg_catalog.pg_restore_attribute_stats( 'schemaname', 'stats_import', @@ -389,7 +389,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'attname', 'id', 'inherited', false::boolean, 'null_frac', 0.1::real); -ERROR: "relname" cannot be NULL +ERROR: argument "relname" must not be null -- error: NULL attname SELECT pg_catalog.pg_restore_attribute_stats( 'schemaname', 'stats_import', @@ -397,7 +397,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'attname', NULL, 'inherited', false::boolean, 'null_frac', 0.1::real); -ERROR: must specify either attname or attnum +ERROR: must specify either "attname" or "attnum" -- error: attname doesn't exist SELECT pg_catalog.pg_restore_attribute_stats( 'schemaname', 'stats_import', @@ -416,14 +416,14 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'attnum', 1::smallint, 'inherited', false::boolean, 'null_frac', 0.1::real); -ERROR: cannot specify both attname and attnum +ERROR: cannot specify both "attname" and "attnum" -- error: neither attname nor attnum SELECT pg_catalog.pg_restore_attribute_stats( 'schemaname', 'stats_import', 'relname', 'test', 'inherited', false::boolean, 'null_frac', 0.1::real); -ERROR: must specify either attname or attnum +ERROR: must specify either "attname" or "attnum" -- error: attribute is system column SELECT pg_catalog.pg_restore_attribute_stats( 'schemaname', 'stats_import', @@ -439,7 +439,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'attname', 'id', 'inherited', NULL::boolean, 'null_frac', 0.1::real); -ERROR: "inherited" cannot be NULL +ERROR: argument "inherited" must not be null -- ok: just the fixed values, with version, no stakinds SELECT pg_catalog.pg_restore_attribute_stats( 'schemaname', 'stats_import', @@ -527,7 +527,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'null_frac', 0.21::real, 'most_common_freqs', '{0.1,0.2,0.3}'::real[] ); -WARNING: "most_common_vals" must be specified when "most_common_freqs" is specified +WARNING: argument "most_common_vals" must be specified when argument "most_common_freqs" is specified pg_restore_attribute_stats ---------------------------- f @@ -553,7 +553,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'null_frac', 0.21::real, 'most_common_vals', '{1,2,3}'::text ); -WARNING: "most_common_freqs" must be specified when "most_common_vals" is specified +WARNING: argument "most_common_freqs" must be specified when argument "most_common_vals" is specified pg_restore_attribute_stats ---------------------------- f @@ -580,8 +580,8 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'most_common_vals', 
'{2,1,3}'::text, 'most_common_freqs', '{0.2,0.1}'::double precision[] ); -WARNING: argument "most_common_freqs" has type "double precision[]", expected type "real[]" -WARNING: "most_common_freqs" must be specified when "most_common_vals" is specified +WARNING: argument "most_common_freqs" has type double precision[], expected type real[] +WARNING: argument "most_common_freqs" must be specified when argument "most_common_vals" is specified pg_restore_attribute_stats ---------------------------- f @@ -659,7 +659,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'null_frac', 0.24::real, 'histogram_bounds', '{1,NULL,3,4}'::text ); -WARNING: "histogram_bounds" array cannot contain NULL values +WARNING: "histogram_bounds" array must not contain null values pg_restore_attribute_stats ---------------------------- f @@ -709,7 +709,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'null_frac', 0.25::real, 'elem_count_histogram', '{1,1,NULL,1,1,1,1,1}'::real[] ); -WARNING: "elem_count_histogram" array cannot contain NULL values +WARNING: argument "elem_count_histogram" array must not contain null values pg_restore_attribute_stats ---------------------------- f @@ -761,7 +761,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'range_empty_frac', 0.5::real, 'range_length_histogram', '{399,499,Infinity}'::text ); -WARNING: attribute "id" is not a range type +WARNING: column "id" is not a range type DETAIL: Cannot set STATISTIC_KIND_RANGE_LENGTH_HISTOGRAM or STATISTIC_KIND_BOUNDS_HISTOGRAM. pg_restore_attribute_stats ---------------------------- @@ -788,7 +788,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'null_frac', 0.28::real, 'range_length_histogram', '{399,499,Infinity}'::text ); -WARNING: "range_empty_frac" must be specified when "range_length_histogram" is specified +WARNING: argument "range_empty_frac" must be specified when argument "range_length_histogram" is specified pg_restore_attribute_stats ---------------------------- f @@ -814,7 +814,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'null_frac', 0.29::real, 'range_empty_frac', 0.5::real ); -WARNING: "range_length_histogram" must be specified when "range_empty_frac" is specified +WARNING: argument "range_length_histogram" must be specified when argument "range_empty_frac" is specified pg_restore_attribute_stats ---------------------------- f @@ -865,7 +865,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'null_frac', 0.31::real, 'range_bounds_histogram', '{"[-1,1)","[0,4)","[1,4)","[1,100)"}'::text ); -WARNING: attribute "id" is not a range type +WARNING: column "id" is not a range type DETAIL: Cannot set STATISTIC_KIND_RANGE_LENGTH_HISTOGRAM or STATISTIC_KIND_BOUNDS_HISTOGRAM. pg_restore_attribute_stats ---------------------------- @@ -917,7 +917,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'most_common_elems', '{3,1}'::text, 'most_common_elem_freqs', '{0.3,0.2,0.2,0.3,0.0}'::real[] ); -WARNING: unable to determine element type of attribute "arange" +WARNING: could not determine element type of column "arange" DETAIL: Cannot set STATISTIC_KIND_MCELEM or STATISTIC_KIND_DECHIST. pg_restore_attribute_stats ---------------------------- @@ -945,7 +945,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'most_common_elems', '{1,3}'::text, 'most_common_elem_freqs', '{0.3,0.2,0.2,0.3,0.0}'::real[] ); -WARNING: unable to determine element type of attribute "id" +WARNING: could not determine element type of column "id" DETAIL: Cannot set STATISTIC_KIND_MCELEM or STATISTIC_KIND_DECHIST. 
pg_restore_attribute_stats ---------------------------- @@ -972,7 +972,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'null_frac', 0.34::real, 'most_common_elems', '{one,two}'::text ); -WARNING: "most_common_elem_freqs" must be specified when "most_common_elems" is specified +WARNING: argument "most_common_elem_freqs" must be specified when argument "most_common_elems" is specified pg_restore_attribute_stats ---------------------------- f @@ -998,7 +998,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'null_frac', 0.35::real, 'most_common_elem_freqs', '{0.3,0.2,0.2,0.3}'::real[] ); -WARNING: "most_common_elems" must be specified when "most_common_elem_freqs" is specified +WARNING: argument "most_common_elems" must be specified when argument "most_common_elem_freqs" is specified pg_restore_attribute_stats ---------------------------- f @@ -1049,7 +1049,7 @@ SELECT pg_catalog.pg_restore_attribute_stats( 'null_frac', 0.36::real, 'elem_count_histogram', '{1,1,1,1,1,1,1,1,1,1}'::real[] ); -WARNING: unable to determine element type of attribute "id" +WARNING: could not determine element type of column "id" DETAIL: Cannot set STATISTIC_KIND_MCELEM or STATISTIC_KIND_DECHIST. pg_restore_attribute_stats ---------------------------- diff --git a/src/test/regress/expected/strings.out b/src/test/regress/expected/strings.out index 1bfd33de3f3c3..ba302da51e7b2 100644 --- a/src/test/regress/expected/strings.out +++ b/src/test/regress/expected/strings.out @@ -2090,6 +2090,40 @@ SELECT c FROM toasttest; x (1 row) +DROP TABLE toasttest; +-- test with short varlenas (up to 126 data bytes reduced to a 1-byte header) +-- being toasted. +CREATE TABLE toasttest (f1 text, f2 text); +ALTER TABLE toasttest SET (toast_tuple_target = 128); +ALTER TABLE toasttest ALTER COLUMN f1 SET STORAGE EXTERNAL; +ALTER TABLE toasttest ALTER COLUMN f2 SET STORAGE EXTERNAL; +-- Here, the first value is a varlena large enough to make it toasted and +-- stored uncompressed. The second value is a short varlena, toasted +-- and stored uncompressed. +INSERT INTO toasttest values(repeat('1234', 1000), repeat('5678', 30)); +SELECT reltoastrelid::regclass AS reltoastname FROM pg_class + WHERE oid = 'toasttest'::regclass \gset +-- There should be two values inserted in the toast relation. +SELECT count(*) FROM :reltoastname WHERE chunk_seq = 0; + count +------- + 2 +(1 row) + +SELECT substr(f1, 5, 10) AS f1_data, substr(f2, 5, 10) AS f2_data + FROM toasttest; + f1_data | f2_data +------------+------------ + 1234123412 | 5678567856 +(1 row) + +SELECT pg_column_compression(f1) AS f1_comp, pg_column_compression(f2) AS f2_comp + FROM toasttest; + f1_comp | f2_comp +---------+--------- + | +(1 row) + DROP TABLE toasttest; -- -- test length diff --git a/src/test/regress/expected/subscription.out b/src/test/regress/expected/subscription.out index a98c97f761689..c7f1266fc2f9d 100644 --- a/src/test/regress/expected/subscription.out +++ b/src/test/regress/expected/subscription.out @@ -116,18 +116,18 @@ CREATE SUBSCRIPTION regress_testsub4 CONNECTION 'dbname=regress_doesnotexist' PU WARNING: subscription was created, but is not connected HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. \dRs+ regress_testsub4 - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? 
| Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN -------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------ - regress_testsub4 | regress_subscription_user | f | {testpub} | f | parallel | d | f | none | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub4 | regress_subscription_user | f | {testpub} | f | parallel | d | f | none | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist | 0/00000000 (1 row) ALTER SUBSCRIPTION regress_testsub4 SET (origin = any); \dRs+ regress_testsub4 - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN -------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------ - regress_testsub4 | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub4 | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist | 0/00000000 (1 row) DROP SUBSCRIPTION regress_testsub3; @@ -145,10 +145,10 @@ ALTER SUBSCRIPTION regress_testsub CONNECTION 'foobar'; ERROR: invalid connection string syntax: missing "=" after "foobar" in connection info string \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? 
| Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------ - regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist | 0/00000000 (1 row) ALTER SUBSCRIPTION regress_testsub SET PUBLICATION testpub2, testpub3 WITH (refresh = false); @@ -157,10 +157,10 @@ ALTER SUBSCRIPTION regress_testsub SET (slot_name = 'newname'); ALTER SUBSCRIPTION regress_testsub SET (password_required = false); ALTER SUBSCRIPTION regress_testsub SET (run_as_owner = true); \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+------------------------------+------------ - regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | f | t | f | f | off | dbname=regress_doesnotexist2 | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+------------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | f | t | f | f | 0 | f | off | dbname=regress_doesnotexist2 | 0/00000000 (1 row) ALTER SUBSCRIPTION regress_testsub SET (password_required = true); @@ -176,10 +176,10 @@ ERROR: unrecognized subscription parameter: "create_slot" -- ok ALTER SUBSCRIPTION regress_testsub SKIP (lsn = '0/12345'); \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? 
| Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+------------------------------+------------ - regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist2 | 0/00012345 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+------------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist2 | 0/00012345 (1 row) -- ok - with lsn = NONE @@ -188,10 +188,10 @@ ALTER SUBSCRIPTION regress_testsub SKIP (lsn = NONE); ALTER SUBSCRIPTION regress_testsub SKIP (lsn = '0/0'); ERROR: invalid WAL location (LSN): 0/0 \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+------------------------------+------------ - regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist2 | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+------------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist2 | 0/00000000 (1 row) BEGIN; @@ -223,10 +223,10 @@ ALTER SUBSCRIPTION regress_testsub_foo SET (synchronous_commit = foobar); ERROR: invalid value for parameter "synchronous_commit": "foobar" HINT: Available values: local, remote_write, remote_apply, on, off. \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? 
| Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ----------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+------------------------------+------------ - regress_testsub_foo | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | f | local | dbname=regress_doesnotexist2 | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +---------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+------------------------------+------------ + regress_testsub_foo | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | f | 0 | f | local | dbname=regress_doesnotexist2 | 0/00000000 (1 row) -- rename back to keep the rest simple @@ -255,19 +255,19 @@ CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUB WARNING: subscription was created, but is not connected HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------ - regress_testsub | regress_subscription_user | f | {testpub} | t | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub} | t | parallel | d | f | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist | 0/00000000 (1 row) ALTER SUBSCRIPTION regress_testsub SET (binary = false); ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? 
| Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------ - regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist | 0/00000000 (1 row) DROP SUBSCRIPTION regress_testsub; @@ -279,27 +279,27 @@ CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUB WARNING: subscription was created, but is not connected HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------ - regress_testsub | regress_subscription_user | f | {testpub} | f | on | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub} | f | on | d | f | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist | 0/00000000 (1 row) ALTER SUBSCRIPTION regress_testsub SET (streaming = parallel); \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? 
| Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------ - regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist | 0/00000000 (1 row) ALTER SUBSCRIPTION regress_testsub SET (streaming = false); ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------ - regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist | 0/00000000 (1 row) -- fail - publication already exists @@ -314,10 +314,10 @@ ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub2 WITH (refr ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub2 WITH (refresh = false); ERROR: publication "testpub1" is already in subscription "regress_testsub" \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? 
| Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ------------------+---------------------------+---------+-----------------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------ - regress_testsub | regress_subscription_user | f | {testpub,testpub1,testpub2} | f | off | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-----------------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub,testpub1,testpub2} | f | off | d | f | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist | 0/00000000 (1 row) -- fail - publication used more than once @@ -332,10 +332,10 @@ ERROR: publication "testpub3" is not in subscription "regress_testsub" -- ok - delete publications ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub1, testpub2 WITH (refresh = false); \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------ - regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist | 0/00000000 (1 row) DROP SUBSCRIPTION regress_testsub; @@ -371,19 +371,19 @@ CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUB WARNING: subscription was created, but is not connected HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? 
| Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------ - regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | p | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | p | f | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist | 0/00000000 (1 row) -- we can alter streaming when two_phase enabled ALTER SUBSCRIPTION regress_testsub SET (streaming = true); \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------ - regress_testsub | regress_subscription_user | f | {testpub} | f | on | p | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub} | f | on | p | f | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist | 0/00000000 (1 row) ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); @@ -393,10 +393,10 @@ CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUB WARNING: subscription was created, but is not connected HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? 
| Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------ - regress_testsub | regress_subscription_user | f | {testpub} | f | on | p | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub} | f | on | p | f | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist | 0/00000000 (1 row) ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); @@ -409,18 +409,18 @@ CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUB WARNING: subscription was created, but is not connected HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------ - regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist | 0/00000000 (1 row) ALTER SUBSCRIPTION regress_testsub SET (disable_on_error = true); \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? 
| Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------ - regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | t | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | t | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist | 0/00000000 (1 row) ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); @@ -433,10 +433,36 @@ CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUB WARNING: subscription was created, but is not connected HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. \dRs+ - List of subscriptions - Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN ------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------ - regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? 
| Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist | 0/00000000 +(1 row) + +ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); +DROP SUBSCRIPTION regress_testsub; +-- fail - max_retention_duration must be integer +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, max_retention_duration = foo); +ERROR: max_retention_duration requires an integer value +-- ok +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, max_retention_duration = 1000); +NOTICE: max_retention_duration is ineffective when retain_dead_tuples is disabled +WARNING: subscription was created, but is not connected +HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. +\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | 1000 | f | off | dbname=regress_doesnotexist | 0/00000000 +(1 row) + +-- ok +ALTER SUBSCRIPTION regress_testsub SET (max_retention_duration = 0); +\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Max retention duration | Retention active | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------+------------------+--------------------+-----------------------------+------------ + regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | 0 | f | off | dbname=regress_doesnotexist | 0/00000000 (1 row) ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); diff --git a/src/test/regress/expected/subselect.out b/src/test/regress/expected/subselect.out index 18fed63e7381a..7a1c216a0b1b7 100644 --- a/src/test/regress/expected/subselect.out +++ b/src/test/regress/expected/subselect.out @@ -707,6 +707,212 @@ select * from numeric_table 3 (4 rows) +-- +-- Test that a semijoin implemented by unique-ifying the RHS can explore +-- different paths of the RHS rel. 
+-- +create table semijoin_unique_tbl (a int, b int); +insert into semijoin_unique_tbl select i%10, i%10 from generate_series(1,1000)i; +create index on semijoin_unique_tbl(a, b); +analyze semijoin_unique_tbl; +-- Ensure that we get a plan with Unique + IndexScan +explain (verbose, costs off) +select * from semijoin_unique_tbl t1, semijoin_unique_tbl t2 +where (t1.a, t2.a) in (select a, b from semijoin_unique_tbl t3) +order by t1.a, t2.a; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Nested Loop + Output: t1.a, t1.b, t2.a, t2.b + -> Merge Join + Output: t1.a, t1.b, t3.b + Merge Cond: (t3.a = t1.a) + -> Unique + Output: t3.a, t3.b + -> Index Only Scan using semijoin_unique_tbl_a_b_idx on public.semijoin_unique_tbl t3 + Output: t3.a, t3.b + -> Index Only Scan using semijoin_unique_tbl_a_b_idx on public.semijoin_unique_tbl t1 + Output: t1.a, t1.b + -> Memoize + Output: t2.a, t2.b + Cache Key: t3.b + Cache Mode: logical + -> Index Only Scan using semijoin_unique_tbl_a_b_idx on public.semijoin_unique_tbl t2 + Output: t2.a, t2.b + Index Cond: (t2.a = t3.b) +(18 rows) + +-- Ensure that we can unique-ify expressions more complex than plain Vars +explain (verbose, costs off) +select * from semijoin_unique_tbl t1, semijoin_unique_tbl t2 +where (t1.a, t2.a) in (select a+1, b+1 from semijoin_unique_tbl t3) +order by t1.a, t2.a; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Incremental Sort + Output: t1.a, t1.b, t2.a, t2.b + Sort Key: t1.a, t2.a + Presorted Key: t1.a + -> Merge Join + Output: t1.a, t1.b, t2.a, t2.b + Merge Cond: (t1.a = ((t3.a + 1))) + -> Index Only Scan using semijoin_unique_tbl_a_b_idx on public.semijoin_unique_tbl t1 + Output: t1.a, t1.b + -> Sort + Output: t2.a, t2.b, t3.a, ((t3.a + 1)) + Sort Key: ((t3.a + 1)) + -> Hash Join + Output: t2.a, t2.b, t3.a, (t3.a + 1) + Hash Cond: (t2.a = (t3.b + 1)) + -> Seq Scan on public.semijoin_unique_tbl t2 + Output: t2.a, t2.b + -> Hash + Output: t3.a, t3.b + -> HashAggregate + Output: t3.a, t3.b + Group Key: (t3.a + 1), (t3.b + 1) + -> Seq Scan on public.semijoin_unique_tbl t3 + Output: t3.a, t3.b, (t3.a + 1), (t3.b + 1) +(24 rows) + +-- encourage use of parallel plans +set parallel_setup_cost=0; +set parallel_tuple_cost=0; +set min_parallel_table_scan_size=0; +set max_parallel_workers_per_gather=4; +set enable_indexscan to off; +-- Ensure that we get a parallel plan for the unique-ification +explain (verbose, costs off) +select * from semijoin_unique_tbl t1, semijoin_unique_tbl t2 +where (t1.a, t2.a) in (select a, b from semijoin_unique_tbl t3) +order by t1.a, t2.a; + QUERY PLAN +---------------------------------------------------------------------------------------- + Nested Loop + Output: t1.a, t1.b, t2.a, t2.b + -> Merge Join + Output: t1.a, t1.b, t3.b + Merge Cond: (t3.a = t1.a) + -> Unique + Output: t3.a, t3.b + -> Gather Merge + Output: t3.a, t3.b + Workers Planned: 2 + -> Sort + Output: t3.a, t3.b + Sort Key: t3.a, t3.b + -> HashAggregate + Output: t3.a, t3.b + Group Key: t3.a, t3.b + -> Parallel Seq Scan on public.semijoin_unique_tbl t3 + Output: t3.a, t3.b + -> Materialize + Output: t1.a, t1.b + -> Gather Merge + Output: t1.a, t1.b + Workers Planned: 2 + -> Sort + Output: t1.a, t1.b + Sort Key: t1.a + -> Parallel Seq Scan on public.semijoin_unique_tbl t1 + Output: t1.a, t1.b + -> Memoize + Output: t2.a, t2.b + Cache Key: t3.b + Cache Mode: logical + -> Bitmap Heap Scan on 
public.semijoin_unique_tbl t2 + Output: t2.a, t2.b + Recheck Cond: (t2.a = t3.b) + -> Bitmap Index Scan on semijoin_unique_tbl_a_b_idx + Index Cond: (t2.a = t3.b) +(37 rows) + +reset enable_indexscan; +reset max_parallel_workers_per_gather; +reset min_parallel_table_scan_size; +reset parallel_tuple_cost; +reset parallel_setup_cost; +drop table semijoin_unique_tbl; +create table unique_tbl_p (a int, b int) partition by range(a); +create table unique_tbl_p1 partition of unique_tbl_p for values from (0) to (5); +create table unique_tbl_p2 partition of unique_tbl_p for values from (5) to (10); +create table unique_tbl_p3 partition of unique_tbl_p for values from (10) to (20); +insert into unique_tbl_p select i%12, i from generate_series(0, 1000)i; +create index on unique_tbl_p1(a); +create index on unique_tbl_p2(a); +create index on unique_tbl_p3(a); +analyze unique_tbl_p; +set enable_partitionwise_join to on; +-- Ensure that the unique-ification works for partition-wise join +explain (verbose, costs off) +select * from unique_tbl_p t1, unique_tbl_p t2 +where (t1.a, t2.a) in (select a, a from unique_tbl_p t3) +order by t1.a, t2.a; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Merge Append + Sort Key: t1.a + -> Nested Loop + Output: t1_1.a, t1_1.b, t2_1.a, t2_1.b + -> Nested Loop + Output: t1_1.a, t1_1.b, t3_1.a + -> Unique + Output: t3_1.a + -> Index Only Scan using unique_tbl_p1_a_idx on public.unique_tbl_p1 t3_1 + Output: t3_1.a + -> Index Scan using unique_tbl_p1_a_idx on public.unique_tbl_p1 t1_1 + Output: t1_1.a, t1_1.b + Index Cond: (t1_1.a = t3_1.a) + -> Memoize + Output: t2_1.a, t2_1.b + Cache Key: t1_1.a + Cache Mode: logical + -> Index Scan using unique_tbl_p1_a_idx on public.unique_tbl_p1 t2_1 + Output: t2_1.a, t2_1.b + Index Cond: (t2_1.a = t1_1.a) + -> Nested Loop + Output: t1_2.a, t1_2.b, t2_2.a, t2_2.b + -> Nested Loop + Output: t1_2.a, t1_2.b, t3_2.a + -> Unique + Output: t3_2.a + -> Index Only Scan using unique_tbl_p2_a_idx on public.unique_tbl_p2 t3_2 + Output: t3_2.a + -> Index Scan using unique_tbl_p2_a_idx on public.unique_tbl_p2 t1_2 + Output: t1_2.a, t1_2.b + Index Cond: (t1_2.a = t3_2.a) + -> Memoize + Output: t2_2.a, t2_2.b + Cache Key: t1_2.a + Cache Mode: logical + -> Index Scan using unique_tbl_p2_a_idx on public.unique_tbl_p2 t2_2 + Output: t2_2.a, t2_2.b + Index Cond: (t2_2.a = t1_2.a) + -> Nested Loop + Output: t1_3.a, t1_3.b, t2_3.a, t2_3.b + -> Nested Loop + Output: t1_3.a, t1_3.b, t3_3.a + -> Unique + Output: t3_3.a + -> Sort + Output: t3_3.a + Sort Key: t3_3.a + -> Seq Scan on public.unique_tbl_p3 t3_3 + Output: t3_3.a + -> Index Scan using unique_tbl_p3_a_idx on public.unique_tbl_p3 t1_3 + Output: t1_3.a, t1_3.b + Index Cond: (t1_3.a = t3_3.a) + -> Memoize + Output: t2_3.a, t2_3.b + Cache Key: t1_3.a + Cache Mode: logical + -> Index Scan using unique_tbl_p3_a_idx on public.unique_tbl_p3 t2_3 + Output: t2_3.a, t2_3.b + Index Cond: (t2_3.a = t1_3.a) +(59 rows) + +reset enable_partitionwise_join; +drop table unique_tbl_p; -- -- Test case for bug #4290: bogus calculation of subplan param sets -- @@ -773,6 +979,25 @@ select (select (a.*)::text) from view_a a; (42) (1 row) +-- +-- Test case for bug #19037: no relation entry for relid N +-- +explain (costs off) +select (1 = any(array_agg(f1))) = any (select false) from int4_tbl; + QUERY PLAN +---------------------------- + Aggregate + -> Seq Scan on int4_tbl + SubPlan 1 + -> Result +(4 rows) + +select (1 = any(array_agg(f1))) = any (select false) 
from int4_tbl; + ?column? +---------- + t +(1 row) + -- -- Check that whole-row Vars reading the result of a subselect don't include -- any junk columns therein @@ -1467,14 +1692,14 @@ select * from int4_tbl o where (f1, f1) in ------------------------------------------------------------------- Nested Loop Semi Join Output: o.f1 - Join Filter: (o.f1 = "ANY_subquery".f1) + Join Filter: (o.f1 = unnamed_subquery.f1) -> Seq Scan on public.int4_tbl o Output: o.f1 -> Materialize - Output: "ANY_subquery".f1, "ANY_subquery".g - -> Subquery Scan on "ANY_subquery" - Output: "ANY_subquery".f1, "ANY_subquery".g - Filter: ("ANY_subquery".f1 = "ANY_subquery".g) + Output: unnamed_subquery.f1, unnamed_subquery.g + -> Subquery Scan on unnamed_subquery + Output: unnamed_subquery.f1, unnamed_subquery.g + Filter: (unnamed_subquery.f1 = unnamed_subquery.g) -> Result Output: i.f1, ((generate_series(1, 50)) / 10) -> ProjectSet @@ -2642,8 +2867,8 @@ ON B.hundred in (SELECT min(c.hundred) FROM tenk2 C WHERE c.odd = b.odd); -> Memoize Cache Key: b.hundred, b.odd Cache Mode: binary - -> Subquery Scan on "ANY_subquery" - Filter: (b.hundred = "ANY_subquery".min) + -> Subquery Scan on unnamed_subquery + Filter: (b.hundred = unnamed_subquery.min) -> Result InitPlan 1 -> Limit @@ -2672,18 +2897,17 @@ EXPLAIN (COSTS OFF) SELECT * FROM onek WHERE (unique1,ten) IN (VALUES (1,1), (20,0), (99,9), (17,99)) ORDER BY unique1; - QUERY PLAN ------------------------------------------------------------------ - Sort - Sort Key: onek.unique1 - -> Nested Loop - -> HashAggregate - Group Key: "*VALUES*".column1, "*VALUES*".column2 + QUERY PLAN +---------------------------------------------------------------- + Nested Loop + -> Unique + -> Sort + Sort Key: "*VALUES*".column1, "*VALUES*".column2 -> Values Scan on "*VALUES*" - -> Index Scan using onek_unique1 on onek - Index Cond: (unique1 = "*VALUES*".column1) - Filter: ("*VALUES*".column2 = ten) -(9 rows) + -> Index Scan using onek_unique1 on onek + Index Cond: (unique1 = "*VALUES*".column1) + Filter: ("*VALUES*".column2 = ten) +(8 rows) EXPLAIN (COSTS OFF) SELECT * FROM onek @@ -2858,12 +3082,10 @@ SELECT ten FROM onek WHERE unique1 IN (VALUES (1), (2) ORDER BY 1); -> Unique -> Sort Sort Key: "*VALUES*".column1 - -> Sort - Sort Key: "*VALUES*".column1 - -> Values Scan on "*VALUES*" + -> Values Scan on "*VALUES*" -> Index Scan using onek_unique1 on onek Index Cond: (unique1 = "*VALUES*".column1) -(9 rows) +(7 rows) EXPLAIN (COSTS OFF) SELECT ten FROM onek WHERE unique1 IN (VALUES (1), (2) LIMIT 1); diff --git a/src/test/regress/expected/timestamp.out b/src/test/regress/expected/timestamp.out index 6aaa19c8f4e46..14a9f5b56a690 100644 --- a/src/test/regress/expected/timestamp.out +++ b/src/test/regress/expected/timestamp.out @@ -591,6 +591,16 @@ SELECT date_trunc( 'week', timestamp '2004-02-29 15:44:17.71393' ) AS week_trunc Mon Feb 23 00:00:00 2004 (1 row) +SELECT date_trunc( 'week', timestamp 'infinity' ) AS inf_trunc; + inf_trunc +----------- + infinity +(1 row) + +SELECT date_trunc( 'timezone', timestamp '2004-02-29 15:44:17.71393' ) AS notsupp_trunc; +ERROR: unit "timezone" not supported for type timestamp without time zone +SELECT date_trunc( 'timezone', timestamp 'infinity' ) AS notsupp_inf_trunc; +ERROR: unit "timezone" not supported for type timestamp without time zone SELECT date_trunc( 'ago', timestamp 'infinity' ) AS invalid_trunc; ERROR: unit "ago" not recognized for type timestamp without time zone -- verify date_bin behaves the same as date_trunc for relevant intervals diff 
--git a/src/test/regress/expected/timestamptz.out b/src/test/regress/expected/timestamptz.out index 2a69953ff25ed..5dc8a621f6c07 100644 --- a/src/test/regress/expected/timestamptz.out +++ b/src/test/regress/expected/timestamptz.out @@ -760,6 +760,16 @@ SELECT date_trunc( 'week', timestamp with time zone '2004-02-29 15:44:17.71393' Mon Feb 23 00:00:00 2004 PST (1 row) +SELECT date_trunc( 'week', timestamp with time zone 'infinity' ) AS inf_trunc; + inf_trunc +----------- + infinity +(1 row) + +SELECT date_trunc( 'timezone', timestamp with time zone '2004-02-29 15:44:17.71393' ) AS notsupp_trunc; +ERROR: unit "timezone" not supported for type timestamp with time zone +SELECT date_trunc( 'timezone', timestamp with time zone 'infinity' ) AS notsupp_inf_trunc; +ERROR: unit "timezone" not supported for type timestamp with time zone SELECT date_trunc( 'ago', timestamp with time zone 'infinity' ) AS invalid_trunc; ERROR: unit "ago" not recognized for type timestamp with time zone SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'Australia/Sydney') as sydney_trunc; -- zone name @@ -780,6 +790,14 @@ SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'VET Thu Feb 15 20:00:00 2001 PST (1 row) +SELECT date_trunc('timezone', timestamp with time zone 'infinity', 'GMT') AS notsupp_zone_trunc; +ERROR: unit "timezone" not supported for type timestamp with time zone +SELECT date_trunc( 'week', timestamp with time zone 'infinity', 'GMT') AS inf_zone_trunc; + inf_zone_trunc +---------------- + infinity +(1 row) + SELECT date_trunc('ago', timestamp with time zone 'infinity', 'GMT') AS invalid_zone_trunc; ERROR: unit "ago" not recognized for type timestamp with time zone -- verify date_bin behaves the same as date_trunc for relevant intervals diff --git a/src/test/regress/expected/triggers.out b/src/test/regress/expected/triggers.out index 872b9100e1a01..1eb8fba095370 100644 --- a/src/test/regress/expected/triggers.out +++ b/src/test/regress/expected/triggers.out @@ -2769,6 +2769,10 @@ NOTICE: trigger = child3_delete_trig, old table = (42,CCC) -- copy into parent sees parent-format tuples copy parent (a, b) from stdin; NOTICE: trigger = parent_insert_trig, new table = (AAA,42), (BBB,42), (CCC,42) +-- check detach/reattach behavior; statement triggers with transition tables +-- should not prevent a table from becoming a partition again +alter table parent detach partition child1; +alter table parent attach partition child1 for values in ('AAA'); -- DML affecting parent sees tuples collected from children even if -- there is no transition table trigger on the children drop trigger child1_insert_trig on child1; @@ -2966,6 +2970,10 @@ NOTICE: trigger = parent_insert_trig, new table = (AAA,42), (BBB,42), (CCC,42) create index on parent(b); copy parent (a, b) from stdin; NOTICE: trigger = parent_insert_trig, new table = (DDD,42) +-- check disinherit/reinherit behavior; statement triggers with transition +-- tables should not prevent a table from becoming an inheritance child again +alter table child1 no inherit parent; +alter table child1 inherit parent; -- DML affecting parent sees tuples collected from children even if -- there is no transition table trigger on the children drop trigger child1_insert_trig on child1; diff --git a/src/test/regress/expected/union.out b/src/test/regress/expected/union.out index 96962817ed45a..d3ea433db1577 100644 --- a/src/test/regress/expected/union.out +++ b/src/test/regress/expected/union.out @@ -942,7 +942,7 @@ SELECT q1 FROM 
int8_tbl EXCEPT SELECT q2 FROM int8_tbl ORDER BY q2 LIMIT 1; ERROR: column "q2" does not exist LINE 1: ... int8_tbl EXCEPT SELECT q2 FROM int8_tbl ORDER BY q2 LIMIT 1... ^ -DETAIL: There is a column named "q2" in table "*SELECT* 2", but it cannot be referenced from this part of the query. +DETAIL: There is a column named "q2" in table "unnamed_subquery", but it cannot be referenced from this part of the query. -- But this should work: SELECT q1 FROM int8_tbl EXCEPT (((SELECT q2 FROM int8_tbl ORDER BY q2 LIMIT 1))) ORDER BY 1; q1 @@ -1338,14 +1338,14 @@ where q2 = q2; ---------------------------------------------------------- Unique -> Merge Append - Sort Key: "*SELECT* 1".q1 - -> Subquery Scan on "*SELECT* 1" + Sort Key: unnamed_subquery.q1 + -> Subquery Scan on unnamed_subquery -> Unique -> Sort Sort Key: i81.q1, i81.q2 -> Seq Scan on int8_tbl i81 Filter: (q2 IS NOT NULL) - -> Subquery Scan on "*SELECT* 2" + -> Subquery Scan on unnamed_subquery_1 -> Unique -> Sort Sort Key: i82.q1, i82.q2 @@ -1374,14 +1374,14 @@ where -q1 = q2; -------------------------------------------------------- Unique -> Merge Append - Sort Key: "*SELECT* 1".q1 - -> Subquery Scan on "*SELECT* 1" + Sort Key: unnamed_subquery.q1 + -> Subquery Scan on unnamed_subquery -> Unique -> Sort Sort Key: i81.q1, i81.q2 -> Seq Scan on int8_tbl i81 Filter: ((- q1) = q2) - -> Subquery Scan on "*SELECT* 2" + -> Subquery Scan on unnamed_subquery_1 -> Unique -> Sort Sort Key: i82.q1, i82.q2 diff --git a/src/test/regress/expected/vacuum.out b/src/test/regress/expected/vacuum.out index 0abcc99989e07..85c783e2e56ce 100644 --- a/src/test/regress/expected/vacuum.out +++ b/src/test/regress/expected/vacuum.out @@ -686,3 +686,49 @@ RESET ROLE; DROP TABLE vacowned; DROP TABLE vacowned_parted; DROP ROLE regress_vacuum; +-- Test checking how new toast values are allocated on rewrite. +-- Create table with plain storage (forces inline storage initially). +CREATE TABLE vac_rewrite_toast (id int, f1 TEXT STORAGE plain); +-- Insert tuple large enough to trigger toast storage on rewrite, still +-- small enough to fit on a page. +INSERT INTO vac_rewrite_toast values (1, repeat('a', 7000)); +-- Switch to external storage to force toast table usage. +ALTER TABLE vac_rewrite_toast ALTER COLUMN f1 SET STORAGE EXTERNAL; +-- This second tuple is toasted, its value should still be the +-- same after rewrite. +INSERT INTO vac_rewrite_toast values (2, repeat('a', 7000)); +SELECT pg_column_toast_chunk_id(f1) AS id_2_chunk FROM vac_rewrite_toast + WHERE id = 2 \gset +-- Check initial state of the data. +SELECT id, pg_column_toast_chunk_id(f1) IS NULL AS f1_chunk_null, + substr(f1, 5, 10) AS f1_data, + pg_column_compression(f1) AS f1_comp + FROM vac_rewrite_toast ORDER BY id; + id | f1_chunk_null | f1_data | f1_comp +----+---------------+------------+--------- + 1 | t | aaaaaaaaaa | + 2 | f | aaaaaaaaaa | +(2 rows) + +-- VACUUM FULL forces toast data rewrite. +VACUUM FULL vac_rewrite_toast; +-- Check after rewrite. +SELECT id, pg_column_toast_chunk_id(f1) IS NULL AS f1_chunk_null, + substr(f1, 5, 10) AS f1_data, + pg_column_compression(f1) AS f1_comp + FROM vac_rewrite_toast ORDER BY id; + id | f1_chunk_null | f1_data | f1_comp +----+---------------+------------+--------- + 1 | f | aaaaaaaaaa | + 2 | f | aaaaaaaaaa | +(2 rows) + +-- The same value is reused for the tuple toasted before the rewrite. 
+SELECT pg_column_toast_chunk_id(f1) = :'id_2_chunk' AS same_chunk + FROM vac_rewrite_toast WHERE id = 2; + same_chunk +------------ + t +(1 row) + +DROP TABLE vac_rewrite_toast; diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c index 3dbba06902405..465ac148ac9f7 100644 --- a/src/test/regress/regress.c +++ b/src/test/regress/regress.c @@ -727,7 +727,7 @@ PG_FUNCTION_INFO_V1(is_catalog_text_unique_index_oid); Datum is_catalog_text_unique_index_oid(PG_FUNCTION_ARGS) { - return IsCatalogTextUniqueIndexOid(PG_GETARG_OID(0)); + return BoolGetDatum(IsCatalogTextUniqueIndexOid(PG_GETARG_OID(0))); } PG_FUNCTION_INFO_V1(test_support_func); diff --git a/src/test/regress/sql/aggregates.sql b/src/test/regress/sql/aggregates.sql index 277b4b198ccc4..62540b1ffa4eb 100644 --- a/src/test/regress/sql/aggregates.sql +++ b/src/test/regress/sql/aggregates.sql @@ -182,6 +182,11 @@ SELECT newcnt(*) AS cnt_1000 FROM onek; SELECT oldcnt(*) AS cnt_1000 FROM onek; SELECT sum2(q1,q2) FROM int8_tbl; +-- sanity checks +SELECT sum(q1+q2), sum(q1)+sum(q2) FROM int8_tbl; +SELECT sum(q1-q2), sum(q2-q1), sum(q1)-sum(q2) FROM int8_tbl; +SELECT sum(q1*2000), sum(-q1*2000), 2000*sum(q1) FROM int8_tbl; + -- test for outer-level aggregates -- this should work @@ -1505,15 +1510,6 @@ select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*) from unnest(array['a','b']) u(v) group by v||'a' order by 1; --- Make sure that generation of HashAggregate for uniqification purposes --- does not lead to array overflow due to unexpected duplicate hash keys --- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com -set enable_memoize to off; -explain (costs off) - select 1 from tenk1 - where (hundred, thousand) in (select twothousand, twothousand from onek); -reset enable_memoize; - -- -- Hash Aggregation Spill tests -- diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql index fc6e36d0e7882..90bf5c1768238 100644 --- a/src/test/regress/sql/alter_table.sql +++ b/src/test/regress/sql/alter_table.sql @@ -2202,13 +2202,15 @@ SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment F -- filenode function call can return NULL for a relation dropped concurrently -- with the call's surrounding query, so ignore a NULL mapped_oid for -- relations that no longer exist after all calls finish. +-- Temporary relations are ignored, as not supported by pg_filenode_relation(). CREATE TEMP TABLE filenode_mapping AS SELECT oid, mapped_oid, reltablespace, relfilenode, relname FROM pg_class, pg_filenode_relation(reltablespace, pg_relation_filenode(oid)) AS mapped_oid -WHERE relkind IN ('r', 'i', 'S', 't', 'm') AND mapped_oid IS DISTINCT FROM oid; - +WHERE relkind IN ('r', 'i', 'S', 't', 'm') + AND relpersistence != 't' + AND mapped_oid IS DISTINCT FROM oid; SELECT m.* FROM filenode_mapping m LEFT JOIN pg_class c ON c.oid = m.oid WHERE c.oid IS NOT NULL OR m.mapped_oid IS NOT NULL; diff --git a/src/test/regress/sql/create_function_sql.sql b/src/test/regress/sql/create_function_sql.sql index 6d1c102d78082..3d5f2a92093bf 100644 --- a/src/test/regress/sql/create_function_sql.sql +++ b/src/test/regress/sql/create_function_sql.sql @@ -432,6 +432,23 @@ $$ SELECT array_append($1, $2) || array_append($1, $2) $$; SELECT double_append(array_append(ARRAY[q1], q2), q3) FROM (VALUES(1,2,3), (4,5,6)) v(q1,q2,q3); +-- Check that we can re-use a SQLFunctionCache after a run-time error. + +-- This function will fail with zero-divide at run time (not plan time). 
+CREATE FUNCTION part_hashint4_error(value int4, seed int8) RETURNS int8 +LANGUAGE SQL STRICT IMMUTABLE PARALLEL SAFE AS +$$ SELECT value + seed + random()::int/0 $$; + +-- Put it into an operator class so that FmgrInfo will be cached in relcache. +CREATE OPERATOR CLASS part_test_int4_ops_bad FOR TYPE int4 USING hash AS + FUNCTION 2 part_hashint4_error(int4, int8); + +CREATE TABLE pt(i int) PARTITION BY hash (i part_test_int4_ops_bad); +CREATE TABLE p1 PARTITION OF pt FOR VALUES WITH (modulus 4, remainder 0); + +INSERT INTO pt VALUES (1); +INSERT INTO pt VALUES (1); + -- Things that shouldn't work: CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL diff --git a/src/test/regress/sql/create_index.sql b/src/test/regress/sql/create_index.sql index e21ff426519b0..eabc9623b2061 100644 --- a/src/test/regress/sql/create_index.sql +++ b/src/test/regress/sql/create_index.sql @@ -635,7 +635,7 @@ DROP TABLE cwi_test; CREATE TABLE syscol_table (a INT); -- System columns cannot be indexed -CREATE INDEX ON syscolcol_table (ctid); +CREATE INDEX ON syscol_table (ctid); -- nor used in expressions CREATE INDEX ON syscol_table ((ctid >= '(1000,0)')); diff --git a/src/test/regress/sql/create_table.sql b/src/test/regress/sql/create_table.sql index 37a227148e9c2..9b3e93b416426 100644 --- a/src/test/regress/sql/create_table.sql +++ b/src/test/regress/sql/create_table.sql @@ -68,6 +68,14 @@ CREATE TABLE withoid() WITH (oids = true); CREATE TEMP TABLE withoutoid() WITHOUT OIDS; DROP TABLE withoutoid; CREATE TEMP TABLE withoutoid() WITH (oids = false); DROP TABLE withoutoid; +-- temporary tables are ignored by pg_filenode_relation(). +CREATE TEMP TABLE relation_filenode_check(c1 int); +SELECT relpersistence, + pg_filenode_relation (reltablespace, pg_relation_filenode(oid)) + FROM pg_class + WHERE relname = 'relation_filenode_check'; +DROP TABLE relation_filenode_check; + -- check restriction with default expressions -- invalid use of column reference in default expressions CREATE TABLE default_expr_column (id int DEFAULT (id)); diff --git a/src/test/regress/sql/create_table_like.sql b/src/test/regress/sql/create_table_like.sql index bf8702116a74b..93389b57dbf95 100644 --- a/src/test/regress/sql/create_table_like.sql +++ b/src/test/regress/sql/create_table_like.sql @@ -130,6 +130,7 @@ DROP TABLE inhz; -- including storage and comments CREATE TABLE ctlt1 (a text CHECK (length(a) > 2) ENFORCED PRIMARY KEY, b text CHECK (length(b) > 100) NOT ENFORCED); +ALTER TABLE ctlt1 ADD CONSTRAINT cc CHECK (length(b) > 100) NOT VALID; CREATE INDEX ctlt1_b_key ON ctlt1 (b); CREATE INDEX ctlt1_fnidx ON ctlt1 ((a || b)); CREATE STATISTICS ctlt1_a_b_stat ON a,b FROM ctlt1; diff --git a/src/test/regress/sql/enum.sql b/src/test/regress/sql/enum.sql index ecc4878a6782a..803ccad6a6b03 100644 --- a/src/test/regress/sql/enum.sql +++ b/src/test/regress/sql/enum.sql @@ -23,6 +23,9 @@ SELECT * FROM pg_input_error_info('mauve', 'rainbow'); SELECT * FROM pg_input_error_info(repeat('too_long', 32), 'rainbow'); \x +-- check for duplicate enum entries +CREATE TYPE dup_enum AS ENUM ('foo','bar','foo'); + -- -- adding new values -- diff --git a/src/test/regress/sql/foreign_key.sql b/src/test/regress/sql/foreign_key.sql index cfcecb4e911ad..39174ad1eb9a0 100644 --- a/src/test/regress/sql/foreign_key.sql +++ b/src/test/regress/sql/foreign_key.sql @@ -1296,7 +1296,7 @@ UPDATE fk_notpartitioned_pk SET b = 2504 WHERE a = 2500; -- check psql behavior \d fk_notpartitioned_pk --- Check the exsting FK trigger +-- Check the existing FK trigger SELECT conname, 
tgrelid::regclass as tgrel, regexp_replace(tgname, '[0-9]+', 'N') as tgname, tgtype FROM pg_trigger t JOIN pg_constraint c ON (t.tgconstraint = c.oid) WHERE tgrelid IN (SELECT relid FROM pg_partition_tree('fk_partitioned_fk'::regclass) diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql index 5f0a475894ddc..b1732453e8d3d 100644 --- a/src/test/regress/sql/join.sql +++ b/src/test/regress/sql/join.sql @@ -839,6 +839,13 @@ explain (costs off) select a.* from tenk1 a left join tenk1 b on a.unique1 = b.unique2 where b.unique2 is null; +-- check that we avoid de-duplicating columns redundantly +set enable_memoize to off; +explain (costs off) +select 1 from tenk1 +where (hundred, thousand) in (select twothousand, twothousand from onek); +reset enable_memoize; + -- -- regression test for bogus RTE_GROUP entries -- @@ -2420,6 +2427,69 @@ where t1.a = s.c; rollback; +-- check handling of semijoins after join removal: we must suppress +-- unique-ification of known-constant values +begin; + +create temp table t (a int unique, b int); +insert into t values (1, 2); + +explain (verbose, costs off) +select t1.a from t t1 + left join t t2 on t1.a = t2.a + join t t3 on true +where exists (select 1 from t t4 + join t t5 on t4.b = t5.b + join t t6 on t5.b = t6.b + where t1.a = t4.a and t3.a = t5.a and t4.a = 1); + +select t1.a from t t1 + left join t t2 on t1.a = t2.a + join t t3 on true +where exists (select 1 from t t4 + join t t5 on t4.b = t5.b + join t t6 on t5.b = t6.b + where t1.a = t4.a and t3.a = t5.a and t4.a = 1); + +rollback; + +-- check handling of semijoins if all RHS columns are equated to constants: we +-- should suppress unique-ification in this case. +begin; + +create temp table t (a int, b int); +insert into t values (1, 2); + +explain (costs off) +select * from t t1, t t2 where exists + (select 1 from t t3 where t1.a = t3.a and t2.b = t3.b and t3.a = 1 and t3.b = 2); + +select * from t t1, t t2 where exists + (select 1 from t t3 where t1.a = t3.a and t2.b = t3.b and t3.a = 1 and t3.b = 2); + +rollback; + +-- check handling of semijoin unique-ification for child relations if all RHS +-- columns are equated to constants.
+begin; + +create temp table p (a int, b int) partition by range (a); +create temp table p1 partition of p for values from (0) to (10); +create temp table p2 partition of p for values from (10) to (20); +insert into p values (1, 2); +insert into p values (10, 20); + +set enable_partitionwise_join to on; + +explain (costs off) +select * from p t1 where exists + (select 1 from p t2 where t1.a = t2.a and t1.a = 1); + +select * from p t1 where exists + (select 1 from p t2 where t1.a = t2.a and t1.a = 1); + +rollback; + -- test cases where we can remove a join, but not a PHV computed at it begin; diff --git a/src/test/regress/sql/predicate.sql b/src/test/regress/sql/predicate.sql index d92277353a019..32302d60b6d04 100644 --- a/src/test/regress/sql/predicate.sql +++ b/src/test/regress/sql/predicate.sql @@ -201,3 +201,23 @@ SELECT * FROM pred_tab t1 DROP TABLE pred_tab; DROP TABLE pred_tab_notnull; + +-- Validate that NullTest quals in constraint expressions are reduced correctly +CREATE TABLE pred_tab1 (a int NOT NULL, b int, + CONSTRAINT check_tab1 CHECK (a IS NULL OR b > 2)); +CREATE TABLE pred_tab2 (a int, b int, + CONSTRAINT check_a CHECK (a IS NOT NULL)); + +SET constraint_exclusion TO ON; + +-- Ensure that we get a dummy plan +EXPLAIN (COSTS OFF) +SELECT * FROM pred_tab1, pred_tab2 WHERE pred_tab2.a IS NULL; + +-- Ensure that we get a dummy plan +EXPLAIN (COSTS OFF) +SELECT * FROM pred_tab2, pred_tab1 WHERE pred_tab1.a IS NULL OR pred_tab1.b < 2; + +RESET constraint_exclusion; +DROP TABLE pred_tab1; +DROP TABLE pred_tab2; diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql index 3eacc1340aad2..fe409654c0e98 100644 --- a/src/test/regress/sql/privileges.sql +++ b/src/test/regress/sql/privileges.sql @@ -346,8 +346,6 @@ CREATE VIEW atest12v AS SELECT * FROM atest12 WHERE b <<< 5; CREATE VIEW atest12sbv WITH (security_barrier=true) AS SELECT * FROM atest12 WHERE b <<< 5; -GRANT SELECT ON atest12v TO PUBLIC; -GRANT SELECT ON atest12sbv TO PUBLIC; -- This plan should use nestloop, knowing that few rows will be selected. EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b; @@ -369,8 +367,16 @@ CREATE FUNCTION leak2(integer,integer) RETURNS boolean CREATE OPERATOR >>> (procedure = leak2, leftarg = integer, rightarg = integer, restrict = scalargtsel); --- This should not show any "leak" notices before failing. +-- These should not show any "leak" notices before failing. EXPLAIN (COSTS OFF) SELECT * FROM atest12 WHERE a >>> 0; +EXPLAIN (COSTS OFF) SELECT * FROM atest12v WHERE a >>> 0; +EXPLAIN (COSTS OFF) SELECT * FROM atest12sbv WHERE a >>> 0; + +-- Now regress_priv_user1 grants access to regress_priv_user2 via the views. +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT SELECT ON atest12v TO PUBLIC; +GRANT SELECT ON atest12sbv TO PUBLIC; +SET SESSION AUTHORIZATION regress_priv_user2; -- These plans should continue to use a nestloop, since they execute with the -- privileges of the view owner. @@ -1857,6 +1863,13 @@ DROP USER regress_priv_user7; DROP USER regress_priv_user8; -- does not exist +-- leave some default ACLs for pg_upgrade's dump-restore test input. 
+ALTER DEFAULT PRIVILEGES FOR ROLE pg_signal_backend + REVOKE USAGE ON TYPES FROM pg_signal_backend; +ALTER DEFAULT PRIVILEGES FOR ROLE pg_read_all_settings + REVOKE USAGE ON TYPES FROM pg_read_all_settings; + + -- permissions with LOCK TABLE CREATE USER regress_locktable_user; CREATE TABLE lock_table (a int); diff --git a/src/test/regress/sql/psql.sql b/src/test/regress/sql/psql.sql index e2e3124543978..f064e4f545607 100644 --- a/src/test/regress/sql/psql.sql +++ b/src/test/regress/sql/psql.sql @@ -1073,6 +1073,7 @@ select \if false \\ (bogus \else \\ 42 \endif \\ forty_two; \pset arg1 arg2 \q \reset + \restrict test \s arg1 \sendpipeline \set arg1 arg2 arg3 arg4 arg5 arg6 arg7 @@ -1084,6 +1085,7 @@ select \if false \\ (bogus \else \\ 42 \endif \\ forty_two; \t arg1 \T arg1 \timing arg1 + \unrestrict not_valid \unset arg1 \w arg1 \watch arg1 arg2 diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/sql/publication.sql index c9e309190dfa6..3f42306139533 100644 --- a/src/test/regress/sql/publication.sql +++ b/src/test/regress/sql/publication.sql @@ -26,6 +26,7 @@ CREATE PUBLICATION testpub_xxx WITH (publish = 'cluster, vacuum'); CREATE PUBLICATION testpub_xxx WITH (publish_via_partition_root = 'true', publish_via_partition_root = '0'); CREATE PUBLICATION testpub_xxx WITH (publish_generated_columns = stored, publish_generated_columns = none); CREATE PUBLICATION testpub_xxx WITH (publish_generated_columns = foo); +CREATE PUBLICATION testpub_xxx WITH (publish_generated_columns); \dRp @@ -1183,19 +1184,15 @@ DROP SCHEMA sch2 cascade; -- ====================================================== -- Test the 'publish_generated_columns' parameter with the following values: --- 'stored', 'none', and the default (no value specified), which defaults to --- 'stored'. +-- 'stored', 'none'. SET client_min_messages = 'ERROR'; CREATE PUBLICATION pub1 FOR ALL TABLES WITH (publish_generated_columns = stored); \dRp+ pub1 CREATE PUBLICATION pub2 FOR ALL TABLES WITH (publish_generated_columns = none); \dRp+ pub2 -CREATE PUBLICATION pub3 FOR ALL TABLES WITH (publish_generated_columns); -\dRp+ pub3 DROP PUBLICATION pub1; DROP PUBLICATION pub2; -DROP PUBLICATION pub3; -- Test the 'publish_generated_columns' parameter as 'none' and 'stored' for -- different scenarios with/without generated columns in column lists. @@ -1226,6 +1223,86 @@ DROP PUBLICATION pub2; DROP TABLE gencols; RESET client_min_messages; + +-- Test that the INSERT ON CONFLICT command correctly checks REPLICA IDENTITY +-- when the target table is published.
+CREATE TABLE testpub_insert_onconfl_no_ri (a int unique, b int); +CREATE TABLE testpub_insert_onconfl_parted (a int unique, b int) PARTITION by RANGE (a); +CREATE TABLE testpub_insert_onconfl_part_no_ri PARTITION OF testpub_insert_onconfl_parted FOR VALUES FROM (1) TO (10); + +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION pub1 FOR ALL TABLES; +RESET client_min_messages; + +-- fail - missing REPLICA IDENTITY +INSERT INTO testpub_insert_onconfl_no_ri VALUES (1, 1) ON CONFLICT (a) DO UPDATE SET b = 2; + +-- ok - no updates +INSERT INTO testpub_insert_onconfl_no_ri VALUES (1, 1) ON CONFLICT DO NOTHING; + +-- fail - missing REPLICA IDENTITY in partition testpub_insert_onconfl_part_no_ri +INSERT INTO testpub_insert_onconfl_parted VALUES (1, 1) ON CONFLICT (a) DO UPDATE SET b = 2; + +-- ok - no updates +INSERT INTO testpub_insert_onconfl_parted VALUES (1, 1) ON CONFLICT DO NOTHING; + +DROP PUBLICATION pub1; +DROP TABLE testpub_insert_onconfl_no_ri; +DROP TABLE testpub_insert_onconfl_parted; + +-- Test that the MERGE command correctly checks REPLICA IDENTITY when the +-- target table is published. +CREATE TABLE testpub_merge_no_ri (a int, b int); +CREATE TABLE testpub_merge_pk (a int primary key, b int); + +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION pub1 FOR ALL TABLES; +RESET client_min_messages; + +-- fail - missing REPLICA IDENTITY +MERGE INTO testpub_merge_no_ri USING testpub_merge_pk s ON s.a >= 1 + WHEN MATCHED THEN UPDATE SET b = s.b; + +-- fail - missing REPLICA IDENTITY +MERGE INTO testpub_merge_no_ri USING testpub_merge_pk s ON s.a >= 1 + WHEN MATCHED THEN DELETE; + +-- ok - insert and do nothing are not restricted +MERGE INTO testpub_merge_no_ri USING testpub_merge_pk s ON s.a >= 1 + WHEN MATCHED THEN DO NOTHING + WHEN NOT MATCHED THEN INSERT (a, b) VALUES (0, 0); + +-- ok - REPLICA IDENTITY is DEFAULT and table has a PK +MERGE INTO testpub_merge_pk USING testpub_merge_no_ri s ON s.a >= 1 + WHEN MATCHED AND s.a > 0 THEN UPDATE SET b = s.b + WHEN MATCHED THEN DELETE; + +DROP PUBLICATION pub1; +DROP TABLE testpub_merge_no_ri; +DROP TABLE testpub_merge_pk; + RESET SESSION AUTHORIZATION; DROP ROLE regress_publication_user, regress_publication_user2; DROP ROLE regress_publication_user_dummy; + +-- stage objects for pg_dump tests +CREATE SCHEMA pubme CREATE TABLE t0 (c int, d int) CREATE TABLE t1 (c int); +CREATE SCHEMA pubme2 CREATE TABLE t0 (c int, d int); +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION dump_pub_qual_1ct FOR + TABLE ONLY pubme.t0 (c, d) WHERE (c > 0); +CREATE PUBLICATION dump_pub_qual_2ct FOR + TABLE ONLY pubme.t0 (c) WHERE (c > 0), + TABLE ONLY pubme.t1 (c); +CREATE PUBLICATION dump_pub_nsp_1ct FOR + TABLES IN SCHEMA pubme; +CREATE PUBLICATION dump_pub_nsp_2ct FOR + TABLES IN SCHEMA pubme, + TABLES IN SCHEMA pubme2; +CREATE PUBLICATION dump_pub_all FOR + TABLE ONLY pubme.t0, + TABLE ONLY pubme.t1 WHERE (c < 0), + TABLES IN SCHEMA pubme, + TABLES IN SCHEMA pubme2 + WITH (publish_via_partition_root = true); +RESET client_min_messages; diff --git a/src/test/regress/sql/random.sql b/src/test/regress/sql/random.sql index ebfa7539ede25..890f14687ef98 100644 --- a/src/test/regress/sql/random.sql +++ b/src/test/regress/sql/random.sql @@ -277,3 +277,29 @@ SELECT random(-1e30, 1e30) FROM generate_series(1, 10); SELECT random(-0.4, 0.4) FROM generate_series(1, 10); SELECT random(0, 1 - 1e-30) FROM generate_series(1, 10); SELECT n, random(0, trim_scale(abs(1 - 10.0^(-n)))) FROM generate_series(-20, 20) n; + +-- random dates +SELECT
random('1979-02-08'::date,'2025-07-03'::date) AS random_date_multiple_years; +SELECT random('4714-11-24 BC'::date,'5874897-12-31 AD'::date) AS random_date_maximum_range; +SELECT random('1979-02-08'::date,'1979-02-08'::date) AS random_date_empty_range; +SELECT random('2024-12-31'::date, '2024-01-01'::date); -- fail +SELECT random('-infinity'::date, '2024-01-01'::date); -- fail +SELECT random('2024-12-31'::date, 'infinity'::date); -- fail + +-- random timestamps +SELECT random('1979-02-08'::timestamp,'2025-07-03'::timestamp) AS random_timestamp_multiple_years; +SELECT random('4714-11-24 BC'::timestamp,'294276-12-31 23:59:59.999999'::timestamp) AS random_timestamp_maximum_range; +SELECT random('2024-07-01 12:00:00.000001'::timestamp, '2024-07-01 12:00:00.999999'::timestamp) AS random_narrow_range; +SELECT random('1979-02-08'::timestamp,'1979-02-08'::timestamp) AS random_timestamp_empty_range; +SELECT random('2024-12-31'::timestamp, '2024-01-01'::timestamp); -- fail +SELECT random('-infinity'::timestamp, '2024-01-01'::timestamp); -- fail +SELECT random('2024-12-31'::timestamp, 'infinity'::timestamp); -- fail + +-- random timestamps with timezone +SELECT random('1979-02-08 +01'::timestamptz,'2025-07-03 +02'::timestamptz) AS random_timestamptz_multiple_years; +SELECT random('4714-11-24 BC +00'::timestamptz,'294276-12-31 23:59:59.999999 +00'::timestamptz) AS random_timestamptz_maximum_range; +SELECT random('2024-07-01 12:00:00.000001 +04'::timestamptz, '2024-07-01 12:00:00.999999 +04'::timestamptz) AS random_timestamptz_narrow_range; +SELECT random('1979-02-08 +05'::timestamptz,'1979-02-08 +05'::timestamptz) AS random_timestamptz_empty_range; +SELECT random('2024-01-01 +06'::timestamptz, '2024-01-01 +07'::timestamptz); -- fail +SELECT random('-infinity'::timestamptz, '2024-01-01 +07'::timestamptz); -- fail +SELECT random('2024-01-01 +06'::timestamptz, 'infinity'::timestamptz); -- fail diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql index 9da967a9ef2f5..21ac0ca51eed1 100644 --- a/src/test/regress/sql/rowsecurity.sql +++ b/src/test/regress/sql/rowsecurity.sql @@ -2189,7 +2189,7 @@ DROP VIEW rls_view; DROP TABLE rls_tbl; DROP TABLE ref_tbl; --- Leaky operator test +-- Leaky operator tests CREATE TABLE rls_tbl (a int); INSERT INTO rls_tbl SELECT x/10 FROM generate_series(1, 100) x; ANALYZE rls_tbl; @@ -2205,9 +2205,58 @@ CREATE OPERATOR <<< (procedure = op_leak, leftarg = int, rightarg = int, restrict = scalarltsel); SELECT * FROM rls_tbl WHERE a <<< 1000; EXPLAIN (COSTS OFF) SELECT * FROM rls_tbl WHERE a <<< 1000 or a <<< 900; +RESET SESSION AUTHORIZATION; + +CREATE TABLE rls_child_tbl () INHERITS (rls_tbl); +INSERT INTO rls_child_tbl SELECT x/10 FROM generate_series(1, 100) x; +ANALYZE rls_child_tbl; + +CREATE TABLE rls_ptbl (a int) PARTITION BY RANGE (a); +CREATE TABLE rls_part PARTITION OF rls_ptbl FOR VALUES FROM (-100) TO (100); +INSERT INTO rls_ptbl SELECT x/10 FROM generate_series(1, 100) x; +ANALYZE rls_ptbl, rls_part; + +ALTER TABLE rls_ptbl ENABLE ROW LEVEL SECURITY; +ALTER TABLE rls_part ENABLE ROW LEVEL SECURITY; +GRANT SELECT ON rls_ptbl TO regress_rls_alice; +GRANT SELECT ON rls_part TO regress_rls_alice; +CREATE POLICY p1 ON rls_tbl USING (a < 0); +CREATE POLICY p2 ON rls_ptbl USING (a < 0); +CREATE POLICY p3 ON rls_part USING (a < 0); + +SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM rls_tbl WHERE a <<< 1000; +SELECT * FROM rls_child_tbl WHERE a <<< 1000; +SELECT * FROM rls_ptbl WHERE a <<< 1000; +SELECT * FROM rls_part WHERE a 
<<< 1000; +SELECT * FROM (SELECT * FROM rls_tbl UNION ALL + SELECT * FROM rls_tbl) t WHERE a <<< 1000; +SELECT * FROM (SELECT * FROM rls_child_tbl UNION ALL + SELECT * FROM rls_child_tbl) t WHERE a <<< 1000; +RESET SESSION AUTHORIZATION; + +REVOKE SELECT ON rls_tbl FROM regress_rls_alice; +CREATE VIEW rls_tbl_view AS SELECT * FROM rls_tbl; + +ALTER TABLE rls_child_tbl ENABLE ROW LEVEL SECURITY; +GRANT SELECT ON rls_child_tbl TO regress_rls_alice; +CREATE POLICY p4 ON rls_child_tbl USING (a < 0); + +SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM rls_tbl WHERE a <<< 1000; +SELECT * FROM rls_tbl_view WHERE a <<< 1000; +SELECT * FROM rls_child_tbl WHERE a <<< 1000; +SELECT * FROM (SELECT * FROM rls_tbl UNION ALL + SELECT * FROM rls_tbl) t WHERE a <<< 1000; +SELECT * FROM (SELECT * FROM rls_child_tbl UNION ALL + SELECT * FROM rls_child_tbl) t WHERE a <<< 1000; DROP OPERATOR <<< (int, int); DROP FUNCTION op_leak(int, int); RESET SESSION AUTHORIZATION; +DROP TABLE rls_part; +DROP TABLE rls_ptbl; +DROP TABLE rls_child_tbl; +DROP VIEW rls_tbl_view; DROP TABLE rls_tbl; -- Bug #16006: whole-row Vars in a policy don't play nice with sub-selects diff --git a/src/test/regress/sql/stats_ext.sql b/src/test/regress/sql/stats_ext.sql index da4f2fe9c938f..823c7db9dab49 100644 --- a/src/test/regress/sql/stats_ext.sql +++ b/src/test/regress/sql/stats_ext.sql @@ -40,6 +40,18 @@ CREATE STATISTICS tst ON x, x, y, x, x, (x || 'x'), (y + 1), (x || 'x'), (x || ' CREATE STATISTICS tst ON (x || 'x'), (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1) FROM ext_stats_test; CREATE STATISTICS tst ON (x || 'x'), (x || 'x'), y FROM ext_stats_test; CREATE STATISTICS tst (unrecognized) ON x, y FROM ext_stats_test; +-- unsupported targets +CREATE STATISTICS tst ON a FROM (VALUES (x)) AS foo; +CREATE STATISTICS tst ON a FROM foo NATURAL JOIN bar; +CREATE STATISTICS tst ON a FROM (SELECT * FROM ext_stats_test) AS foo; +CREATE STATISTICS tst ON a FROM ext_stats_test s TABLESAMPLE system (x); +CREATE STATISTICS tst ON a FROM XMLTABLE('foo' PASSING 'bar' COLUMNS a text); +CREATE STATISTICS tst ON a FROM JSON_TABLE(jsonb '123', '$' COLUMNS (item int)); +CREATE FUNCTION tftest(int) returns table(a int, b int) as $$ +SELECT $1, $1+i FROM generate_series(1,5) g(i); +$$ LANGUAGE sql IMMUTABLE STRICT; +CREATE STATISTICS alt_stat2 ON a FROM tftest(1); +DROP FUNCTION tftest; -- incorrect expressions CREATE STATISTICS tst ON (y) FROM ext_stats_test; -- single column reference CREATE STATISTICS tst ON y + z FROM ext_stats_test; -- missing parentheses @@ -1647,8 +1659,15 @@ CREATE FUNCTION op_leak(int, int) RETURNS bool LANGUAGE plpgsql; CREATE OPERATOR <<< (procedure = op_leak, leftarg = int, rightarg = int, restrict = scalarltsel); +CREATE FUNCTION op_leak(record, record) RETURNS bool + AS 'BEGIN RAISE NOTICE ''op_leak => %, %'', $1, $2; RETURN $1 < $2; END' + LANGUAGE plpgsql; +CREATE OPERATOR <<< (procedure = op_leak, leftarg = record, rightarg = record, + restrict = scalarltsel); SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied -SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; +SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; -- Permission denied +SELECT * FROM tststats.priv_test_tbl t + WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Permission denied DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied -- Grant access via a security barrier view, but hide all data @@ -1661,19 
+1680,51 @@ GRANT SELECT, DELETE ON tststats.priv_test_view TO regress_stats_user1; SET SESSION AUTHORIZATION regress_stats_user1; SELECT * FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak SELECT * FROM tststats.priv_test_view WHERE a <<< 0 OR b <<< 0; -- Should not leak +SELECT * FROM tststats.priv_test_view t + WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Should not leak DELETE FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak -- Grant table access, but hide all data with RLS RESET SESSION AUTHORIZATION; ALTER TABLE tststats.priv_test_tbl ENABLE ROW LEVEL SECURITY; +CREATE POLICY priv_test_tbl_pol ON tststats.priv_test_tbl USING (2 * a < 0); GRANT SELECT, DELETE ON tststats.priv_test_tbl TO regress_stats_user1; -- Should now have direct table access, but see nothing and leak nothing SET SESSION AUTHORIZATION regress_stats_user1; SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak -SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; +SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; -- Should not leak +SELECT * FROM tststats.priv_test_tbl t + WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Should not leak DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak +-- Create plain inheritance parent table with no access permissions +RESET SESSION AUTHORIZATION; +CREATE TABLE tststats.priv_test_parent_tbl (a int, b int); +ALTER TABLE tststats.priv_test_tbl INHERIT tststats.priv_test_parent_tbl; + +-- Should not have access to parent, and should leak nothing +SET SESSION AUTHORIZATION regress_stats_user1; +SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied +SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 OR b <<< 0; -- Permission denied +SELECT * FROM tststats.priv_test_parent_tbl t + WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Permission denied +DELETE FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied + +-- Grant table access to parent, but hide all data with RLS +RESET SESSION AUTHORIZATION; +ALTER TABLE tststats.priv_test_parent_tbl ENABLE ROW LEVEL SECURITY; +CREATE POLICY priv_test_parent_tbl_pol ON tststats.priv_test_parent_tbl USING (2 * a < 0); +GRANT SELECT, DELETE ON tststats.priv_test_parent_tbl TO regress_stats_user1; + +-- Should now have direct table access to parent, but see nothing and leak nothing +SET SESSION AUTHORIZATION regress_stats_user1; +SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak +SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 OR b <<< 0; -- Should not leak +SELECT * FROM tststats.priv_test_parent_tbl t + WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Should not leak +DELETE FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak + -- privilege checks for pg_stats_ext and pg_stats_ext_exprs RESET SESSION AUTHORIZATION; CREATE TABLE stats_ext_tbl (id INT PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, col TEXT); @@ -1703,6 +1754,8 @@ SELECT statistics_name, most_common_vals FROM pg_stats_ext_exprs x -- Tidy up DROP OPERATOR <<< (int, int); DROP FUNCTION op_leak(int, int); +DROP OPERATOR <<< (record, record); +DROP FUNCTION op_leak(record, record); RESET SESSION AUTHORIZATION; DROP TABLE stats_ext_tbl; DROP SCHEMA tststats CASCADE; diff --git a/src/test/regress/sql/strings.sql b/src/test/regress/sql/strings.sql index 
92c445c243961..b94004cc08ce6 100644 --- a/src/test/regress/sql/strings.sql +++ b/src/test/regress/sql/strings.sql @@ -650,6 +650,26 @@ SELECT length(c), c::text FROM toasttest; SELECT c FROM toasttest; DROP TABLE toasttest; +-- test with short varlenas (up to 126 data bytes reduced to a 1-byte header) +-- being toasted. +CREATE TABLE toasttest (f1 text, f2 text); +ALTER TABLE toasttest SET (toast_tuple_target = 128); +ALTER TABLE toasttest ALTER COLUMN f1 SET STORAGE EXTERNAL; +ALTER TABLE toasttest ALTER COLUMN f2 SET STORAGE EXTERNAL; +-- Here, the first value is a varlena large enough to make it toasted and +-- stored uncompressed. The second value is a short varlena, toasted +-- and stored uncompressed. +INSERT INTO toasttest values(repeat('1234', 1000), repeat('5678', 30)); +SELECT reltoastrelid::regclass AS reltoastname FROM pg_class + WHERE oid = 'toasttest'::regclass \gset +-- There should be two values inserted in the toast relation. +SELECT count(*) FROM :reltoastname WHERE chunk_seq = 0; +SELECT substr(f1, 5, 10) AS f1_data, substr(f2, 5, 10) AS f2_data + FROM toasttest; +SELECT pg_column_compression(f1) AS f1_comp, pg_column_compression(f2) AS f2_comp + FROM toasttest; +DROP TABLE toasttest; + -- -- test length -- diff --git a/src/test/regress/sql/subscription.sql b/src/test/regress/sql/subscription.sql index f0f714fe747a9..ef0c298d2df7b 100644 --- a/src/test/regress/sql/subscription.sql +++ b/src/test/regress/sql/subscription.sql @@ -298,6 +298,22 @@ CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUB ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); DROP SUBSCRIPTION regress_testsub; +-- fail - max_retention_duration must be integer +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, max_retention_duration = foo); + +-- ok +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, max_retention_duration = 1000); + +\dRs+ + +-- ok +ALTER SUBSCRIPTION regress_testsub SET (max_retention_duration = 0); + +\dRs+ + +ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); +DROP SUBSCRIPTION regress_testsub; + -- let's do some tests with pg_create_subscription rather than superuser SET SESSION AUTHORIZATION regress_subscription_user3; diff --git a/src/test/regress/sql/subselect.sql b/src/test/regress/sql/subselect.sql index d9a841fbc9ffd..8ccebbe51e06c 100644 --- a/src/test/regress/sql/subselect.sql +++ b/src/test/regress/sql/subselect.sql @@ -361,6 +361,73 @@ select * from float_table select * from numeric_table where num_col in (select float_col from float_table); +-- +-- Test that a semijoin implemented by unique-ifying the RHS can explore +-- different paths of the RHS rel. 
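+-- (Here "unique-ifying" means the planner de-duplicates the subquery output
+-- and then joins to it as an ordinary inner join, so the de-duplicated side
+-- can still choose among index, sequential, or parallel scans.)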
+-- + +create table semijoin_unique_tbl (a int, b int); +insert into semijoin_unique_tbl select i%10, i%10 from generate_series(1,1000)i; +create index on semijoin_unique_tbl(a, b); +analyze semijoin_unique_tbl; + +-- Ensure that we get a plan with Unique + IndexScan +explain (verbose, costs off) +select * from semijoin_unique_tbl t1, semijoin_unique_tbl t2 +where (t1.a, t2.a) in (select a, b from semijoin_unique_tbl t3) +order by t1.a, t2.a; + +-- Ensure that we can unique-ify expressions more complex than plain Vars +explain (verbose, costs off) +select * from semijoin_unique_tbl t1, semijoin_unique_tbl t2 +where (t1.a, t2.a) in (select a+1, b+1 from semijoin_unique_tbl t3) +order by t1.a, t2.a; + +-- encourage use of parallel plans +set parallel_setup_cost=0; +set parallel_tuple_cost=0; +set min_parallel_table_scan_size=0; +set max_parallel_workers_per_gather=4; + +set enable_indexscan to off; + +-- Ensure that we get a parallel plan for the unique-ification +explain (verbose, costs off) +select * from semijoin_unique_tbl t1, semijoin_unique_tbl t2 +where (t1.a, t2.a) in (select a, b from semijoin_unique_tbl t3) +order by t1.a, t2.a; + +reset enable_indexscan; + +reset max_parallel_workers_per_gather; +reset min_parallel_table_scan_size; +reset parallel_tuple_cost; +reset parallel_setup_cost; + +drop table semijoin_unique_tbl; + +create table unique_tbl_p (a int, b int) partition by range(a); +create table unique_tbl_p1 partition of unique_tbl_p for values from (0) to (5); +create table unique_tbl_p2 partition of unique_tbl_p for values from (5) to (10); +create table unique_tbl_p3 partition of unique_tbl_p for values from (10) to (20); +insert into unique_tbl_p select i%12, i from generate_series(0, 1000)i; +create index on unique_tbl_p1(a); +create index on unique_tbl_p2(a); +create index on unique_tbl_p3(a); +analyze unique_tbl_p; + +set enable_partitionwise_join to on; + +-- Ensure that the unique-ification works for partition-wise join +explain (verbose, costs off) +select * from unique_tbl_p t1, unique_tbl_p t2 +where (t1.a, t2.a) in (select a, a from unique_tbl_p t3) +order by t1.a, t2.a; + +reset enable_partitionwise_join; + +drop table unique_tbl_p; + -- -- Test case for bug #4290: bogus calculation of subplan param sets -- @@ -412,6 +479,15 @@ select (select view_a) from view_a; select (select (select view_a)) from view_a; select (select (a.*)::text) from view_a a; +-- +-- Test case for bug #19037: no relation entry for relid N +-- + +explain (costs off) +select (1 = any(array_agg(f1))) = any (select false) from int4_tbl; + +select (1 = any(array_agg(f1))) = any (select false) from int4_tbl; + -- -- Check that whole-row Vars reading the result of a subselect don't include -- any junk columns therein diff --git a/src/test/regress/sql/timestamp.sql b/src/test/regress/sql/timestamp.sql index 55f80530ea05e..313757ed041a1 100644 --- a/src/test/regress/sql/timestamp.sql +++ b/src/test/regress/sql/timestamp.sql @@ -175,7 +175,9 @@ SELECT d1 - timestamp without time zone '1997-01-02' AS diff FROM TIMESTAMP_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01'; SELECT date_trunc( 'week', timestamp '2004-02-29 15:44:17.71393' ) AS week_trunc; - +SELECT date_trunc( 'week', timestamp 'infinity' ) AS inf_trunc; +SELECT date_trunc( 'timezone', timestamp '2004-02-29 15:44:17.71393' ) AS notsupp_trunc; +SELECT date_trunc( 'timezone', timestamp 'infinity' ) AS notsupp_inf_trunc; SELECT date_trunc( 'ago', timestamp 'infinity' ) AS invalid_trunc; -- verify date_bin behaves the same as date_trunc 
for relevant intervals diff --git a/src/test/regress/sql/timestamptz.sql b/src/test/regress/sql/timestamptz.sql index caca3123f13f6..6ace851d16911 100644 --- a/src/test/regress/sql/timestamptz.sql +++ b/src/test/regress/sql/timestamptz.sql @@ -217,15 +217,18 @@ SELECT d1 - timestamp with time zone '1997-01-02' AS diff FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01'; SELECT date_trunc( 'week', timestamp with time zone '2004-02-29 15:44:17.71393' ) AS week_trunc; +SELECT date_trunc( 'week', timestamp with time zone 'infinity' ) AS inf_trunc; +SELECT date_trunc( 'timezone', timestamp with time zone '2004-02-29 15:44:17.71393' ) AS notsupp_trunc; +SELECT date_trunc( 'timezone', timestamp with time zone 'infinity' ) AS notsupp_inf_trunc; SELECT date_trunc( 'ago', timestamp with time zone 'infinity' ) AS invalid_trunc; SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'Australia/Sydney') as sydney_trunc; -- zone name SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'GMT') as gmt_trunc; -- fixed-offset abbreviation SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'VET') as vet_trunc; -- variable-offset abbreviation +SELECT date_trunc('timezone', timestamp with time zone 'infinity', 'GMT') AS notsupp_zone_trunc; +SELECT date_trunc( 'week', timestamp with time zone 'infinity', 'GMT') AS inf_zone_trunc; SELECT date_trunc('ago', timestamp with time zone 'infinity', 'GMT') AS invalid_zone_trunc; - - -- verify date_bin behaves the same as date_trunc for relevant intervals SELECT str, diff --git a/src/test/regress/sql/triggers.sql b/src/test/regress/sql/triggers.sql index d674b25c83be4..5f7f75d7ba5d9 100644 --- a/src/test/regress/sql/triggers.sql +++ b/src/test/regress/sql/triggers.sql @@ -1935,6 +1935,11 @@ BBB 42 CCC 42 \. +-- check detach/reattach behavior; statement triggers with transition tables +-- should not prevent a table from becoming a partition again +alter table parent detach partition child1; +alter table parent attach partition child1 for values in ('AAA'); + -- DML affecting parent sees tuples collected from children even if -- there is no transition table trigger on the children drop trigger child1_insert_trig on child1; @@ -2154,6 +2159,11 @@ copy parent (a, b) from stdin; DDD 42 \. +-- check disinherit/reinherit behavior; statement triggers with transition +-- tables should not prevent a table from becoming an inheritance child again +alter table child1 no inherit parent; +alter table child1 inherit parent; + -- DML affecting parent sees tuples collected from children even if -- there is no transition table trigger on the children drop trigger child1_insert_trig on child1; diff --git a/src/test/regress/sql/vacuum.sql b/src/test/regress/sql/vacuum.sql index a72bdb5b619d9..247b8e23b2357 100644 --- a/src/test/regress/sql/vacuum.sql +++ b/src/test/regress/sql/vacuum.sql @@ -495,3 +495,33 @@ RESET ROLE; DROP TABLE vacowned; DROP TABLE vacowned_parted; DROP ROLE regress_vacuum; + +-- Test checking how new toast values are allocated on rewrite. +-- Create table with plain storage (forces inline storage initially). +CREATE TABLE vac_rewrite_toast (id int, f1 TEXT STORAGE plain); +-- Insert tuple large enough to trigger toast storage on rewrite, still +-- small enough to fit on a page. +INSERT INTO vac_rewrite_toast values (1, repeat('a', 7000)); +-- Switch to external storage to force toast table usage. 
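+-- (EXTERNAL storage keeps values uncompressed, so an oversized value can only
+-- be moved out of line into the toast table.)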
+ALTER TABLE vac_rewrite_toast ALTER COLUMN f1 SET STORAGE EXTERNAL;
+-- This second tuple is toasted; its value should still be the
+-- same after rewrite.
+INSERT INTO vac_rewrite_toast values (2, repeat('a', 7000));
+SELECT pg_column_toast_chunk_id(f1) AS id_2_chunk FROM vac_rewrite_toast
+  WHERE id = 2 \gset
+-- Check initial state of the data.
+SELECT id, pg_column_toast_chunk_id(f1) IS NULL AS f1_chunk_null,
+  substr(f1, 5, 10) AS f1_data,
+  pg_column_compression(f1) AS f1_comp
+  FROM vac_rewrite_toast ORDER BY id;
+-- VACUUM FULL forces toast data rewrite.
+VACUUM FULL vac_rewrite_toast;
+-- Check after rewrite.
+SELECT id, pg_column_toast_chunk_id(f1) IS NULL AS f1_chunk_null,
+  substr(f1, 5, 10) AS f1_data,
+  pg_column_compression(f1) AS f1_comp
+  FROM vac_rewrite_toast ORDER BY id;
+-- The same value is reused for the tuple toasted before the rewrite.
+SELECT pg_column_toast_chunk_id(f1) = :'id_2_chunk' AS same_chunk
+  FROM vac_rewrite_toast WHERE id = 2;
+DROP TABLE vac_rewrite_toast;
diff --git a/src/test/subscription/Makefile b/src/test/subscription/Makefile
index 50b65d8f6ea21..9d97e7d5c0d6d 100644
--- a/src/test/subscription/Makefile
+++ b/src/test/subscription/Makefile
@@ -13,9 +13,11 @@ subdir = src/test/subscription
 top_builddir = ../../..
 include $(top_builddir)/src/Makefile.global
-EXTRA_INSTALL = contrib/hstore
+EXTRA_INSTALL = contrib/hstore \
+	src/test/modules/injection_points
 export with_icu
+export enable_injection_points
 check:
 	$(prove_check)
diff --git a/src/test/subscription/meson.build b/src/test/subscription/meson.build
index 586ffba434e11..20b4e523d9307 100644
--- a/src/test/subscription/meson.build
+++ b/src/test/subscription/meson.build
@@ -5,7 +5,10 @@ tests += {
   'sd': meson.current_source_dir(),
   'bd': meson.current_build_dir(),
   'tap': {
-    'env': {'with_icu': icu.found() ? 'yes' : 'no'},
+    'env': {
+      'with_icu': icu.found() ? 'yes' : 'no',
+      'enable_injection_points': get_option('injection_points') ? 
'yes' : 'no', + }, 'tests': [ 't/001_rep_changes.pl', 't/002_types.pl', diff --git a/src/test/subscription/t/001_rep_changes.pl b/src/test/subscription/t/001_rep_changes.pl index 916fdb48b3b34..ca55d8df50d84 100644 --- a/src/test/subscription/t/001_rep_changes.pl +++ b/src/test/subscription/t/001_rep_changes.pl @@ -365,10 +365,10 @@ my $logfile = slurp_file($node_subscriber->logfile, $log_location); ok( $logfile =~ - qr/conflict detected on relation "public.tab_full_pk": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote tuple \(1, quux\); replica identity \(a\)=\(1\)/m, + qr/conflict detected on relation "public.tab_full_pk": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(1, quux\); replica identity \(a\)=\(1\)/m, 'update target row is missing'); ok( $logfile =~ - qr/conflict detected on relation "public.tab_full": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote tuple \(26\); replica identity full \(25\)/m, + qr/conflict detected on relation "public.tab_full": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(26\); replica identity full \(25\)/m, 'update target row is missing'); ok( $logfile =~ qr/conflict detected on relation "public.tab_full_pk": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(2\)/m, diff --git a/src/test/subscription/t/013_partition.pl b/src/test/subscription/t/013_partition.pl index 4f78dd48815f0..763a91e75a3ea 100644 --- a/src/test/subscription/t/013_partition.pl +++ b/src/test/subscription/t/013_partition.pl @@ -368,7 +368,7 @@ BEGIN my $logfile = slurp_file($node_subscriber1->logfile(), $log_location); ok( $logfile =~ - qr/conflict detected on relation "public.tab1_2_2": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote tuple \(null, 4, quux\); replica identity \(a\)=\(4\)/, + qr/conflict detected on relation "public.tab1_2_2": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(null, 4, quux\); replica identity \(a\)=\(4\)/, 'update target row is missing in tab1_2_2'); ok( $logfile =~ qr/conflict detected on relation "public.tab1_1": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(1\)/, @@ -781,7 +781,7 @@ BEGIN $logfile = slurp_file($node_subscriber1->logfile(), $log_location); ok( $logfile =~ - qr/conflict detected on relation "public.tab2_1": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote tuple \(pub_tab2, quux, 5\); replica identity \(a\)=\(5\)/, + qr/conflict detected on relation "public.tab2_1": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(pub_tab2, quux, 5\); replica identity \(a\)=\(5\)/, 'update target row is missing in tab2_1'); ok( $logfile =~ qr/conflict detected on relation "public.tab2_1": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(1\)/, @@ -802,8 +802,8 @@ BEGIN $logfile = slurp_file($node_subscriber1->logfile(), $log_location); ok( $logfile =~ - qr/conflict detected on relation "public.tab2_1": conflict=update_origin_differs.*\n.*DETAIL:.* Updating the row that was modified locally in transaction [0-9]+ at .*\n.*Existing local tuple \(yyy, null, 3\); remote tuple \(pub_tab2, quux, 3\); replica identity \(a\)=\(3\)/, - 'updating a tuple that 
was modified by a different origin'); + qr/conflict detected on relation "public.tab2_1": conflict=update_origin_differs.*\n.*DETAIL:.* Updating the row that was modified locally in transaction [0-9]+ at .*\n.*Existing local row \(yyy, null, 3\); remote row \(pub_tab2, quux, 3\); replica identity \(a\)=\(3\)/, + 'updating a row that was modified by a different origin'); # The remaining tests no longer test conflict detection. $node_subscriber1->append_conf('postgresql.conf', diff --git a/src/test/subscription/t/029_on_error.pl b/src/test/subscription/t/029_on_error.pl index 243662e1240c8..b59d20599fd23 100644 --- a/src/test/subscription/t/029_on_error.pl +++ b/src/test/subscription/t/029_on_error.pl @@ -30,7 +30,7 @@ sub test_skip_lsn # ERROR with its CONTEXT when retrieving this information. my $contents = slurp_file($node_subscriber->logfile, $offset); $contents =~ - qr/conflict detected on relation "public.tbl".*\n.*DETAIL:.* Key already exists in unique index "tbl_pkey", modified by .*origin.* transaction \d+ at .*\n.*Key \(i\)=\(\d+\); existing local tuple .*; remote tuple .*\n.*CONTEXT:.* for replication target relation "public.tbl" in transaction \d+, finished at ([[:xdigit:]]+\/[[:xdigit:]]+)/m + qr/conflict detected on relation "public.tbl".*\n.*DETAIL:.* Key already exists in unique index "tbl_pkey", modified by .*origin.* transaction \d+ at .*\n.*Key \(i\)=\(\d+\); existing local row .*; remote row .*\n.*CONTEXT:.* for replication target relation "public.tbl" in transaction \d+, finished at ([[:xdigit:]]+\/[[:xdigit:]]+)/m or die "could not get error-LSN"; my $lsn = $1; diff --git a/src/test/subscription/t/030_origin.pl b/src/test/subscription/t/030_origin.pl index 5b82848e5e6d3..ec6518ca01004 100644 --- a/src/test/subscription/t/030_origin.pl +++ b/src/test/subscription/t/030_origin.pl @@ -163,7 +163,7 @@ $node_C->safe_psql('postgres', "UPDATE tab SET a = 33 WHERE a = 32;"); $node_B->wait_for_log( - qr/conflict detected on relation "public.tab": conflict=update_origin_differs.*\n.*DETAIL:.* Updating the row that was modified by a different origin ".*" in transaction [0-9]+ at .*\n.*Existing local tuple \(32\); remote tuple \(33\); replica identity \(a\)=\(32\)/ + qr/conflict detected on relation "public.tab": conflict=update_origin_differs.*\n.*DETAIL:.* Updating the row that was modified by a different origin ".*" in transaction [0-9]+ at .*\n.*Existing local row \(32\); remote row \(33\); replica identity \(a\)=\(32\)/ ); $node_B->safe_psql('postgres', "DELETE FROM tab;"); @@ -179,7 +179,7 @@ $node_C->safe_psql('postgres', "DELETE FROM tab WHERE a = 33;"); $node_B->wait_for_log( - qr/conflict detected on relation "public.tab": conflict=delete_origin_differs.*\n.*DETAIL:.* Deleting the row that was modified by a different origin ".*" in transaction [0-9]+ at .*\n.*Existing local tuple \(33\); replica identity \(a\)=\(33\)/ + qr/conflict detected on relation "public.tab": conflict=delete_origin_differs.*\n.*DETAIL:.* Deleting the row that was modified by a different origin ".*" in transaction [0-9]+ at .*\n.*Existing local row \(33\); replica identity \(a\)=\(33\)/ ); # The remaining tests no longer test conflict detection. 
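+
+# (A note on the log-matching idiom used by these tests, assuming the
+# PostgreSQL::Test::Cluster API: wait_for_log() polls the server log until the
+# regexp matches, optionally starting from a byte offset such as
+#   my $offset = -s $node_B->logfile;
+# so that earlier occurrences of the same message are ignored.)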
diff --git a/src/test/subscription/t/035_conflicts.pl b/src/test/subscription/t/035_conflicts.pl index 976d53a870e5e..db0d5b464e825 100644 --- a/src/test/subscription/t/035_conflicts.pl +++ b/src/test/subscription/t/035_conflicts.pl @@ -79,11 +79,11 @@ $node_subscriber->wait_for_log( qr/conflict detected on relation \"public.conf_tab\": conflict=multiple_unique_conflicts.* .*Key already exists in unique index \"conf_tab_pkey\".* -.*Key \(a\)=\(2\); existing local tuple \(2, 2, 2\); remote tuple \(2, 3, 4\).* +.*Key \(a\)=\(2\); existing local row \(2, 2, 2\); remote row \(2, 3, 4\).* .*Key already exists in unique index \"conf_tab_b_key\".* -.*Key \(b\)=\(3\); existing local tuple \(3, 3, 3\); remote tuple \(2, 3, 4\).* +.*Key \(b\)=\(3\); existing local row \(3, 3, 3\); remote row \(2, 3, 4\).* .*Key already exists in unique index \"conf_tab_c_key\".* -.*Key \(c\)=\(4\); existing local tuple \(4, 4, 4\); remote tuple \(2, 3, 4\)./, +.*Key \(c\)=\(4\); existing local row \(4, 4, 4\); remote row \(2, 3, 4\)./, $log_offset); pass('multiple_unique_conflicts detected during insert'); @@ -111,11 +111,11 @@ $node_subscriber->wait_for_log( qr/conflict detected on relation \"public.conf_tab\": conflict=multiple_unique_conflicts.* .*Key already exists in unique index \"conf_tab_pkey\".* -.*Key \(a\)=\(6\); existing local tuple \(6, 6, 6\); remote tuple \(6, 7, 8\).* +.*Key \(a\)=\(6\); existing local row \(6, 6, 6\); remote row \(6, 7, 8\).* .*Key already exists in unique index \"conf_tab_b_key\".* -.*Key \(b\)=\(7\); existing local tuple \(7, 7, 7\); remote tuple \(6, 7, 8\).* +.*Key \(b\)=\(7\); existing local row \(7, 7, 7\); remote row \(6, 7, 8\).* .*Key already exists in unique index \"conf_tab_c_key\".* -.*Key \(c\)=\(8\); existing local tuple \(8, 8, 8\); remote tuple \(6, 7, 8\)./, +.*Key \(c\)=\(8\); existing local row \(8, 8, 8\); remote row \(6, 7, 8\)./, $log_offset); pass('multiple_unique_conflicts detected during update'); @@ -139,9 +139,9 @@ $node_subscriber->wait_for_log( qr/conflict detected on relation \"public.conf_tab_2_p1\": conflict=multiple_unique_conflicts.* .*Key already exists in unique index \"conf_tab_2_p1_pkey\".* -.*Key \(a\)=\(55\); existing local tuple \(55, 2, 3\); remote tuple \(55, 2, 3\).* +.*Key \(a\)=\(55\); existing local row \(55, 2, 3\); remote row \(55, 2, 3\).* .*Key already exists in unique index \"conf_tab_2_p1_a_b_key\".* -.*Key \(a, b\)=\(55, 2\); existing local tuple \(55, 2, 3\); remote tuple \(55, 2, 3\)./, +.*Key \(a, b\)=\(55, 2\); existing local row \(55, 2, 3\); remote row \(55, 2, 3\)./, $log_offset); pass('multiple_unique_conflicts detected on a leaf partition during insert'); @@ -150,7 +150,9 @@ # Setup a bidirectional logical replication between node_A & node_B ############################################################################### -# Initialize nodes. +# Initialize nodes. Enable the track_commit_timestamp on both nodes to detect +# the conflict when attempting to update a row that was previously modified by +# a different origin. # node_A. Increase the log_min_messages setting to DEBUG2 to debug test # failures. 
Disable autovacuum to avoid generating xid that could affect the @@ -158,7 +160,8 @@ my $node_A = $node_publisher; $node_A->append_conf( 'postgresql.conf', - qq{autovacuum = off + qq{track_commit_timestamp = on + autovacuum = off log_min_messages = 'debug2'}); $node_A->restart; @@ -270,6 +273,8 @@ ############################################################################### # Check that dead tuples on node A cannot be cleaned by VACUUM until the # concurrent transactions on Node B have been applied and flushed on Node A. +# Also, check that an update_deleted conflict is detected when updating a row +# that was deleted by a different origin. ############################################################################### # Insert a record @@ -288,6 +293,8 @@ "SELECT count(*) = 0 FROM pg_stat_activity WHERE backend_type = 'logical replication apply worker'" ); +my $log_location = -s $node_B->logfile; + $node_B->safe_psql('postgres', "UPDATE tab SET b = 3 WHERE a = 1;"); $node_A->safe_psql('postgres', "DELETE FROM tab WHERE a = 1;"); @@ -299,10 +306,30 @@ qr/1 are dead but not yet removable/, 'the deleted column is non-removable'); +# Ensure the DELETE is replayed on Node B +$node_A->wait_for_catchup($subname_BA); + +# Check the conflict detected on Node B +my $logfile = slurp_file($node_B->logfile(), $log_location); +ok( $logfile =~ + qr/conflict detected on relation "public.tab": conflict=delete_origin_differs.* +.*DETAIL:.* Deleting the row that was modified locally in transaction [0-9]+ at .* +.*Existing local row \(1, 3\); replica identity \(a\)=\(1\)/, + 'delete target row was modified in tab'); + +$log_location = -s $node_A->logfile; + $node_A->safe_psql( 'postgres', "ALTER SUBSCRIPTION $subname_AB ENABLE;"); $node_B->wait_for_catchup($subname_AB); +$logfile = slurp_file($node_A->logfile(), $log_location); +ok( $logfile =~ + qr/conflict detected on relation "public.tab": conflict=update_deleted.* +.*DETAIL:.* The row to be updated was deleted locally in transaction [0-9]+ at .* +.*Remote row \(1, 3\); replica identity \(a\)=\(1\)/, + 'update target row was deleted in tab'); + # Remember the next transaction ID to be assigned my $next_xid = $node_A->safe_psql('postgres', "SELECT txid_current() + 1;"); @@ -324,6 +351,283 @@ qr/1 removed, 1 remain, 0 are dead but not yet removable/, 'the deleted column is removed'); +############################################################################### +# Ensure that the deleted tuple needed to detect an update_deleted conflict is +# accessible via a sequential table scan. +############################################################################### + +# Drop the primary key from tab on node A and set REPLICA IDENTITY to FULL to +# enforce sequential scanning of the table. 
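+# (With no replica identity index to probe, the apply worker can only locate
+# the row to update or delete by scanning the table sequentially.)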
+$node_A->safe_psql('postgres', "ALTER TABLE tab REPLICA IDENTITY FULL"); +$node_B->safe_psql('postgres', "ALTER TABLE tab REPLICA IDENTITY FULL"); +$node_A->safe_psql('postgres', "ALTER TABLE tab DROP CONSTRAINT tab_pkey;"); + +# Disable the logical replication from node B to node A +$node_A->safe_psql('postgres', "ALTER SUBSCRIPTION $subname_AB DISABLE"); + +# Wait for the apply worker to stop +$node_A->poll_query_until('postgres', + "SELECT count(*) = 0 FROM pg_stat_activity WHERE backend_type = 'logical replication apply worker'" +); + +$node_B->safe_psql('postgres', "UPDATE tab SET b = 4 WHERE a = 2;"); +$node_A->safe_psql('postgres', "DELETE FROM tab WHERE a = 2;"); + +$log_location = -s $node_A->logfile; + +$node_A->safe_psql( + 'postgres', "ALTER SUBSCRIPTION $subname_AB ENABLE;"); +$node_B->wait_for_catchup($subname_AB); + +$logfile = slurp_file($node_A->logfile(), $log_location); +ok( $logfile =~ + qr/conflict detected on relation "public.tab": conflict=update_deleted.* +.*DETAIL:.* The row to be updated was deleted locally in transaction [0-9]+ at .* +.*Remote row \(2, 4\); replica identity full \(2, 2\)/, + 'update target row was deleted in tab'); + +############################################################################### +# Check that the xmin value of the conflict detection slot can be advanced when +# the subscription has no tables. +############################################################################### + +# Remove the table from the publication +$node_B->safe_psql('postgres', "ALTER PUBLICATION tap_pub_B DROP TABLE tab"); + +$node_A->safe_psql('postgres', + "ALTER SUBSCRIPTION $subname_AB REFRESH PUBLICATION"); + +# Remember the next transaction ID to be assigned +$next_xid = $node_A->safe_psql('postgres', "SELECT txid_current() + 1;"); + +# Confirm that the xmin value is advanced to the latest nextXid. If no +# transactions are running, the apply worker selects nextXid as the candidate +# for the non-removable xid. See GetOldestActiveTransactionId(). +ok( $node_A->poll_query_until( + 'postgres', + "SELECT xmin = $next_xid from pg_replication_slots WHERE slot_name = 'pg_conflict_detection'" + ), + "the xmin value of slot 'pg_conflict_detection' is updated on Node A"); + +# Re-add the table to the publication for further tests +$node_B->safe_psql('postgres', "ALTER PUBLICATION tap_pub_B ADD TABLE tab"); + +$node_A->safe_psql('postgres', + "ALTER SUBSCRIPTION $subname_AB REFRESH PUBLICATION WITH (copy_data = false)"); + +############################################################################### +# Test that publisher's transactions marked with DELAY_CHKPT_IN_COMMIT prevent +# concurrently deleted tuples on the subscriber from being removed. This test +# also acts as a safeguard to prevent developers from moving the commit +# timestamp acquisition before marking DELAY_CHKPT_IN_COMMIT in +# RecordTransactionCommitPrepared. +############################################################################### + +my $injection_points_supported = $node_B->check_extension('injection_points'); + +# This test depends on an injection point to block the prepared transaction +# commit after marking DELAY_CHKPT_IN_COMMIT flag. +if ($injection_points_supported != 0) +{ + $node_B->append_conf('postgresql.conf', + "shared_preload_libraries = 'injection_points' + max_prepared_transactions = 1"); + $node_B->restart; + + # Disable the subscription on Node B for testing only one-way + # replication. 
+	$node_B->psql('postgres', "ALTER SUBSCRIPTION $subname_BA DISABLE;");
+
+	# Wait for the apply worker to stop
+	$node_B->poll_query_until('postgres',
+		"SELECT count(*) = 0 FROM pg_stat_activity WHERE backend_type = 'logical replication apply worker'"
+	);
+
+	# Truncate the table to clean up existing dead rows. Then insert
+	# a new row.
+	$node_B->safe_psql(
+		'postgres', qq(
+		TRUNCATE tab;
+		INSERT INTO tab VALUES(1, 1);
+	));
+
+	$node_B->wait_for_catchup($subname_AB);
+
+	# Create the injection_points extension on the publisher node and attach to the
+	# commit-after-delay-checkpoint injection point.
+	$node_B->safe_psql(
+		'postgres',
+		"CREATE EXTENSION injection_points;
+		 SELECT injection_points_attach('commit-after-delay-checkpoint', 'wait');"
+	);
+
+	# Start a background session on the publisher node to perform an update and
+	# pause at the injection point.
+	my $pub_session = $node_B->background_psql('postgres');
+	$pub_session->query_until(
+		qr/starting_bg_psql/,
+		q{
+		\echo starting_bg_psql
+		BEGIN;
+		UPDATE tab SET b = 2 WHERE a = 1;
+		PREPARE TRANSACTION 'txn_with_later_commit_ts';
+		COMMIT PREPARED 'txn_with_later_commit_ts';
+	}
+	);
+
+	# Confirm the update is suspended
+	$result =
+	  $node_B->safe_psql('postgres', 'SELECT * FROM tab WHERE a = 1');
+	is($result, qq(1|1), 'publisher sees the old row');
+
+	# Delete the row on the subscriber. The deleted row should be retained
+	# because a transaction on the publisher is currently marked with the
+	# DELAY_CHKPT_IN_COMMIT flag.
+	$node_A->safe_psql('postgres', "DELETE FROM tab WHERE a = 1;");
+
+	# Get the commit timestamp for the delete
+	my $sub_ts = $node_A->safe_psql('postgres',
+		"SELECT timestamp FROM pg_last_committed_xact();");
+
+	$log_location = -s $node_A->logfile;
+
+	# Confirm that the apply worker keeps requesting publisher status while
+	# waiting for the prepared transaction to commit. Thus, the request
+	# message should appear in the log more than once.
+	$node_A->wait_for_log(
+		qr/sending publisher status request message/,
+		$log_location);
+
+	$log_location = -s $node_A->logfile;
+
+	$node_A->wait_for_log(
+		qr/sending publisher status request message/,
+		$log_location);
+
+	# Confirm that the dead tuple cannot be removed
+	($cmdret, $stdout, $stderr) =
+	  $node_A->psql('postgres', qq(VACUUM (verbose) public.tab;));
+
+	ok($stderr =~ qr/1 are dead but not yet removable/,
+		'the deleted column is non-removable');
+
+	$log_location = -s $node_A->logfile;
+
+	# Wake up and detach the injection point on the publisher node. The
+	# prepared transaction should now commit.
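+	# (injection_points_wakeup() and injection_points_detach() are SQL
+	# functions provided by the injection_points test module; the wakeup
+	# releases the backend waiting at the injection point.)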
+ $node_B->safe_psql( + 'postgres', + "SELECT injection_points_wakeup('commit-after-delay-checkpoint'); + SELECT injection_points_detach('commit-after-delay-checkpoint');" + ); + + # Close the background session on the publisher node + ok($pub_session->quit, "close publisher session"); + + # Confirm that the transaction committed + $result = + $node_B->safe_psql('postgres', 'SELECT * FROM tab WHERE a = 1'); + is($result, qq(1|2), 'publisher sees the new row'); + + # Ensure the UPDATE is replayed on subscriber + $node_B->wait_for_catchup($subname_AB); + + $logfile = slurp_file($node_A->logfile(), $log_location); + ok( $logfile =~ + qr/conflict detected on relation "public.tab": conflict=update_deleted.* +.*DETAIL:.* The row to be updated was deleted locally in transaction [0-9]+ at .* +.*Remote row \(1, 2\); replica identity full \(1, 1\)/, + 'update target row was deleted in tab'); + + # Remember the next transaction ID to be assigned + $next_xid = + $node_A->safe_psql('postgres', "SELECT txid_current() + 1;"); + + # Confirm that the xmin value is advanced to the latest nextXid after the + # prepared transaction on the publisher has been committed. + ok( $node_A->poll_query_until( + 'postgres', + "SELECT xmin = $next_xid from pg_replication_slots WHERE slot_name = 'pg_conflict_detection'" + ), + "the xmin value of slot 'pg_conflict_detection' is updated on subscriber" + ); + + # Confirm that the dead tuple can be removed now + ($cmdret, $stdout, $stderr) = + $node_A->psql('postgres', qq(VACUUM (verbose) public.tab;)); + + ok($stderr =~ qr/1 removed, 0 remain, 0 are dead but not yet removable/, + 'the deleted column is removed'); + + # Get the commit timestamp for the publisher's update + my $pub_ts = $node_B->safe_psql('postgres', + "SELECT pg_xact_commit_timestamp(xmin) from tab where a=1;"); + + # Check that the commit timestamp for the update on the publisher is later than + # or equal to the timestamp of the local deletion, as the commit timestamp + # should be assigned after marking the DELAY_CHKPT_IN_COMMIT flag. + $result = $node_B->safe_psql('postgres', + "SELECT '$pub_ts'::timestamp >= '$sub_ts'::timestamp"); + is($result, qq(t), + "pub UPDATE's timestamp is later than that of sub's DELETE"); + + # Re-enable the subscription for further tests + $node_B->psql('postgres', "ALTER SUBSCRIPTION $subname_BA ENABLE;"); +} + +############################################################################### +# Check that dead tuple retention stops due to the wait time surpassing +# max_retention_duration. +############################################################################### + +# Create a physical slot +$node_B->safe_psql('postgres', + "SELECT * FROM pg_create_physical_replication_slot('blocker');"); + +# Add the inactive physical slot to synchronized_standby_slots +$node_B->append_conf('postgresql.conf', + "synchronized_standby_slots = 'blocker'"); +$node_B->reload; + +# Enable failover to activate the synchronized_standby_slots setting +$node_A->safe_psql('postgres', "ALTER SUBSCRIPTION $subname_AB DISABLE;"); +$node_A->safe_psql('postgres', "ALTER SUBSCRIPTION $subname_AB SET (failover = true);"); +$node_A->safe_psql('postgres', "ALTER SUBSCRIPTION $subname_AB ENABLE;"); + +# Insert a record +$node_B->safe_psql('postgres', "INSERT INTO tab VALUES (5, 5);"); + +# Advance the xid on Node A to trigger the next cycle of oldest_nonremovable_xid +# advancement. 
+$node_A->safe_psql('postgres', "SELECT txid_current() + 1;"); + +$log_offset = -s $node_A->logfile; + +# Set max_retention_duration to a minimal value to initiate retention stop. +$node_A->safe_psql('postgres', + "ALTER SUBSCRIPTION $subname_AB SET (max_retention_duration = 1);"); + +# Confirm that the retention is stopped +$node_A->wait_for_log( + qr/logical replication worker for subscription "tap_sub_a_b" has stopped retaining the information for detecting conflicts/, + $log_offset); + +ok( $node_A->poll_query_until( + 'postgres', + "SELECT xmin IS NULL from pg_replication_slots WHERE slot_name = 'pg_conflict_detection'" + ), + "the xmin value of slot 'pg_conflict_detection' is invalid on Node A"); + +$result = $node_A->safe_psql('postgres', + "SELECT subretentionactive FROM pg_subscription WHERE subname='$subname_AB';"); +is($result, qq(f), 'retention is inactive'); + +# Drop the physical slot and reset the synchronized_standby_slots setting +$node_B->safe_psql('postgres', + "SELECT * FROM pg_drop_replication_slot('blocker');"); +$node_B->adjust_conf('postgresql.conf', 'synchronized_standby_slots', "''"); +$node_B->reload; + ############################################################################### # Check that the replication slot pg_conflict_detection is dropped after # removing all the subscriptions. diff --git a/src/test/subscription/t/100_bugs.pl b/src/test/subscription/t/100_bugs.pl index 5e3577011833b..5022305491807 100644 --- a/src/test/subscription/t/100_bugs.pl +++ b/src/test/subscription/t/100_bugs.pl @@ -575,4 +575,34 @@ BEGIN $node_publisher->stop('fast'); $node_subscriber->stop('fast'); +# BUG #18988 +# The bug happened due to a self-deadlock between the DROP SUBSCRIPTION +# command and the walsender process for accessing pg_subscription. This +# occurred when DROP SUBSCRIPTION attempted to remove a replication slot by +# connecting to a newly created database whose caches are not yet +# initialized. +# +# The bug is fixed by reducing the lock-level during DROP SUBSCRIPTION. +$node_publisher->start(); + +$publisher_connstr = $node_publisher->connstr . ' dbname=regress_db'; +$node_publisher->safe_psql( + 'postgres', qq( + CREATE DATABASE regress_db; + CREATE SUBSCRIPTION regress_sub1 CONNECTION '$publisher_connstr' PUBLICATION regress_pub WITH (connect=false); +)); + +my ($ret, $stdout, $stderr) = + $node_publisher->psql('postgres', q{DROP SUBSCRIPTION regress_sub1}); + +isnt($ret, 0, "replication slot does not exist: exit code not 0"); +like( + $stderr, + qr/ERROR: could not drop replication slot "regress_sub1" on publisher/, + "could not drop replication slot: error message"); + +$node_publisher->safe_psql('postgres', "DROP DATABASE regress_db"); + +$node_publisher->stop('fast'); + done_testing(); diff --git a/src/timezone/pgtz.c b/src/timezone/pgtz.c index 671b4d76237d5..504c0235ffbcb 100644 --- a/src/timezone/pgtz.c +++ b/src/timezone/pgtz.c @@ -364,8 +364,8 @@ pg_timezone_initialize(void) * We may not yet know where PGSHAREDIR is (in particular this is true in * an EXEC_BACKEND subprocess). So use "GMT", which pg_tzset forces to be * interpreted without reference to the filesystem. This corresponds to - * the bootstrap default for these variables in guc_tables.c, although in - * principle it could be different. + * the bootstrap default for these variables in guc_parameters.dat, + * although in principle it could be different. 
*/ session_timezone = pg_tzset("GMT"); log_timezone = session_timezone; diff --git a/src/tools/add_commit_links.pl b/src/tools/add_commit_links.pl index 710a649203209..87a6e47e6fead 100755 --- a/src/tools/add_commit_links.pl +++ b/src/tools/add_commit_links.pl @@ -50,6 +50,8 @@ sub process_file # Get major version number from the file name. $file =~ m/-(\d+)\./; my $major_version = $1; + die "file name $file is not in the expected format\n" + unless defined $major_version; open(my $fh, '<', $file) || die "could not open file $file: $!\n"; open(my $tfh, '>', $tmpfile) || die "could not open file $tmpfile: $!\n"; diff --git a/src/tools/ci/README b/src/tools/ci/README index 12c1e7c308fa9..d183648a8d02b 100644 --- a/src/tools/ci/README +++ b/src/tools/ci/README @@ -82,3 +82,14 @@ defined in .cirrus.yml, by redefining the relevant yaml anchors. Custom compute resources can be provided using - https://cirrus-ci.org/guide/supported-computing-services/ - https://cirrus-ci.org/guide/persistent-workers/ + + +Enabling manual tasks by default +================================ + +Some tasks are not triggered automatically by default, to avoid using up CI +credits too quickly. This can be changed on the repository level, e.g. when +custom compute resources are configured. + +The following repository level environment variables are recognized: +- REPO_CI_AUTOMATIC_TRIGGER_TASKS - space-separated list of (mingw|netbsd|openbsd) diff --git a/src/tools/gen_keywordlist.pl b/src/tools/gen_keywordlist.pl index 6ec83ff33f9a9..bcb0d8027a04d 100644 --- a/src/tools/gen_keywordlist.pl +++ b/src/tools/gen_keywordlist.pl @@ -169,7 +169,16 @@ # Emit the struct that wraps all this lookup info into one variable. -printf $kwdef "static " if !$extern; +if ($extern) +{ + # redundant declaration to silence -Wmissing-variable-declarations + printf $kwdef "extern PGDLLIMPORT const ScanKeywordList %s;\n\n", + $varname; +} +else +{ + printf $kwdef "static "; +} printf $kwdef "const ScanKeywordList %s = {\n", $varname; printf $kwdef qq|\t%s_kw_string,\n|, $varname; printf $kwdef qq|\t%s_kw_offsets,\n|, $varname; diff --git a/src/tools/pginclude/README b/src/tools/pginclude/README index 2f8fe6b78baa5..944bcb01c64dc 100644 --- a/src/tools/pginclude/README +++ b/src/tools/pginclude/README @@ -55,8 +55,12 @@ and are skipped by the headerscheck script. The easy way to run the script is to say "make -s headerscheck" in the top-level build directory after completing a build. You should -have included "--with-perl --with-python" in your configure options, -else you're likely to get errors about related headers not being found. +have included at least + + --with-llvm --with-perl --with-python + +in your configure options, else you're likely to get errors about +related headers not being found. A limitation of the current script is that it doesn't know exactly which headers are for frontend or backend; when in doubt it uses postgres.h as @@ -78,8 +82,12 @@ and are skipped by the script in the --cplusplus mode. The easy way to run the script is to say "make -s cpluspluscheck" in the top-level build directory after completing a build. You should -have included "--with-perl --with-python" in your configure options, -else you're likely to get errors about related headers not being found. +have included at least + + --with-llvm --with-perl --with-python + +in your configure options, else you're likely to get errors about +related headers not being found. 
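+
+For example, a sequence like the following (illustrative only; any
+equivalent configuration works) should run the check cleanly:
+
+  ./configure --with-llvm --with-perl --with-python
+  make
+  make -s cpluspluscheck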
If you are using a non-g++-compatible C++ compiler, you may need to override the script's CXXFLAGS setting by setting a suitable environment diff --git a/src/tools/pginclude/headerscheck b/src/tools/pginclude/headerscheck index 9e86d0493621e..17138a7569e4f 100755 --- a/src/tools/pginclude/headerscheck +++ b/src/tools/pginclude/headerscheck @@ -44,6 +44,7 @@ CXXFLAGS=${CXXFLAGS:- -fsyntax-only -Wall} MGLOB="$builddir/src/Makefile.global" CPPFLAGS=`sed -n 's/^CPPFLAGS[ ]*=[ ]*//p' "$MGLOB"` CFLAGS=`sed -n 's/^CFLAGS[ ]*=[ ]*//p' "$MGLOB"` +ICU_CFLAGS=`sed -n 's/^ICU_CFLAGS[ ]*=[ ]*//p' "$MGLOB"` CC=`sed -n 's/^CC[ ]*=[ ]*//p' "$MGLOB"` CXX=`sed -n 's/^CXX[ ]*=[ ]*//p' "$MGLOB"` PG_SYSROOT=`sed -n 's/^PG_SYSROOT[ ]*=[ ]*//p' "$MGLOB"` @@ -64,11 +65,11 @@ if $cplusplus; then -I*|-D*) CXXPPFLAGS="$CXXPPFLAGS $flag";; esac done - COMPILER_FLAGS="$CXXPPFLAGS $CXXFLAGS" + COMPILER_FLAGS="$CXXPPFLAGS $CXXFLAGS $ICU_CFLAGS" else ext=c COMPILER=${CC:-gcc} - COMPILER_FLAGS="$CPPFLAGS $CFLAGS" + COMPILER_FLAGS="$CPPFLAGS $CFLAGS $ICU_CFLAGS" fi # Create temp directory. @@ -97,7 +98,7 @@ do # Additional Windows-specific headers. test "$f" = src/include/port/win32_port.h && continue test "$f" = src/include/port/win32/netdb.h && continue - $cplusplus && test "$f" = src/include/port/win32/sys/resource.h && continue + test "$f" = src/include/port/win32/sys/resource.h && continue test "$f" = src/include/port/win32/sys/socket.h && continue test "$f" = src/include/port/win32_msvc/dirent.h && continue test "$f" = src/include/port/win32_msvc/utime.h && continue diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index 3daba26b23723..a13e816289023 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -391,7 +391,6 @@ CachedFunctionHashEntry CachedFunctionHashKey CachedPlan CachedPlanSource -CachedPlanType CallContext CallStmt CancelRequestPacket @@ -1991,6 +1990,7 @@ PLyScalarToOb PLySubtransactionData PLySubtransactionObject PLyTransformToOb +PLyTrigType PLyTupleToOb PLyUnicode_FromStringAndSize_t PLy_elog_impl_t @@ -2276,6 +2276,7 @@ PlanInvalItem PlanRowMark PlanState PlannedStmt +PlannedStmtOrigin PlannerGlobal PlannerInfo PlannerParamItem @@ -3159,7 +3160,6 @@ UnicodeNormalizationForm UnicodeNormalizationQC Unique UniquePath -UniquePathMethod UniqueRelInfo UniqueState UnlistenStmt @@ -3175,7 +3175,6 @@ UpgradeTaskSlotState UpgradeTaskStep UploadManifestCmd UpperRelationKind -UpperUniquePath UserAuth UserContext UserMapping diff --git a/src/tools/testint128.c b/src/tools/testint128.c deleted file mode 100644 index a25631e277d2e..0000000000000 --- a/src/tools/testint128.c +++ /dev/null @@ -1,170 +0,0 @@ -/*------------------------------------------------------------------------- - * - * testint128.c - * Testbed for roll-our-own 128-bit integer arithmetic. - * - * This is a standalone test program that compares the behavior of an - * implementation in int128.h to an (assumed correct) int128 native type. - * - * Copyright (c) 2017-2025, PostgreSQL Global Development Group - * - * - * IDENTIFICATION - * src/tools/testint128.c - * - *------------------------------------------------------------------------- - */ - -#include "postgres_fe.h" - -/* - * By default, we test the non-native implementation in int128.h; but - * by predefining USE_NATIVE_INT128 to 1, you can test the native - * implementation, just to be sure. 
- */ -#ifndef USE_NATIVE_INT128 -#define USE_NATIVE_INT128 0 -#endif - -#include "common/int128.h" -#include "common/pg_prng.h" - -/* - * We assume the parts of this union are laid out compatibly. - */ -typedef union -{ - int128 i128; - INT128 I128; - union - { -#ifdef WORDS_BIGENDIAN - int64 hi; - uint64 lo; -#else - uint64 lo; - int64 hi; -#endif - } hl; -} test128; - - -/* - * Control version of comparator. - */ -static inline int -my_int128_compare(int128 x, int128 y) -{ - if (x < y) - return -1; - if (x > y) - return 1; - return 0; -} - -/* - * Main program. - * - * Generates a lot of random numbers and tests the implementation for each. - * The results should be reproducible, since we use a fixed PRNG seed. - * - * You can give a loop count if you don't like the default 1B iterations. - */ -int -main(int argc, char **argv) -{ - long count; - - pg_prng_seed(&pg_global_prng_state, 0); - - if (argc >= 2) - count = strtol(argv[1], NULL, 0); - else - count = 1000000000; - - while (count-- > 0) - { - int64 x = pg_prng_uint64(&pg_global_prng_state); - int64 y = pg_prng_uint64(&pg_global_prng_state); - int64 z = pg_prng_uint64(&pg_global_prng_state); - test128 t1; - test128 t2; - - /* check unsigned addition */ - t1.hl.hi = x; - t1.hl.lo = y; - t2 = t1; - t1.i128 += (int128) (uint64) z; - int128_add_uint64(&t2.I128, (uint64) z); - - if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo) - { - printf("%016lX%016lX + unsigned %lX\n", x, y, z); - printf("native = %016lX%016lX\n", t1.hl.hi, t1.hl.lo); - printf("result = %016lX%016lX\n", t2.hl.hi, t2.hl.lo); - return 1; - } - - /* check signed addition */ - t1.hl.hi = x; - t1.hl.lo = y; - t2 = t1; - t1.i128 += (int128) z; - int128_add_int64(&t2.I128, z); - - if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo) - { - printf("%016lX%016lX + signed %lX\n", x, y, z); - printf("native = %016lX%016lX\n", t1.hl.hi, t1.hl.lo); - printf("result = %016lX%016lX\n", t2.hl.hi, t2.hl.lo); - return 1; - } - - /* check multiplication */ - t1.i128 = (int128) x * (int128) y; - - t2.hl.hi = t2.hl.lo = 0; - int128_add_int64_mul_int64(&t2.I128, x, y); - - if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo) - { - printf("%lX * %lX\n", x, y); - printf("native = %016lX%016lX\n", t1.hl.hi, t1.hl.lo); - printf("result = %016lX%016lX\n", t2.hl.hi, t2.hl.lo); - return 1; - } - - /* check comparison */ - t1.hl.hi = x; - t1.hl.lo = y; - t2.hl.hi = z; - t2.hl.lo = pg_prng_uint64(&pg_global_prng_state); - - if (my_int128_compare(t1.i128, t2.i128) != - int128_compare(t1.I128, t2.I128)) - { - printf("comparison failure: %d vs %d\n", - my_int128_compare(t1.i128, t2.i128), - int128_compare(t1.I128, t2.I128)); - printf("arg1 = %016lX%016lX\n", t1.hl.hi, t1.hl.lo); - printf("arg2 = %016lX%016lX\n", t2.hl.hi, t2.hl.lo); - return 1; - } - - /* check case with identical hi parts; above will hardly ever hit it */ - t2.hl.hi = x; - - if (my_int128_compare(t1.i128, t2.i128) != - int128_compare(t1.I128, t2.I128)) - { - printf("comparison failure: %d vs %d\n", - my_int128_compare(t1.i128, t2.i128), - int128_compare(t1.I128, t2.I128)); - printf("arg1 = %016lX%016lX\n", t1.hl.hi, t1.hl.lo); - printf("arg2 = %016lX%016lX\n", t2.hl.hi, t2.hl.lo); - return 1; - } - } - - return 0; -} diff --git a/src/tools/valgrind.supp b/src/tools/valgrind.supp index 2ad5b81526d3f..3880007dfb3bb 100644 --- a/src/tools/valgrind.supp +++ b/src/tools/valgrind.supp @@ -194,3 +194,36 @@ Memcheck:Addr8 fun:pg_numa_touch_mem_if_required } + + +# Memory-leak suppressions +# Note that a suppression rule will silence complaints 
about memory blocks +# allocated in matching places, but it won't prevent "indirectly lost" +# complaints about blocks that are only reachable via the suppressed blocks. + +# Suppress complaints about stuff leaked during function cache loading. +# Both the PL/pgSQL and SQL-function parsing processes generate some cruft +# within the function's cache context, which doesn't seem worth the trouble +# to get rid of. Moreover, there are cases where CachedFunction structs +# are intentionally leaked because we're unsure if any fn_extra pointers +# remain. +{ + hide_function_cache_leaks + Memcheck:Leak + match-leak-kinds: definite,possible,indirect + + ... + fun:cached_function_compile +} + +# Suppress complaints about stuff leaked during TS dictionary loading. +# Not very much is typically lost there, and preventing it would +# require a risky API change for TS tmplinit functions. +{ + hide_ts_dictionary_leaks + Memcheck:Leak + match-leak-kinds: definite,possible,indirect + + ... + fun:lookup_ts_dictionary_cache +}
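+
+# (Illustrative usage, assuming a server built for Valgrind testing: the
+# suppressions take effect when this file is passed to valgrind, e.g.
+#   valgrind --leak-check=full --suppressions=src/tools/valgrind.supp \
+#     postgres -D $PGDATA
+# adjust the command line to whatever server invocation is being tested.)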